rebase: update kubernetes to v1.20.0

Updated the Kubernetes packages to the latest release.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
commit 83559144b1 (parent 4abe128bd8)
Author: Madhu Rajanna <madhupr007@gmail.com>
Date:   2020-12-17 17:58:29 +05:30
Committed-by: mergify[bot]

1624 changed files with 247222 additions and 160270 deletions


@ -1,310 +1,274 @@
{
"Rules": [
{
"SelectorRegexp": "k8s[.]io/kubernetes/pkg/",
"AllowedPrefixes": [
"k8s.io/kubernetes/pkg/api/legacyscheme",
"k8s.io/kubernetes/pkg/api/service",
"k8s.io/kubernetes/pkg/api/v1/pod",
"k8s.io/kubernetes/pkg/api/v1/resource",
"k8s.io/kubernetes/pkg/api/v1/service",
"k8s.io/kubernetes/pkg/apis/apps",
"k8s.io/kubernetes/pkg/apis/apps/validation",
"k8s.io/kubernetes/pkg/apis/autoscaling",
"k8s.io/kubernetes/pkg/apis/batch",
"k8s.io/kubernetes/pkg/apis/core",
"k8s.io/kubernetes/pkg/apis/core/helper",
"k8s.io/kubernetes/pkg/apis/core/install",
"k8s.io/kubernetes/pkg/apis/core/pods",
"k8s.io/kubernetes/pkg/apis/core/v1",
"k8s.io/kubernetes/pkg/apis/core/v1/helper",
"k8s.io/kubernetes/pkg/apis/core/v1/helper/qos",
"k8s.io/kubernetes/pkg/apis/core/validation",
"k8s.io/kubernetes/pkg/apis/extensions",
"k8s.io/kubernetes/pkg/apis/networking",
"k8s.io/kubernetes/pkg/apis/policy",
"k8s.io/kubernetes/pkg/apis/policy/validation",
"k8s.io/kubernetes/pkg/apis/scheduling",
"k8s.io/kubernetes/pkg/apis/storage/v1/util",
"k8s.io/kubernetes/pkg/capabilities",
"k8s.io/kubernetes/pkg/client/conditions",
"k8s.io/kubernetes/pkg/cloudprovider/providers",
"k8s.io/kubernetes/pkg/controller",
"k8s.io/kubernetes/pkg/controller/deployment/util",
"k8s.io/kubernetes/pkg/controller/nodelifecycle",
"k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler",
"k8s.io/kubernetes/pkg/controller/service",
"k8s.io/kubernetes/pkg/controller/util/node",
"k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util",
"k8s.io/kubernetes/pkg/controller/volume/scheduling",
"k8s.io/kubernetes/pkg/credentialprovider",
"k8s.io/kubernetes/pkg/credentialprovider/aws",
"k8s.io/kubernetes/pkg/credentialprovider/azure",
"k8s.io/kubernetes/pkg/credentialprovider/gcp",
"k8s.io/kubernetes/pkg/credentialprovider/secrets",
"k8s.io/kubernetes/pkg/features",
"k8s.io/kubernetes/pkg/fieldpath",
"k8s.io/kubernetes/pkg/kubectl",
"k8s.io/kubernetes/pkg/kubectl/apps",
"k8s.io/kubernetes/pkg/kubectl/describe",
"k8s.io/kubernetes/pkg/kubectl/describe/versioned",
"k8s.io/kubernetes/pkg/kubectl/scheme",
"k8s.io/kubernetes/pkg/kubectl/util",
"k8s.io/kubernetes/pkg/kubectl/util/certificate",
"k8s.io/kubernetes/pkg/kubectl/util/deployment",
"k8s.io/kubernetes/pkg/kubectl/util/event",
"k8s.io/kubernetes/pkg/kubectl/util/fieldpath",
"k8s.io/kubernetes/pkg/kubectl/util/podutils",
"k8s.io/kubernetes/pkg/kubectl/util/qos",
"k8s.io/kubernetes/pkg/kubectl/util/rbac",
"k8s.io/kubernetes/pkg/kubectl/util/resource",
"k8s.io/kubernetes/pkg/kubectl/util/slice",
"k8s.io/kubernetes/pkg/kubectl/util/storage",
"k8s.io/kubernetes/pkg/kubelet",
"k8s.io/kubernetes/pkg/kubelet/apis",
"k8s.io/kubernetes/pkg/kubelet/apis/config",
"k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1",
"k8s.io/kubernetes/pkg/kubelet/cadvisor",
"k8s.io/kubernetes/pkg/kubelet/certificate",
"k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap",
"k8s.io/kubernetes/pkg/kubelet/checkpoint",
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager",
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum",
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors",
"k8s.io/kubernetes/pkg/kubelet/cloudresource",
"k8s.io/kubernetes/pkg/kubelet/cm",
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager",
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/containermap",
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state",
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology",
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset",
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager",
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint",
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager",
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask",
"k8s.io/kubernetes/pkg/kubelet/cm/util",
"k8s.io/kubernetes/pkg/kubelet/config",
"k8s.io/kubernetes/pkg/kubelet/configmap",
"k8s.io/kubernetes/pkg/kubelet/container",
"k8s.io/kubernetes/pkg/kubelet/dockershim",
"k8s.io/kubernetes/pkg/kubelet/dockershim/cm",
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker",
"k8s.io/kubernetes/pkg/kubelet/dockershim/metrics",
"k8s.io/kubernetes/pkg/kubelet/dockershim/network",
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni",
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport",
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet",
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics",
"k8s.io/kubernetes/pkg/kubelet/dockershim/remote",
"k8s.io/kubernetes/pkg/kubelet/envvars",
"k8s.io/kubernetes/pkg/kubelet/eviction",
"k8s.io/kubernetes/pkg/kubelet/eviction/api",
"k8s.io/kubernetes/pkg/kubelet/events",
"k8s.io/kubernetes/pkg/kubelet/images",
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig",
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint",
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store",
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles",
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status",
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec",
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files",
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log",
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic",
"k8s.io/kubernetes/pkg/kubelet/kuberuntime",
"k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs",
"k8s.io/kubernetes/pkg/kubelet/leaky",
"k8s.io/kubernetes/pkg/kubelet/lifecycle",
"k8s.io/kubernetes/pkg/kubelet/logs",
"k8s.io/kubernetes/pkg/kubelet/metrics",
"k8s.io/kubernetes/pkg/kubelet/network/dns",
"k8s.io/kubernetes/pkg/kubelet/nodelease",
"k8s.io/kubernetes/pkg/kubelet/nodestatus",
"k8s.io/kubernetes/pkg/kubelet/oom",
"k8s.io/kubernetes/pkg/kubelet/pleg",
"k8s.io/kubernetes/pkg/kubelet/pluginmanager",
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache",
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/metrics",
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor",
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher",
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1",
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2",
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler",
"k8s.io/kubernetes/pkg/kubelet/pod",
"k8s.io/kubernetes/pkg/kubelet/preemption",
"k8s.io/kubernetes/pkg/kubelet/prober",
"k8s.io/kubernetes/pkg/kubelet/prober/results",
"k8s.io/kubernetes/pkg/kubelet/qos",
"k8s.io/kubernetes/pkg/kubelet/remote",
"k8s.io/kubernetes/pkg/kubelet/runtimeclass",
"k8s.io/kubernetes/pkg/kubelet/server",
"k8s.io/kubernetes/pkg/kubelet/server/metrics",
"k8s.io/kubernetes/pkg/kubelet/server/portforward",
"k8s.io/kubernetes/pkg/kubelet/server/remotecommand",
"k8s.io/kubernetes/pkg/kubelet/server/stats",
"k8s.io/kubernetes/pkg/kubelet/server/streaming",
"k8s.io/kubernetes/pkg/kubelet/stats",
"k8s.io/kubernetes/pkg/kubelet/stats/pidlimit",
"k8s.io/kubernetes/pkg/kubelet/status",
"k8s.io/kubernetes/pkg/kubelet/secret",
"k8s.io/kubernetes/pkg/kubelet/sysctl",
"k8s.io/kubernetes/pkg/kubelet/types",
"k8s.io/kubernetes/pkg/kubelet/token",
"k8s.io/kubernetes/pkg/kubelet/util",
"k8s.io/kubernetes/pkg/kubelet/util/format",
"k8s.io/kubernetes/pkg/kubelet/util/manager",
"k8s.io/kubernetes/pkg/kubelet/util/store",
"k8s.io/kubernetes/pkg/kubelet/volumemanager",
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache",
"k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics",
"k8s.io/kubernetes/pkg/kubelet/volumemanager/populator",
"k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler",
"k8s.io/kubernetes/pkg/kubemark",
"k8s.io/kubernetes/pkg/master/ports",
"k8s.io/kubernetes/pkg/probe",
"k8s.io/kubernetes/pkg/probe/exec",
"k8s.io/kubernetes/pkg/probe/http",
"k8s.io/kubernetes/pkg/probe/tcp",
"k8s.io/kubernetes/pkg/proxy",
"k8s.io/kubernetes/pkg/proxy/apis",
"k8s.io/kubernetes/pkg/proxy/apis/config",
"k8s.io/kubernetes/pkg/proxy/apis/config/scheme",
"k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1",
"k8s.io/kubernetes/pkg/proxy/apis/config/validation",
"k8s.io/kubernetes/pkg/proxy/config",
"k8s.io/kubernetes/pkg/proxy/healthcheck",
"k8s.io/kubernetes/pkg/proxy/iptables",
"k8s.io/kubernetes/pkg/proxy/ipvs",
"k8s.io/kubernetes/pkg/proxy/metaproxier",
"k8s.io/kubernetes/pkg/proxy/metrics",
"k8s.io/kubernetes/pkg/proxy/userspace",
"k8s.io/kubernetes/pkg/proxy/util",
"k8s.io/kubernetes/pkg/registry/core/service/allocator",
"k8s.io/kubernetes/pkg/registry/core/service/portallocator",
"k8s.io/kubernetes/pkg/scheduler/api",
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper",
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity",
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename",
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports",
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources",
"k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1",
"k8s.io/kubernetes/pkg/scheduler/listers",
"k8s.io/kubernetes/pkg/scheduler/metrics",
"k8s.io/kubernetes/pkg/scheduler/nodeinfo",
"k8s.io/kubernetes/pkg/scheduler/util",
"k8s.io/kubernetes/pkg/scheduler/volumebinder",
"k8s.io/kubernetes/pkg/security/apparmor",
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp",
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl",
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/util",
"k8s.io/kubernetes/pkg/securitycontext",
"k8s.io/kubernetes/pkg/serviceaccount",
"k8s.io/kubernetes/pkg/ssh",
"k8s.io/kubernetes/pkg/util/async",
"k8s.io/kubernetes/pkg/util/bandwidth",
"k8s.io/kubernetes/pkg/util/config",
"k8s.io/kubernetes/pkg/util/configz",
"k8s.io/kubernetes/pkg/util/conntrack",
"k8s.io/kubernetes/pkg/util/ebtables",
"k8s.io/kubernetes/pkg/util/env",
"k8s.io/kubernetes/pkg/util/filesystem",
"k8s.io/kubernetes/pkg/util/flag",
"k8s.io/kubernetes/pkg/util/flock",
"k8s.io/kubernetes/pkg/util/goroutinemap",
"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff",
"k8s.io/kubernetes/pkg/util/hash",
"k8s.io/kubernetes/pkg/util/ipset",
"k8s.io/kubernetes/pkg/util/iptables",
"k8s.io/kubernetes/pkg/util/ipvs",
"k8s.io/kubernetes/pkg/util/labels",
"k8s.io/kubernetes/pkg/util/node",
"k8s.io/kubernetes/pkg/util/oom",
"k8s.io/kubernetes/pkg/util/parsers",
"k8s.io/kubernetes/pkg/util/pod",
"k8s.io/kubernetes/pkg/util/procfs",
"k8s.io/kubernetes/pkg/util/removeall",
"k8s.io/kubernetes/pkg/util/resizefs",
"k8s.io/kubernetes/pkg/util/rlimit",
"k8s.io/kubernetes/pkg/util/selinux",
"k8s.io/kubernetes/pkg/util/slice",
"k8s.io/kubernetes/pkg/util/sysctl",
"k8s.io/kubernetes/pkg/util/system",
"k8s.io/kubernetes/pkg/util/tail",
"k8s.io/kubernetes/pkg/util/taints",
"k8s.io/kubernetes/pkg/volume",
"k8s.io/kubernetes/pkg/volume/util",
"k8s.io/kubernetes/pkg/volume/util/fs",
"k8s.io/kubernetes/pkg/volume/util/fsquota",
"k8s.io/kubernetes/pkg/volume/util/recyclerclient",
"k8s.io/kubernetes/pkg/volume/util/subpath",
"k8s.io/kubernetes/pkg/volume/util/types",
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
],
"ForbiddenPrefixes": []
},
{
"SelectorRegexp": "k8s[.]io/kubernetes/test/",
"AllowedPrefixes": [
"k8s.io/kubernetes/test/e2e/framework",
"k8s.io/kubernetes/test/e2e/framework/auth",
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper",
"k8s.io/kubernetes/test/e2e/framework/kubectl",
"k8s.io/kubernetes/test/e2e/framework/log",
"k8s.io/kubernetes/test/e2e/framework/metrics",
"k8s.io/kubernetes/test/e2e/framework/network",
"k8s.io/kubernetes/test/e2e/framework/node",
"k8s.io/kubernetes/test/e2e/framework/pod",
"k8s.io/kubernetes/test/e2e/framework/rc",
"k8s.io/kubernetes/test/e2e/framework/resource",
"k8s.io/kubernetes/test/e2e/framework/service",
"k8s.io/kubernetes/test/e2e/framework/ssh",
"k8s.io/kubernetes/test/e2e/framework/testfiles",
"k8s.io/kubernetes/test/e2e/manifest",
"k8s.io/kubernetes/test/e2e/perftype",
"k8s.io/kubernetes/test/e2e/storage/utils",
"k8s.io/kubernetes/test/e2e/system",
"k8s.io/kubernetes/test/utils",
"k8s.io/kubernetes/test/utils/image"
],
"ForbiddenPrefixes": []
},
{
"SelectorRegexp": "k8s[.]io/kubernetes/third_party/",
"AllowedPrefixes": [
"k8s.io/kubernetes/third_party/forked/golang/expansion",
"k8s.io/kubernetes/third_party/forked/ipvs"
],
"ForbiddenPrefixes": []
},
{
"SelectorRegexp": "k8s[.]io/utils/",
"AllowedPrefixes": [
"k8s.io/utils/buffer",
"k8s.io/utils/exec",
"k8s.io/utils/inotify",
"k8s.io/utils/integer",
"k8s.io/utils/io",
"k8s.io/utils/keymutex",
"k8s.io/utils/mount",
"k8s.io/utils/net",
"k8s.io/utils/nsenter",
"k8s.io/utils/path",
"k8s.io/utils/pointer",
"k8s.io/utils/strings",
"k8s.io/utils/trace"
],
"ForbiddenPrefixes": []
},
{
"SelectorRegexp": "k8s[.]io/(api/|apimachinery/|apiextensions-apiserver/|apiserver/)",
"AllowedPrefixes": [
""
]
},
{
"SelectorRegexp": "k8s[.]io/client-go/",
"AllowedPrefixes": [
""
]
}
]
}
rules:
- selectorRegexp: k8s[.]io/kubernetes/pkg/
allowedPrefixes:
- k8s.io/kubernetes/pkg/api/legacyscheme
- k8s.io/kubernetes/pkg/api/service
- k8s.io/kubernetes/pkg/api/v1/pod
- k8s.io/kubernetes/pkg/api/v1/resource
- k8s.io/kubernetes/pkg/api/v1/service
- k8s.io/kubernetes/pkg/api/pod
- k8s.io/kubernetes/pkg/apis/apps
- k8s.io/kubernetes/pkg/apis/apps/validation
- k8s.io/kubernetes/pkg/apis/autoscaling
- k8s.io/kubernetes/pkg/apis/batch
- k8s.io/kubernetes/pkg/apis/certificates
- k8s.io/kubernetes/pkg/apis/certificates/v1
- k8s.io/kubernetes/pkg/apis/core
- k8s.io/kubernetes/pkg/apis/core/helper
- k8s.io/kubernetes/pkg/apis/core/install
- k8s.io/kubernetes/pkg/apis/core/pods
- k8s.io/kubernetes/pkg/apis/core/v1
- k8s.io/kubernetes/pkg/apis/core/v1/helper
- k8s.io/kubernetes/pkg/apis/core/v1/helper/qos
- k8s.io/kubernetes/pkg/apis/core/validation
- k8s.io/kubernetes/pkg/apis/extensions
- k8s.io/kubernetes/pkg/apis/networking
- k8s.io/kubernetes/pkg/apis/policy
- k8s.io/kubernetes/pkg/apis/policy/validation
- k8s.io/kubernetes/pkg/apis/scheduling
- k8s.io/kubernetes/pkg/apis/storage/v1/util
- k8s.io/kubernetes/pkg/capabilities
- k8s.io/kubernetes/pkg/client/conditions
- k8s.io/kubernetes/pkg/cloudprovider/providers
- k8s.io/kubernetes/pkg/controller
- k8s.io/kubernetes/pkg/controller/deployment/util
- k8s.io/kubernetes/pkg/controller/nodelifecycle
- k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler
- k8s.io/kubernetes/pkg/controller/service
- k8s.io/kubernetes/pkg/controller/util/node
- k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util
- k8s.io/kubernetes/pkg/controller/volume/scheduling
- k8s.io/kubernetes/pkg/credentialprovider
- k8s.io/kubernetes/pkg/credentialprovider/aws
- k8s.io/kubernetes/pkg/credentialprovider/azure
- k8s.io/kubernetes/pkg/credentialprovider/gcp
- k8s.io/kubernetes/pkg/credentialprovider/secrets
- k8s.io/kubernetes/pkg/features
- k8s.io/kubernetes/pkg/fieldpath
- k8s.io/kubernetes/pkg/kubectl
- k8s.io/kubernetes/pkg/kubectl/apps
- k8s.io/kubernetes/pkg/kubectl/describe
- k8s.io/kubernetes/pkg/kubectl/describe/versioned
- k8s.io/kubernetes/pkg/kubectl/scheme
- k8s.io/kubernetes/pkg/kubectl/util
- k8s.io/kubernetes/pkg/kubectl/util/certificate
- k8s.io/kubernetes/pkg/kubectl/util/deployment
- k8s.io/kubernetes/pkg/kubectl/util/event
- k8s.io/kubernetes/pkg/kubectl/util/fieldpath
- k8s.io/kubernetes/pkg/kubectl/util/podutils
- k8s.io/kubernetes/pkg/kubectl/util/qos
- k8s.io/kubernetes/pkg/kubectl/util/rbac
- k8s.io/kubernetes/pkg/kubectl/util/resource
- k8s.io/kubernetes/pkg/kubectl/util/slice
- k8s.io/kubernetes/pkg/kubectl/util/storage
- k8s.io/kubernetes/pkg/kubelet
- k8s.io/kubernetes/pkg/kubelet/apis
- k8s.io/kubernetes/pkg/kubelet/apis/config
- k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1
- k8s.io/kubernetes/pkg/kubelet/cadvisor
- k8s.io/kubernetes/pkg/kubelet/certificate
- k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap
- k8s.io/kubernetes/pkg/kubelet/checkpoint
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors
- k8s.io/kubernetes/pkg/kubelet/cloudresource
- k8s.io/kubernetes/pkg/kubelet/cm
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/containermap
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology
- k8s.io/kubernetes/pkg/kubelet/cm/cpuset
- k8s.io/kubernetes/pkg/kubelet/cm/devicemanager
- k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint
- k8s.io/kubernetes/pkg/kubelet/cm/topologymanager
- k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask
- k8s.io/kubernetes/pkg/kubelet/cm/util
- k8s.io/kubernetes/pkg/kubelet/config
- k8s.io/kubernetes/pkg/kubelet/configmap
- k8s.io/kubernetes/pkg/kubelet/container
- k8s.io/kubernetes/pkg/kubelet/dockershim
- k8s.io/kubernetes/pkg/kubelet/dockershim/cm
- k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker
- k8s.io/kubernetes/pkg/kubelet/dockershim/metrics
- k8s.io/kubernetes/pkg/kubelet/dockershim/network
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics
- k8s.io/kubernetes/pkg/kubelet/dockershim/remote
- k8s.io/kubernetes/pkg/kubelet/envvars
- k8s.io/kubernetes/pkg/kubelet/eviction
- k8s.io/kubernetes/pkg/kubelet/eviction/api
- k8s.io/kubernetes/pkg/kubelet/events
- k8s.io/kubernetes/pkg/kubelet/images
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic
- k8s.io/kubernetes/pkg/kubelet/kuberuntime
- k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs
- k8s.io/kubernetes/pkg/kubelet/leaky
- k8s.io/kubernetes/pkg/kubelet/lifecycle
- k8s.io/kubernetes/pkg/kubelet/logs
- k8s.io/kubernetes/pkg/kubelet/metrics
- k8s.io/kubernetes/pkg/kubelet/network/dns
- k8s.io/kubernetes/pkg/kubelet/nodelease
- k8s.io/kubernetes/pkg/kubelet/nodestatus
- k8s.io/kubernetes/pkg/kubelet/oom
- k8s.io/kubernetes/pkg/kubelet/pleg
- k8s.io/kubernetes/pkg/kubelet/pluginmanager
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/metrics
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler
- k8s.io/kubernetes/pkg/kubelet/pod
- k8s.io/kubernetes/pkg/kubelet/preemption
- k8s.io/kubernetes/pkg/kubelet/prober
- k8s.io/kubernetes/pkg/kubelet/prober/results
- k8s.io/kubernetes/pkg/kubelet/qos
- k8s.io/kubernetes/pkg/kubelet/remote
- k8s.io/kubernetes/pkg/kubelet/runtimeclass
- k8s.io/kubernetes/pkg/kubelet/server
- k8s.io/kubernetes/pkg/kubelet/server/metrics
- k8s.io/kubernetes/pkg/kubelet/server/portforward
- k8s.io/kubernetes/pkg/kubelet/server/remotecommand
- k8s.io/kubernetes/pkg/kubelet/server/stats
- k8s.io/kubernetes/pkg/kubelet/server/streaming
- k8s.io/kubernetes/pkg/kubelet/stats
- k8s.io/kubernetes/pkg/kubelet/stats/pidlimit
- k8s.io/kubernetes/pkg/kubelet/status
- k8s.io/kubernetes/pkg/kubelet/secret
- k8s.io/kubernetes/pkg/kubelet/sysctl
- k8s.io/kubernetes/pkg/kubelet/types
- k8s.io/kubernetes/pkg/kubelet/token
- k8s.io/kubernetes/pkg/kubelet/util
- k8s.io/kubernetes/pkg/kubelet/util/format
- k8s.io/kubernetes/pkg/kubelet/util/manager
- k8s.io/kubernetes/pkg/kubelet/util/store
- k8s.io/kubernetes/pkg/kubelet/volumemanager
- k8s.io/kubernetes/pkg/kubelet/volumemanager/cache
- k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics
- k8s.io/kubernetes/pkg/kubelet/volumemanager/populator
- k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler
- k8s.io/kubernetes/pkg/kubemark
- k8s.io/kubernetes/pkg/cluster/ports
- k8s.io/kubernetes/pkg/probe
- k8s.io/kubernetes/pkg/probe/exec
- k8s.io/kubernetes/pkg/probe/http
- k8s.io/kubernetes/pkg/probe/tcp
- k8s.io/kubernetes/pkg/proxy
- k8s.io/kubernetes/pkg/proxy/apis
- k8s.io/kubernetes/pkg/proxy/apis/config
- k8s.io/kubernetes/pkg/proxy/apis/config/scheme
- k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1
- k8s.io/kubernetes/pkg/proxy/apis/config/validation
- k8s.io/kubernetes/pkg/proxy/config
- k8s.io/kubernetes/pkg/proxy/healthcheck
- k8s.io/kubernetes/pkg/proxy/iptables
- k8s.io/kubernetes/pkg/proxy/ipvs
- k8s.io/kubernetes/pkg/proxy/metaproxier
- k8s.io/kubernetes/pkg/proxy/metrics
- k8s.io/kubernetes/pkg/proxy/userspace
- k8s.io/kubernetes/pkg/proxy/util
- k8s.io/kubernetes/pkg/registry/core/service/allocator
- k8s.io/kubernetes/pkg/registry/core/service/portallocator
- k8s.io/kubernetes/pkg/scheduler/api
- k8s.io/kubernetes/pkg/scheduler/framework
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources
- k8s.io/kubernetes/pkg/scheduler/framework/runtime
- k8s.io/kubernetes/pkg/scheduler/internal/parallelize
- k8s.io/kubernetes/pkg/scheduler/listers
- k8s.io/kubernetes/pkg/scheduler/metrics
- k8s.io/kubernetes/pkg/scheduler/nodeinfo
- k8s.io/kubernetes/pkg/scheduler/util
- k8s.io/kubernetes/pkg/scheduler/volumebinder
- k8s.io/kubernetes/pkg/security/apparmor
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/util
- k8s.io/kubernetes/pkg/securitycontext
- k8s.io/kubernetes/pkg/serviceaccount
- k8s.io/kubernetes/pkg/util/async
- k8s.io/kubernetes/pkg/util/bandwidth
- k8s.io/kubernetes/pkg/util/config
- k8s.io/kubernetes/pkg/util/configz
- k8s.io/kubernetes/pkg/util/conntrack
- k8s.io/kubernetes/pkg/util/env
- k8s.io/kubernetes/pkg/util/filesystem
- k8s.io/kubernetes/pkg/util/flag
- k8s.io/kubernetes/pkg/util/flock
- k8s.io/kubernetes/pkg/util/goroutinemap
- k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff
- k8s.io/kubernetes/pkg/util/hash
- k8s.io/kubernetes/pkg/util/ipset
- k8s.io/kubernetes/pkg/util/iptables
- k8s.io/kubernetes/pkg/util/ipvs
- k8s.io/kubernetes/pkg/util/labels
- k8s.io/kubernetes/pkg/util/node
- k8s.io/kubernetes/pkg/util/oom
- k8s.io/kubernetes/pkg/util/parsers
- k8s.io/kubernetes/pkg/util/pod
- k8s.io/kubernetes/pkg/util/procfs
- k8s.io/kubernetes/pkg/util/removeall
- k8s.io/kubernetes/pkg/util/resizefs
- k8s.io/kubernetes/pkg/util/rlimit
- k8s.io/kubernetes/pkg/util/selinux
- k8s.io/kubernetes/pkg/util/slice
- k8s.io/kubernetes/pkg/util/sysctl
- k8s.io/kubernetes/pkg/util/system
- k8s.io/kubernetes/pkg/util/tail
- k8s.io/kubernetes/pkg/util/taints
- k8s.io/kubernetes/pkg/volume
- k8s.io/kubernetes/pkg/volume/util
- k8s.io/kubernetes/pkg/volume/util/fs
- k8s.io/kubernetes/pkg/volume/util/fsquota
- k8s.io/kubernetes/pkg/volume/util/recyclerclient
- k8s.io/kubernetes/pkg/volume/util/subpath
- k8s.io/kubernetes/pkg/volume/util/types
- k8s.io/kubernetes/pkg/volume/util/volumepathhandler
# TODO: I have no idea why import-boss --include-test-files is yelling about these for k8s.io/kubernetes/test/e2e/framework/providers/kubemark
- k8s.io/kubernetes/pkg/apis/authentication
- k8s.io/kubernetes/pkg/apis/authentication/v1
- k8s.io/kubernetes/pkg/apis/certificates/v1beta1
- k8s.io/kubernetes/pkg/apis/storage/v1
- k8s.io/kubernetes/pkg/scheduler/internal/cache
- selectorRegexp: k8s[.]io/kubernetes/test/
allowedPrefixes:
- k8s.io/kubernetes/test/e2e/framework
- k8s.io/kubernetes/test/e2e/framework/auth
- k8s.io/kubernetes/test/e2e/framework/ginkgowrapper
- k8s.io/kubernetes/test/e2e/framework/kubectl
- k8s.io/kubernetes/test/e2e/framework/log
- k8s.io/kubernetes/test/e2e/framework/metrics
- k8s.io/kubernetes/test/e2e/framework/network
- k8s.io/kubernetes/test/e2e/framework/node
- k8s.io/kubernetes/test/e2e/framework/pod
- k8s.io/kubernetes/test/e2e/framework/rc
- k8s.io/kubernetes/test/e2e/framework/resource
- k8s.io/kubernetes/test/e2e/framework/service
- k8s.io/kubernetes/test/e2e/framework/ssh
- k8s.io/kubernetes/test/e2e/framework/testfiles
- k8s.io/kubernetes/test/e2e/framework/websocket
- k8s.io/kubernetes/test/e2e/manifest
- k8s.io/kubernetes/test/e2e/perftype
- k8s.io/kubernetes/test/e2e/storage/utils
- k8s.io/kubernetes/test/e2e/system
- k8s.io/kubernetes/test/utils
- k8s.io/kubernetes/test/utils/image
# TODO: why is this here?
- selectorRegexp: k8s[.]io/kubernetes/third_party/
allowedPrefixes:
- k8s.io/kubernetes/third_party/forked/golang/expansion


@ -8,11 +8,11 @@ go_library(
"expect.go",
"flake_reporting_util.go",
"framework.go",
"google_compute.go",
"log.go",
"log_size_monitoring.go",
"nodes_util.go",
"pods.go",
"ports.go",
"provider.go",
"psp.go",
"resource_usage_gatherer.go",
@ -23,17 +23,7 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/framework",
visibility = ["//visibility:public"],
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/kubelet/apis/config:go_default_library",
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
"//pkg/kubelet/events:go_default_library",
"//pkg/kubelet/sysctl:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/security/podsecuritypolicy/seccomp:go_default_library",
"//pkg/util/taints:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
@ -43,13 +33,13 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/discovery/cached/memory:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
@ -59,20 +49,22 @@ go_library(
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/restmapper:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//staging/src/k8s.io/component-base/cli/flag:go_default_library",
"//staging/src/k8s.io/component-base/featuregate:go_default_library",
"//staging/src/k8s.io/kubectl/pkg/util/podutils:go_default_library",
"//staging/src/k8s.io/kubelet/pkg/apis/stats/v1alpha1:go_default_library",
"//test/e2e/framework/auth:go_default_library",
"//test/e2e/framework/ginkgowrapper:go_default_library",
"//test/e2e/framework/kubectl:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/resource:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/system:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
@ -80,8 +72,7 @@ go_library(
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/onsi/gomega/types:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
"//vendor/golang.org/x/net/websocket:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
@ -114,6 +105,7 @@ filegroup(
"//test/e2e/framework/config:all-srcs",
"//test/e2e/framework/deployment:all-srcs",
"//test/e2e/framework/endpoints:all-srcs",
"//test/e2e/framework/endpointslice:all-srcs",
"//test/e2e/framework/events:all-srcs",
"//test/e2e/framework/ginkgowrapper:all-srcs",
"//test/e2e/framework/gpu:all-srcs",
@ -121,13 +113,14 @@ filegroup(
"//test/e2e/framework/job:all-srcs",
"//test/e2e/framework/kubectl:all-srcs",
"//test/e2e/framework/kubelet:all-srcs",
"//test/e2e/framework/kubesystem:all-srcs",
"//test/e2e/framework/log:all-srcs",
"//test/e2e/framework/manifest:all-srcs",
"//test/e2e/framework/metrics:all-srcs",
"//test/e2e/framework/network:all-srcs",
"//test/e2e/framework/node:all-srcs",
"//test/e2e/framework/perf:all-srcs",
"//test/e2e/framework/pod:all-srcs",
"//test/e2e/framework/podlogs:all-srcs",
"//test/e2e/framework/providers/aws:all-srcs",
"//test/e2e/framework/providers/azure:all-srcs",
"//test/e2e/framework/providers/gce:all-srcs",
@ -146,6 +139,7 @@ filegroup(
"//test/e2e/framework/testfiles:all-srcs",
"//test/e2e/framework/timer:all-srcs",
"//test/e2e/framework/volume:all-srcs",
"//test/e2e/framework/websocket:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],


@ -16,22 +16,30 @@ limitations under the License.
package framework
import "sync"
import (
"sync"
)
// CleanupActionHandle is an integer pointer type for handling cleanup action
type CleanupActionHandle *int
type cleanupFuncHandle struct {
actionHandle CleanupActionHandle
actionHook func()
}
var cleanupActionsLock sync.Mutex
var cleanupActions = map[CleanupActionHandle]func(){}
var cleanupHookList = []cleanupFuncHandle{}
// AddCleanupAction installs a function that will be called in the event of the
// whole test being terminated. This allows arbitrary pieces of the overall
// test to hook into SynchronizedAfterSuite().
// The hooks are called in last-in-first-out order.
func AddCleanupAction(fn func()) CleanupActionHandle {
p := CleanupActionHandle(new(int))
cleanupActionsLock.Lock()
defer cleanupActionsLock.Unlock()
cleanupActions[p] = fn
c := cleanupFuncHandle{actionHandle: p, actionHook: fn}
cleanupHookList = append([]cleanupFuncHandle{c}, cleanupHookList...)
return p
}
@ -40,7 +48,12 @@ func AddCleanupAction(fn func()) CleanupActionHandle {
func RemoveCleanupAction(p CleanupActionHandle) {
cleanupActionsLock.Lock()
defer cleanupActionsLock.Unlock()
delete(cleanupActions, p)
for i, item := range cleanupHookList {
if item.actionHandle == p {
cleanupHookList = append(cleanupHookList[:i], cleanupHookList[i+1:]...)
break
}
}
}
// RunCleanupActions runs all functions installed by AddCleanupAction. It does
@ -51,8 +64,8 @@ func RunCleanupActions() {
func() {
cleanupActionsLock.Lock()
defer cleanupActionsLock.Unlock()
for _, fn := range cleanupActions {
list = append(list, fn)
for _, p := range cleanupHookList {
list = append(list, p.actionHook)
}
}()
// Run unlocked.
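
The cleanup registry above changes from an unordered map to an ordered slice of cleanupFuncHandle entries, so hooks added later run first when RunCleanupActions fires from SynchronizedAfterSuite. A minimal sketch of the intended usage, built only from the functions defined in this file; the hook bodies and the helper name are illustrative:

```go
package framework_test

import (
	"fmt"

	"k8s.io/kubernetes/test/e2e/framework"
)

// demoCleanupOrder registers two hooks and runs them. Hooks are kept in a
// LIFO list, so "release port-forward" prints before "delete scratch namespace".
func demoCleanupOrder() {
	handle := framework.AddCleanupAction(func() { fmt.Println("delete scratch namespace") })
	framework.AddCleanupAction(func() { fmt.Println("release port-forward") })

	// Normally invoked once from the suite's SynchronizedAfterSuite.
	framework.RunCleanupActions()

	// A test that has already cleaned up after itself can deregister its hook.
	framework.RemoveCleanupAction(handle)
}
```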


@ -43,14 +43,16 @@ type ExecOptions struct {
CaptureStderr bool
// If false, whitespace in std{err,out} will be removed.
PreserveWhitespace bool
Quiet bool
}
// ExecWithOptions executes a command in the specified container,
// returning stdout, stderr and error. `options` allowed for
// additional parameters to be passed.
func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error) {
Logf("ExecWithOptions %+v", options)
if !options.Quiet {
Logf("ExecWithOptions %+v", options)
}
config, err := LoadConfig()
ExpectNoError(err, "failed to load restclient config")
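
The new Quiet field lets callers that exec into pods in a tight loop skip the per-call "ExecWithOptions %+v" log line. A hedged sketch of such a caller: the pod and container names are made up, and the ExecOptions fields other than CaptureStderr, PreserveWhitespace and Quiet are assumed from the upstream definition rather than shown in this diff.

```go
package framework_test

import (
	"k8s.io/kubernetes/test/e2e/framework"
)

// quietExec runs a command in a pod without logging every invocation.
// Field names other than Quiet/CaptureStderr/PreserveWhitespace are assumed
// from the upstream ExecOptions struct; pod and container names are hypothetical.
func quietExec(f *framework.Framework) (string, string, error) {
	return f.ExecWithOptions(framework.ExecOptions{
		Command:            []string{"cat", "/etc/resolv.conf"},
		Namespace:          f.Namespace.Name,
		PodName:            "test-pod",
		ContainerName:      "test-container",
		CaptureStdout:      true,
		CaptureStderr:      true,
		PreserveWhitespace: false,
		Quiet:              true, // new: suppress the per-call "ExecWithOptions" log line
	})
}
```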


@ -38,7 +38,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery"
cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
@ -48,15 +47,12 @@ import (
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
scaleclient "k8s.io/client-go/scale"
testutils "k8s.io/kubernetes/test/utils"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
const (
@ -477,6 +473,38 @@ func (f *Framework) AfterEach() {
}
}
// DeleteNamespace can be used to delete a namespace. Additionally it can be used to
// dump namespace information so as it can be used as an alternative of framework
// deleting the namespace towards the end.
func (f *Framework) DeleteNamespace(name string) {
defer func() {
err := f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), name, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
Logf("error deleting namespace %s: %v", name, err)
return
}
err = WaitForNamespacesDeleted(f.ClientSet, []string{name}, DefaultNamespaceDeletionTimeout)
if err != nil {
Logf("error deleting namespace %s: %v", name, err)
return
}
// remove deleted namespace from namespacesToDelete map
for i, ns := range f.namespacesToDelete {
if ns == nil {
continue
}
if ns.Name == name {
f.namespacesToDelete = append(f.namespacesToDelete[:i], f.namespacesToDelete[i+1:]...)
}
}
}()
// if current test failed then we should dump namespace information
if !f.SkipNamespaceCreation && ginkgo.CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
DumpAllNamespaceInfo(f.ClientSet, name)
}
}
// CreateNamespace creates a namespace for e2e testing.
func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (*v1.Namespace, error) {
createTestingNS := TestContext.CreateTestingNS
@ -513,38 +541,6 @@ func (f *Framework) AddNamespacesToDelete(namespaces ...*v1.Namespace) {
}
}
// WaitForPodTerminated waits for the pod to be terminated with the given reason.
func (f *Framework) WaitForPodTerminated(podName, reason string) error {
return e2epod.WaitForPodTerminatedInNamespace(f.ClientSet, podName, reason, f.Namespace.Name)
}
// WaitForPodNotFound waits for the pod to be completely terminated (not "Get-able").
func (f *Framework) WaitForPodNotFound(podName string, timeout time.Duration) error {
return e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, podName, f.Namespace.Name, timeout)
}
// WaitForPodRunning waits for the pod to run in the namespace.
func (f *Framework) WaitForPodRunning(podName string) error {
return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
}
// WaitForPodReady waits for the pod to flip to ready in the namespace.
func (f *Framework) WaitForPodReady(podName string) error {
return e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podName, f.Namespace.Name, PodStartTimeout)
}
// WaitForPodRunningSlow waits for the pod to run in the namespace.
// It has a longer timeout then WaitForPodRunning (util.slowPodStartTimeout).
func (f *Framework) WaitForPodRunningSlow(podName string) error {
return e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, podName, f.Namespace.Name)
}
// WaitForPodNoLongerRunning waits for the pod to no longer be running in the namespace, for either
// success or failure.
func (f *Framework) WaitForPodNoLongerRunning(podName string) error {
return e2epod.WaitForPodNoLongerRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
}
// ClientConfig an externally accessible method for reading the kube client config.
func (f *Framework) ClientConfig() *rest.Config {
ret := rest.CopyConfig(f.clientConfig)
@ -568,83 +564,13 @@ func (f *Framework) TestContainerOutputRegexp(scenarioName string, pod *v1.Pod,
f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, gomega.MatchRegexp)
}
// CreateServiceForSimpleAppWithPods is a convenience wrapper to create a service and its matching pods all at once.
func (f *Framework) CreateServiceForSimpleAppWithPods(contPort int, svcPort int, appName string, podSpec func(n v1.Node) v1.PodSpec, count int, block bool) (*v1.Service, error) {
var err error
theService := f.CreateServiceForSimpleApp(contPort, svcPort, appName)
f.CreatePodsPerNodeForSimpleApp(appName, podSpec, count)
if block {
err = testutils.WaitForPodsWithLabelRunning(f.ClientSet, f.Namespace.Name, labels.SelectorFromSet(labels.Set(theService.Spec.Selector)))
}
return theService, err
}
// CreateServiceForSimpleApp returns a service that selects/exposes pods (send -1 ports if no exposure needed) with an app label.
func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName string) *v1.Service {
if appName == "" {
panic(fmt.Sprintf("no app name provided"))
}
serviceSelector := map[string]string{
"app": appName + "-pod",
}
// For convenience, user sending ports are optional.
portsFunc := func() []v1.ServicePort {
if contPort < 1 || svcPort < 1 {
return nil
}
return []v1.ServicePort{{
Protocol: v1.ProtocolTCP,
Port: int32(svcPort),
TargetPort: intstr.FromInt(contPort),
}}
}
Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "service-for-" + appName,
Labels: map[string]string{
"app": appName + "-service",
},
},
Spec: v1.ServiceSpec{
Ports: portsFunc(),
Selector: serviceSelector,
},
}, metav1.CreateOptions{})
ExpectNoError(err)
return service
}
// CreatePodsPerNodeForSimpleApp creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
nodes, err := e2enode.GetBoundedReadySchedulableNodes(f.ClientSet, maxCount)
ExpectNoError(err)
podLabels := map[string]string{
"app": appName + "-pod",
}
for i, node := range nodes.Items {
Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf(appName+"-pod-%v", i),
Labels: podLabels,
},
Spec: podSpec(node),
}, metav1.CreateOptions{})
ExpectNoError(err)
}
return podLabels
}
// KubeUser is a struct for managing kubernetes user info.
type KubeUser struct {
Name string `yaml:"name"`
User struct {
Username string `yaml:"username"`
Password string `yaml:"password"`
Token string `yaml:"token"`
Password string `yaml:"password" datapolicy:"password"`
Token string `yaml:"token" datapolicy:"token"`
} `yaml:"user"`
}
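
Alongside moving the per-pod wait helpers out to e2epod, the Framework gains DeleteNamespace, which dumps the namespace's state first when the current test has failed and then removes it from the framework's own deletion list. A hedged sketch of using it together with CreateNamespace; the base name, labels and helper name are illustrative:

```go
package framework_test

import (
	"k8s.io/kubernetes/test/e2e/framework"
)

// useScratchNamespace creates a throwaway namespace and deletes it explicitly
// instead of waiting for the framework's AfterEach cleanup.
func useScratchNamespace(f *framework.Framework) error {
	ns, err := f.CreateNamespace("scratch", map[string]string{"purpose": "demo"})
	if err != nil {
		return err
	}
	// ... create and exercise test objects in ns.Name ...
	f.DeleteNamespace(ns.Name) // dumps namespace info first if the test has failed
	return nil
}
```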


@ -1,131 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"encoding/json"
"fmt"
"io/ioutil"
"os/exec"
"path/filepath"
"strings"
)
// TODO: These should really just use the GCE API client library or at least use
// better formatted output from the --format flag.
// Returns master & node image string, or error
func lookupClusterImageSources() (string, string, error) {
// Given args for a gcloud compute command, run it with other args, and return the values,
// whether separated by newlines, commas or semicolons.
gcloudf := func(argv ...string) ([]string, error) {
args := []string{"compute"}
args = append(args, argv...)
args = append(args, "--project", TestContext.CloudConfig.ProjectID)
if TestContext.CloudConfig.MultiMaster {
args = append(args, "--region", TestContext.CloudConfig.Region)
} else {
args = append(args, "--zone", TestContext.CloudConfig.Zone)
}
outputBytes, err := exec.Command("gcloud", args...).CombinedOutput()
str := strings.Replace(string(outputBytes), ",", "\n", -1)
str = strings.Replace(str, ";", "\n", -1)
lines := strings.Split(str, "\n")
if err != nil {
Logf("lookupDiskImageSources: gcloud error with [%#v]; err:%v", argv, err)
for _, l := range lines {
Logf(" > %s", l)
}
}
return lines, err
}
// Given a GCE instance, look through its disks, finding one that has a sourceImage
host2image := func(instance string) (string, error) {
// gcloud compute instances describe {INSTANCE} --format="get(disks[].source)"
// gcloud compute disks describe {DISKURL} --format="get(sourceImage)"
disks, err := gcloudf("instances", "describe", instance, "--format=get(disks[].source)")
if err != nil {
return "", err
} else if len(disks) == 0 {
return "", fmt.Errorf("instance %q had no findable disks", instance)
}
// Loop over disks, looking for the boot disk
for _, disk := range disks {
lines, err := gcloudf("disks", "describe", disk, "--format=get(sourceImage)")
if err != nil {
return "", err
} else if len(lines) > 0 && lines[0] != "" {
return lines[0], nil // break, we're done
}
}
return "", fmt.Errorf("instance %q had no disk with a sourceImage", instance)
}
// gcloud compute instance-groups list-instances {GROUPNAME} --format="get(instance)"
nodeName := ""
instGroupName := strings.Split(TestContext.CloudConfig.NodeInstanceGroup, ",")[0]
if lines, err := gcloudf("instance-groups", "list-instances", instGroupName, "--format=get(instance)"); err != nil {
return "", "", err
} else if len(lines) == 0 {
return "", "", fmt.Errorf("no instances inside instance-group %q", instGroupName)
} else {
nodeName = lines[0]
}
nodeImg, err := host2image(nodeName)
if err != nil {
return "", "", err
}
frags := strings.Split(nodeImg, "/")
nodeImg = frags[len(frags)-1]
// For GKE clusters, MasterName will not be defined; we just leave masterImg blank.
masterImg := ""
if masterName := TestContext.CloudConfig.MasterName; masterName != "" {
img, err := host2image(masterName)
if err != nil {
return "", "", err
}
frags = strings.Split(img, "/")
masterImg = frags[len(frags)-1]
}
return masterImg, nodeImg, nil
}
// LogClusterImageSources writes out cluster image sources.
func LogClusterImageSources() {
masterImg, nodeImg, err := lookupClusterImageSources()
if err != nil {
Logf("Cluster image sources lookup failed: %v\n", err)
return
}
Logf("cluster-master-image: %s", masterImg)
Logf("cluster-node-image: %s", nodeImg)
images := map[string]string{
"master_os_image": masterImg,
"node_os_image": nodeImg,
}
outputBytes, _ := json.MarshalIndent(images, "", " ")
filePath := filepath.Join(TestContext.ReportDir, "images.json")
if err := ioutil.WriteFile(filePath, outputBytes, 0644); err != nil {
Logf("cluster images sources, could not write to %q: %v", filePath, err)
}
}


@ -86,6 +86,9 @@ func (tk *TestKubeconfig) KubectlCmd(args ...string) *exec.Cmd {
fmt.Sprintf("--client-key=%s", filepath.Join(tk.CertDir, "kubecfg.key")))
}
}
if tk.Namespace != "" {
defaultArgs = append(defaultArgs, fmt.Sprintf("--namespace=%s", tk.Namespace))
}
kubectlArgs := append(defaultArgs, args...)
//We allow users to specify path to kubectl, so you can test either "kubectl" or "cluster/kubectl.sh"
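
KubectlCmd now appends --namespace=<ns> to the default arguments whenever TestKubeconfig.Namespace is set, so callers no longer need to pass the namespace with every invocation. A hedged sketch: only the Namespace field and the KubectlCmd signature are taken from this diff, the other TestKubeconfig fields are left at their zero values, and the namespace value is made up.

```go
package kubectl_test

import (
	"os/exec"

	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
)

// buildGetPodsCmd returns a command roughly equivalent to
// `kubectl --namespace=demo-ns get pods` (plus the kubeconfig defaults).
func buildGetPodsCmd() *exec.Cmd {
	tk := &e2ekubectl.TestKubeconfig{Namespace: "demo-ns"} // other fields omitted for the sketch
	return tk.KubectlCmd("get", "pods")
}
```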


@ -26,7 +26,7 @@ import (
"github.com/onsi/ginkgo"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
)
func nowStamp() string {
@ -42,18 +42,13 @@ func Logf(format string, args ...interface{}) {
log("INFO", format, args...)
}
// Failf logs the fail info, including a stack trace.
// Failf logs the fail info, including a stack trace starts at 2 levels above its caller
// (for example, for call chain f -> g -> Failf("foo", ...) error would be logged for "f").
func Failf(format string, args ...interface{}) {
FailfWithOffset(1, format, args...)
}
// FailfWithOffset calls "Fail" and logs the error with a stack trace that starts at "offset" levels above its caller
// (for example, for call chain f -> g -> FailfWithOffset(1, ...) error would be logged for "f").
func FailfWithOffset(offset int, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
skip := offset + 1
skip := 2
log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
ginkgowrapper.Fail(nowStamp()+": "+msg, skip)
e2eginkgowrapper.Fail(nowStamp()+": "+msg, skip)
}
// Fail is a replacement for ginkgo.Fail which logs the problem as it occurs
@ -64,7 +59,7 @@ func Fail(msg string, callerSkip ...int) {
skip += callerSkip[0]
}
log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
ginkgowrapper.Fail(nowStamp()+": "+msg, skip)
e2eginkgowrapper.Fail(nowStamp()+": "+msg, skip)
}
var codeFilterRE = regexp.MustCompile(`/github.com/onsi/ginkgo/`)


@ -24,7 +24,7 @@ import (
"github.com/onsi/ginkgo"
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
)
func nowStamp() string {
@ -50,5 +50,5 @@ func Failf(format string, args ...interface{}) {
func FailfWithOffset(offset int, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
log("FAIL", msg)
ginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset)
e2eginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset)
}


@ -159,7 +159,7 @@ func (d *LogsSizeData) addNewData(ip, path string, timestamp time.Time, size int
func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVerifier {
nodeAddresses, err := e2essh.NodeSSHHosts(c)
ExpectNoError(err)
masterAddress := GetMasterHost() + ":22"
instanceAddress := APIAddress() + ":22"
workChannel := make(chan WorkItem, len(nodeAddresses)+1)
workers := make([]*LogSizeGatherer, workersNo)
@ -167,8 +167,8 @@ func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVeri
verifier := &LogsSizeVerifier{
client: c,
stopChannel: stopChannel,
data: prepareData(masterAddress, nodeAddresses),
masterAddress: masterAddress,
data: prepareData(instanceAddress, nodeAddresses),
masterAddress: instanceAddress,
nodeAddresses: nodeAddresses,
wg: sync.WaitGroup{},
workChannel: workChannel,


@ -22,9 +22,6 @@ go_library(
],
importpath = "k8s.io/kubernetes/test/e2e/framework/metrics",
deps = [
"//pkg/kubelet/dockershim/metrics:go_default_library",
"//pkg/kubelet/metrics:go_default_library",
"//pkg/master/ports:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
@ -34,8 +31,7 @@ go_library(
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/perftype:go_default_library",
"//test/e2e/system:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
],
)


@ -29,13 +29,26 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/component-base/metrics/testutil"
dockermetrics "k8s.io/kubernetes/pkg/kubelet/dockershim/metrics"
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)
const (
proxyTimeout = 2 * time.Minute
// dockerOperationsLatencyKey is the key for the operation latency metrics.
// Taken from k8s.io/kubernetes/pkg/kubelet/dockershim/metrics
dockerOperationsLatencyKey = "docker_operations_duration_seconds"
// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
kubeletSubsystem = "kubelet"
// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
podWorkerDurationKey = "pod_worker_duration_seconds"
// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
podStartDurationKey = "pod_start_duration_seconds"
// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
cgroupManagerOperationsKey = "cgroup_manager_duration_seconds"
// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
podWorkerStartDurationKey = "pod_worker_start_duration_seconds"
// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
plegRelistDurationKey = "pleg_relist_duration_seconds"
)
// KubeletMetrics is metrics for kubelet
@ -143,7 +156,7 @@ func GetKubeletMetrics(c clientset.Interface, nodeName string) (KubeletMetrics,
kubeletMetrics := make(KubeletMetrics)
for name, samples := range ms {
const prefix = kubeletmetrics.KubeletSubsystem + "_"
const prefix = kubeletSubsystem + "_"
if !strings.HasPrefix(name, prefix) {
// Not a kubelet metric.
continue
@ -159,13 +172,13 @@ func GetKubeletMetrics(c clientset.Interface, nodeName string) (KubeletMetrics,
// Note that the KubeletMetrics passed in should not contain subsystem prefix.
func GetDefaultKubeletLatencyMetrics(ms KubeletMetrics) KubeletLatencyMetrics {
latencyMetricNames := sets.NewString(
kubeletmetrics.PodWorkerDurationKey,
kubeletmetrics.PodWorkerStartDurationKey,
kubeletmetrics.PodStartDurationKey,
kubeletmetrics.CgroupManagerOperationsKey,
dockermetrics.DockerOperationsLatencyKey,
kubeletmetrics.PodWorkerStartDurationKey,
kubeletmetrics.PLEGRelistDurationKey,
podWorkerDurationKey,
podWorkerStartDurationKey,
podStartDurationKey,
cgroupManagerOperationsKey,
dockerOperationsLatencyKey,
podWorkerStartDurationKey,
plegRelistDurationKey,
)
return GetKubeletLatencyMetrics(ms, latencyMetricNames)
}
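
The metric-name constants above are copied into this package so the framework no longer imports pkg/kubelet/metrics and pkg/kubelet/dockershim/metrics. Callers keep using the same two helpers; a hedged sketch (the clientset and node name are expected to come from the surrounding test, and the helper name is illustrative):

```go
package metrics_test

import (
	"fmt"

	clientset "k8s.io/client-go/kubernetes"
	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
)

// dumpKubeletLatency fetches raw kubelet metrics for one node and reduces them
// to the default latency subset (pod worker, pod start, cgroup manager,
// docker operations and PLEG relist durations).
func dumpKubeletLatency(c clientset.Interface, nodeName string) error {
	ms, err := e2emetrics.GetKubeletMetrics(c, nodeName)
	if err != nil {
		return err
	}
	latency := e2emetrics.GetDefaultKubeletLatencyMetrics(ms)
	fmt.Printf("default kubelet latency metrics for %s: %v\n", nodeName, latency)
	return nil
}
```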


@ -19,6 +19,7 @@ package metrics
import (
"context"
"fmt"
"regexp"
"sync"
"time"
@ -26,11 +27,20 @@ import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/master/ports"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/system"
"k8s.io/klog"
"k8s.io/klog/v2"
)
const (
// insecureSchedulerPort is the default port for the scheduler status server.
// May be overridden by a flag at startup.
// Deprecated: use the secure KubeSchedulerPort instead.
insecureSchedulerPort = 10251
// insecureKubeControllerManagerPort is the default port for the controller manager status server.
// May be overridden by a flag at startup.
// Deprecated: use the secure KubeControllerManagerPort instead.
insecureKubeControllerManagerPort = 10252
)
// Collection is metrics collection of components
@ -51,38 +61,48 @@ type Grabber struct {
grabFromKubelets bool
grabFromScheduler bool
grabFromClusterAutoscaler bool
masterName string
registeredMaster bool
kubeScheduler string
kubeControllerManager string
waitForControllerManagerReadyOnce sync.Once
}
// NewMetricsGrabber returns new metrics which are initialized.
func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets bool, scheduler bool, controllers bool, apiServer bool, clusterAutoscaler bool) (*Grabber, error) {
registeredMaster := false
masterName := ""
nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
kubeScheduler := ""
kubeControllerManager := ""
regKubeScheduler := regexp.MustCompile("kube-scheduler-.*")
regKubeControllerManager := regexp.MustCompile("kube-controller-manager-.*")
podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, err
}
if len(nodeList.Items) < 1 {
klog.Warning("Can't find any Nodes in the API server to grab metrics from")
if len(podList.Items) < 1 {
klog.Warningf("Can't find any pods in namespace %s to grab metrics from", metav1.NamespaceSystem)
}
for _, node := range nodeList.Items {
if system.DeprecatedMightBeMasterNode(node.Name) {
registeredMaster = true
masterName = node.Name
for _, pod := range podList.Items {
if regKubeScheduler.MatchString(pod.Name) {
kubeScheduler = pod.Name
}
if regKubeControllerManager.MatchString(pod.Name) {
kubeControllerManager = pod.Name
}
if kubeScheduler != "" && kubeControllerManager != "" {
break
}
}
if !registeredMaster {
if kubeScheduler == "" {
scheduler = false
klog.Warningf("Can't find kube-scheduler pod. Grabbing metrics from kube-scheduler is disabled.")
}
if kubeControllerManager == "" {
controllers = false
clusterAutoscaler = ec != nil
if clusterAutoscaler {
klog.Warningf("Master node is not registered. Grabbing metrics from Scheduler, ControllerManager is disabled.")
} else {
klog.Warningf("Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.")
}
klog.Warningf("Can't find kube-controller-manager pod. Grabbing metrics from kube-controller-manager is disabled.")
}
if ec == nil {
klog.Warningf("Did not receive an external client interface. Grabbing metrics from ClusterAutoscaler is disabled.")
}
return &Grabber{
@ -93,14 +113,14 @@ func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets b
grabFromKubelets: kubelets,
grabFromScheduler: scheduler,
grabFromClusterAutoscaler: clusterAutoscaler,
masterName: masterName,
registeredMaster: registeredMaster,
kubeScheduler: kubeScheduler,
kubeControllerManager: kubeControllerManager,
}, nil
}
// HasRegisteredMaster returns if metrics grabber was able to find a master node
func (g *Grabber) HasRegisteredMaster() bool {
return g.registeredMaster
// HasControlPlanePods returns true if metrics grabber was able to find control-plane pods
func (g *Grabber) HasControlPlanePods() bool {
return g.kubeScheduler != "" && g.kubeControllerManager != ""
}
// GrabFromKubelet returns metrics from kubelet
@ -129,10 +149,10 @@ func (g *Grabber) grabFromKubeletInternal(nodeName string, kubeletPort int) (Kub
// GrabFromScheduler returns metrics from scheduler
func (g *Grabber) GrabFromScheduler() (SchedulerMetrics, error) {
if !g.registeredMaster {
return SchedulerMetrics{}, fmt.Errorf("Master's Kubelet is not registered. Skipping Scheduler's metrics gathering")
if g.kubeScheduler == "" {
return SchedulerMetrics{}, fmt.Errorf("kube-scheduler pod is not registered. Skipping Scheduler's metrics gathering")
}
output, err := g.getMetricsFromPod(g.client, fmt.Sprintf("%v-%v", "kube-scheduler", g.masterName), metav1.NamespaceSystem, ports.InsecureSchedulerPort)
output, err := g.getMetricsFromPod(g.client, g.kubeScheduler, metav1.NamespaceSystem, insecureSchedulerPort)
if err != nil {
return SchedulerMetrics{}, err
}
@ -141,8 +161,8 @@ func (g *Grabber) GrabFromScheduler() (SchedulerMetrics, error) {
// GrabFromClusterAutoscaler returns metrics from cluster autoscaler
func (g *Grabber) GrabFromClusterAutoscaler() (ClusterAutoscalerMetrics, error) {
if !g.registeredMaster && g.externalClient == nil {
return ClusterAutoscalerMetrics{}, fmt.Errorf("Master's Kubelet is not registered. Skipping ClusterAutoscaler's metrics gathering")
if !g.HasControlPlanePods() && g.externalClient == nil {
return ClusterAutoscalerMetrics{}, fmt.Errorf("Did not find control-plane pods. Skipping ClusterAutoscaler's metrics gathering")
}
var client clientset.Interface
var namespace string
@ -162,12 +182,12 @@ func (g *Grabber) GrabFromClusterAutoscaler() (ClusterAutoscalerMetrics, error)
// GrabFromControllerManager returns metrics from controller manager
func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error) {
if !g.registeredMaster {
return ControllerManagerMetrics{}, fmt.Errorf("Master's Kubelet is not registered. Skipping ControllerManager's metrics gathering")
if g.kubeControllerManager == "" {
return ControllerManagerMetrics{}, fmt.Errorf("kube-controller-manager pod is not registered. Skipping ControllerManager's metrics gathering")
}
var err error
podName := fmt.Sprintf("%v-%v", "kube-controller-manager", g.masterName)
podName := g.kubeControllerManager
g.waitForControllerManagerReadyOnce.Do(func() {
if readyErr := e2epod.WaitForPodsReady(g.client, metav1.NamespaceSystem, podName, 0); readyErr != nil {
err = fmt.Errorf("error waiting for controller manager pod to be ready: %w", readyErr)
@ -176,7 +196,7 @@ func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error)
var lastMetricsFetchErr error
if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
_, lastMetricsFetchErr = g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, ports.InsecureKubeControllerManagerPort)
_, lastMetricsFetchErr = g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, insecureKubeControllerManagerPort)
return lastMetricsFetchErr == nil, nil
}); metricsWaitErr != nil {
err = fmt.Errorf("error waiting for controller manager pod to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr)
@ -187,7 +207,7 @@ func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error)
return ControllerManagerMetrics{}, err
}
output, err := g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, ports.InsecureKubeControllerManagerPort)
output, err := g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, insecureKubeControllerManagerPort)
if err != nil {
return ControllerManagerMetrics{}, err
}

View File

@ -10,24 +10,41 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/framework/node",
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/system:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/utils/net:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["wait_test.go"],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
@ -41,17 +58,3 @@ filegroup(
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["wait_test.go"],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
],
)

View File

@ -18,21 +18,30 @@ package node
import (
"context"
"encoding/json"
"fmt"
"net"
"strings"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
clientretry "k8s.io/client-go/util/retry"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/system"
netutil "k8s.io/utils/net"
)
const (
@ -47,6 +56,29 @@ const (
sshPort = "22"
)
var (
// unreachableTaintTemplate is the taint for when a node becomes unreachable.
// Copied from pkg/controller/nodelifecycle to avoid pulling extra dependencies
unreachableTaintTemplate = &v1.Taint{
Key: v1.TaintNodeUnreachable,
Effect: v1.TaintEffectNoExecute,
}
// notReadyTaintTemplate is the taint for when a node is not ready for executing pods.
// Copied from pkg/controller/nodelifecycle to avoid pulling extra dependencies
notReadyTaintTemplate = &v1.Taint{
Key: v1.TaintNodeNotReady,
Effect: v1.TaintEffectNoExecute,
}
// updateTaintBackOff contains the maximum retries and the wait interval between two retries.
updateTaintBackOff = wait.Backoff{
Steps: 5,
Duration: 100 * time.Millisecond,
Jitter: 1.0,
}
)
// PodNode is a pod-node pair indicating which node a given pod is running on
type PodNode struct {
// Pod represents pod name
@ -78,7 +110,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
// For NodeReady we need to check if Taints are gone as well
taints := node.Spec.Taints
for _, taint := range taints {
if taint.MatchTaint(nodectlr.UnreachableTaintTemplate) || taint.MatchTaint(nodectlr.NotReadyTaintTemplate) {
if taint.MatchTaint(unreachableTaintTemplate) || taint.MatchTaint(notReadyTaintTemplate) {
hasNodeControllerTaints = true
break
}
@ -160,7 +192,7 @@ func Filter(nodeList *v1.NodeList, fn func(node v1.Node) bool) {
nodeList.Items = l
}
// TotalRegistered returns number of registered Nodes excluding Master Node.
// TotalRegistered returns number of schedulable Nodes.
func TotalRegistered(c clientset.Interface) (int, error) {
nodes, err := waitListSchedulableNodes(c)
if err != nil {
@ -170,7 +202,7 @@ func TotalRegistered(c clientset.Interface) (int, error) {
return len(nodes.Items), nil
}
// TotalReady returns number of ready Nodes excluding Master Node.
// TotalReady returns number of ready schedulable Nodes.
func TotalReady(c clientset.Interface) (int, error) {
nodes, err := waitListSchedulableNodes(c)
if err != nil {
@ -217,6 +249,37 @@ func GetInternalIP(node *v1.Node) (string, error) {
return host, nil
}
// FirstAddressByTypeAndFamily returns the first address from the list of nodes that matches the given type and family
func FirstAddressByTypeAndFamily(nodelist *v1.NodeList, addrType v1.NodeAddressType, family v1.IPFamily) string {
for _, n := range nodelist.Items {
addresses := GetAddressesByTypeAndFamily(&n, addrType, family)
if len(addresses) > 0 {
return addresses[0]
}
}
return ""
}
// GetAddressesByTypeAndFamily returns a list of addresses of the given addressType for the given node
// and filtered by IPFamily
func GetAddressesByTypeAndFamily(node *v1.Node, addressType v1.NodeAddressType, family v1.IPFamily) (ips []string) {
for _, nodeAddress := range node.Status.Addresses {
if nodeAddress.Type != addressType {
continue
}
if nodeAddress.Address == "" {
continue
}
if family == v1.IPv6Protocol && netutil.IsIPv6String(nodeAddress.Address) {
ips = append(ips, nodeAddress.Address)
}
if family == v1.IPv4Protocol && !netutil.IsIPv6String(nodeAddress.Address) {
ips = append(ips, nodeAddress.Address)
}
}
return
}
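A short sketch (illustrative only, assuming the package is imported as e2enode) of picking node addresses by family with the helpers above:
package example

import (
    v1 "k8s.io/api/core/v1"
    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// firstInternalIPv6 returns the first internal IPv6 address found across the
// node list, or "" if the cluster exposes no internal IPv6 addresses.
func firstInternalIPv6(nodes *v1.NodeList) string {
    return e2enode.FirstAddressByTypeAndFamily(nodes, v1.NodeInternalIP, v1.IPv6Protocol)
}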
// GetAddresses returns a list of addresses of the given addressType for the given node
func GetAddresses(node *v1.Node, addressType v1.NodeAddressType) (ips []string) {
for j := range node.Status.Addresses {
@ -329,24 +392,6 @@ func GetReadyNodesIncludingTainted(c clientset.Interface) (nodes *v1.NodeList, e
return nodes, nil
}
// GetMasterAndWorkerNodes will return a list masters and schedulable worker nodes
func GetMasterAndWorkerNodes(c clientset.Interface) (sets.String, *v1.NodeList, error) {
nodes := &v1.NodeList{}
masters := sets.NewString()
all, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, nil, fmt.Errorf("get nodes error: %s", err)
}
for _, n := range all.Items {
if system.DeprecatedMightBeMasterNode(n.Name) {
masters.Insert(n.Name)
} else if IsNodeSchedulable(&n) && isNodeUntainted(&n) {
nodes.Items = append(nodes.Items, n)
}
}
return masters, nodes, nil
}
// isNodeUntainted tests whether a fake pod can be scheduled on "node", given its current taints.
// TODO: need to discuss whether to return bool and error type
func isNodeUntainted(node *v1.Node) bool {
@ -375,8 +420,6 @@ func isNodeUntaintedWithNonblocking(node *v1.Node, nonblockingTaints string) boo
},
}
nodeInfo := schedulernodeinfo.NewNodeInfo()
// Simple lookup for nonblocking taints based on comma-delimited list.
nonblockingTaintsMap := map[string]struct{}{}
for _, t := range strings.Split(nonblockingTaints, ",") {
@ -385,6 +428,7 @@ func isNodeUntaintedWithNonblocking(node *v1.Node, nonblockingTaints string) boo
}
}
n := node
if len(nonblockingTaintsMap) > 0 {
nodeCopy := node.DeepCopy()
nodeCopy.Spec.Taints = []v1.Taint{}
@ -393,20 +437,36 @@ func isNodeUntaintedWithNonblocking(node *v1.Node, nonblockingTaints string) boo
nodeCopy.Spec.Taints = append(nodeCopy.Spec.Taints, v)
}
}
nodeInfo.SetNode(nodeCopy)
} else {
nodeInfo.SetNode(node)
n = nodeCopy
}
return toleratesTaintsWithNoScheduleNoExecuteEffects(n.Spec.Taints, fakePod.Spec.Tolerations)
}
func toleratesTaintsWithNoScheduleNoExecuteEffects(taints []v1.Taint, tolerations []v1.Toleration) bool {
filteredTaints := []v1.Taint{}
for _, taint := range taints {
if taint.Effect == v1.TaintEffectNoExecute || taint.Effect == v1.TaintEffectNoSchedule {
filteredTaints = append(filteredTaints, taint)
}
}
taints, err := nodeInfo.Taints()
if err != nil {
e2elog.Failf("Can't test predicates for node %s: %v", node.Name, err)
toleratesTaint := func(taint v1.Taint) bool {
for _, toleration := range tolerations {
if toleration.ToleratesTaint(&taint) {
return true
}
}
return false
}
return v1helper.TolerationsTolerateTaintsWithFilter(fakePod.Spec.Tolerations, taints, func(t *v1.Taint) bool {
return t.Effect == v1.TaintEffectNoExecute || t.Effect == v1.TaintEffectNoSchedule
})
for _, taint := range filteredTaints {
if !toleratesTaint(taint) {
return false
}
}
return true
}
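An illustrative, self-contained sketch of the taint/toleration semantics used above (the taint key and values are made up): only NoSchedule/NoExecute taints are considered, and a matching toleration clears them.
package example

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
)

func demoTolerates() {
    taint := v1.Taint{Key: "example.com/dedicated", Value: "ci", Effect: v1.TaintEffectNoSchedule}
    toleration := v1.Toleration{
        Key:      "example.com/dedicated",
        Operator: v1.TolerationOpEqual,
        Value:    "ci",
        Effect:   v1.TaintEffectNoSchedule,
    }
    fmt.Println(toleration.ToleratesTaint(&taint)) // prints: true
}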
// IsNodeSchedulable returns true if:
@ -429,6 +489,14 @@ func IsNodeReady(node *v1.Node) bool {
return nodeReady && networkReady
}
// isNodeSchedulableWithoutTaints returns true if:
// 1) doesn't have "unschedulable" field set
// 2) it also returns true from IsNodeReady
// 3) it also returns true from isNodeUntainted
func isNodeSchedulableWithoutTaints(node *v1.Node) bool {
return IsNodeSchedulable(node) && isNodeUntainted(node)
}
// hasNonblockingTaint returns true if the node contains at least
// one taint with a key matching the regexp.
func hasNonblockingTaint(node *v1.Node, nonblockingTaints string) bool {
@ -471,3 +539,295 @@ func PodNodePairs(c clientset.Interface, ns string) ([]PodNode, error) {
return result, nil
}
// GetClusterZones returns the values of zone label collected from all nodes.
func GetClusterZones(c clientset.Interface) (sets.String, error) {
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err)
}
// collect values of zone label from all nodes
zones := sets.NewString()
for _, node := range nodes.Items {
if zone, found := node.Labels[v1.LabelFailureDomainBetaZone]; found {
zones.Insert(zone)
}
if zone, found := node.Labels[v1.LabelTopologyZone]; found {
zones.Insert(zone)
}
}
return zones, nil
}
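A small sketch (assuming the e2enode alias; the helper name is made up) of how GetClusterZones might be used to detect a multi-zone cluster:
package example

import (
    clientset "k8s.io/client-go/kubernetes"
    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// isMultiZone reports whether the cluster's nodes span more than one zone.
func isMultiZone(c clientset.Interface) (bool, error) {
    zones, err := e2enode.GetClusterZones(c)
    if err != nil {
        return false, err
    }
    return zones.Len() > 1, nil
}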
// CreatePodsPerNodeForSimpleApp creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
nodes, err := GetBoundedReadySchedulableNodes(c, maxCount)
// TODO use wrapper methods in expect.go after removing core e2e dependency on node
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
podLabels := map[string]string{
"app": appName + "-pod",
}
for i, node := range nodes.Items {
e2elog.Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
_, err := c.CoreV1().Pods(namespace).Create(context.TODO(), &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf(appName+"-pod-%v", i),
Labels: podLabels,
},
Spec: podSpec(node),
}, metav1.CreateOptions{})
// TODO use wrapper methods in expect.go after removing core e2e dependency on node
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
}
return podLabels
}
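A hedged example (the pause container, label value, and helper name are illustrative) of driving CreatePodsPerNodeForSimpleApp with a custom pod spec per node:
package example

import (
    v1 "k8s.io/api/core/v1"
    clientset "k8s.io/client-go/kubernetes"
    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
    imageutils "k8s.io/kubernetes/test/utils/image"
)

// createProbePods schedules up to three simple pods, one per ready node, and
// returns the labels that select them.
func createProbePods(c clientset.Interface, ns string) map[string]string {
    return e2enode.CreatePodsPerNodeForSimpleApp(c, ns, "probe", func(n v1.Node) v1.PodSpec {
        return v1.PodSpec{
            NodeName: n.Name,
            Containers: []v1.Container{{
                Name:  "pause",
                Image: imageutils.GetE2EImage(imageutils.Pause),
            }},
        }
    }, 3)
}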
// RemoveTaintOffNode removes the given taint from the given node.
func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint v1.Taint) {
err := removeNodeTaint(c, nodeName, nil, &taint)
// TODO use wrapper methods in expect.go after removing core e2e dependency on node
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
verifyThatTaintIsGone(c, nodeName, &taint)
}
// AddOrUpdateTaintOnNode adds the given taint to the given node or updates taint.
func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint v1.Taint) {
// TODO use wrapper methods in expect.go after removing the dependency on this
// package from the core e2e framework.
err := addOrUpdateTaintOnNode(c, nodeName, &taint)
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
}
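A sketch (the helper name is made up) of pairing AddOrUpdateTaintOnNode with RemoveTaintOffNode so a test cleans up the taint it added:
package example

import (
    v1 "k8s.io/api/core/v1"
    clientset "k8s.io/client-go/kubernetes"
    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// withTemporaryTaint taints a node for the duration of fn and removes the
// taint again afterwards, verifying that it is gone.
func withTemporaryTaint(c clientset.Interface, nodeName string, taint v1.Taint, fn func()) {
    e2enode.AddOrUpdateTaintOnNode(c, nodeName, taint)
    defer e2enode.RemoveTaintOffNode(c, nodeName, taint)
    fn()
}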
// addOrUpdateTaintOnNode adds taints to the node. If a taint was added to the node, it issues API calls
// to update the node; otherwise, no API calls are made. Returns an error if any.
// copied from pkg/controller/controller_utils.go AddOrUpdateTaintOnNode()
func addOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v1.Taint) error {
if len(taints) == 0 {
return nil
}
firstTry := true
return clientretry.RetryOnConflict(updateTaintBackOff, func() error {
var err error
var oldNode *v1.Node
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
if firstTry {
oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"})
firstTry = false
} else {
oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
}
if err != nil {
return err
}
var newNode *v1.Node
oldNodeCopy := oldNode
updated := false
for _, taint := range taints {
curNewNode, ok, err := addOrUpdateTaint(oldNodeCopy, taint)
if err != nil {
return fmt.Errorf("failed to update taint of node")
}
updated = updated || ok
newNode = curNewNode
oldNodeCopy = curNewNode
}
if !updated {
return nil
}
return patchNodeTaints(c, nodeName, oldNode, newNode)
})
}
// addOrUpdateTaint tries to add a taint to the node's taint list. Returns a new copy of the updated Node and true if something was updated,
// false otherwise.
// copied from pkg/util/taints/taints.go AddOrUpdateTaint()
func addOrUpdateTaint(node *v1.Node, taint *v1.Taint) (*v1.Node, bool, error) {
newNode := node.DeepCopy()
nodeTaints := newNode.Spec.Taints
var newTaints []v1.Taint
updated := false
for i := range nodeTaints {
if taint.MatchTaint(&nodeTaints[i]) {
if semantic.DeepEqual(*taint, nodeTaints[i]) {
return newNode, false, nil
}
newTaints = append(newTaints, *taint)
updated = true
continue
}
newTaints = append(newTaints, nodeTaints[i])
}
if !updated {
newTaints = append(newTaints, *taint)
}
newNode.Spec.Taints = newTaints
return newNode, true, nil
}
// semantic can do semantic deep equality checks for core objects.
// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true
// copied from pkg/apis/core/helper/helpers.go Semantic
var semantic = conversion.EqualitiesOrDie(
func(a, b resource.Quantity) bool {
// Ignore formatting, only care that numeric value stayed the same.
// TODO: if we decide it's important, it should be safe to start comparing the format.
//
// Uninitialized quantities are equivalent to 0 quantities.
return a.Cmp(b) == 0
},
func(a, b metav1.MicroTime) bool {
return a.UTC() == b.UTC()
},
func(a, b metav1.Time) bool {
return a.UTC() == b.UTC()
},
func(a, b labels.Selector) bool {
return a.String() == b.String()
},
func(a, b fields.Selector) bool {
return a.String() == b.String()
},
)
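An illustrative note on why the quantity comparator above uses Cmp rather than string or struct equality; the snippet is a standalone sketch, not part of the framework:
package example

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

// "1Gi" and "1073741824" differ in format but carry the same numeric value,
// so only a semantic comparison treats them as equal.
func demoQuantityEquality() {
    a := resource.MustParse("1Gi")
    b := resource.MustParse("1073741824")
    fmt.Println(a.String(), b.String()) // "1Gi" "1073741824": different formats
    fmt.Println(a.Cmp(b) == 0)          // true: numerically equal
}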
// removeNodeTaint is for cleaning up taints temporarily added to node,
// won't fail if target taint doesn't exist or has been removed.
// If passed a node it'll check if there's anything to be done, if taint is not present it won't issue
// any API calls.
func removeNodeTaint(c clientset.Interface, nodeName string, node *v1.Node, taints ...*v1.Taint) error {
if len(taints) == 0 {
return nil
}
// Short circuit for limiting amount of API calls.
if node != nil {
match := false
for _, taint := range taints {
if taintExists(node.Spec.Taints, taint) {
match = true
break
}
}
if !match {
return nil
}
}
firstTry := true
return clientretry.RetryOnConflict(updateTaintBackOff, func() error {
var err error
var oldNode *v1.Node
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
if firstTry {
oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"})
firstTry = false
} else {
oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
}
if err != nil {
return err
}
var newNode *v1.Node
oldNodeCopy := oldNode
updated := false
for _, taint := range taints {
curNewNode, ok, err := removeTaint(oldNodeCopy, taint)
if err != nil {
return fmt.Errorf("failed to remove taint of node")
}
updated = updated || ok
newNode = curNewNode
oldNodeCopy = curNewNode
}
if !updated {
return nil
}
return patchNodeTaints(c, nodeName, oldNode, newNode)
})
}
// patchNodeTaints patches node's taints.
func patchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error {
oldData, err := json.Marshal(oldNode)
if err != nil {
return fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err)
}
newTaints := newNode.Spec.Taints
newNodeClone := oldNode.DeepCopy()
newNodeClone.Spec.Taints = newTaints
newData, err := json.Marshal(newNodeClone)
if err != nil {
return fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNodeClone, nodeName, err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
if err != nil {
return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
}
_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
return err
}
// removeTaint tries to remove a taint from the node's taint list. Returns a new copy of the updated Node and true if something was updated,
// false otherwise.
func removeTaint(node *v1.Node, taint *v1.Taint) (*v1.Node, bool, error) {
newNode := node.DeepCopy()
nodeTaints := newNode.Spec.Taints
if len(nodeTaints) == 0 {
return newNode, false, nil
}
if !taintExists(nodeTaints, taint) {
return newNode, false, nil
}
newTaints, _ := deleteTaint(nodeTaints, taint)
newNode.Spec.Taints = newTaints
return newNode, true, nil
}
// deleteTaint removes all the taints that have the same key and effect to given taintToDelete.
func deleteTaint(taints []v1.Taint, taintToDelete *v1.Taint) ([]v1.Taint, bool) {
var newTaints []v1.Taint
deleted := false
for i := range taints {
if taintToDelete.MatchTaint(&taints[i]) {
deleted = true
continue
}
newTaints = append(newTaints, taints[i])
}
return newTaints, deleted
}
func verifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Taint) {
ginkgo.By("verifying the node doesn't have the taint " + taint.ToString())
nodeUpdated, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
// TODO use wrapper methods in expect.go after removing core e2e dependency on node
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
if taintExists(nodeUpdated.Spec.Taints, taint) {
e2elog.Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
}
}
// taintExists checks if the given taint exists in list of taints. Returns true if exists false otherwise.
func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool {
for _, taint := range taints {
if taint.MatchTaint(taintToFind) {
return true
}
}
return false
}

View File

@ -28,8 +28,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/system"
testutils "k8s.io/kubernetes/test/utils"
)
const sleepTime = 20 * time.Second
@ -41,8 +39,7 @@ var requiredPerNodePods = []*regexp.Regexp{
}
// WaitForReadyNodes waits up to timeout for cluster to have desired size and
// there is no not-ready nodes in it. By cluster size we mean number of Nodes
// excluding Master Node.
// there are no not-ready nodes in it. By cluster size we mean the number of schedulable Nodes.
func WaitForReadyNodes(c clientset.Interface, size int, timeout time.Duration) error {
_, err := CheckReady(c, size, timeout)
return err
@ -59,9 +56,6 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
for _, node := range nodes.Items {
@ -84,7 +78,7 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
}
missingPodsPerNode = make(map[string][]string)
for _, node := range nodes.Items {
if !system.DeprecatedMightBeMasterNode(node.Name) {
if isNodeSchedulableWithoutTaints(&node) {
for _, requiredPod := range requiredPerNodePods {
foundRequired := false
for _, presentPod := range systemPodsPerNode[node.Name] {
@ -150,8 +144,7 @@ func WaitForNodeToBeReady(c clientset.Interface, name string, timeout time.Durat
}
// CheckReady waits up to timeout for cluster to have desired size and
// there is no not-ready nodes in it. By cluster size we mean number of Nodes
// excluding Master Node.
// there are no not-ready nodes in it. By cluster size we mean the number of schedulable Nodes.
func CheckReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.Node, error) {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(sleepTime) {
nodes, err := waitListSchedulableNodes(c)
@ -187,9 +180,6 @@ func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
return true, nil
@ -208,30 +198,25 @@ func checkWaitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error)
return nodes, nil
}
// CheckReadyForTests returns a method usable in polling methods which will check that the nodes are
// in a testable state based on schedulability.
// CheckReadyForTests returns a function which returns 'true' once the number of not-ready nodes drops to at most the allowedNotReadyNodes threshold (i.e. to be used as a global gate for starting the tests).
func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowedNotReadyNodes, largeClusterThreshold int) func() (bool, error) {
attempt := 0
var notSchedulable []*v1.Node
return func() (bool, error) {
attempt++
notSchedulable = nil
var nodesNotReadyYet []v1.Node
opts := metav1.ListOptions{
ResourceVersion: "0",
FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
// exclude cordoned (unschedulable) nodes from our calculation; TODO refactor if node v2 API removes that semantic.
FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
}
nodes, err := c.CoreV1().Nodes().List(context.TODO(), opts)
allNodes, err := c.CoreV1().Nodes().List(context.TODO(), opts)
if err != nil {
e2elog.Logf("Unexpected error listing nodes: %v", err)
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
for i := range nodes.Items {
node := &nodes.Items[i]
if !readyForTests(node, nonblockingTaints) {
notSchedulable = append(notSchedulable, node)
for _, node := range allNodes.Items {
if !readyForTests(&node, nonblockingTaints) {
nodesNotReadyYet = append(nodesNotReadyYet, node)
}
}
// Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
@ -240,25 +225,29 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
// provisioned correctly at startup will never become ready (e.g. when something
// won't install correctly), so we can't expect them to be ready at any point.
//
// However, we only allow non-ready nodes with some specific reasons.
if len(notSchedulable) > 0 {
// We log the *reason* why nodes are not schedulable; typically it's the network not being available.
if len(nodesNotReadyYet) > 0 {
// In large clusters, log them only every 10th pass.
if len(nodes.Items) < largeClusterThreshold || attempt%10 == 0 {
e2elog.Logf("Unschedulable nodes:")
for i := range notSchedulable {
e2elog.Logf("-> %s Ready=%t Network=%t Taints=%v NonblockingTaints:%v",
notSchedulable[i].Name,
IsConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeReady, true),
IsConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeNetworkUnavailable, false),
notSchedulable[i].Spec.Taints,
if len(nodesNotReadyYet) < largeClusterThreshold || attempt%10 == 0 {
e2elog.Logf("Unschedulable nodes= %v, maximum value for starting tests= %v", len(nodesNotReadyYet), allowedNotReadyNodes)
for _, node := range nodesNotReadyYet {
e2elog.Logf(" -> Node %s [[[ Ready=%t, Network(available)=%t, Taints=%v, NonblockingTaints=%v ]]]",
node.Name,
IsConditionSetAsExpectedSilent(&node, v1.NodeReady, true),
IsConditionSetAsExpectedSilent(&node, v1.NodeNetworkUnavailable, false),
node.Spec.Taints,
nonblockingTaints,
)
}
e2elog.Logf("================================")
if len(nodesNotReadyYet) > allowedNotReadyNodes {
ready := len(allNodes.Items) - len(nodesNotReadyYet)
remaining := len(nodesNotReadyYet) - allowedNotReadyNodes
e2elog.Logf("==== node wait: %v out of %v nodes are ready, max notReady allowed %v. Need %v more before starting.", ready, len(allNodes.Items), allowedNotReadyNodes, remaining)
}
}
}
return len(notSchedulable) <= allowedNotReadyNodes, nil
return len(nodesNotReadyYet) <= allowedNotReadyNodes, nil
}
}
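A usage sketch (poll intervals and the helper name are assumptions) showing CheckReadyForTests plugged into a polling loop as the test-start gate:
package example

import (
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
    clientset "k8s.io/client-go/kubernetes"
    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// waitUntilTestable blocks until the number of not-ready schedulable nodes is
// within the allowed budget, or the timeout expires.
func waitUntilTestable(c clientset.Interface, nonblockingTaints string, allowedNotReady, largeClusterThreshold int) error {
    return wait.PollImmediate(30*time.Second, 20*time.Minute,
        e2enode.CheckReadyForTests(c, nonblockingTaints, allowedNotReady, largeClusterThreshold))
}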

View File

@ -20,11 +20,11 @@ import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
"sync"
"time"
"github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
@ -34,7 +34,7 @@ import (
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)
const etcdImage = "3.4.3-0"
const etcdImage = "3.4.13-0"
// EtcdUpgrade upgrades etcd on GCE.
func EtcdUpgrade(targetStorage, targetVersion string) error {
@ -46,20 +46,6 @@ func EtcdUpgrade(targetStorage, targetVersion string) error {
}
}
// MasterUpgrade upgrades master node on GCE/GKE.
func MasterUpgrade(f *Framework, v string) error {
switch TestContext.Provider {
case "gce":
return masterUpgradeGCE(v, false)
case "gke":
return masterUpgradeGKE(f.Namespace.Name, v)
case "kubernetes-anywhere":
return masterUpgradeKubernetesAnywhere(v)
default:
return fmt.Errorf("MasterUpgrade() is not implemented for provider %s", TestContext.Provider)
}
}
func etcdUpgradeGCE(targetStorage, targetVersion string) error {
env := append(
os.Environ(),
@ -67,37 +53,12 @@ func etcdUpgradeGCE(targetStorage, targetVersion string) error {
"STORAGE_BACKEND="+targetStorage,
"TEST_ETCD_IMAGE="+etcdImage)
_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-l", "-M")
_, _, err := RunCmdEnv(env, GCEUpgradeScript(), "-l", "-M")
return err
}
// MasterUpgradeGCEWithKubeProxyDaemonSet upgrades master node on GCE with enabling/disabling the daemon set of kube-proxy.
// TODO(mrhohn): Remove this function when kube-proxy is run as a DaemonSet by default.
func MasterUpgradeGCEWithKubeProxyDaemonSet(v string, enableKubeProxyDaemonSet bool) error {
return masterUpgradeGCE(v, enableKubeProxyDaemonSet)
}
// TODO(mrhohn): Remove 'enableKubeProxyDaemonSet' when kube-proxy is run as a DaemonSet by default.
func masterUpgradeGCE(rawV string, enableKubeProxyDaemonSet bool) error {
env := append(os.Environ(), fmt.Sprintf("KUBE_PROXY_DAEMONSET=%v", enableKubeProxyDaemonSet))
// TODO: Remove these variables when they're no longer needed for downgrades.
if TestContext.EtcdUpgradeVersion != "" && TestContext.EtcdUpgradeStorage != "" {
env = append(env,
"TEST_ETCD_VERSION="+TestContext.EtcdUpgradeVersion,
"STORAGE_BACKEND="+TestContext.EtcdUpgradeStorage,
"TEST_ETCD_IMAGE="+etcdImage)
} else {
// In e2e tests, we skip the confirmation prompt about
// implicit etcd upgrades to simulate the user entering "y".
env = append(env, "TEST_ALLOW_IMPLICIT_ETCD_UPGRADE=true")
}
v := "v" + rawV
_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-M", v)
return err
}
func locationParamGKE() string {
// LocationParamGKE returns the location parameter (region or zone) for the gcloud command.
func LocationParamGKE() string {
if TestContext.CloudConfig.MultiMaster {
// GKE Regional Clusters are being tested.
return fmt.Sprintf("--region=%s", TestContext.CloudConfig.Region)
@ -105,7 +66,8 @@ func locationParamGKE() string {
return fmt.Sprintf("--zone=%s", TestContext.CloudConfig.Zone)
}
func appendContainerCommandGroupIfNeeded(args []string) []string {
// AppendContainerCommandGroupIfNeeded returns container command group parameter if necessary.
func AppendContainerCommandGroupIfNeeded(args []string) []string {
if TestContext.CloudConfig.Region != "" {
// TODO(wojtek-t): Get rid of it once Regional Clusters go to GA.
return append([]string{"beta"}, args...)
@ -113,184 +75,40 @@ func appendContainerCommandGroupIfNeeded(args []string) []string {
return args
}
func masterUpgradeGKE(namespace string, v string) error {
// MasterUpgradeGKE upgrades master node to the specified version on GKE.
func MasterUpgradeGKE(namespace string, v string) error {
Logf("Upgrading master to %q", v)
args := []string{
"container",
"clusters",
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
locationParamGKE(),
LocationParamGKE(),
"upgrade",
TestContext.CloudConfig.Cluster,
"--master",
fmt.Sprintf("--cluster-version=%s", v),
"--quiet",
}
_, _, err := RunCmd("gcloud", appendContainerCommandGroupIfNeeded(args)...)
_, _, err := RunCmd("gcloud", AppendContainerCommandGroupIfNeeded(args)...)
if err != nil {
return err
}
waitForSSHTunnels(namespace)
WaitForSSHTunnels(namespace)
return nil
}
func masterUpgradeKubernetesAnywhere(v string) error {
Logf("Upgrading master to %q", v)
kaPath := TestContext.KubernetesAnywherePath
originalConfigPath := filepath.Join(kaPath, ".config")
backupConfigPath := filepath.Join(kaPath, ".config.bak")
updatedConfigPath := filepath.Join(kaPath, fmt.Sprintf(".config-%s", v))
// modify config with specified k8s version
if _, _, err := RunCmd("sed",
"-i.bak", // writes original to .config.bak
fmt.Sprintf(`s/kubernetes_version=.*$/kubernetes_version=%q/`, v),
originalConfigPath); err != nil {
return err
}
defer func() {
// revert .config.bak to .config
if err := os.Rename(backupConfigPath, originalConfigPath); err != nil {
Logf("Could not rename %s back to %s", backupConfigPath, originalConfigPath)
}
}()
// invoke ka upgrade
if _, _, err := RunCmd("make", "-C", TestContext.KubernetesAnywherePath,
"WAIT_FOR_KUBECONFIG=y", "upgrade-master"); err != nil {
return err
}
// move .config to .config.<version>
if err := os.Rename(originalConfigPath, updatedConfigPath); err != nil {
return err
}
return nil
}
// NodeUpgrade upgrades nodes on GCE/GKE.
func NodeUpgrade(f *Framework, v string, img string) error {
// Perform the upgrade.
var err error
switch TestContext.Provider {
case "gce":
err = nodeUpgradeGCE(v, img, false)
case "gke":
err = nodeUpgradeGKE(f.Namespace.Name, v, img)
default:
err = fmt.Errorf("NodeUpgrade() is not implemented for provider %s", TestContext.Provider)
}
if err != nil {
return err
}
return waitForNodesReadyAfterUpgrade(f)
}
// NodeUpgradeGCEWithKubeProxyDaemonSet upgrades nodes on GCE with enabling/disabling the daemon set of kube-proxy.
// TODO(mrhohn): Remove this function when kube-proxy is run as a DaemonSet by default.
func NodeUpgradeGCEWithKubeProxyDaemonSet(f *Framework, v string, img string, enableKubeProxyDaemonSet bool) error {
// Perform the upgrade.
if err := nodeUpgradeGCE(v, img, enableKubeProxyDaemonSet); err != nil {
return err
}
return waitForNodesReadyAfterUpgrade(f)
}
func waitForNodesReadyAfterUpgrade(f *Framework) error {
// Wait for it to complete and validate nodes are healthy.
//
// TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in
// GKE; the operation shouldn't return until they all are.
numNodes, err := e2enode.TotalRegistered(f.ClientSet)
if err != nil {
return fmt.Errorf("couldn't detect number of nodes")
}
Logf("Waiting up to %v for all %d nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout, numNodes)
if _, err := e2enode.CheckReady(f.ClientSet, numNodes, RestartNodeReadyAgainTimeout); err != nil {
return err
}
return nil
}
// TODO(mrhohn): Remove 'enableKubeProxyDaemonSet' when kube-proxy is run as a DaemonSet by default.
func nodeUpgradeGCE(rawV, img string, enableKubeProxyDaemonSet bool) error {
v := "v" + rawV
env := append(os.Environ(), fmt.Sprintf("KUBE_PROXY_DAEMONSET=%v", enableKubeProxyDaemonSet))
if img != "" {
env = append(env, "KUBE_NODE_OS_DISTRIBUTION="+img)
_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-N", "-o", v)
return err
}
_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-N", v)
return err
}
func nodeUpgradeGKE(namespace string, v string, img string) error {
Logf("Upgrading nodes to version %q and image %q", v, img)
nps, err := nodePoolsGKE()
if err != nil {
return err
}
Logf("Found node pools %v", nps)
for _, np := range nps {
args := []string{
"container",
"clusters",
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
locationParamGKE(),
"upgrade",
TestContext.CloudConfig.Cluster,
fmt.Sprintf("--node-pool=%s", np),
fmt.Sprintf("--cluster-version=%s", v),
"--quiet",
}
if len(img) > 0 {
args = append(args, fmt.Sprintf("--image-type=%s", img))
}
_, _, err = RunCmd("gcloud", appendContainerCommandGroupIfNeeded(args)...)
if err != nil {
return err
}
waitForSSHTunnels(namespace)
}
return nil
}
func nodePoolsGKE() ([]string, error) {
args := []string{
"container",
"node-pools",
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
locationParamGKE(),
"list",
fmt.Sprintf("--cluster=%s", TestContext.CloudConfig.Cluster),
"--format=get(name)",
}
stdout, _, err := RunCmd("gcloud", appendContainerCommandGroupIfNeeded(args)...)
if err != nil {
return nil, err
}
if len(strings.TrimSpace(stdout)) == 0 {
return []string{}, nil
}
return strings.Fields(stdout), nil
}
func gceUpgradeScript() string {
// GCEUpgradeScript returns path of script for upgrading on GCE.
func GCEUpgradeScript() string {
if len(TestContext.GCEUpgradeScript) == 0 {
return path.Join(TestContext.RepoRoot, "cluster/gce/upgrade.sh")
}
return TestContext.GCEUpgradeScript
}
func waitForSSHTunnels(namespace string) {
// WaitForSSHTunnels waits for an SSH tunnel to a busybox pod to be established.
func WaitForSSHTunnels(namespace string) {
Logf("Waiting for SSH tunnels to establish")
RunKubectl(namespace, "run", "ssh-tunnel-test",
"--image=busybox",
@ -345,6 +163,7 @@ func (k *NodeKiller) kill(nodes []v1.Node) {
for _, node := range nodes {
node := node
go func() {
defer ginkgo.GinkgoRecover()
defer wg.Done()
Logf("Stopping docker and kubelet on %q to simulate failure", node.Name)

View File

@ -12,25 +12,21 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/framework/pod",
visibility = ["//visibility:public"],
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/kubectl/pkg/util/podutils:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/resource:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
],
)

View File

@ -19,6 +19,7 @@ package pod
import (
"context"
"fmt"
"runtime"
"time"
v1 "k8s.io/api/core/v1"
@ -33,6 +34,24 @@ var (
BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
)
// Config is a struct containing all arguments for creating a pod.
// SELinux testing requires passing HostIPC and HostPID as boolean arguments.
type Config struct {
NS string
PVCs []*v1.PersistentVolumeClaim
PVCsReadOnly bool
InlineVolumeSources []*v1.VolumeSource
IsPrivileged bool
Command string
HostIPC bool
HostPID bool
SeLinuxLabel *v1.SELinuxOptions
FsGroup *int64
NodeSelection NodeSelection
ImageID int
PodFSGroupChangePolicy *v1.PodFSGroupChangePolicy
}
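A hedged sketch (pod command, field values, and helper name are illustrative) of filling in the new Config struct and handing it to CreateSecPod, shown further down:
package example

import (
    "time"

    v1 "k8s.io/api/core/v1"
    clientset "k8s.io/client-go/kubernetes"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// createPrivilegedPodWithPVC creates a privileged pod mounting the given claim
// and waits up to timeout for it to be Running.
func createPrivilegedPodWithPVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, timeout time.Duration) (*v1.Pod, error) {
    cfg := &e2epod.Config{
        NS:           ns,
        PVCs:         []*v1.PersistentVolumeClaim{pvc},
        IsPrivileged: true,
        Command:      "sleep 3600",
    }
    return e2epod.CreateSecPod(c, cfg, timeout)
}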
// CreateUnschedulablePod with given claims based on node selector
func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
@ -79,27 +98,29 @@ func CreatePod(client clientset.Interface, namespace string, nodeSelector map[st
}
// CreateSecPod creates security pod with given claims
func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, timeout time.Duration) (*v1.Pod, error) {
return CreateSecPodWithNodeSelection(client, namespace, pvclaims, inlineVolumeSources, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup, NodeSelection{}, timeout)
func CreateSecPod(client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) {
return CreateSecPodWithNodeSelection(client, podConfig, timeout)
}
// CreateSecPodWithNodeSelection creates security pod with given claims
func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, node NodeSelection, timeout time.Duration) (*v1.Pod, error) {
pod := MakeSecPod(namespace, pvclaims, inlineVolumeSources, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup)
SetNodeSelection(&pod.Spec, node)
func CreateSecPodWithNodeSelection(client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) {
pod, err := MakeSecPod(podConfig)
if err != nil {
return nil, fmt.Errorf("Unable to create pod: %v", err)
}
pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
pod, err = client.CoreV1().Pods(podConfig.NS).Create(context.TODO(), pod, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
}
// Waiting for pod to be running
err = WaitTimeoutForPodRunningInNamespace(client, pod.Name, namespace, timeout)
err = WaitTimeoutForPodRunningInNamespace(client, pod.Name, podConfig.NS, timeout)
if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
pod, err = client.CoreV1().Pods(podConfig.NS).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
}
@ -153,17 +174,23 @@ func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.Persisten
// MakeSecPod returns a pod definition based on the namespace. The pod references the PVC's
// name. A slice of BASH commands can be supplied as args to be run by the pod.
// SELinux testing requires passing HostIPC and HostPID as boolean arguments.
func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64) *v1.Pod {
if len(command) == 0 {
command = "trap exit TERM; while true; do sleep 1; done"
func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
if podConfig.NS == "" {
return nil, fmt.Errorf("Cannot create pod with empty namespace")
}
podName := "security-context-" + string(uuid.NewUUID())
if fsGroup == nil {
fsGroup = func(i int64) *int64 {
if len(podConfig.Command) == 0 {
podConfig.Command = "trap exit TERM; while true; do sleep 1; done"
}
podName := "pod-" + string(uuid.NewUUID())
if podConfig.FsGroup == nil && runtime.GOOS != "windows" {
podConfig.FsGroup = func(i int64) *int64 {
return &i
}(1000)
}
image := imageutils.BusyBox
if podConfig.ImageID != imageutils.None {
image = podConfig.ImageID
}
podSpec := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@ -171,33 +198,37 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSou
},
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: ns,
Namespace: podConfig.NS,
},
Spec: v1.PodSpec{
HostIPC: hostIPC,
HostPID: hostPID,
HostIPC: podConfig.HostIPC,
HostPID: podConfig.HostPID,
SecurityContext: &v1.PodSecurityContext{
FSGroup: fsGroup,
FSGroup: podConfig.FsGroup,
},
Containers: []v1.Container{
{
Name: "write-pod",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Image: imageutils.GetE2EImage(image),
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
Args: []string{"-c", podConfig.Command},
SecurityContext: &v1.SecurityContext{
Privileged: &isPrivileged,
Privileged: &podConfig.IsPrivileged,
},
},
},
RestartPolicy: v1.RestartPolicyOnFailure,
},
}
if podConfig.PodFSGroupChangePolicy != nil {
podSpec.Spec.SecurityContext.FSGroupChangePolicy = podConfig.PodFSGroupChangePolicy
}
var volumeMounts = make([]v1.VolumeMount, 0)
var volumeDevices = make([]v1.VolumeDevice, 0)
var volumes = make([]v1.Volume, len(pvclaims)+len(inlineVolumeSources))
var volumes = make([]v1.Volume, len(podConfig.PVCs)+len(podConfig.InlineVolumeSources))
volumeIndex := 0
for _, pvclaim := range pvclaims {
for _, pvclaim := range podConfig.PVCs {
volumename := fmt.Sprintf("volume%v", volumeIndex+1)
if pvclaim.Spec.VolumeMode != nil && *pvclaim.Spec.VolumeMode == v1.PersistentVolumeBlock {
volumeDevices = append(volumeDevices, v1.VolumeDevice{Name: volumename, DevicePath: "/mnt/" + volumename})
@ -205,10 +236,10 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSou
volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
}
volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: podConfig.PVCsReadOnly}}}
volumeIndex++
}
for _, src := range inlineVolumeSources {
for _, src := range podConfig.InlineVolumeSources {
volumename := fmt.Sprintf("volume%v", volumeIndex+1)
// In-line volumes can be only filesystem, not block.
volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
@ -219,6 +250,10 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSou
podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
podSpec.Spec.Containers[0].VolumeDevices = volumeDevices
podSpec.Spec.Volumes = volumes
podSpec.Spec.SecurityContext.SELinuxOptions = seLinuxLabel
return podSpec
if runtime.GOOS != "windows" {
podSpec.Spec.SecurityContext.SELinuxOptions = podConfig.SeLinuxLabel
}
SetNodeSelection(&podSpec.Spec, podConfig.NodeSelection)
return podSpec, nil
}

View File

@ -35,10 +35,15 @@ const (
PodDeleteTimeout = 5 * time.Minute
)
// DeletePodOrFail deletes the pod of the specified namespace and name.
// DeletePodOrFail deletes the pod of the specified namespace and name. Resilient to the pod
// not existing.
func DeletePodOrFail(c clientset.Interface, ns, name string) {
ginkgo.By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns))
err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
if err != nil && apierrors.IsNotFound(err) {
return
}
expectNoError(err, "failed to delete pod %s in namespace %s", name, ns)
}
@ -69,3 +74,31 @@ func DeletePodWithWaitByName(c clientset.Interface, podName, podNamespace string
}
return nil
}
// DeletePodWithGracePeriod deletes the passed-in pod. Resilient to the pod not existing.
func DeletePodWithGracePeriod(c clientset.Interface, pod *v1.Pod, grace int64) error {
return DeletePodWithGracePeriodByName(c, pod.GetName(), pod.GetNamespace(), grace)
}
// DeletePodsWithGracePeriod deletes the passed-in pods. Resilient to the pods not existing.
func DeletePodsWithGracePeriod(c clientset.Interface, pods []v1.Pod, grace int64) error {
for _, pod := range pods {
if err := DeletePodWithGracePeriod(c, &pod, grace); err != nil {
return err
}
}
return nil
}
// DeletePodWithGracePeriodByName deletes a pod by name and namespace. Resilient to the pod not existing.
func DeletePodWithGracePeriodByName(c clientset.Interface, podName, podNamespace string, grace int64) error {
e2elog.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
err := c.CoreV1().Pods(podNamespace).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(grace))
if err != nil {
if apierrors.IsNotFound(err) {
return nil // assume pod was already deleted
}
return fmt.Errorf("pod Delete API error: %v", err)
}
return nil
}
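A tiny usage sketch (helper name assumed) of the new grace-period delete helpers; grace 0 forces immediate deletion and an already-missing pod is treated as success:
package example

import (
    clientset "k8s.io/client-go/kubernetes"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// forceDeletePod deletes a pod immediately; a NotFound error is swallowed by
// the helper, so cleanup code can call this unconditionally.
func forceDeletePod(c clientset.Interface, name, ns string) error {
    return e2epod.DeletePodWithGracePeriodByName(c, name, ns, 0)
}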

View File

@ -32,14 +32,17 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/client/conditions"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/klog/v2"
"k8s.io/kubectl/pkg/util/podutils"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
// errPodCompleted is returned by PodRunning or PodContainerRunning to indicate that
// the pod has already reached completed state.
var errPodCompleted = fmt.Errorf("pod ran to completion")
// TODO: Move to its own subpkg.
// expectNoError checks if "err" is set, and if so, fails assertion while logging the error.
func expectNoError(err error, explain ...interface{}) {
@ -155,7 +158,7 @@ func podRunning(c clientset.Interface, podName, namespace string) wait.Condition
case v1.PodRunning:
return true, nil
case v1.PodFailed, v1.PodSucceeded:
return false, conditions.ErrPodCompleted
return false, errPodCompleted
}
return false, nil
}
@ -184,10 +187,10 @@ func podRunningAndReady(c clientset.Interface, podName, namespace string) wait.C
switch pod.Status.Phase {
case v1.PodFailed, v1.PodSucceeded:
e2elog.Logf("The status of Pod %s is %s which is unexpected", podName, pod.Status.Phase)
return false, conditions.ErrPodCompleted
return false, errPodCompleted
case v1.PodRunning:
e2elog.Logf("The status of Pod %s is %s (Ready = %v)", podName, pod.Status.Phase, podutil.IsPodReady(pod))
return podutil.IsPodReady(pod), nil
e2elog.Logf("The status of Pod %s is %s (Ready = %v)", podName, pod.Status.Phase, podutils.IsPodReady(pod))
return podutils.IsPodReady(pod), nil
}
e2elog.Logf("The status of Pod %s is %s, waiting for it to be Running (with Ready = true)", podName, pod.Status.Phase)
return false, nil
@ -295,6 +298,29 @@ func podsRunning(c clientset.Interface, pods *v1.PodList) []error {
return e
}
func podContainerFailed(c clientset.Interface, namespace, podName string, containerIndex int, reason string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case v1.PodPending:
if len(pod.Status.ContainerStatuses) == 0 {
return false, nil
}
containerStatus := pod.Status.ContainerStatuses[containerIndex]
if containerStatus.State.Waiting != nil && containerStatus.State.Waiting.Reason == reason {
return true, nil
}
return false, nil
case v1.PodFailed, v1.PodRunning, v1.PodSucceeded:
return false, fmt.Errorf("pod was expected to be pending, but it is in the state: %s", pod.Status.Phase)
}
return false, nil
}
}
// LogPodStates logs basic info of provided pods for debugging.
func LogPodStates(pods []v1.Pod) {
// Find maximum widths for pod, node, and phase strings for column printing.
@ -376,29 +402,28 @@ func FilterNonRestartablePods(pods []*v1.Pod) []*v1.Pod {
}
func isNotRestartAlwaysMirrorPod(p *v1.Pod) bool {
if !kubetypes.IsMirrorPod(p) {
// Check if the pod is a mirror pod
if _, ok := p.Annotations[v1.MirrorPodAnnotationKey]; !ok {
return false
}
return p.Spec.RestartPolicy != v1.RestartPolicyAlways
}
// NewExecPodSpec returns the pod spec of hostexec pod
func NewExecPodSpec(ns, name string, hostNetwork bool) *v1.Pod {
// NewAgnhostPod returns a pod that uses the agnhost image. The image's binary supports various subcommands
// that behave the same, no matter the underlying OS. If no args are given, it defaults to the pause subcommand.
// For more information about agnhost subcommands, see: https://github.com/kubernetes/kubernetes/tree/master/test/images/agnhost#agnhost
func NewAgnhostPod(ns, podName string, volumes []v1.Volume, mounts []v1.VolumeMount, ports []v1.ContainerPort, args ...string) *v1.Pod {
immediate := int64(0)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Name: podName,
Namespace: ns,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "agnhost",
Image: imageutils.GetE2EImage(imageutils.Agnhost),
ImagePullPolicy: v1.PullIfNotPresent,
},
NewAgnhostContainer("agnhost-container", mounts, ports, args...),
},
HostNetwork: hostNetwork,
Volumes: volumes,
SecurityContext: &v1.PodSecurityContext{},
TerminationGracePeriodSeconds: &immediate,
},
@ -406,25 +431,35 @@ func NewExecPodSpec(ns, name string, hostNetwork bool) *v1.Pod {
return pod
}
// NewAgnhostContainer returns the container Spec of an agnhost container.
func NewAgnhostContainer(containerName string, mounts []v1.VolumeMount, ports []v1.ContainerPort, args ...string) v1.Container {
if len(args) == 0 {
args = []string{"pause"}
}
return v1.Container{
Name: containerName,
Image: imageutils.GetE2EImage(imageutils.Agnhost),
Args: args,
VolumeMounts: mounts,
Ports: ports,
SecurityContext: &v1.SecurityContext{},
ImagePullPolicy: v1.PullIfNotPresent,
}
}
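A sketch (names and port are illustrative) of building an agnhost pod with the refactored helpers; with no extra args the container runs the default pause subcommand:
package example

import (
    v1 "k8s.io/api/core/v1"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// newPausePodWithPort builds (but does not create) an agnhost pod that exposes
// one container port.
func newPausePodWithPort(ns, name string, port int32) *v1.Pod {
    ports := []v1.ContainerPort{{ContainerPort: port}}
    return e2epod.NewAgnhostPod(ns, name, nil, nil, ports)
}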
// NewExecPodSpec returns the pod spec of hostexec pod
func NewExecPodSpec(ns, name string, hostNetwork bool) *v1.Pod {
pod := NewAgnhostPod(ns, name, nil, nil, nil)
pod.Spec.HostNetwork = hostNetwork
return pod
}
// newExecPodSpec returns the pod spec of exec pod
func newExecPodSpec(ns, generateName string) *v1.Pod {
immediate := int64(0)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: generateName,
Namespace: ns,
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &immediate,
Containers: []v1.Container{
{
Name: "agnhost-pause",
Image: imageutils.GetE2EImage(imageutils.Agnhost),
Args: []string{"pause"},
},
},
},
}
// GenerateName is an optional prefix, used by the server,
// to generate a unique name ONLY IF the Name field has not been provided
pod := NewAgnhostPod(ns, "", nil, nil, nil)
pod.ObjectMeta.GenerateName = generateName
return pod
}
@ -441,14 +476,11 @@ func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tw
err = wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) {
retrievedPod, err := client.CoreV1().Pods(execPod.Namespace).Get(context.TODO(), execPod.Name, metav1.GetOptions{})
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
return retrievedPod.Status.Phase == v1.PodRunning, nil
})
expectNoError(err)
expectNoError(err, "failed to create new exec pod in namespace: %s", ns)
return execPod
}
@ -498,25 +530,33 @@ func checkPodsCondition(c clientset.Interface, ns string, podNames []string, tim
// GetPodLogs returns the logs of the specified container (namespace/pod/container).
func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, false)
return getPodLogsInternal(c, namespace, podName, containerName, false, nil)
}
// GetPodLogsSince returns the logs of the specified container (namespace/pod/container) since a timestamp.
func GetPodLogsSince(c clientset.Interface, namespace, podName, containerName string, since time.Time) (string, error) {
sinceTime := metav1.NewTime(since)
return getPodLogsInternal(c, namespace, podName, containerName, false, &sinceTime)
}
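An illustrative sketch of the new sinceTime parameter (helper name and the time window are assumptions): capture a timestamp and fetch only the newer log lines.
package example

import (
    "time"

    clientset "k8s.io/client-go/kubernetes"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// recentLogs returns only the log lines the container emitted during the last
// window, keeping output manageable for long-running pods.
func recentLogs(c clientset.Interface, ns, pod, container string, window time.Duration) (string, error) {
    since := time.Now().Add(-window)
    return e2epod.GetPodLogsSince(c, ns, pod, container, since)
}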
// GetPreviousPodLogs returns the logs of the previous instance of the
// specified container (namespace/pod/container).
func GetPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, true)
return getPodLogsInternal(c, namespace, podName, containerName, true, nil)
}
// utility function for gomega Eventually
func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool) (string, error) {
logs, err := c.CoreV1().RESTClient().Get().
func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool, sinceTime *metav1.Time) (string, error) {
request := c.CoreV1().RESTClient().Get().
Resource("pods").
Namespace(namespace).
Name(podName).SubResource("log").
Param("container", containerName).
Param("previous", strconv.FormatBool(previous)).
Do(context.TODO()).
Raw()
Param("previous", strconv.FormatBool(previous))
if sinceTime != nil {
request.Param("sinceTime", sinceTime.Format(time.RFC3339))
}
logs, err := request.Do(context.TODO()).Raw()
if err != nil {
return "", err
}
@ -543,3 +583,72 @@ func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[strin
}
return filtered, nil
}
// GetPods return the label matched pods in the given ns
func GetPods(c clientset.Interface, ns string, matchLabels map[string]string) ([]v1.Pod, error) {
label := labels.SelectorFromSet(matchLabels)
listOpts := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), listOpts)
if err != nil {
return []v1.Pod{}, err
}
return pods.Items, nil
}
// GetPodSecretUpdateTimeout returns the timeout duration for updating pod secret.
func GetPodSecretUpdateTimeout(c clientset.Interface) time.Duration {
// With SecretManager(ConfigMapManager), we may have to wait up to full sync period +
// TTL of secret(configmap) to elapse before the Kubelet projects the update into the
// volume and the container picks it up.
// So this timeout is based on default Kubelet sync period (1 minute) + maximum TTL for
// secret(configmap) that's based on cluster size + additional time as a fudge factor.
secretTTL, err := getNodeTTLAnnotationValue(c)
if err != nil {
e2elog.Logf("Couldn't get node TTL annotation (using default value of 0): %v", err)
}
podLogTimeout := 240*time.Second + secretTTL
return podLogTimeout
}
func getNodeTTLAnnotationValue(c clientset.Interface) (time.Duration, error) {
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil || len(nodes.Items) == 0 {
return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %v", err)
}
// Since the TTL the kubelet uses is stored in the node object, for timeout
// purposes we take it from the first node (all nodes should have the same value).
node := &nodes.Items[0]
if node.Annotations == nil {
return time.Duration(0), fmt.Errorf("No annotations found on the node")
}
value, ok := node.Annotations[v1.ObjectTTLAnnotationKey]
if !ok {
return time.Duration(0), fmt.Errorf("No TTL annotation found on the node")
}
intValue, err := strconv.Atoi(value)
if err != nil {
return time.Duration(0), fmt.Errorf("Cannot convert TTL annotation from %#v to int", *node)
}
return time.Duration(intValue) * time.Second, nil
}
// FilterActivePods returns pods that have not terminated.
func FilterActivePods(pods []*v1.Pod) []*v1.Pod {
var result []*v1.Pod
for _, p := range pods {
if IsPodActive(p) {
result = append(result, p)
} else {
klog.V(4).Infof("Ignoring inactive pod %v/%v in state %v, deletion time %v",
p.Namespace, p.Name, p.Status.Phase, p.DeletionTimestamp)
}
}
return result
}
// IsPodActive returns true if the pod has neither succeeded nor failed and is not marked for deletion.
func IsPodActive(p *v1.Pod) bool {
return v1.PodSucceeded != p.Status.Phase &&
v1.PodFailed != p.Status.Phase &&
p.DeletionTimestamp == nil
}

View File

@ -31,13 +31,10 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubectl/pkg/util/podutils"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
testutils "k8s.io/kubernetes/test/utils"
)
@ -70,8 +67,11 @@ const (
type podCondition func(pod *v1.Pod) (bool, error)
// errorBadPodsStates creates an error message with basic info about the bad pods, for debugging.
func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration) string {
func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration, err error) string {
errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout)
if err != nil {
errStr += fmt.Sprintf("Last error: %s\n", err)
}
// Print bad pods info only if there are at most 10 bad pods
if len(badPods) > 10 {
return errStr + "There are too many bad pods. Please check log for details."
@ -113,6 +113,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
badPods := []v1.Pod{}
desiredPods := 0
notReady := int32(0)
var lastAPIError error
if wait.PollImmediate(poll, timeout, func() (bool, error) {
// We get the new list of pods, replication controllers, and
@ -120,13 +121,13 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
// online during startup and we want to ensure they are also
// checked.
replicas, replicaOk := int32(0), int32(0)
// Clear API error from the last attempt in case the following calls succeed.
lastAPIError = nil
rcList, err := c.CoreV1().ReplicationControllers(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
e2elog.Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
if testutils.IsRetryableAPIError(err) {
return false, nil
}
lastAPIError = err
return false, err
}
for _, rc := range rcList.Items {
@ -136,10 +137,8 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
rsList, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
lastAPIError = err
e2elog.Logf("Error getting replication sets in namespace %q: %v", ns, err)
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
for _, rs := range rsList.Items {
@ -149,10 +148,8 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
lastAPIError = err
e2elog.Logf("Error getting pods in namespace '%s': %v", ns, err)
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
nOk := int32(0)
@ -196,7 +193,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
return false, nil
}) != nil {
if !ignoreNotReady {
return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout))
return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout, lastAPIError))
}
e2elog.Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods)
}
@ -218,7 +215,7 @@ func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeou
}
// log now so that current pod info is reported before calling `condition()`
e2elog.Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v",
podName, pod.Status.Phase, pod.Status.Reason, podutil.IsPodReady(pod), time.Since(start))
podName, pod.Status.Phase, pod.Status.Reason, podutils.IsPodReady(pod), time.Since(start))
if done, err := condition(pod); done {
if err == nil {
e2elog.Logf("Pod %q satisfied condition %q", podName, desc)
@ -304,7 +301,7 @@ func WaitForMatchPodsCondition(c clientset.Interface, opts metav1.ListOptions, d
return fmt.Errorf("Unexpected error: %v", err)
}
if !done {
conditionNotMatch = append(conditionNotMatch, format.Pod(&pod))
conditionNotMatch = append(conditionNotMatch, fmt.Sprintf("%s_%s(%s)", pod.Name, pod.Namespace, pod.UID))
}
}
if len(conditionNotMatch) <= 0 {
@ -400,9 +397,6 @@ func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labe
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
found := false
@ -428,40 +422,6 @@ func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods
return wait.PollImmediate(poll, podRespondingTimeout, NewProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
}
// WaitForControlledPodsRunning waits up to 10 minutes for pods to become Running.
func WaitForControlledPodsRunning(c clientset.Interface, ns, name string, kind schema.GroupKind) error {
rtObject, err := e2eresource.GetRuntimeObjectForKind(c, kind, ns, name)
if err != nil {
return err
}
selector, err := e2eresource.GetSelectorFromRuntimeObject(rtObject)
if err != nil {
return err
}
replicas, err := e2eresource.GetReplicasFromRuntimeObject(rtObject)
if err != nil {
return err
}
err = testutils.WaitForEnoughPodsWithLabelRunning(c, ns, selector, int(replicas))
if err != nil {
return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", name, err)
}
return nil
}
// WaitForControlledPods waits up to podListTimeout for getting pods of the specified controller name and return them.
func WaitForControlledPods(c clientset.Interface, ns, name string, kind schema.GroupKind) (pods *v1.PodList, err error) {
rtObject, err := e2eresource.GetRuntimeObjectForKind(c, kind, ns, name)
if err != nil {
return nil, err
}
selector, err := e2eresource.GetSelectorFromRuntimeObject(rtObject)
if err != nil {
return nil, err
}
return WaitForPodsWithLabel(c, ns, selector)
}
// WaitForPodsWithLabelScheduled waits for all matching pods to become scheduled and at least one
// matching pod exists. Return the list of matching pods.
func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
@ -487,9 +447,6 @@ func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selecto
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err = c.CoreV1().Pods(ns).List(context.TODO(), options)
if err != nil {
if testutils.IsRetryableAPIError(err) {
continue
}
return
}
if len(pods.Items) > 0 {
@ -511,9 +468,6 @@ func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label la
pods, err = WaitForPodsWithLabel(c, ns, label)
if err != nil {
e2elog.Logf("Failed to list pods: %v", err)
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
current = 0
@ -541,7 +495,7 @@ func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds in
return false, nil
}
for _, pod := range pods.Items {
if !podutil.IsPodAvailable(&pod, int32(minReadySeconds), metav1.Now()) {
if !podutils.IsPodAvailable(&pod, int32(minReadySeconds), metav1.Now()) {
return false, nil
}
}
@ -574,3 +528,10 @@ func WaitForNRestartablePods(ps *testutils.PodStore, expect int, timeout time.Du
}
return podNames, nil
}
// WaitForPodContainerToFail waits for the given Pod container to fail with the given reason, specifically due to
// invalid container configuration. In this case, the container will remain in a waiting state with a specific
// reason set, which should match the given reason.
func WaitForPodContainerToFail(c clientset.Interface, namespace, podName string, containerIndex int, reason string, timeout time.Duration) error {
return wait.PollImmediate(poll, timeout, podContainerFailed(c, namespace, podName, containerIndex, reason))
}
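A usage sketch for the new wait helper; the reason string is the kubelet's CreateContainerConfigError, and the names and timeout are placeholders:

package example

import (
	"time"

	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// expectBadConfig waits for the pod's first container to sit in a waiting state
// with reason CreateContainerConfigError (e.g. a missing ConfigMap key).
func expectBadConfig(c clientset.Interface, ns, podName string) error {
	return e2epod.WaitForPodContainerToFail(c, ns, podName, 0, "CreateContainerConfigError", 2*time.Minute)
}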

View File

@ -31,9 +31,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/sysctl"
"k8s.io/kubectl/pkg/util/podutils"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@ -42,13 +40,27 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
// DefaultPodDeletionTimeout is the default timeout for deleting pod
const DefaultPodDeletionTimeout = 3 * time.Minute
const (
// DefaultPodDeletionTimeout is the default timeout for deleting pod
DefaultPodDeletionTimeout = 3 * time.Minute
// ImageWhiteList is the images used in the current test suite. It should be initialized in test suite and
// the images in the white list should be pre-pulled in the test suite. Currently, this is only used by
// the status of container event, copied from k8s.io/kubernetes/pkg/kubelet/events
killingContainer = "Killing"
// the status of container event, copied from k8s.io/kubernetes/pkg/kubelet/events
failedToCreateContainer = "Failed"
// the status of container event, copied from k8s.io/kubernetes/pkg/kubelet/events
startedContainer = "Started"
// it is copied from k8s.io/kubernetes/pkg/kubelet/sysctl
forbiddenReason = "SysctlForbidden"
)
// ImagePrePullList is the list of images used in the current test suite. It should be initialized in the
// test suite, and the images in the list should be pre-pulled before the suite runs. Currently, this is
// only used by the node e2e test.
var ImageWhiteList sets.String
var ImagePrePullList sets.String
// PodClient is a convenience method for getting a pod client interface in the framework's namespace,
// possibly applying test-suite specific transformations to the pod spec, e.g. for
@ -173,10 +185,10 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
// in the test anyway.
continue
}
// If the image policy is not PullAlways, the image must be in the white list and
// If the image policy is not PullAlways, the image must be in the pre-pull list and
// pre-pulled.
gomega.Expect(ImageWhiteList.Has(c.Image)).To(gomega.BeTrue(), "Image %q is not in the white list, consider adding it to CommonImageWhiteList in test/e2e/common/util.go or NodeImageWhiteList in test/e2e_node/image_list.go", c.Image)
// Do not pull images during the tests because the images in white list should have
gomega.Expect(ImagePrePullList.Has(c.Image)).To(gomega.BeTrue(), "Image %q is not in the pre-pull list, consider adding it to PrePulledImages in test/e2e/common/util.go or NodePrePullImageList in test/e2e_node/image_list.go", c.Image)
// Do not pull images during the tests because the images in pre-pull list should have
// been prepulled.
c.ImagePullPolicy = v1.PullNever
}
@ -197,7 +209,7 @@ func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
return false, nil
}
},
)).To(gomega.Succeed(), "wait for pod %q to success", name)
)).To(gomega.Succeed(), "wait for pod %q to succeed", name)
}
// WaitForFinish waits for pod to finish running, regardless of success or failure.
@ -227,10 +239,10 @@ func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) {
}
for _, e := range evnts.Items {
switch e.Reason {
case events.KillingContainer, events.FailedToCreateContainer, sysctl.ForbiddenReason:
case killingContainer, failedToCreateContainer, forbiddenReason:
ev = &e
return true, nil
case events.StartedContainer:
case startedContainer:
return true, nil
default:
// ignore all other errors
@ -262,5 +274,5 @@ func (c *PodClient) MatchContainerOutput(name string, containerName string, expe
func (c *PodClient) PodIsReady(name string) bool {
pod, err := c.Get(context.TODO(), name, metav1.GetOptions{})
ExpectNoError(err)
return podutil.IsPodReady(pod)
return podutils.IsPodReady(pod)
}

vendor/k8s.io/kubernetes/test/e2e/framework/ports.go generated vendored Normal file
View File

@ -0,0 +1,32 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
// NOTE: constants in this file are copied from pkg/cluster/ports/ports.go
const (
// KubeletPort is the default port for the kubelet server on each host machine.
// May be overridden by a flag at startup.
KubeletPort = 10250
// InsecureKubeControllerManagerPort is the default port for the controller manager status server.
// May be overridden by a flag at startup.
// Deprecated: use the secure KubeControllerManagerPort instead.
InsecureKubeControllerManagerPort = 10252
// KubeControllerManagerPort is the default port for the controller manager status server.
// May be overridden by a flag at startup.
KubeControllerManagerPort = 10257
)

View File

@ -19,6 +19,7 @@ package framework
import (
"context"
"fmt"
"strings"
"sync"
v1 "k8s.io/api/core/v1"
@ -29,16 +30,22 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
"k8s.io/kubernetes/test/e2e/framework/auth"
e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
)
const (
podSecurityPolicyPrivileged = "e2e-test-privileged-psp"
// allowAny is the wildcard used to allow any profile.
allowAny = "*"
// allowedProfilesAnnotationKey specifies the allowed seccomp profiles.
allowedProfilesAnnotationKey = "seccomp.security.alpha.kubernetes.io/allowedProfileNames"
)
var (
@ -52,7 +59,7 @@ func privilegedPSP(name string) *policyv1beta1.PodSecurityPolicy {
return &policyv1beta1.PodSecurityPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{seccomp.AllowedProfilesAnnotationKey: seccomp.AllowAny},
Annotations: map[string]string{allowedProfilesAnnotationKey: allowAny},
},
Spec: policyv1beta1.PodSecurityPolicySpec{
Privileged: true,
@ -87,14 +94,34 @@ func IsPodSecurityPolicyEnabled(kubeClient clientset.Interface) bool {
psps, err := kubeClient.PolicyV1beta1().PodSecurityPolicies().List(context.TODO(), metav1.ListOptions{})
if err != nil {
Logf("Error listing PodSecurityPolicies; assuming PodSecurityPolicy is disabled: %v", err)
isPSPEnabled = false
} else if psps == nil || len(psps.Items) == 0 {
Logf("No PodSecurityPolicies found; assuming PodSecurityPolicy is disabled.")
isPSPEnabled = false
} else {
Logf("Found PodSecurityPolicies; assuming PodSecurityPolicy is enabled.")
isPSPEnabled = true
return
}
if psps == nil || len(psps.Items) == 0 {
Logf("No PodSecurityPolicies found; assuming PodSecurityPolicy is disabled.")
return
}
Logf("Found PodSecurityPolicies; testing pod creation to see if PodSecurityPolicy is enabled")
testPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{GenerateName: "psp-test-pod-"},
Spec: v1.PodSpec{Containers: []v1.Container{{Name: "test", Image: imageutils.GetPauseImageName()}}},
}
dryRunPod, err := kubeClient.CoreV1().Pods("kube-system").Create(context.TODO(), testPod, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
if err != nil {
if strings.Contains(err.Error(), "PodSecurityPolicy") {
Logf("PodSecurityPolicy error creating dryrun pod; assuming PodSecurityPolicy is enabled: %v", err)
isPSPEnabled = true
} else {
Logf("Error creating dryrun pod; assuming PodSecurityPolicy is disabled: %v", err)
}
return
}
pspAnnotation, pspAnnotationExists := dryRunPod.Annotations["kubernetes.io/psp"]
if !pspAnnotationExists {
Logf("No PSP annotation exists on dry run pod; assuming PodSecurityPolicy is disabled")
return
}
Logf("PSP annotation exists on dry run pod: %q; assuming PodSecurityPolicy is enabled", pspAnnotation)
isPSPEnabled = true
})
return isPSPEnabled
}
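A sketch of the typical pairing: probe for PodSecurityPolicy admission with the dry-run check above and only then install the privileged PSP binding for the test namespace (function and variable names are illustrative):

package example

import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// ensurePrivilegedPSP binds the privileged test PSP into the namespace, but only
// when the dry-run probe reports that PodSecurityPolicy admission is enabled.
func ensurePrivilegedPSP(c clientset.Interface, ns string) {
	if framework.IsPodSecurityPolicyEnabled(c) {
		framework.CreatePrivilegedPSPBinding(c, ns)
	}
}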
@ -123,7 +150,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged)
}
if auth.IsRBACEnabled(kubeClient.RbacV1()) {
if e2eauth.IsRBACEnabled(kubeClient.RbacV1()) {
// Create the Role to bind it to the namespace.
_, err = kubeClient.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: podSecurityPolicyPrivileged},
@ -140,10 +167,10 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
}
})
if auth.IsRBACEnabled(kubeClient.RbacV1()) {
if e2eauth.IsRBACEnabled(kubeClient.RbacV1()) {
ginkgo.By(fmt.Sprintf("Binding the %s PodSecurityPolicy to the default service account in %s",
podSecurityPolicyPrivileged, namespace))
err := auth.BindClusterRoleInNamespace(kubeClient.RbacV1(),
err := e2eauth.BindClusterRoleInNamespace(kubeClient.RbacV1(),
podSecurityPolicyPrivileged,
namespace,
rbacv1.Subject{
@ -152,7 +179,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
Name: "default",
})
ExpectNoError(err)
ExpectNoError(auth.WaitForNamedAuthorizationUpdate(kubeClient.AuthorizationV1(),
ExpectNoError(e2eauth.WaitForNamedAuthorizationUpdate(kubeClient.AuthorizationV1(),
serviceaccount.MakeUsername(namespace, "default"), namespace, "use", podSecurityPolicyPrivileged,
schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true))
}

View File

@ -6,8 +6,6 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/framework/pv",
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/storage/v1/util:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
@ -17,6 +15,7 @@ go_library(
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/skipper:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
],
)

View File

@ -19,6 +19,7 @@ package framework
import (
"context"
"fmt"
"k8s.io/kubernetes/test/e2e/storage/utils"
"time"
"github.com/onsi/ginkgo"
@ -29,8 +30,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
)
@ -53,6 +52,19 @@ const (
// VolumeSelectorKey is the key for volume selector.
VolumeSelectorKey = "e2e-pv-pool"
// isDefaultStorageClassAnnotation represents a StorageClass annotation that
// marks a class as the default StorageClass
isDefaultStorageClassAnnotation = "storageclass.kubernetes.io/is-default-class"
// betaIsDefaultStorageClassAnnotation is the beta version of IsDefaultStorageClassAnnotation.
// TODO: remove Beta when no longer used
betaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class"
// volumeGidAnnotationKey is the name of the annotation on the PersistentVolume
// object that specifies a supplemental GID.
// it is copied from k8s.io/kubernetes/pkg/volume/util VolumeGidAnnotationKey
volumeGidAnnotationKey = "pv.beta.kubernetes.io/gid"
)
var (
@ -117,6 +129,8 @@ type PersistentVolumeConfig struct {
// PersistentVolumeClaimConfig is consumed by MakePersistentVolumeClaim() to
// generate a PVC object.
type PersistentVolumeClaimConfig struct {
// Name of the PVC. If set, overrides NamePrefix
Name string
// NamePrefix defaults to "pvc-" if unspecified
NamePrefix string
// ClaimSize must be specified in the Quantity format. Defaults to 2Gi if
@ -570,7 +584,7 @@ func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume
GenerateName: pvConfig.NamePrefix,
Labels: pvConfig.Labels,
Annotations: map[string]string{
util.VolumeGidAnnotationKey: "777",
volumeGidAnnotationKey: "777",
},
},
Spec: v1.PersistentVolumeSpec{
@ -610,6 +624,7 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P
return &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: cfg.Name,
GenerateName: cfg.NamePrefix,
Namespace: ns,
Annotations: cfg.Annotations,
@ -703,12 +718,12 @@ func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.Persist
}
// WaitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
if err != nil {
framework.Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
framework.Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, poll, err)
continue
}
if pv.Status.Phase == phase {
@ -721,24 +736,25 @@ func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.In
}
// WaitForPersistentVolumeClaimPhase waits for a PersistentVolumeClaim to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error {
return WaitForPersistentVolumeClaimsPhase(phase, c, ns, []string{pvcName}, Poll, timeout, true)
func WaitForPersistentVolumeClaimPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, poll, timeout time.Duration) error {
return WaitForPersistentVolumeClaimsPhase(phase, c, ns, []string{pvcName}, poll, timeout, true)
}
// WaitForPersistentVolumeClaimsPhase waits for any (if matchAny is true) or all (if matchAny is false) PersistentVolumeClaims
// to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcNames []string, Poll, timeout time.Duration, matchAny bool) error {
func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcNames []string, poll, timeout time.Duration, matchAny bool) error {
if len(pvcNames) == 0 {
return fmt.Errorf("Incorrect parameter: Need at least one PVC to track. Found 0")
}
framework.Logf("Waiting up to %v for PersistentVolumeClaims %v to have phase %s", timeout, pvcNames, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
phaseFoundInAllClaims := true
for _, pvcName := range pvcNames {
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvcName, metav1.GetOptions{})
if err != nil {
framework.Logf("Failed to get claim %q, retrying in %v. Error: %v", pvcName, Poll, err)
continue
framework.Logf("Failed to get claim %q, retrying in %v. Error: %v", pvcName, poll, err)
phaseFoundInAllClaims = false
break
}
if pvc.Status.Phase == phase {
framework.Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start))
@ -779,7 +795,7 @@ func GetDefaultStorageClassName(c clientset.Interface) (string, error) {
}
var scName string
for _, sc := range list.Items {
if storageutil.IsDefaultAnnotation(sc.ObjectMeta) {
if isDefaultAnnotation(sc.ObjectMeta) {
if len(scName) != 0 {
return "", fmt.Errorf("Multiple default storage classes found: %q and %q", scName, sc.Name)
}
@ -793,6 +809,20 @@ func GetDefaultStorageClassName(c clientset.Interface) (string, error) {
return scName, nil
}
// isDefaultAnnotation returns true if the object carries the default
// StorageClass annotation (GA or beta).
// TODO: remove Beta when no longer needed
func isDefaultAnnotation(obj metav1.ObjectMeta) bool {
if obj.Annotations[isDefaultStorageClassAnnotation] == "true" {
return true
}
if obj.Annotations[betaIsDefaultStorageClassAnnotation] == "true" {
return true
}
return false
}
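A small sketch built on this annotation check: resolve and log the cluster's default StorageClass (the e2epv/framework import aliases are assumptions):

package example

import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
)

// logDefaultSC resolves the default StorageClass (GA or beta annotation) and logs it.
func logDefaultSC(c clientset.Interface) {
	name, err := e2epv.GetDefaultStorageClassName(c)
	if err != nil {
		framework.Logf("no default StorageClass: %v", err)
		return
	}
	framework.Logf("default StorageClass is %q", name)
}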
// SkipIfNoDefaultStorageClass skips tests if no default SC can be found.
func SkipIfNoDefaultStorageClass(c clientset.Interface) {
_, err := GetDefaultStorageClassName(c)
@ -800,3 +830,49 @@ func SkipIfNoDefaultStorageClass(c clientset.Interface) {
e2eskipper.Skipf("error finding default storageClass : %v", err)
}
}
// WaitForPersistentVolumeDeleted waits for a PersistentVolume to get deleted or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
if err == nil {
framework.Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start))
continue
}
if apierrors.IsNotFound(err) {
framework.Logf("PersistentVolume %s was removed", pvName)
return nil
}
framework.Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, poll, err)
}
return fmt.Errorf("PersistentVolume %s still exists within %v", pvName, timeout)
}
// WaitForPVCFinalizer waits for a finalizer to be added to a PVC in a given namespace.
func WaitForPVCFinalizer(ctx context.Context, cs clientset.Interface, name, namespace, finalizer string, poll, timeout time.Duration) error {
var (
err error
pvc *v1.PersistentVolumeClaim
)
framework.Logf("Waiting up to %v for PersistentVolumeClaim %s/%s to contain finalizer %s", timeout, namespace, name, finalizer)
if successful := utils.WaitUntil(poll, timeout, func() bool {
pvc, err = cs.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
framework.Logf("Failed to get PersistentVolumeClaim %s/%s with err: %v. Will retry in %v", name, namespace, err, timeout)
return false
}
for _, f := range pvc.Finalizers {
if f == finalizer {
return true
}
}
return false
}); successful {
return nil
}
if err == nil {
err = fmt.Errorf("finalizer %s not added to pvc %s/%s", finalizer, namespace, name)
}
return err
}
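A usage sketch waiting for the standard pvc-protection finalizer to appear on a claim (the poll/timeout values and names are illustrative):

package example

import (
	"context"
	"time"

	clientset "k8s.io/client-go/kubernetes"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
)

// waitForPVCProtection waits until the kubernetes.io/pvc-protection finalizer
// shows up on the claim; the 2s/1m bounds are arbitrary example values.
func waitForPVCProtection(c clientset.Interface, ns, pvcName string) error {
	return e2epv.WaitForPVCFinalizer(context.TODO(), c, pvcName, ns, "kubernetes.io/pvc-protection", 2*time.Second, time.Minute)
}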

View File

@ -1,33 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["runtimeobj.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/resource",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,130 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
"context"
"fmt"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
clientset "k8s.io/client-go/kubernetes"
)
var (
kindReplicationController = schema.GroupKind{Kind: "ReplicationController"}
kindExtensionsReplicaSet = schema.GroupKind{Group: "extensions", Kind: "ReplicaSet"}
kindAppsReplicaSet = schema.GroupKind{Group: "apps", Kind: "ReplicaSet"}
kindExtensionsDeployment = schema.GroupKind{Group: "extensions", Kind: "Deployment"}
kindAppsDeployment = schema.GroupKind{Group: "apps", Kind: "Deployment"}
kindExtensionsDaemonSet = schema.GroupKind{Group: "extensions", Kind: "DaemonSet"}
kindBatchJob = schema.GroupKind{Group: "batch", Kind: "Job"}
)
// GetRuntimeObjectForKind returns a runtime.Object based on its GroupKind,
// namespace and name.
func GetRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, name string) (runtime.Object, error) {
switch kind {
case kindReplicationController:
return c.CoreV1().ReplicationControllers(ns).Get(context.TODO(), name, metav1.GetOptions{})
case kindExtensionsReplicaSet, kindAppsReplicaSet:
return c.AppsV1().ReplicaSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
case kindExtensionsDeployment, kindAppsDeployment:
return c.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
case kindExtensionsDaemonSet:
return c.AppsV1().DaemonSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
case kindBatchJob:
return c.BatchV1().Jobs(ns).Get(context.TODO(), name, metav1.GetOptions{})
default:
return nil, fmt.Errorf("Unsupported kind when getting runtime object: %v", kind)
}
}
// GetSelectorFromRuntimeObject returns the labels for the given object.
func GetSelectorFromRuntimeObject(obj runtime.Object) (labels.Selector, error) {
switch typed := obj.(type) {
case *v1.ReplicationController:
return labels.SelectorFromSet(typed.Spec.Selector), nil
case *extensionsv1beta1.ReplicaSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *appsv1.ReplicaSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *extensionsv1beta1.Deployment:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *appsv1.Deployment:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *extensionsv1beta1.DaemonSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *appsv1.DaemonSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *batchv1.Job:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
default:
return nil, fmt.Errorf("Unsupported kind when getting selector: %v", obj)
}
}
// GetReplicasFromRuntimeObject returns the number of replicas for the given
// object.
func GetReplicasFromRuntimeObject(obj runtime.Object) (int32, error) {
switch typed := obj.(type) {
case *v1.ReplicationController:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *extensionsv1beta1.ReplicaSet:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *appsv1.ReplicaSet:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *extensionsv1beta1.Deployment:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *appsv1.Deployment:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *extensionsv1beta1.DaemonSet:
return 0, nil
case *appsv1.DaemonSet:
return 0, nil
case *batchv1.Job:
// TODO: currently we use pause pods so that's OK. When we'll want to switch to Pods
// that actually finish we need a better way to do this.
if typed.Spec.Parallelism != nil {
return *typed.Spec.Parallelism, nil
}
return 0, nil
default:
return -1, fmt.Errorf("Unsupported kind when getting number of replicas: %v", obj)
}
}

View File

@ -23,6 +23,7 @@ import (
"encoding/json"
"fmt"
"math"
"regexp"
"sort"
"strconv"
"strings"
@ -32,11 +33,10 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientset "k8s.io/client-go/kubernetes"
kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/system"
kubeletstatsv1alpha1 "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
@ -296,7 +296,7 @@ func getStatsSummary(c clientset.Interface, nodeName string) (*kubeletstatsv1alp
data, err := c.CoreV1().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
Name(fmt.Sprintf("%v:%v", nodeName, KubeletPort)).
Suffix("stats/summary").
Do(ctx).Raw()
@ -371,6 +371,29 @@ const (
MasterAndDNSNodes NodesSet = 2
)
// nodeHasControlPlanePods returns true if the specified node hosts control plane pods
// (kube-scheduler and/or kube-controller-manager).
func nodeHasControlPlanePods(c clientset.Interface, nodeName string) (bool, error) {
regKubeScheduler := regexp.MustCompile("kube-scheduler-.*")
regKubeControllerManager := regexp.MustCompile("kube-controller-manager-.*")
podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), metav1.ListOptions{
FieldSelector: fields.OneTermEqualSelector("spec.nodeName", nodeName).String(),
})
if err != nil {
return false, err
}
if len(podList.Items) < 1 {
Logf("Can't find any pods in namespace %s to grab metrics from", metav1.NamespaceSystem)
}
for _, pod := range podList.Items {
if regKubeScheduler.MatchString(pod.Name) || regKubeControllerManager.MatchString(pod.Name) {
return true, nil
}
}
return false, nil
}
// NewResourceUsageGatherer returns a new ContainerResourceGatherer.
func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*ContainerResourceGatherer, error) {
g := ContainerResourceGatherer{
@ -405,11 +428,23 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
}
dnsNodes := make(map[string]bool)
for _, pod := range pods.Items {
if (options.Nodes == MasterNodes) && !system.DeprecatedMightBeMasterNode(pod.Spec.NodeName) {
continue
if options.Nodes == MasterNodes {
isControlPlane, err := nodeHasControlPlanePods(c, pod.Spec.NodeName)
if err != nil {
return nil, err
}
if !isControlPlane {
continue
}
}
if (options.Nodes == MasterAndDNSNodes) && !system.DeprecatedMightBeMasterNode(pod.Spec.NodeName) && pod.Labels["k8s-app"] != "kube-dns" {
continue
if options.Nodes == MasterAndDNSNodes {
isControlPlane, err := nodeHasControlPlanePods(c, pod.Spec.NodeName)
if err != nil {
return nil, err
}
if !isControlPlane && pod.Labels["k8s-app"] != "kube-dns" {
continue
}
}
for _, container := range pod.Status.InitContainerStatuses {
g.containerIDs = append(g.containerIDs, container.Name)
@ -428,7 +463,11 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
}
for _, node := range nodeList.Items {
if options.Nodes == AllNodes || system.DeprecatedMightBeMasterNode(node.Name) || dnsNodes[node.Name] {
isControlPlane, err := nodeHasControlPlanePods(c, node.Name)
if err != nil {
return nil, err
}
if options.Nodes == AllNodes || isControlPlane || dnsNodes[node.Name] {
g.workerWg.Add(1)
g.workers = append(g.workers, resourceGatherWorker{
c: c,
@ -565,7 +604,7 @@ type kubemarkResourceUsage struct {
}
func getMasterUsageByPrefix(prefix string) (string, error) {
sshResult, err := e2essh.SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), GetMasterHost()+":22", TestContext.Provider)
sshResult, err := e2essh.SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), APIAddress()+":22", TestContext.Provider)
if err != nil {
return "", err
}

View File

@ -6,16 +6,18 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/framework/skipper",
visibility = ["//visibility:public"],
deps = [
"//pkg/features:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/component-base/featuregate:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
],

View File

@ -30,19 +30,21 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
utilversion "k8s.io/apimachinery/pkg/util/version"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/features"
"k8s.io/component-base/featuregate"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)
// TestContext should be used by all tests to access common context data.
var TestContext framework.TestContextType
// localStorageCapacityIsolation is a local copy of the LocalStorageCapacityIsolation feature gate name (from k8s.io/kubernetes/pkg/features), kept here to avoid importing that package.
var localStorageCapacityIsolation featuregate.Feature = "LocalStorageCapacityIsolation"
func skipInternalf(caller int, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
@ -130,8 +132,8 @@ func SkipUnlessAtLeast(value int, minValue int, message string) {
// SkipUnlessLocalEphemeralStorageEnabled skips if the LocalStorageCapacityIsolation is not enabled.
func SkipUnlessLocalEphemeralStorageEnabled() {
if !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
skipInternalf(1, "Only supported when %v feature is enabled", features.LocalStorageCapacityIsolation)
if !utilfeature.DefaultFeatureGate.Enabled(localStorageCapacityIsolation) {
skipInternalf(1, "Only supported when %v feature is enabled", localStorageCapacityIsolation)
}
}
@ -150,35 +152,35 @@ func SkipIfMissingResource(dynamicClient dynamic.Interface, gvr schema.GroupVers
// SkipUnlessNodeCountIsAtLeast skips if the number of nodes is less than the minNodeCount.
func SkipUnlessNodeCountIsAtLeast(minNodeCount int) {
if TestContext.CloudConfig.NumNodes < minNodeCount {
skipInternalf(1, "Requires at least %d nodes (not %d)", minNodeCount, TestContext.CloudConfig.NumNodes)
if framework.TestContext.CloudConfig.NumNodes < minNodeCount {
skipInternalf(1, "Requires at least %d nodes (not %d)", minNodeCount, framework.TestContext.CloudConfig.NumNodes)
}
}
// SkipUnlessNodeCountIsAtMost skips if the number of nodes is greater than the maxNodeCount.
func SkipUnlessNodeCountIsAtMost(maxNodeCount int) {
if TestContext.CloudConfig.NumNodes > maxNodeCount {
skipInternalf(1, "Requires at most %d nodes (not %d)", maxNodeCount, TestContext.CloudConfig.NumNodes)
if framework.TestContext.CloudConfig.NumNodes > maxNodeCount {
skipInternalf(1, "Requires at most %d nodes (not %d)", maxNodeCount, framework.TestContext.CloudConfig.NumNodes)
}
}
// SkipIfProviderIs skips if the provider is included in the unsupportedProviders.
func SkipIfProviderIs(unsupportedProviders ...string) {
if framework.ProviderIs(unsupportedProviders...) {
skipInternalf(1, "Not supported for providers %v (found %s)", unsupportedProviders, TestContext.Provider)
skipInternalf(1, "Not supported for providers %v (found %s)", unsupportedProviders, framework.TestContext.Provider)
}
}
// SkipUnlessProviderIs skips if the provider is not included in the supportedProviders.
func SkipUnlessProviderIs(supportedProviders ...string) {
if !framework.ProviderIs(supportedProviders...) {
skipInternalf(1, "Only supported for providers %v (not %s)", supportedProviders, TestContext.Provider)
skipInternalf(1, "Only supported for providers %v (not %s)", supportedProviders, framework.TestContext.Provider)
}
}
// SkipUnlessMultizone skips if the cluster does not have multizone.
func SkipUnlessMultizone(c clientset.Interface) {
zones, err := framework.GetClusterZones(c)
zones, err := e2enode.GetClusterZones(c)
if err != nil {
skipInternalf(1, "Error listing cluster zones")
}
@ -189,7 +191,7 @@ func SkipUnlessMultizone(c clientset.Interface) {
// SkipIfMultizone skips if the cluster has multizone.
func SkipIfMultizone(c clientset.Interface) {
zones, err := framework.GetClusterZones(c)
zones, err := e2enode.GetClusterZones(c)
if err != nil {
skipInternalf(1, "Error listing cluster zones")
}
@ -201,21 +203,28 @@ func SkipIfMultizone(c clientset.Interface) {
// SkipUnlessMasterOSDistroIs skips if the master OS distro is not included in the supportedMasterOsDistros.
func SkipUnlessMasterOSDistroIs(supportedMasterOsDistros ...string) {
if !framework.MasterOSDistroIs(supportedMasterOsDistros...) {
skipInternalf(1, "Only supported for master OS distro %v (not %s)", supportedMasterOsDistros, TestContext.MasterOSDistro)
skipInternalf(1, "Only supported for master OS distro %v (not %s)", supportedMasterOsDistros, framework.TestContext.MasterOSDistro)
}
}
// SkipUnlessNodeOSDistroIs skips if the node OS distro is not included in the supportedNodeOsDistros.
func SkipUnlessNodeOSDistroIs(supportedNodeOsDistros ...string) {
if !framework.NodeOSDistroIs(supportedNodeOsDistros...) {
skipInternalf(1, "Only supported for node OS distro %v (not %s)", supportedNodeOsDistros, TestContext.NodeOSDistro)
skipInternalf(1, "Only supported for node OS distro %v (not %s)", supportedNodeOsDistros, framework.TestContext.NodeOSDistro)
}
}
// SkipUnlessNodeOSArchIs skips if the node OS distro is not included in the supportedNodeOsArchs.
func SkipUnlessNodeOSArchIs(supportedNodeOsArchs ...string) {
if !framework.NodeOSArchIs(supportedNodeOsArchs...) {
skipInternalf(1, "Only supported for node OS arch %v (not %s)", supportedNodeOsArchs, framework.TestContext.NodeOSArch)
}
}
// SkipIfNodeOSDistroIs skips if the node OS distro is included in the unsupportedNodeOsDistros.
func SkipIfNodeOSDistroIs(unsupportedNodeOsDistros ...string) {
if framework.NodeOSDistroIs(unsupportedNodeOsDistros...) {
skipInternalf(1, "Not supported for node OS distro %v (is %s)", unsupportedNodeOsDistros, TestContext.NodeOSDistro)
skipInternalf(1, "Not supported for node OS distro %v (is %s)", unsupportedNodeOsDistros, framework.TestContext.NodeOSDistro)
}
}
@ -232,8 +241,8 @@ func SkipUnlessServerVersionGTE(v *utilversion.Version, c discovery.ServerVersio
// SkipUnlessSSHKeyPresent skips if no SSH key is found.
func SkipUnlessSSHKeyPresent() {
if _, err := e2essh.GetSigner(TestContext.Provider); err != nil {
skipInternalf(1, "No SSH Key for provider %s: '%v'", TestContext.Provider, err)
if _, err := e2essh.GetSigner(framework.TestContext.Provider); err != nil {
skipInternalf(1, "No SSH Key for provider %s: '%v'", framework.TestContext.Provider, err)
}
}
@ -261,19 +270,41 @@ func SkipIfAppArmorNotSupported() {
// RunIfContainerRuntimeIs runs if the container runtime is included in the runtimes.
func RunIfContainerRuntimeIs(runtimes ...string) {
for _, containerRuntime := range runtimes {
if containerRuntime == TestContext.ContainerRuntime {
if containerRuntime == framework.TestContext.ContainerRuntime {
return
}
}
skipInternalf(1, "Skipped because container runtime %q is not in %s", TestContext.ContainerRuntime, runtimes)
skipInternalf(1, "Skipped because container runtime %q is not in %s", framework.TestContext.ContainerRuntime, runtimes)
}
// RunIfSystemSpecNameIs runs if the system spec name is included in the names.
func RunIfSystemSpecNameIs(names ...string) {
for _, name := range names {
if name == TestContext.SystemSpecName {
if name == framework.TestContext.SystemSpecName {
return
}
}
skipInternalf(1, "Skipped because system spec name %q is not in %v", TestContext.SystemSpecName, names)
skipInternalf(1, "Skipped because system spec name %q is not in %v", framework.TestContext.SystemSpecName, names)
}
// SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem skips the test unless the component runs as pods and the client can delete them.
func SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(componentName string, c clientset.Interface, ns string, labelSet labels.Set) {
// verify the component runs as pods
label := labels.SelectorFromSet(labelSet)
listOpts := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), listOpts)
framework.Logf("SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem: %v, %v", pods, err)
if err != nil {
skipInternalf(1, "Skipped because client failed to get component:%s pod err:%v", componentName, err)
}
if len(pods.Items) == 0 {
skipInternalf(1, "Skipped because component:%s is not running as pod.", componentName)
}
// verify the client can delete a pod (server-side dry run)
pod := pods.Items[0]
if err := c.CoreV1().Pods(ns).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{DryRun: []string{metav1.DryRunAll}}); err != nil {
skipInternalf(1, "Skipped because client failed to delete component:%s pod, err:%v", componentName, err)
}
}
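A sketch of guarding a test with the new skip helper, using the conventional component label for kube-controller-manager (the label set is an assumption, not mandated by this change):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	clientset "k8s.io/client-go/kubernetes"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
)

// skipUnlessKCMIsRestartable skips the calling test unless kube-controller-manager
// runs as pods in kube-system and the client is allowed to delete them.
func skipUnlessKCMIsRestartable(c clientset.Interface) {
	e2eskipper.SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(
		"kube-controller-manager", c, metav1.NamespaceSystem,
		labels.Set{"component": "kube-controller-manager"})
}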

View File

@ -6,14 +6,12 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/framework/ssh",
visibility = ["//visibility:public"],
deps = [
"//pkg/ssh:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/golang.org/x/crypto/ssh:go_default_library",
],

View File

@ -20,26 +20,27 @@ import (
"bytes"
"context"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"time"
"github.com/onsi/gomega"
"golang.org/x/crypto/ssh"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
sshutil "k8s.io/kubernetes/pkg/ssh"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
)
const (
// ssh port
sshPort = "22"
// SSHPort is the TCP port number used for SSH
SSHPort = "22"
// pollNodeInterval is how often to poll for nodes.
pollNodeInterval = 2 * time.Second
@ -54,7 +55,7 @@ const (
func GetSigner(provider string) (ssh.Signer, error) {
// honor a consistent SSH key across all providers
if path := os.Getenv("KUBE_SSH_KEY_PATH"); len(path) > 0 {
return sshutil.MakePrivateKeySignerFromFile(path)
return makePrivateKeySignerFromFile(path)
}
// Select the key itself to use. When implementing more providers here,
@ -93,11 +94,25 @@ func GetSigner(provider string) (ssh.Signer, error) {
keyfile = filepath.Join(keydir, keyfile)
}
return sshutil.MakePrivateKeySignerFromFile(keyfile)
return makePrivateKeySignerFromFile(keyfile)
}
// NodeSSHHosts returns SSH-able host names for all schedulable nodes - this
// excludes master node. If it can't find any external IPs, it falls back to
func makePrivateKeySignerFromFile(key string) (ssh.Signer, error) {
buffer, err := ioutil.ReadFile(key)
if err != nil {
return nil, fmt.Errorf("error reading SSH key %s: '%v'", key, err)
}
signer, err := ssh.ParsePrivateKey(buffer)
if err != nil {
return nil, fmt.Errorf("error parsing SSH key: '%v'", err)
}
return signer, err
}
// NodeSSHHosts returns SSH-able host names for all schedulable nodes.
// If it can't find any external IPs, it falls back to
// looking for internal IPs. If it can't find an internal IP for every node it
// returns an error, though it still returns all hosts that it found in that
// case.
@ -120,7 +135,7 @@ func NodeSSHHosts(c clientset.Interface) ([]string, error) {
sshHosts := make([]string, 0, len(hosts))
for _, h := range hosts {
sshHosts = append(sshHosts, net.JoinHostPort(h, sshPort))
sshHosts = append(sshHosts, net.JoinHostPort(h, SSHPort))
}
return sshHosts, nil
}
@ -139,7 +154,7 @@ type Result struct {
// eg: the name returned by framework.GetMasterHost(). This is also not guaranteed to work across
// cloud providers since it involves ssh.
func NodeExec(nodeName, cmd, provider string) (Result, error) {
return SSH(cmd, net.JoinHostPort(nodeName, sshPort), provider)
return SSH(cmd, net.JoinHostPort(nodeName, SSHPort), provider)
}
// SSH synchronously SSHs to a node running on provider and runs cmd. If there
@ -169,7 +184,7 @@ func SSH(cmd, host, provider string) (Result, error) {
return result, err
}
stdout, stderr, code, err := sshutil.RunSSHCommand(cmd, result.User, host, signer)
stdout, stderr, code, err := runSSHCommand(cmd, result.User, host, signer)
result.Stdout = stdout
result.Stderr = stderr
result.Code = code
@ -177,6 +192,60 @@ func SSH(cmd, host, provider string) (Result, error) {
return result, err
}
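A short usage sketch of the exported SSH helper and SSHPort constant (the command, host, and provider values are placeholders supplied by the caller):

package example

import (
	"net"

	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)

// nodeUptime runs `uptime` on the given node address for the configured provider.
func nodeUptime(host, provider string) (e2essh.Result, error) {
	return e2essh.SSH("uptime", net.JoinHostPort(host, e2essh.SSHPort), provider)
}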
// runSSHCommand returns the stdout, stderr, and exit code from running cmd on
// host as a specific user, along with any SSH-level error.
func runSSHCommand(cmd, user, host string, signer ssh.Signer) (string, string, int, error) {
if user == "" {
user = os.Getenv("USER")
}
// Setup the config, dial the server, and open a session.
config := &ssh.ClientConfig{
User: user,
Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
client, err := ssh.Dial("tcp", host, config)
if err != nil {
err = wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
fmt.Printf("error dialing %s@%s: '%v', retrying\n", user, host, err)
if client, err = ssh.Dial("tcp", host, config); err != nil {
return false, err
}
return true, nil
})
}
if err != nil {
return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: '%v'", user, host, err)
}
defer client.Close()
session, err := client.NewSession()
if err != nil {
return "", "", 0, fmt.Errorf("error creating session to %s@%s: '%v'", user, host, err)
}
defer session.Close()
// Run the command.
code := 0
var bout, berr bytes.Buffer
session.Stdout, session.Stderr = &bout, &berr
if err = session.Run(cmd); err != nil {
// Check whether the command failed to run or didn't complete.
if exiterr, ok := err.(*ssh.ExitError); ok {
// If we got an ExitError and the exit code is nonzero, we'll
// consider the SSH itself successful (just that the command run
// errored on the host).
if code = exiterr.ExitStatus(); code != 0 {
err = nil
}
} else {
// Some other kind of error happened (e.g. an IOError); consider the
// SSH unsuccessful.
err = fmt.Errorf("failed running `%s` on %s@%s: '%v'", cmd, user, host, err)
}
}
return bout.String(), berr.String(), code, err
}
// runSSHCommandViaBastion returns the stdout, stderr, and exit code from running cmd on
// host as specific user, along with any SSH-level error. It uses an SSH proxy to connect
// to bastion, then via that tunnel connects to the remote host. Similar to
@ -260,7 +329,7 @@ func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, er
host := ""
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeExternalIP && a.Address != "" {
host = net.JoinHostPort(a.Address, sshPort)
host = net.JoinHostPort(a.Address, SSHPort)
break
}
}
@ -269,7 +338,7 @@ func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, er
// No external IPs were found, let's try to use internal as plan B
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeInternalIP && a.Address != "" {
host = net.JoinHostPort(a.Address, sshPort)
host = net.JoinHostPort(a.Address, SSHPort)
break
}
}
@ -323,9 +392,6 @@ func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
return true, nil

View File

@ -17,9 +17,12 @@ limitations under the License.
package framework
import (
"crypto/rand"
"encoding/base64"
"flag"
"fmt"
"io/ioutil"
"math"
"os"
"sort"
"strings"
@ -31,12 +34,13 @@ import (
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/klog"
"k8s.io/klog/v2"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
)
const (
defaultHost = "http://127.0.0.1:8080"
defaultHost = "https://127.0.0.1:6443"
// DefaultNumNodes is the number of nodes. If not specified, then number of nodes is auto-detected
DefaultNumNodes = -1
@ -77,12 +81,16 @@ type TestContextType struct {
KubeVolumeDir string
CertDir string
Host string
BearerToken string `datapolicy:"token"`
// TODO: Deprecating this over time... instead just use gobindata_util.go , see #23987.
RepoRoot string
DockershimCheckpointDir string
// ListImages will list off all images that are used then quit
ListImages bool
// ListConformanceTests will list off all conformance tests that are available then quit
ListConformanceTests bool
// Provider identifies the infrastructure provider (gce, gke, aws)
Provider string
@ -113,6 +121,7 @@ type TestContextType struct {
ImageServiceEndpoint string
MasterOSDistro string
NodeOSDistro string
NodeOSArch string
VerifyServiceAccount bool
DeleteNamespace bool
DeleteNamespaceOnFailure bool
@ -150,9 +159,6 @@ type TestContextType struct {
// Node e2e specific test context
NodeTestContextType
// Indicates what path the kubernetes-anywhere is installed on
KubernetesAnywherePath string
// The DNS Domain of the cluster.
ClusterDNSDomain string
@ -173,6 +179,9 @@ type TestContextType struct {
// SpecSummaryOutput is the file to write ginkgo.SpecSummary objects to as tests complete. Useful for debugging and test introspection.
SpecSummaryOutput string
// DockerConfigFile is a file that contains credentials which can be used to pull images from certain private registries, needed for a test.
DockerConfigFile string
}
// NodeKillerConfig describes configuration of NodeKiller -- a utility to
@ -282,7 +291,7 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
flags.BoolVar(&TestContext.DeleteNamespaceOnFailure, "delete-namespace-on-failure", true, "If true, framework will delete test namespace on failure. Used only during test debugging.")
flags.IntVar(&TestContext.AllowedNotReadyNodes, "allowed-not-ready-nodes", 0, "If non-zero, framework will allow for that many non-ready nodes when checking for all ready nodes.")
flags.StringVar(&TestContext.Host, "host", "", fmt.Sprintf("The host, or apiserver, to connect to. Will default to %s if this argument and --kubeconfig are not set", defaultHost))
flags.StringVar(&TestContext.Host, "host", "", fmt.Sprintf("The host, or apiserver, to connect to. Will default to %s if this argument and --kubeconfig are not set.", defaultHost))
flags.StringVar(&TestContext.ReportPrefix, "report-prefix", "", "Optional prefix for JUnit XML reports. Default is empty, which doesn't prepend anything to the default name.")
flags.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.")
flags.Var(cliflag.NewMapStringBool(&TestContext.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
@ -294,14 +303,15 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
flags.BoolVar(&TestContext.DumpSystemdJournal, "dump-systemd-journal", false, "Whether to dump the full systemd journal.")
flags.StringVar(&TestContext.ImageServiceEndpoint, "image-service-endpoint", "", "The image service endpoint of cluster VM instances.")
flags.StringVar(&TestContext.DockershimCheckpointDir, "dockershim-checkpoint-dir", "/var/lib/dockershim/sandbox", "The directory for dockershim to store sandbox checkpoints.")
flags.StringVar(&TestContext.KubernetesAnywherePath, "kubernetes-anywhere-path", "/workspace/k8s.io/kubernetes-anywhere", "Which directory kubernetes-anywhere is installed to.")
flags.StringVar(&TestContext.NonblockingTaints, "non-blocking-taints", `node-role.kubernetes.io/master`, "Nodes with taints in this comma-delimited list will not block the test framework from starting tests.")
flags.BoolVar(&TestContext.ListImages, "list-images", false, "If true, will show list of images used for running tests.")
flags.BoolVar(&TestContext.ListConformanceTests, "list-conformance-tests", false, "If true, will show list of conformance tests.")
flags.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.")
flags.StringVar(&TestContext.ProgressReportURL, "progress-report-url", "", "The URL to POST progress updates to as the suite runs to assist in aiding integrations. If empty, no messages sent.")
flags.StringVar(&TestContext.SpecSummaryOutput, "spec-dump", "", "The file to dump all ginkgo.SpecSummary to after tests run. If empty, no objects are saved/printed.")
flags.StringVar(&TestContext.DockerConfigFile, "docker-config-file", "", "A file that contains credentials which can be used to pull images from certain private registries, needed for a test.")
}
// RegisterClusterFlags registers flags specific to the cluster e2e test suite.
@ -320,6 +330,7 @@ func RegisterClusterFlags(flags *flag.FlagSet) {
flags.StringVar(&TestContext.Prefix, "prefix", "e2e", "A prefix to be added to cloud resources created during testing.")
flags.StringVar(&TestContext.MasterOSDistro, "master-os-distro", "debian", "The OS distribution of cluster master (debian, ubuntu, gci, coreos, or custom).")
flags.StringVar(&TestContext.NodeOSDistro, "node-os-distro", "debian", "The OS distribution of cluster VM instances (debian, ubuntu, gci, coreos, or custom).")
flags.StringVar(&TestContext.NodeOSArch, "node-os-arch", "amd64", "The OS architecture of cluster VM instances (amd64, arm64, or custom).")
flags.StringVar(&TestContext.ClusterDNSDomain, "dns-domain", "cluster.local", "The DNS Domain of the cluster.")
// TODO: Flags per provider? Rename gce-project/gce-zone?
@ -396,6 +407,21 @@ func createKubeConfig(clientCfg *restclient.Config) *clientcmdapi.Config {
return configCmd
}
// GenerateSecureToken returns a string of length tokenLen, consisting
// of random bytes encoded as base64 for use as a Bearer Token during
// communication with an APIServer
func GenerateSecureToken(tokenLen int) (string, error) {
// Number of random bytes needed so the base64 encoding is at least tokenLen characters long.
tokenSize := math.Ceil(float64(tokenLen) * 6 / 8)
rawToken := make([]byte, int(tokenSize))
if _, err := rand.Read(rawToken); err != nil {
return "", err
}
encoded := base64.RawURLEncoding.EncodeToString(rawToken)
token := encoded[:tokenLen]
return token, nil
}
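// Editor's sketch (not part of the upstream diff): a minimal check of the length
// math above. ceil(tokenLen*6/8) random bytes base64-encode to at least tokenLen
// characters, so encoded[:tokenLen] is always in range. The helper name below is
// hypothetical and shown only for illustration.
func exampleGenerateSecureToken() {
    token, err := GenerateSecureToken(16) // 16 chars need ceil(16*6/8) = 12 random bytes
    if err != nil {
        klog.Fatalf("failed to generate token: %v", err)
    }
    fmt.Printf("len=%d token=%q\n", len(token), token) // prints len=16
}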
// AfterReadingAllFlags makes changes to the context after all flags
// have been read.
func AfterReadingAllFlags(t *TestContextType) {
@ -415,6 +441,14 @@ func AfterReadingAllFlags(t *TestContextType) {
t.Host = defaultHost
}
}
if len(t.BearerToken) == 0 {
var err error
t.BearerToken, err = GenerateSecureToken(16)
if err != nil {
klog.Fatalf("Failed to generate bearer token: %v", err)
}
}
// Allow 1% of nodes to be unready (statistically) - relevant for large clusters.
if t.AllowedNotReadyNodes == 0 {
t.AllowedNotReadyNodes = t.CloudConfig.NumNodes / 100

View File

@ -0,0 +1,22 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["testfiles.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/testfiles",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,178 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package testfiles provides a wrapper around various optional ways
// of retrieving additional files needed during a test run:
// - builtin bindata
// - filesystem access
//
// Because it is a self-contained package, it can be used by
// test/e2e/framework and test/e2e/manifest without creating
// a circular dependency.
package testfiles
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"sort"
"strings"
)
var filesources []FileSource
// AddFileSource registers another provider for files that may be
// needed at runtime. Should be called during initialization of a test
// binary.
func AddFileSource(filesource FileSource) {
filesources = append(filesources, filesource)
}
// FileSource implements one way of retrieving test file content. For
// example, one file source could read from the original source code
// file tree, another from bindata compiled into a test executable.
type FileSource interface {
// ReadTestFile retrieves the content of a file that gets maintained
// alongside a test's source code. Files are identified by the
// relative path inside the repository containing the tests, for
// example "cluster/gce/upgrade.sh" inside kubernetes/kubernetes.
//
// When the file is not found, a nil slice is returned. An error is
// returned for all fatal errors.
ReadTestFile(filePath string) ([]byte, error)
// DescribeFiles returns a multi-line description of which
// files are available via this source. It is meant to be
// used as part of the error message when a file cannot be
// found.
DescribeFiles() string
}
// Read tries to retrieve the desired file content from
// one of the registered file sources.
func Read(filePath string) ([]byte, error) {
if len(filesources) == 0 {
return nil, fmt.Errorf("no file sources registered (yet?), cannot retrieve test file %s", filePath)
}
for _, filesource := range filesources {
data, err := filesource.ReadTestFile(filePath)
if err != nil {
return nil, fmt.Errorf("fatal error retrieving test file %s: %s", filePath, err)
}
if data != nil {
return data, nil
}
}
// Here we try to generate an error that points test authors
// or users in the right direction for resolving the problem.
msg := fmt.Sprintf("Test file %q was not found.\n", filePath)
for _, filesource := range filesources {
msg += filesource.DescribeFiles()
msg += "\n"
}
return nil, errors.New(msg)
}
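// Editor's sketch (not part of the upstream diff): typical use from a test binary.
// A source is registered once during suite setup and Read is called later from
// individual tests. The repository root and manifest path below are hypothetical.
func exampleRegisterAndRead() ([]byte, error) {
    AddFileSource(RootFileSource{Root: "../.."})
    return Read("test/e2e/testing-manifests/sample/pod.yaml")
}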
// Exists checks whether a file could be read. Unexpected errors
// are returned to the caller, who should then abort the
// current test.
func Exists(filePath string) (bool, error) {
for _, filesource := range filesources {
data, err := filesource.ReadTestFile(filePath)
if err != nil {
return false, err
}
if data != nil {
return true, nil
}
}
return false, nil
}
// RootFileSource looks for files relative to a root directory.
type RootFileSource struct {
Root string
}
// ReadTestFile looks for the file relative to the configured
// root directory. If the path is already absolute, for example
// in a test that has its own method of determining where
// files are, then the path will be used directly.
func (r RootFileSource) ReadTestFile(filePath string) ([]byte, error) {
var fullPath string
if path.IsAbs(filePath) {
fullPath = filePath
} else {
fullPath = filepath.Join(r.Root, filePath)
}
data, err := ioutil.ReadFile(fullPath)
if os.IsNotExist(err) {
// Not an error (yet), some other provider may have the file.
return nil, nil
}
return data, err
}
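// Editor's sketch (not part of the upstream diff): how the path handling above
// behaves. Relative paths are joined to the configured root, while absolute
// paths are read as-is. Both paths below are hypothetical.
func exampleRootFileSourcePaths() {
    src := RootFileSource{Root: "/repo/kubernetes"}
    _, _ = src.ReadTestFile("cluster/gce/upgrade.sh")   // reads /repo/kubernetes/cluster/gce/upgrade.sh
    _, _ = src.ReadTestFile("/tmp/extra/manifest.yaml") // absolute, read directly
}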
// DescribeFiles explains that it looks for files inside a certain
// root directory.
func (r RootFileSource) DescribeFiles() string {
description := fmt.Sprintf("Test files are expected in %q", r.Root)
if !path.IsAbs(r.Root) {
// The default in test_context.go is the relative path
// ../../, which doesn't really reveal the actual
// location. Therefore we also add the absolute path
// when it can be determined.
abs, err := filepath.Abs(r.Root)
if err == nil {
description += fmt.Sprintf(" = %q", abs)
}
}
description += "."
return description
}
// BindataFileSource handles files stored in a package generated with bindata.
type BindataFileSource struct {
Asset func(string) ([]byte, error)
AssetNames func() []string
}
// ReadTestFile looks for an asset with the given path.
func (b BindataFileSource) ReadTestFile(filePath string) ([]byte, error) {
fileBytes, err := b.Asset(filePath)
if err != nil {
// It would be nice to have a better way to detect
// "not found" errors :-/
if strings.HasSuffix(err.Error(), "not found") {
return nil, nil
}
return nil, err
}
return fileBytes, nil
}
// DescribeFiles describes gobindata and then lists all available files.
func (b BindataFileSource) DescribeFiles() string {
var lines []string
lines = append(lines, "The following files are built into the test executable via gobindata. For questions on maintaining gobindata, contact the sig-testing group.")
assets := b.AssetNames()
sort.Strings(assets)
lines = append(lines, assets...)
description := strings.Join(lines, "\n ")
return description
}
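// Editor's sketch (not part of the upstream diff): wiring up a bindata-backed
// source. The Asset and AssetNames functions are assumed to come from a
// generated bindata package; they are passed in as parameters here to keep the
// example self-contained.
func exampleRegisterBindata(asset func(string) ([]byte, error), assetNames func() []string) {
    AddFileSource(BindataFileSource{
        Asset:      asset,
        AssetNames: assetNames,
    })
}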

File diff suppressed because it is too large