rebase: update kubernetes to v1.20.0

Updated the Kubernetes packages to the latest
release, v1.20.0.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Author:       Madhu Rajanna
Date:         2020-12-17 17:58:29 +05:30
Committed by: mergify[bot]
Parent:       4abe128bd8
Commit:       83559144b1

1624 changed files with 247222 additions and 160270 deletions


@@ -1,310 +1,274 @@
{
"Rules": [
{
"SelectorRegexp": "k8s[.]io/kubernetes/pkg/",
"AllowedPrefixes": [
"k8s.io/kubernetes/pkg/api/legacyscheme",
"k8s.io/kubernetes/pkg/api/service",
"k8s.io/kubernetes/pkg/api/v1/pod",
"k8s.io/kubernetes/pkg/api/v1/resource",
"k8s.io/kubernetes/pkg/api/v1/service",
"k8s.io/kubernetes/pkg/apis/apps",
"k8s.io/kubernetes/pkg/apis/apps/validation",
"k8s.io/kubernetes/pkg/apis/autoscaling",
"k8s.io/kubernetes/pkg/apis/batch",
"k8s.io/kubernetes/pkg/apis/core",
"k8s.io/kubernetes/pkg/apis/core/helper",
"k8s.io/kubernetes/pkg/apis/core/install",
"k8s.io/kubernetes/pkg/apis/core/pods",
"k8s.io/kubernetes/pkg/apis/core/v1",
"k8s.io/kubernetes/pkg/apis/core/v1/helper",
"k8s.io/kubernetes/pkg/apis/core/v1/helper/qos",
"k8s.io/kubernetes/pkg/apis/core/validation",
"k8s.io/kubernetes/pkg/apis/extensions",
"k8s.io/kubernetes/pkg/apis/networking",
"k8s.io/kubernetes/pkg/apis/policy",
"k8s.io/kubernetes/pkg/apis/policy/validation",
"k8s.io/kubernetes/pkg/apis/scheduling",
"k8s.io/kubernetes/pkg/apis/storage/v1/util",
"k8s.io/kubernetes/pkg/capabilities",
"k8s.io/kubernetes/pkg/client/conditions",
"k8s.io/kubernetes/pkg/cloudprovider/providers",
"k8s.io/kubernetes/pkg/controller",
"k8s.io/kubernetes/pkg/controller/deployment/util",
"k8s.io/kubernetes/pkg/controller/nodelifecycle",
"k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler",
"k8s.io/kubernetes/pkg/controller/service",
"k8s.io/kubernetes/pkg/controller/util/node",
"k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util",
"k8s.io/kubernetes/pkg/controller/volume/scheduling",
"k8s.io/kubernetes/pkg/credentialprovider",
"k8s.io/kubernetes/pkg/credentialprovider/aws",
"k8s.io/kubernetes/pkg/credentialprovider/azure",
"k8s.io/kubernetes/pkg/credentialprovider/gcp",
"k8s.io/kubernetes/pkg/credentialprovider/secrets",
"k8s.io/kubernetes/pkg/features",
"k8s.io/kubernetes/pkg/fieldpath",
"k8s.io/kubernetes/pkg/kubectl",
"k8s.io/kubernetes/pkg/kubectl/apps",
"k8s.io/kubernetes/pkg/kubectl/describe",
"k8s.io/kubernetes/pkg/kubectl/describe/versioned",
"k8s.io/kubernetes/pkg/kubectl/scheme",
"k8s.io/kubernetes/pkg/kubectl/util",
"k8s.io/kubernetes/pkg/kubectl/util/certificate",
"k8s.io/kubernetes/pkg/kubectl/util/deployment",
"k8s.io/kubernetes/pkg/kubectl/util/event",
"k8s.io/kubernetes/pkg/kubectl/util/fieldpath",
"k8s.io/kubernetes/pkg/kubectl/util/podutils",
"k8s.io/kubernetes/pkg/kubectl/util/qos",
"k8s.io/kubernetes/pkg/kubectl/util/rbac",
"k8s.io/kubernetes/pkg/kubectl/util/resource",
"k8s.io/kubernetes/pkg/kubectl/util/slice",
"k8s.io/kubernetes/pkg/kubectl/util/storage",
"k8s.io/kubernetes/pkg/kubelet",
"k8s.io/kubernetes/pkg/kubelet/apis",
"k8s.io/kubernetes/pkg/kubelet/apis/config",
"k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1",
"k8s.io/kubernetes/pkg/kubelet/cadvisor",
"k8s.io/kubernetes/pkg/kubelet/certificate",
"k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap",
"k8s.io/kubernetes/pkg/kubelet/checkpoint",
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager",
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum",
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors",
"k8s.io/kubernetes/pkg/kubelet/cloudresource",
"k8s.io/kubernetes/pkg/kubelet/cm",
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager",
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/containermap",
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state",
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology",
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset",
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager",
"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint",
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager",
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask",
"k8s.io/kubernetes/pkg/kubelet/cm/util",
"k8s.io/kubernetes/pkg/kubelet/config",
"k8s.io/kubernetes/pkg/kubelet/configmap",
"k8s.io/kubernetes/pkg/kubelet/container",
"k8s.io/kubernetes/pkg/kubelet/dockershim",
"k8s.io/kubernetes/pkg/kubelet/dockershim/cm",
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker",
"k8s.io/kubernetes/pkg/kubelet/dockershim/metrics",
"k8s.io/kubernetes/pkg/kubelet/dockershim/network",
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni",
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport",
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet",
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics",
"k8s.io/kubernetes/pkg/kubelet/dockershim/remote",
"k8s.io/kubernetes/pkg/kubelet/envvars",
"k8s.io/kubernetes/pkg/kubelet/eviction",
"k8s.io/kubernetes/pkg/kubelet/eviction/api",
"k8s.io/kubernetes/pkg/kubelet/events",
"k8s.io/kubernetes/pkg/kubelet/images",
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig",
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint",
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store",
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles",
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status",
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec",
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files",
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log",
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic",
"k8s.io/kubernetes/pkg/kubelet/kuberuntime",
"k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs",
"k8s.io/kubernetes/pkg/kubelet/leaky",
"k8s.io/kubernetes/pkg/kubelet/lifecycle",
"k8s.io/kubernetes/pkg/kubelet/logs",
"k8s.io/kubernetes/pkg/kubelet/metrics",
"k8s.io/kubernetes/pkg/kubelet/network/dns",
"k8s.io/kubernetes/pkg/kubelet/nodelease",
"k8s.io/kubernetes/pkg/kubelet/nodestatus",
"k8s.io/kubernetes/pkg/kubelet/oom",
"k8s.io/kubernetes/pkg/kubelet/pleg",
"k8s.io/kubernetes/pkg/kubelet/pluginmanager",
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache",
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/metrics",
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor",
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher",
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1",
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2",
"k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler",
"k8s.io/kubernetes/pkg/kubelet/pod",
"k8s.io/kubernetes/pkg/kubelet/preemption",
"k8s.io/kubernetes/pkg/kubelet/prober",
"k8s.io/kubernetes/pkg/kubelet/prober/results",
"k8s.io/kubernetes/pkg/kubelet/qos",
"k8s.io/kubernetes/pkg/kubelet/remote",
"k8s.io/kubernetes/pkg/kubelet/runtimeclass",
"k8s.io/kubernetes/pkg/kubelet/server",
"k8s.io/kubernetes/pkg/kubelet/server/metrics",
"k8s.io/kubernetes/pkg/kubelet/server/portforward",
"k8s.io/kubernetes/pkg/kubelet/server/remotecommand",
"k8s.io/kubernetes/pkg/kubelet/server/stats",
"k8s.io/kubernetes/pkg/kubelet/server/streaming",
"k8s.io/kubernetes/pkg/kubelet/stats",
"k8s.io/kubernetes/pkg/kubelet/stats/pidlimit",
"k8s.io/kubernetes/pkg/kubelet/status",
"k8s.io/kubernetes/pkg/kubelet/secret",
"k8s.io/kubernetes/pkg/kubelet/sysctl",
"k8s.io/kubernetes/pkg/kubelet/types",
"k8s.io/kubernetes/pkg/kubelet/token",
"k8s.io/kubernetes/pkg/kubelet/util",
"k8s.io/kubernetes/pkg/kubelet/util/format",
"k8s.io/kubernetes/pkg/kubelet/util/manager",
"k8s.io/kubernetes/pkg/kubelet/util/store",
"k8s.io/kubernetes/pkg/kubelet/volumemanager",
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache",
"k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics",
"k8s.io/kubernetes/pkg/kubelet/volumemanager/populator",
"k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler",
"k8s.io/kubernetes/pkg/kubemark",
"k8s.io/kubernetes/pkg/master/ports",
"k8s.io/kubernetes/pkg/probe",
"k8s.io/kubernetes/pkg/probe/exec",
"k8s.io/kubernetes/pkg/probe/http",
"k8s.io/kubernetes/pkg/probe/tcp",
"k8s.io/kubernetes/pkg/proxy",
"k8s.io/kubernetes/pkg/proxy/apis",
"k8s.io/kubernetes/pkg/proxy/apis/config",
"k8s.io/kubernetes/pkg/proxy/apis/config/scheme",
"k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1",
"k8s.io/kubernetes/pkg/proxy/apis/config/validation",
"k8s.io/kubernetes/pkg/proxy/config",
"k8s.io/kubernetes/pkg/proxy/healthcheck",
"k8s.io/kubernetes/pkg/proxy/iptables",
"k8s.io/kubernetes/pkg/proxy/ipvs",
"k8s.io/kubernetes/pkg/proxy/metaproxier",
"k8s.io/kubernetes/pkg/proxy/metrics",
"k8s.io/kubernetes/pkg/proxy/userspace",
"k8s.io/kubernetes/pkg/proxy/util",
"k8s.io/kubernetes/pkg/registry/core/service/allocator",
"k8s.io/kubernetes/pkg/registry/core/service/portallocator",
"k8s.io/kubernetes/pkg/scheduler/api",
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper",
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity",
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename",
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports",
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources",
"k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1",
"k8s.io/kubernetes/pkg/scheduler/listers",
"k8s.io/kubernetes/pkg/scheduler/metrics",
"k8s.io/kubernetes/pkg/scheduler/nodeinfo",
"k8s.io/kubernetes/pkg/scheduler/util",
"k8s.io/kubernetes/pkg/scheduler/volumebinder",
"k8s.io/kubernetes/pkg/security/apparmor",
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp",
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl",
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/util",
"k8s.io/kubernetes/pkg/securitycontext",
"k8s.io/kubernetes/pkg/serviceaccount",
"k8s.io/kubernetes/pkg/ssh",
"k8s.io/kubernetes/pkg/util/async",
"k8s.io/kubernetes/pkg/util/bandwidth",
"k8s.io/kubernetes/pkg/util/config",
"k8s.io/kubernetes/pkg/util/configz",
"k8s.io/kubernetes/pkg/util/conntrack",
"k8s.io/kubernetes/pkg/util/ebtables",
"k8s.io/kubernetes/pkg/util/env",
"k8s.io/kubernetes/pkg/util/filesystem",
"k8s.io/kubernetes/pkg/util/flag",
"k8s.io/kubernetes/pkg/util/flock",
"k8s.io/kubernetes/pkg/util/goroutinemap",
"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff",
"k8s.io/kubernetes/pkg/util/hash",
"k8s.io/kubernetes/pkg/util/ipset",
"k8s.io/kubernetes/pkg/util/iptables",
"k8s.io/kubernetes/pkg/util/ipvs",
"k8s.io/kubernetes/pkg/util/labels",
"k8s.io/kubernetes/pkg/util/node",
"k8s.io/kubernetes/pkg/util/oom",
"k8s.io/kubernetes/pkg/util/parsers",
"k8s.io/kubernetes/pkg/util/pod",
"k8s.io/kubernetes/pkg/util/procfs",
"k8s.io/kubernetes/pkg/util/removeall",
"k8s.io/kubernetes/pkg/util/resizefs",
"k8s.io/kubernetes/pkg/util/rlimit",
"k8s.io/kubernetes/pkg/util/selinux",
"k8s.io/kubernetes/pkg/util/slice",
"k8s.io/kubernetes/pkg/util/sysctl",
"k8s.io/kubernetes/pkg/util/system",
"k8s.io/kubernetes/pkg/util/tail",
"k8s.io/kubernetes/pkg/util/taints",
"k8s.io/kubernetes/pkg/volume",
"k8s.io/kubernetes/pkg/volume/util",
"k8s.io/kubernetes/pkg/volume/util/fs",
"k8s.io/kubernetes/pkg/volume/util/fsquota",
"k8s.io/kubernetes/pkg/volume/util/recyclerclient",
"k8s.io/kubernetes/pkg/volume/util/subpath",
"k8s.io/kubernetes/pkg/volume/util/types",
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
],
"ForbiddenPrefixes": []
},
{
"SelectorRegexp": "k8s[.]io/kubernetes/test/",
"AllowedPrefixes": [
"k8s.io/kubernetes/test/e2e/framework",
"k8s.io/kubernetes/test/e2e/framework/auth",
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper",
"k8s.io/kubernetes/test/e2e/framework/kubectl",
"k8s.io/kubernetes/test/e2e/framework/log",
"k8s.io/kubernetes/test/e2e/framework/metrics",
"k8s.io/kubernetes/test/e2e/framework/network",
"k8s.io/kubernetes/test/e2e/framework/node",
"k8s.io/kubernetes/test/e2e/framework/pod",
"k8s.io/kubernetes/test/e2e/framework/rc",
"k8s.io/kubernetes/test/e2e/framework/resource",
"k8s.io/kubernetes/test/e2e/framework/service",
"k8s.io/kubernetes/test/e2e/framework/ssh",
"k8s.io/kubernetes/test/e2e/framework/testfiles",
"k8s.io/kubernetes/test/e2e/manifest",
"k8s.io/kubernetes/test/e2e/perftype",
"k8s.io/kubernetes/test/e2e/storage/utils",
"k8s.io/kubernetes/test/e2e/system",
"k8s.io/kubernetes/test/utils",
"k8s.io/kubernetes/test/utils/image"
],
"ForbiddenPrefixes": []
},
{
"SelectorRegexp": "k8s[.]io/kubernetes/third_party/",
"AllowedPrefixes": [
"k8s.io/kubernetes/third_party/forked/golang/expansion",
"k8s.io/kubernetes/third_party/forked/ipvs"
],
"ForbiddenPrefixes": []
},
{
"SelectorRegexp": "k8s[.]io/utils/",
"AllowedPrefixes": [
"k8s.io/utils/buffer",
"k8s.io/utils/exec",
"k8s.io/utils/inotify",
"k8s.io/utils/integer",
"k8s.io/utils/io",
"k8s.io/utils/keymutex",
"k8s.io/utils/mount",
"k8s.io/utils/net",
"k8s.io/utils/nsenter",
"k8s.io/utils/path",
"k8s.io/utils/pointer",
"k8s.io/utils/strings",
"k8s.io/utils/trace"
],
"ForbiddenPrefixes": []
},
{
"SelectorRegexp": "k8s[.]io/(api/|apimachinery/|apiextensions-apiserver/|apiserver/)",
"AllowedPrefixes": [
""
]
},
{
"SelectorRegexp": "k8s[.]io/client-go/",
"AllowedPrefixes": [
""
]
}
]
}
rules:
- selectorRegexp: k8s[.]io/kubernetes/pkg/
allowedPrefixes:
- k8s.io/kubernetes/pkg/api/legacyscheme
- k8s.io/kubernetes/pkg/api/service
- k8s.io/kubernetes/pkg/api/v1/pod
- k8s.io/kubernetes/pkg/api/v1/resource
- k8s.io/kubernetes/pkg/api/v1/service
- k8s.io/kubernetes/pkg/api/pod
- k8s.io/kubernetes/pkg/apis/apps
- k8s.io/kubernetes/pkg/apis/apps/validation
- k8s.io/kubernetes/pkg/apis/autoscaling
- k8s.io/kubernetes/pkg/apis/batch
- k8s.io/kubernetes/pkg/apis/certificates
- k8s.io/kubernetes/pkg/apis/certificates/v1
- k8s.io/kubernetes/pkg/apis/core
- k8s.io/kubernetes/pkg/apis/core/helper
- k8s.io/kubernetes/pkg/apis/core/install
- k8s.io/kubernetes/pkg/apis/core/pods
- k8s.io/kubernetes/pkg/apis/core/v1
- k8s.io/kubernetes/pkg/apis/core/v1/helper
- k8s.io/kubernetes/pkg/apis/core/v1/helper/qos
- k8s.io/kubernetes/pkg/apis/core/validation
- k8s.io/kubernetes/pkg/apis/extensions
- k8s.io/kubernetes/pkg/apis/networking
- k8s.io/kubernetes/pkg/apis/policy
- k8s.io/kubernetes/pkg/apis/policy/validation
- k8s.io/kubernetes/pkg/apis/scheduling
- k8s.io/kubernetes/pkg/apis/storage/v1/util
- k8s.io/kubernetes/pkg/capabilities
- k8s.io/kubernetes/pkg/client/conditions
- k8s.io/kubernetes/pkg/cloudprovider/providers
- k8s.io/kubernetes/pkg/controller
- k8s.io/kubernetes/pkg/controller/deployment/util
- k8s.io/kubernetes/pkg/controller/nodelifecycle
- k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler
- k8s.io/kubernetes/pkg/controller/service
- k8s.io/kubernetes/pkg/controller/util/node
- k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util
- k8s.io/kubernetes/pkg/controller/volume/scheduling
- k8s.io/kubernetes/pkg/credentialprovider
- k8s.io/kubernetes/pkg/credentialprovider/aws
- k8s.io/kubernetes/pkg/credentialprovider/azure
- k8s.io/kubernetes/pkg/credentialprovider/gcp
- k8s.io/kubernetes/pkg/credentialprovider/secrets
- k8s.io/kubernetes/pkg/features
- k8s.io/kubernetes/pkg/fieldpath
- k8s.io/kubernetes/pkg/kubectl
- k8s.io/kubernetes/pkg/kubectl/apps
- k8s.io/kubernetes/pkg/kubectl/describe
- k8s.io/kubernetes/pkg/kubectl/describe/versioned
- k8s.io/kubernetes/pkg/kubectl/scheme
- k8s.io/kubernetes/pkg/kubectl/util
- k8s.io/kubernetes/pkg/kubectl/util/certificate
- k8s.io/kubernetes/pkg/kubectl/util/deployment
- k8s.io/kubernetes/pkg/kubectl/util/event
- k8s.io/kubernetes/pkg/kubectl/util/fieldpath
- k8s.io/kubernetes/pkg/kubectl/util/podutils
- k8s.io/kubernetes/pkg/kubectl/util/qos
- k8s.io/kubernetes/pkg/kubectl/util/rbac
- k8s.io/kubernetes/pkg/kubectl/util/resource
- k8s.io/kubernetes/pkg/kubectl/util/slice
- k8s.io/kubernetes/pkg/kubectl/util/storage
- k8s.io/kubernetes/pkg/kubelet
- k8s.io/kubernetes/pkg/kubelet/apis
- k8s.io/kubernetes/pkg/kubelet/apis/config
- k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1
- k8s.io/kubernetes/pkg/kubelet/cadvisor
- k8s.io/kubernetes/pkg/kubelet/certificate
- k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap
- k8s.io/kubernetes/pkg/kubelet/checkpoint
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors
- k8s.io/kubernetes/pkg/kubelet/cloudresource
- k8s.io/kubernetes/pkg/kubelet/cm
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/containermap
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology
- k8s.io/kubernetes/pkg/kubelet/cm/cpuset
- k8s.io/kubernetes/pkg/kubelet/cm/devicemanager
- k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint
- k8s.io/kubernetes/pkg/kubelet/cm/topologymanager
- k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask
- k8s.io/kubernetes/pkg/kubelet/cm/util
- k8s.io/kubernetes/pkg/kubelet/config
- k8s.io/kubernetes/pkg/kubelet/configmap
- k8s.io/kubernetes/pkg/kubelet/container
- k8s.io/kubernetes/pkg/kubelet/dockershim
- k8s.io/kubernetes/pkg/kubelet/dockershim/cm
- k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker
- k8s.io/kubernetes/pkg/kubelet/dockershim/metrics
- k8s.io/kubernetes/pkg/kubelet/dockershim/network
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet
- k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics
- k8s.io/kubernetes/pkg/kubelet/dockershim/remote
- k8s.io/kubernetes/pkg/kubelet/envvars
- k8s.io/kubernetes/pkg/kubelet/eviction
- k8s.io/kubernetes/pkg/kubelet/eviction/api
- k8s.io/kubernetes/pkg/kubelet/events
- k8s.io/kubernetes/pkg/kubelet/images
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic
- k8s.io/kubernetes/pkg/kubelet/kuberuntime
- k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs
- k8s.io/kubernetes/pkg/kubelet/leaky
- k8s.io/kubernetes/pkg/kubelet/lifecycle
- k8s.io/kubernetes/pkg/kubelet/logs
- k8s.io/kubernetes/pkg/kubelet/metrics
- k8s.io/kubernetes/pkg/kubelet/network/dns
- k8s.io/kubernetes/pkg/kubelet/nodelease
- k8s.io/kubernetes/pkg/kubelet/nodestatus
- k8s.io/kubernetes/pkg/kubelet/oom
- k8s.io/kubernetes/pkg/kubelet/pleg
- k8s.io/kubernetes/pkg/kubelet/pluginmanager
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/metrics
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler
- k8s.io/kubernetes/pkg/kubelet/pod
- k8s.io/kubernetes/pkg/kubelet/preemption
- k8s.io/kubernetes/pkg/kubelet/prober
- k8s.io/kubernetes/pkg/kubelet/prober/results
- k8s.io/kubernetes/pkg/kubelet/qos
- k8s.io/kubernetes/pkg/kubelet/remote
- k8s.io/kubernetes/pkg/kubelet/runtimeclass
- k8s.io/kubernetes/pkg/kubelet/server
- k8s.io/kubernetes/pkg/kubelet/server/metrics
- k8s.io/kubernetes/pkg/kubelet/server/portforward
- k8s.io/kubernetes/pkg/kubelet/server/remotecommand
- k8s.io/kubernetes/pkg/kubelet/server/stats
- k8s.io/kubernetes/pkg/kubelet/server/streaming
- k8s.io/kubernetes/pkg/kubelet/stats
- k8s.io/kubernetes/pkg/kubelet/stats/pidlimit
- k8s.io/kubernetes/pkg/kubelet/status
- k8s.io/kubernetes/pkg/kubelet/secret
- k8s.io/kubernetes/pkg/kubelet/sysctl
- k8s.io/kubernetes/pkg/kubelet/types
- k8s.io/kubernetes/pkg/kubelet/token
- k8s.io/kubernetes/pkg/kubelet/util
- k8s.io/kubernetes/pkg/kubelet/util/format
- k8s.io/kubernetes/pkg/kubelet/util/manager
- k8s.io/kubernetes/pkg/kubelet/util/store
- k8s.io/kubernetes/pkg/kubelet/volumemanager
- k8s.io/kubernetes/pkg/kubelet/volumemanager/cache
- k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics
- k8s.io/kubernetes/pkg/kubelet/volumemanager/populator
- k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler
- k8s.io/kubernetes/pkg/kubemark
- k8s.io/kubernetes/pkg/cluster/ports
- k8s.io/kubernetes/pkg/probe
- k8s.io/kubernetes/pkg/probe/exec
- k8s.io/kubernetes/pkg/probe/http
- k8s.io/kubernetes/pkg/probe/tcp
- k8s.io/kubernetes/pkg/proxy
- k8s.io/kubernetes/pkg/proxy/apis
- k8s.io/kubernetes/pkg/proxy/apis/config
- k8s.io/kubernetes/pkg/proxy/apis/config/scheme
- k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1
- k8s.io/kubernetes/pkg/proxy/apis/config/validation
- k8s.io/kubernetes/pkg/proxy/config
- k8s.io/kubernetes/pkg/proxy/healthcheck
- k8s.io/kubernetes/pkg/proxy/iptables
- k8s.io/kubernetes/pkg/proxy/ipvs
- k8s.io/kubernetes/pkg/proxy/metaproxier
- k8s.io/kubernetes/pkg/proxy/metrics
- k8s.io/kubernetes/pkg/proxy/userspace
- k8s.io/kubernetes/pkg/proxy/util
- k8s.io/kubernetes/pkg/registry/core/service/allocator
- k8s.io/kubernetes/pkg/registry/core/service/portallocator
- k8s.io/kubernetes/pkg/scheduler/api
- k8s.io/kubernetes/pkg/scheduler/framework
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources
- k8s.io/kubernetes/pkg/scheduler/framework/runtime
- k8s.io/kubernetes/pkg/scheduler/internal/parallelize
- k8s.io/kubernetes/pkg/scheduler/listers
- k8s.io/kubernetes/pkg/scheduler/metrics
- k8s.io/kubernetes/pkg/scheduler/nodeinfo
- k8s.io/kubernetes/pkg/scheduler/util
- k8s.io/kubernetes/pkg/scheduler/volumebinder
- k8s.io/kubernetes/pkg/security/apparmor
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/util
- k8s.io/kubernetes/pkg/securitycontext
- k8s.io/kubernetes/pkg/serviceaccount
- k8s.io/kubernetes/pkg/util/async
- k8s.io/kubernetes/pkg/util/bandwidth
- k8s.io/kubernetes/pkg/util/config
- k8s.io/kubernetes/pkg/util/configz
- k8s.io/kubernetes/pkg/util/conntrack
- k8s.io/kubernetes/pkg/util/env
- k8s.io/kubernetes/pkg/util/filesystem
- k8s.io/kubernetes/pkg/util/flag
- k8s.io/kubernetes/pkg/util/flock
- k8s.io/kubernetes/pkg/util/goroutinemap
- k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff
- k8s.io/kubernetes/pkg/util/hash
- k8s.io/kubernetes/pkg/util/ipset
- k8s.io/kubernetes/pkg/util/iptables
- k8s.io/kubernetes/pkg/util/ipvs
- k8s.io/kubernetes/pkg/util/labels
- k8s.io/kubernetes/pkg/util/node
- k8s.io/kubernetes/pkg/util/oom
- k8s.io/kubernetes/pkg/util/parsers
- k8s.io/kubernetes/pkg/util/pod
- k8s.io/kubernetes/pkg/util/procfs
- k8s.io/kubernetes/pkg/util/removeall
- k8s.io/kubernetes/pkg/util/resizefs
- k8s.io/kubernetes/pkg/util/rlimit
- k8s.io/kubernetes/pkg/util/selinux
- k8s.io/kubernetes/pkg/util/slice
- k8s.io/kubernetes/pkg/util/sysctl
- k8s.io/kubernetes/pkg/util/system
- k8s.io/kubernetes/pkg/util/tail
- k8s.io/kubernetes/pkg/util/taints
- k8s.io/kubernetes/pkg/volume
- k8s.io/kubernetes/pkg/volume/util
- k8s.io/kubernetes/pkg/volume/util/fs
- k8s.io/kubernetes/pkg/volume/util/fsquota
- k8s.io/kubernetes/pkg/volume/util/recyclerclient
- k8s.io/kubernetes/pkg/volume/util/subpath
- k8s.io/kubernetes/pkg/volume/util/types
- k8s.io/kubernetes/pkg/volume/util/volumepathhandler
# TODO: I have no idea why import-boss --include-test-files is yelling about these for k8s.io/kubernetes/test/e2e/framework/providers/kubemark
- k8s.io/kubernetes/pkg/apis/authentication
- k8s.io/kubernetes/pkg/apis/authentication/v1
- k8s.io/kubernetes/pkg/apis/certificates/v1beta1
- k8s.io/kubernetes/pkg/apis/storage/v1
- k8s.io/kubernetes/pkg/scheduler/internal/cache
- selectorRegexp: k8s[.]io/kubernetes/test/
allowedPrefixes:
- k8s.io/kubernetes/test/e2e/framework
- k8s.io/kubernetes/test/e2e/framework/auth
- k8s.io/kubernetes/test/e2e/framework/ginkgowrapper
- k8s.io/kubernetes/test/e2e/framework/kubectl
- k8s.io/kubernetes/test/e2e/framework/log
- k8s.io/kubernetes/test/e2e/framework/metrics
- k8s.io/kubernetes/test/e2e/framework/network
- k8s.io/kubernetes/test/e2e/framework/node
- k8s.io/kubernetes/test/e2e/framework/pod
- k8s.io/kubernetes/test/e2e/framework/rc
- k8s.io/kubernetes/test/e2e/framework/resource
- k8s.io/kubernetes/test/e2e/framework/service
- k8s.io/kubernetes/test/e2e/framework/ssh
- k8s.io/kubernetes/test/e2e/framework/testfiles
- k8s.io/kubernetes/test/e2e/framework/websocket
- k8s.io/kubernetes/test/e2e/manifest
- k8s.io/kubernetes/test/e2e/perftype
- k8s.io/kubernetes/test/e2e/storage/utils
- k8s.io/kubernetes/test/e2e/system
- k8s.io/kubernetes/test/utils
- k8s.io/kubernetes/test/utils/image
# TODO: why is this here?
- selectorRegexp: k8s[.]io/kubernetes/third_party/
allowedPrefixes:
- k8s.io/kubernetes/third_party/forked/golang/expansion


@@ -8,11 +8,11 @@ go_library(
"expect.go",
"flake_reporting_util.go",
"framework.go",
"google_compute.go",
"log.go",
"log_size_monitoring.go",
"nodes_util.go",
"pods.go",
"ports.go",
"provider.go",
"psp.go",
"resource_usage_gatherer.go",
@@ -23,17 +23,7 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/framework",
visibility = ["//visibility:public"],
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/kubelet/apis/config:go_default_library",
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
"//pkg/kubelet/events:go_default_library",
"//pkg/kubelet/sysctl:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/security/podsecuritypolicy/seccomp:go_default_library",
"//pkg/util/taints:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
@@ -43,13 +33,13 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/discovery/cached/memory:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
@@ -59,20 +49,22 @@ go_library(
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/restmapper:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//staging/src/k8s.io/component-base/cli/flag:go_default_library",
"//staging/src/k8s.io/component-base/featuregate:go_default_library",
"//staging/src/k8s.io/kubectl/pkg/util/podutils:go_default_library",
"//staging/src/k8s.io/kubelet/pkg/apis/stats/v1alpha1:go_default_library",
"//test/e2e/framework/auth:go_default_library",
"//test/e2e/framework/ginkgowrapper:go_default_library",
"//test/e2e/framework/kubectl:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/resource:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/system:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
@@ -80,8 +72,7 @@ go_library(
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/onsi/gomega/types:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
"//vendor/golang.org/x/net/websocket:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
@@ -114,6 +105,7 @@ filegroup(
"//test/e2e/framework/config:all-srcs",
"//test/e2e/framework/deployment:all-srcs",
"//test/e2e/framework/endpoints:all-srcs",
"//test/e2e/framework/endpointslice:all-srcs",
"//test/e2e/framework/events:all-srcs",
"//test/e2e/framework/ginkgowrapper:all-srcs",
"//test/e2e/framework/gpu:all-srcs",
@@ -121,13 +113,14 @@ filegroup(
"//test/e2e/framework/job:all-srcs",
"//test/e2e/framework/kubectl:all-srcs",
"//test/e2e/framework/kubelet:all-srcs",
"//test/e2e/framework/kubesystem:all-srcs",
"//test/e2e/framework/log:all-srcs",
"//test/e2e/framework/manifest:all-srcs",
"//test/e2e/framework/metrics:all-srcs",
"//test/e2e/framework/network:all-srcs",
"//test/e2e/framework/node:all-srcs",
"//test/e2e/framework/perf:all-srcs",
"//test/e2e/framework/pod:all-srcs",
"//test/e2e/framework/podlogs:all-srcs",
"//test/e2e/framework/providers/aws:all-srcs",
"//test/e2e/framework/providers/azure:all-srcs",
"//test/e2e/framework/providers/gce:all-srcs",
@@ -146,6 +139,7 @@ filegroup(
"//test/e2e/framework/testfiles:all-srcs",
"//test/e2e/framework/timer:all-srcs",
"//test/e2e/framework/volume:all-srcs",
"//test/e2e/framework/websocket:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],


@@ -16,22 +16,30 @@ limitations under the License.
package framework
import "sync"
import (
"sync"
)
// CleanupActionHandle is an integer pointer type for handling a cleanup action
type CleanupActionHandle *int
type cleanupFuncHandle struct {
actionHandle CleanupActionHandle
actionHook func()
}
var cleanupActionsLock sync.Mutex
var cleanupActions = map[CleanupActionHandle]func(){}
var cleanupHookList = []cleanupFuncHandle{}
// AddCleanupAction installs a function that will be called in the event of the
// whole test being terminated. This allows arbitrary pieces of the overall
// test to hook into SynchronizedAfterSuite().
// The hooks are called in last-in-first-out order.
func AddCleanupAction(fn func()) CleanupActionHandle {
p := CleanupActionHandle(new(int))
cleanupActionsLock.Lock()
defer cleanupActionsLock.Unlock()
cleanupActions[p] = fn
c := cleanupFuncHandle{actionHandle: p, actionHook: fn}
cleanupHookList = append([]cleanupFuncHandle{c}, cleanupHookList...)
return p
}
@@ -40,7 +48,12 @@ func AddCleanupAction(fn func()) CleanupActionHandle {
func RemoveCleanupAction(p CleanupActionHandle) {
cleanupActionsLock.Lock()
defer cleanupActionsLock.Unlock()
delete(cleanupActions, p)
for i, item := range cleanupHookList {
if item.actionHandle == p {
cleanupHookList = append(cleanupHookList[:i], cleanupHookList[i+1:]...)
break
}
}
}
// RunCleanupActions runs all functions installed by AddCleanupAction. It does
@@ -51,8 +64,8 @@ func RunCleanupActions() {
func() {
cleanupActionsLock.Lock()
defer cleanupActionsLock.Unlock()
for _, fn := range cleanupActions {
list = append(list, fn)
for _, p := range cleanupHookList {
list = append(list, p.actionHook)
}
}()
// Run unlocked.
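The switch from a map to an ordered hook list fixes a real nondeterminism: Go iterates maps in random order, while cleanupHookList is prepended on every AddCleanupAction call and therefore runs hooks last-in-first-out. A minimal sketch of the resulting behavior, assuming the framework package above is importable from your test module:

package main

import (
	"fmt"

	"k8s.io/kubernetes/test/e2e/framework"
)

func main() {
	// Registered first, so under LIFO ordering it runs last.
	framework.AddCleanupAction(func() { fmt.Println("teardown: delete namespace") })
	// Registered second, so it runs first.
	framework.AddCleanupAction(func() { fmt.Println("teardown: delete pod") })

	// Prints "teardown: delete pod", then "teardown: delete namespace":
	// teardown happens in the reverse of setup order.
	framework.RunCleanupActions()
}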


@@ -43,14 +43,16 @@ type ExecOptions struct {
CaptureStderr bool
// If false, whitespace in std{err,out} will be removed.
PreserveWhitespace bool
Quiet bool
}
// ExecWithOptions executes a command in the specified container,
// returning stdout, stderr, and error. `options` allows
// additional parameters to be passed.
func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error) {
Logf("ExecWithOptions %+v", options)
if !options.Quiet {
Logf("ExecWithOptions %+v", options)
}
config, err := LoadConfig()
ExpectNoError(err, "failed to load restclient config")
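The new Quiet field lets callers suppress the per-call "ExecWithOptions %+v" log line, which keeps test output readable when a command is executed in a tight loop. A hypothetical call site; the pod and container names are made up, and the remaining ExecOptions fields are assumed from the rest of this file:

stdout, stderr, err := f.ExecWithOptions(framework.ExecOptions{
	Command:       []string{"cat", "/etc/resolv.conf"},
	Namespace:     f.Namespace.Name,
	PodName:       "busybox-0", // hypothetical
	ContainerName: "busybox",   // hypothetical
	CaptureStdout: true,
	CaptureStderr: true,
	Quiet:         true, // skip the ExecWithOptions log line added above
})
framework.ExpectNoError(err)
framework.Logf("stdout=%q stderr=%q", stdout, stderr)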


@@ -38,7 +38,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery"
cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
@@ -48,15 +47,12 @@ import (
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
scaleclient "k8s.io/client-go/scale"
testutils "k8s.io/kubernetes/test/utils"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
const (
@@ -477,6 +473,38 @@ func (f *Framework) AfterEach() {
}
}
// DeleteNamespace can be used to delete a namespace. Additionally it dumps
// namespace information first, so it can serve as an alternative to the
// framework deleting the namespace itself at the end of the test.
func (f *Framework) DeleteNamespace(name string) {
defer func() {
err := f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), name, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
Logf("error deleting namespace %s: %v", name, err)
return
}
err = WaitForNamespacesDeleted(f.ClientSet, []string{name}, DefaultNamespaceDeletionTimeout)
if err != nil {
Logf("error deleting namespace %s: %v", name, err)
return
}
// remove deleted namespace from namespacesToDelete map
for i, ns := range f.namespacesToDelete {
if ns == nil {
continue
}
if ns.Name == name {
f.namespacesToDelete = append(f.namespacesToDelete[:i], f.namespacesToDelete[i+1:]...)
}
}
}()
// if current test failed then we should dump namespace information
if !f.SkipNamespaceCreation && ginkgo.CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
DumpAllNamespaceInfo(f.ClientSet, name)
}
}
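A hedged usage sketch: a test that creates an extra namespace via CreateNamespace (shown next in this hunk) can now reclaim it mid-test instead of waiting for AfterEach, and a failed test still gets the namespace dump first:

ns, err := f.CreateNamespace("extra", nil)
framework.ExpectNoError(err)

// ... exercise objects inside ns ...

// Dumps namespace info if the test already failed, deletes the namespace,
// waits for the deletion to complete, and removes it from f.namespacesToDelete.
f.DeleteNamespace(ns.Name)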
// CreateNamespace creates a namespace for e2e testing.
func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (*v1.Namespace, error) {
createTestingNS := TestContext.CreateTestingNS
@@ -513,38 +541,6 @@ func (f *Framework) AddNamespacesToDelete(namespaces ...*v1.Namespace) {
}
}
// WaitForPodTerminated waits for the pod to be terminated with the given reason.
func (f *Framework) WaitForPodTerminated(podName, reason string) error {
return e2epod.WaitForPodTerminatedInNamespace(f.ClientSet, podName, reason, f.Namespace.Name)
}
// WaitForPodNotFound waits for the pod to be completely terminated (not "Get-able").
func (f *Framework) WaitForPodNotFound(podName string, timeout time.Duration) error {
return e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, podName, f.Namespace.Name, timeout)
}
// WaitForPodRunning waits for the pod to run in the namespace.
func (f *Framework) WaitForPodRunning(podName string) error {
return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
}
// WaitForPodReady waits for the pod to flip to ready in the namespace.
func (f *Framework) WaitForPodReady(podName string) error {
return e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podName, f.Namespace.Name, PodStartTimeout)
}
// WaitForPodRunningSlow waits for the pod to run in the namespace.
// It has a longer timeout than WaitForPodRunning (util.slowPodStartTimeout).
func (f *Framework) WaitForPodRunningSlow(podName string) error {
return e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, podName, f.Namespace.Name)
}
// WaitForPodNoLongerRunning waits for the pod to no longer be running in the namespace, for either
// success or failure.
func (f *Framework) WaitForPodNoLongerRunning(podName string) error {
return e2epod.WaitForPodNoLongerRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
}
// ClientConfig an externally accessible method for reading the kube client config.
func (f *Framework) ClientConfig() *rest.Config {
ret := rest.CopyConfig(f.clientConfig)
@@ -568,83 +564,13 @@ func (f *Framework) TestContainerOutputRegexp(scenarioName string, pod *v1.Pod,
f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, gomega.MatchRegexp)
}
// CreateServiceForSimpleAppWithPods is a convenience wrapper to create a service and its matching pods all at once.
func (f *Framework) CreateServiceForSimpleAppWithPods(contPort int, svcPort int, appName string, podSpec func(n v1.Node) v1.PodSpec, count int, block bool) (*v1.Service, error) {
var err error
theService := f.CreateServiceForSimpleApp(contPort, svcPort, appName)
f.CreatePodsPerNodeForSimpleApp(appName, podSpec, count)
if block {
err = testutils.WaitForPodsWithLabelRunning(f.ClientSet, f.Namespace.Name, labels.SelectorFromSet(labels.Set(theService.Spec.Selector)))
}
return theService, err
}
// CreateServiceForSimpleApp returns a service that selects/exposes pods (pass -1 for the ports if no exposure is needed) with an app label.
func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName string) *v1.Service {
if appName == "" {
panic(fmt.Sprintf("no app name provided"))
}
serviceSelector := map[string]string{
"app": appName + "-pod",
}
// For convenience, the caller-supplied ports are optional.
portsFunc := func() []v1.ServicePort {
if contPort < 1 || svcPort < 1 {
return nil
}
return []v1.ServicePort{{
Protocol: v1.ProtocolTCP,
Port: int32(svcPort),
TargetPort: intstr.FromInt(contPort),
}}
}
Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "service-for-" + appName,
Labels: map[string]string{
"app": appName + "-service",
},
},
Spec: v1.ServiceSpec{
Ports: portsFunc(),
Selector: serviceSelector,
},
}, metav1.CreateOptions{})
ExpectNoError(err)
return service
}
// CreatePodsPerNodeForSimpleApp creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
nodes, err := e2enode.GetBoundedReadySchedulableNodes(f.ClientSet, maxCount)
ExpectNoError(err)
podLabels := map[string]string{
"app": appName + "-pod",
}
for i, node := range nodes.Items {
Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf(appName+"-pod-%v", i),
Labels: podLabels,
},
Spec: podSpec(node),
}, metav1.CreateOptions{})
ExpectNoError(err)
}
return podLabels
}
// KubeUser is a struct for managing kubernetes user info.
type KubeUser struct {
Name string `yaml:"name"`
User struct {
Username string `yaml:"username"`
Password string `yaml:"password"`
Token string `yaml:"token"`
Password string `yaml:"password" datapolicy:"password"`
Token string `yaml:"token" datapolicy:"token"`
} `yaml:"user"`
}
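The datapolicy tags do not affect (de)serialization; they categorize sensitive fields so that the log-leak static analysis adopted by Kubernetes around this release can flag code paths that would print them. A hypothetical type for illustration; only the tags matter here:

type registryAuth struct {
	Username string `yaml:"username"`
	// Tagged so analysis tooling treats these values as secret sources:
	Password string `yaml:"password" datapolicy:"password"`
	Token    string `yaml:"token" datapolicy:"token"`
}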


@@ -1,131 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"encoding/json"
"fmt"
"io/ioutil"
"os/exec"
"path/filepath"
"strings"
)
// TODO: These should really just use the GCE API client library or at least use
// better formatted output from the --format flag.
// Returns master & node image string, or error
func lookupClusterImageSources() (string, string, error) {
// Given args for a gcloud compute command, run it with other args, and return the values,
// whether separated by newlines, commas or semicolons.
gcloudf := func(argv ...string) ([]string, error) {
args := []string{"compute"}
args = append(args, argv...)
args = append(args, "--project", TestContext.CloudConfig.ProjectID)
if TestContext.CloudConfig.MultiMaster {
args = append(args, "--region", TestContext.CloudConfig.Region)
} else {
args = append(args, "--zone", TestContext.CloudConfig.Zone)
}
outputBytes, err := exec.Command("gcloud", args...).CombinedOutput()
str := strings.Replace(string(outputBytes), ",", "\n", -1)
str = strings.Replace(str, ";", "\n", -1)
lines := strings.Split(str, "\n")
if err != nil {
Logf("lookupDiskImageSources: gcloud error with [%#v]; err:%v", argv, err)
for _, l := range lines {
Logf(" > %s", l)
}
}
return lines, err
}
// Given a GCE instance, look through its disks, finding one that has a sourceImage
host2image := func(instance string) (string, error) {
// gcloud compute instances describe {INSTANCE} --format="get(disks[].source)"
// gcloud compute disks describe {DISKURL} --format="get(sourceImage)"
disks, err := gcloudf("instances", "describe", instance, "--format=get(disks[].source)")
if err != nil {
return "", err
} else if len(disks) == 0 {
return "", fmt.Errorf("instance %q had no findable disks", instance)
}
// Loop over disks, looking for the boot disk
for _, disk := range disks {
lines, err := gcloudf("disks", "describe", disk, "--format=get(sourceImage)")
if err != nil {
return "", err
} else if len(lines) > 0 && lines[0] != "" {
return lines[0], nil // break, we're done
}
}
return "", fmt.Errorf("instance %q had no disk with a sourceImage", instance)
}
// gcloud compute instance-groups list-instances {GROUPNAME} --format="get(instance)"
nodeName := ""
instGroupName := strings.Split(TestContext.CloudConfig.NodeInstanceGroup, ",")[0]
if lines, err := gcloudf("instance-groups", "list-instances", instGroupName, "--format=get(instance)"); err != nil {
return "", "", err
} else if len(lines) == 0 {
return "", "", fmt.Errorf("no instances inside instance-group %q", instGroupName)
} else {
nodeName = lines[0]
}
nodeImg, err := host2image(nodeName)
if err != nil {
return "", "", err
}
frags := strings.Split(nodeImg, "/")
nodeImg = frags[len(frags)-1]
// For GKE clusters, MasterName will not be defined; we just leave masterImg blank.
masterImg := ""
if masterName := TestContext.CloudConfig.MasterName; masterName != "" {
img, err := host2image(masterName)
if err != nil {
return "", "", err
}
frags = strings.Split(img, "/")
masterImg = frags[len(frags)-1]
}
return masterImg, nodeImg, nil
}
// LogClusterImageSources writes out cluster image sources.
func LogClusterImageSources() {
masterImg, nodeImg, err := lookupClusterImageSources()
if err != nil {
Logf("Cluster image sources lookup failed: %v\n", err)
return
}
Logf("cluster-master-image: %s", masterImg)
Logf("cluster-node-image: %s", nodeImg)
images := map[string]string{
"master_os_image": masterImg,
"node_os_image": nodeImg,
}
outputBytes, _ := json.MarshalIndent(images, "", " ")
filePath := filepath.Join(TestContext.ReportDir, "images.json")
if err := ioutil.WriteFile(filePath, outputBytes, 0644); err != nil {
Logf("cluster images sources, could not write to %q: %v", filePath, err)
}
}


@@ -86,6 +86,9 @@ func (tk *TestKubeconfig) KubectlCmd(args ...string) *exec.Cmd {
fmt.Sprintf("--client-key=%s", filepath.Join(tk.CertDir, "kubecfg.key")))
}
}
if tk.Namespace != "" {
defaultArgs = append(defaultArgs, fmt.Sprintf("--namespace=%s", tk.Namespace))
}
kubectlArgs := append(defaultArgs, args...)
// We allow users to specify the path to kubectl, so you can test either "kubectl" or "cluster/kubectl.sh".
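With Namespace set, every command built by KubectlCmd is scoped to the test namespace without each caller passing -n explicitly. A hypothetical call site; the struct literal uses only fields referenced in this file, and the KubeConfig field name is an assumption:

tk := &framework.TestKubeconfig{
	KubeConfig: framework.TestContext.KubeConfig, // assumed field name
	Namespace:  f.Namespace.Name,
}
cmd := tk.KubectlCmd("get", "pods")
// cmd.Args now carries "--namespace=<test namespace>" ahead of "get pods".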


@@ -26,7 +26,7 @@ import (
"github.com/onsi/ginkgo"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
)
func nowStamp() string {
@@ -42,18 +42,13 @@ func Logf(format string, args ...interface{}) {
log("INFO", format, args...)
}
// Failf logs the fail info, including a stack trace.
// Failf logs the fail info, including a stack trace that starts 2 levels above its caller
// (for example, for the call chain f -> g -> Failf("foo", ...), the error would be logged for "f").
func Failf(format string, args ...interface{}) {
FailfWithOffset(1, format, args...)
}
// FailfWithOffset calls "Fail" and logs the error with a stack trace that starts at "offset" levels above its caller
// (for example, for call chain f -> g -> FailfWithOffset(1, ...) error would be logged for "f").
func FailfWithOffset(offset int, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
skip := offset + 1
skip := 2
log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
ginkgowrapper.Fail(nowStamp()+": "+msg, skip)
e2eginkgowrapper.Fail(nowStamp()+": "+msg, skip)
}
// Fail is a replacement for ginkgo.Fail which logs the problem as it occurs
@@ -64,7 +59,7 @@ func Fail(msg string, callerSkip ...int) {
skip += callerSkip[0]
}
log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
ginkgowrapper.Fail(nowStamp()+": "+msg, skip)
e2eginkgowrapper.Fail(nowStamp()+": "+msg, skip)
}
var codeFilterRE = regexp.MustCompile(`/github.com/onsi/ginkgo/`)
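The hard-coded skip of 2 means the logged stack trace starts at Failf's caller's caller, so a thin assertion helper gets attributed to the test that invoked it rather than to the helper itself. A sketch with a hypothetical helper:

// With skip == 2 inside Failf, the reported frame is whichever test
// function called expectHealthy, not expectHealthy itself.
func expectHealthy(healthy bool) {
	if !healthy {
		framework.Failf("component is not healthy")
	}
}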


@@ -24,7 +24,7 @@ import (
"github.com/onsi/ginkgo"
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
)
func nowStamp() string {
@@ -50,5 +50,5 @@ func Failf(format string, args ...interface{}) {
func FailfWithOffset(offset int, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
log("FAIL", msg)
ginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset)
e2eginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset)
}


@@ -159,7 +159,7 @@ func (d *LogsSizeData) addNewData(ip, path string, timestamp time.Time, size int
func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVerifier {
nodeAddresses, err := e2essh.NodeSSHHosts(c)
ExpectNoError(err)
masterAddress := GetMasterHost() + ":22"
instanceAddress := APIAddress() + ":22"
workChannel := make(chan WorkItem, len(nodeAddresses)+1)
workers := make([]*LogSizeGatherer, workersNo)
@@ -167,8 +167,8 @@ func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVeri
verifier := &LogsSizeVerifier{
client: c,
stopChannel: stopChannel,
data: prepareData(masterAddress, nodeAddresses),
masterAddress: masterAddress,
data: prepareData(instanceAddress, nodeAddresses),
masterAddress: instanceAddress,
nodeAddresses: nodeAddresses,
wg: sync.WaitGroup{},
workChannel: workChannel,


@@ -22,9 +22,6 @@ go_library(
],
importpath = "k8s.io/kubernetes/test/e2e/framework/metrics",
deps = [
"//pkg/kubelet/dockershim/metrics:go_default_library",
"//pkg/kubelet/metrics:go_default_library",
"//pkg/master/ports:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
@@ -34,8 +31,7 @@ go_library(
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/perftype:go_default_library",
"//test/e2e/system:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
],
)


@@ -29,13 +29,26 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/component-base/metrics/testutil"
dockermetrics "k8s.io/kubernetes/pkg/kubelet/dockershim/metrics"
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)
const (
proxyTimeout = 2 * time.Minute
// dockerOperationsLatencyKey is the key for the operation latency metrics.
// Taken from k8s.io/kubernetes/pkg/kubelet/dockershim/metrics
dockerOperationsLatencyKey = "docker_operations_duration_seconds"
// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
kubeletSubsystem = "kubelet"
// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
podWorkerDurationKey = "pod_worker_duration_seconds"
// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
podStartDurationKey = "pod_start_duration_seconds"
// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
cgroupManagerOperationsKey = "cgroup_manager_duration_seconds"
// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
podWorkerStartDurationKey = "pod_worker_start_duration_seconds"
// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
plegRelistDurationKey = "pleg_relist_duration_seconds"
)
// KubeletMetrics is metrics for kubelet
@@ -143,7 +156,7 @@ func GetKubeletMetrics(c clientset.Interface, nodeName string) (KubeletMetrics,
kubeletMetrics := make(KubeletMetrics)
for name, samples := range ms {
const prefix = kubeletmetrics.KubeletSubsystem + "_"
const prefix = kubeletSubsystem + "_"
if !strings.HasPrefix(name, prefix) {
// Not a kubelet metric.
continue
@@ -159,13 +172,13 @@ func GetKubeletMetrics(c clientset.Interface, nodeName string) (KubeletMetrics,
// Note that the KubeletMetrics passed in should not contain subsystem prefix.
func GetDefaultKubeletLatencyMetrics(ms KubeletMetrics) KubeletLatencyMetrics {
latencyMetricNames := sets.NewString(
kubeletmetrics.PodWorkerDurationKey,
kubeletmetrics.PodWorkerStartDurationKey,
kubeletmetrics.PodStartDurationKey,
kubeletmetrics.CgroupManagerOperationsKey,
dockermetrics.DockerOperationsLatencyKey,
kubeletmetrics.PodWorkerStartDurationKey,
kubeletmetrics.PLEGRelistDurationKey,
podWorkerDurationKey,
podWorkerStartDurationKey,
podStartDurationKey,
cgroupManagerOperationsKey,
dockerOperationsLatencyKey,
podWorkerStartDurationKey,
plegRelistDurationKey,
)
return GetKubeletLatencyMetrics(ms, latencyMetricNames)
}


@@ -19,6 +19,7 @@ package metrics
import (
"context"
"fmt"
"regexp"
"sync"
"time"
@@ -26,11 +27,20 @@ import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/master/ports"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/system"
"k8s.io/klog"
"k8s.io/klog/v2"
)
const (
// insecureSchedulerPort is the default port for the scheduler status server.
// May be overridden by a flag at startup.
// Deprecated: use the secure KubeSchedulerPort instead.
insecureSchedulerPort = 10251
// insecureKubeControllerManagerPort is the default port for the controller manager status server.
// May be overridden by a flag at startup.
// Deprecated: use the secure KubeControllerManagerPort instead.
insecureKubeControllerManagerPort = 10252
)
// Collection is metrics collection of components
@@ -51,38 +61,48 @@ type Grabber struct {
grabFromKubelets bool
grabFromScheduler bool
grabFromClusterAutoscaler bool
masterName string
registeredMaster bool
kubeScheduler string
kubeControllerManager string
waitForControllerManagerReadyOnce sync.Once
}
// NewMetricsGrabber returns new metrics which are initialized.
func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets bool, scheduler bool, controllers bool, apiServer bool, clusterAutoscaler bool) (*Grabber, error) {
registeredMaster := false
masterName := ""
nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
kubeScheduler := ""
kubeControllerManager := ""
regKubeScheduler := regexp.MustCompile("kube-scheduler-.*")
regKubeControllerManager := regexp.MustCompile("kube-controller-manager-.*")
podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, err
}
if len(nodeList.Items) < 1 {
klog.Warning("Can't find any Nodes in the API server to grab metrics from")
if len(podList.Items) < 1 {
klog.Warningf("Can't find any pods in namespace %s to grab metrics from", metav1.NamespaceSystem)
}
for _, node := range nodeList.Items {
if system.DeprecatedMightBeMasterNode(node.Name) {
registeredMaster = true
masterName = node.Name
for _, pod := range podList.Items {
if regKubeScheduler.MatchString(pod.Name) {
kubeScheduler = pod.Name
}
if regKubeControllerManager.MatchString(pod.Name) {
kubeControllerManager = pod.Name
}
if kubeScheduler != "" && kubeControllerManager != "" {
break
}
}
if !registeredMaster {
if kubeScheduler == "" {
scheduler = false
klog.Warningf("Can't find kube-scheduler pod. Grabbing metrics from kube-scheduler is disabled.")
}
if kubeControllerManager == "" {
controllers = false
clusterAutoscaler = ec != nil
if clusterAutoscaler {
klog.Warningf("Master node is not registered. Grabbing metrics from Scheduler, ControllerManager is disabled.")
} else {
klog.Warningf("Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.")
}
klog.Warningf("Can't find kube-controller-manager pod. Grabbing metrics from kube-controller-manager is disabled.")
}
if ec == nil {
klog.Warningf("Did not receive an external client interface. Grabbing metrics from ClusterAutoscaler is disabled.")
}
return &Grabber{
@@ -93,14 +113,14 @@ func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets b
grabFromKubelets: kubelets,
grabFromScheduler: scheduler,
grabFromClusterAutoscaler: clusterAutoscaler,
masterName: masterName,
registeredMaster: registeredMaster,
kubeScheduler: kubeScheduler,
kubeControllerManager: kubeControllerManager,
}, nil
}
// HasRegisteredMaster returns if metrics grabber was able to find a master node
func (g *Grabber) HasRegisteredMaster() bool {
return g.registeredMaster
// HasControlPlanePods returns true if metrics grabber was able to find control-plane pods
func (g *Grabber) HasControlPlanePods() bool {
return g.kubeScheduler != "" && g.kubeControllerManager != ""
}
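Discovery now keys off pod names in kube-system rather than the deprecated master-node-name heuristic, so it also works on clusters whose control plane runs as static pods (whose names carry the node-name suffix the regexps allow for). A hedged usage sketch, with the NewMetricsGrabber signature taken from the hunk above and the e2emetrics alias used elsewhere in this commit:

grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, nil,
	false /* kubelets */, true /* scheduler */, true /* controllers */,
	false /* apiServer */, false /* clusterAutoscaler */)
framework.ExpectNoError(err)
if grabber.HasControlPlanePods() {
	// Both kube-scheduler-* and kube-controller-manager-* pods were found,
	// so scraping their metrics endpoints will not short-circuit with an error.
	_, err := grabber.GrabFromScheduler()
	framework.ExpectNoError(err)
}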
// GrabFromKubelet returns metrics from kubelet
@@ -129,10 +149,10 @@ func (g *Grabber) grabFromKubeletInternal(nodeName string, kubeletPort int) (Kub
// GrabFromScheduler returns metrics from scheduler
func (g *Grabber) GrabFromScheduler() (SchedulerMetrics, error) {
if !g.registeredMaster {
return SchedulerMetrics{}, fmt.Errorf("Master's Kubelet is not registered. Skipping Scheduler's metrics gathering")
if g.kubeScheduler == "" {
return SchedulerMetrics{}, fmt.Errorf("kube-scheduler pod is not registered. Skipping Scheduler's metrics gathering")
}
output, err := g.getMetricsFromPod(g.client, fmt.Sprintf("%v-%v", "kube-scheduler", g.masterName), metav1.NamespaceSystem, ports.InsecureSchedulerPort)
output, err := g.getMetricsFromPod(g.client, g.kubeScheduler, metav1.NamespaceSystem, insecureSchedulerPort)
if err != nil {
return SchedulerMetrics{}, err
}
@@ -141,8 +161,8 @@ func (g *Grabber) GrabFromScheduler() (SchedulerMetrics, error) {
// GrabFromClusterAutoscaler returns metrics from cluster autoscaler
func (g *Grabber) GrabFromClusterAutoscaler() (ClusterAutoscalerMetrics, error) {
if !g.registeredMaster && g.externalClient == nil {
return ClusterAutoscalerMetrics{}, fmt.Errorf("Master's Kubelet is not registered. Skipping ClusterAutoscaler's metrics gathering")
if !g.HasControlPlanePods() && g.externalClient == nil {
return ClusterAutoscalerMetrics{}, fmt.Errorf("Did not find control-plane pods. Skipping ClusterAutoscaler's metrics gathering")
}
var client clientset.Interface
var namespace string
@@ -162,12 +182,12 @@ func (g *Grabber) GrabFromClusterAutoscaler() (ClusterAutoscalerMetrics, error)
// GrabFromControllerManager returns metrics from controller manager
func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error) {
if !g.registeredMaster {
return ControllerManagerMetrics{}, fmt.Errorf("Master's Kubelet is not registered. Skipping ControllerManager's metrics gathering")
if g.kubeControllerManager == "" {
return ControllerManagerMetrics{}, fmt.Errorf("kube-controller-manager pod is not registered. Skipping ControllerManager's metrics gathering")
}
var err error
podName := fmt.Sprintf("%v-%v", "kube-controller-manager", g.masterName)
podName := g.kubeControllerManager
g.waitForControllerManagerReadyOnce.Do(func() {
if readyErr := e2epod.WaitForPodsReady(g.client, metav1.NamespaceSystem, podName, 0); readyErr != nil {
err = fmt.Errorf("error waiting for controller manager pod to be ready: %w", readyErr)
@ -176,7 +196,7 @@ func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error)
var lastMetricsFetchErr error
if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
_, lastMetricsFetchErr = g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, ports.InsecureKubeControllerManagerPort)
_, lastMetricsFetchErr = g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, insecureKubeControllerManagerPort)
return lastMetricsFetchErr == nil, nil
}); metricsWaitErr != nil {
err = fmt.Errorf("error waiting for controller manager pod to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr)
@ -187,7 +207,7 @@ func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error)
return ControllerManagerMetrics{}, err
}
output, err := g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, ports.InsecureKubeControllerManagerPort)
output, err := g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, insecureKubeControllerManagerPort)
if err != nil {
return ControllerManagerMetrics{}, err
}

View File

@ -10,24 +10,41 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/framework/node",
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/system:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/utils/net:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["wait_test.go"],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
@ -41,17 +58,3 @@ filegroup(
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["wait_test.go"],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
],
)

View File

@ -18,21 +18,30 @@ package node
import (
"context"
"encoding/json"
"fmt"
"net"
"strings"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
clientretry "k8s.io/client-go/util/retry"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/system"
netutil "k8s.io/utils/net"
)
const (
@ -47,6 +56,29 @@ const (
sshPort = "22"
)
var (
// unreachableTaintTemplate is the taint for when a node becomes unreachable.
// Copied from pkg/controller/nodelifecycle to avoid pulling extra dependencies
unreachableTaintTemplate = &v1.Taint{
Key: v1.TaintNodeUnreachable,
Effect: v1.TaintEffectNoExecute,
}
// notReadyTaintTemplate is the taint for when a node is not ready for executing pods.
// Copied from pkg/controller/nodelifecycle to avoid pulling extra dependencies
notReadyTaintTemplate = &v1.Taint{
Key: v1.TaintNodeNotReady,
Effect: v1.TaintEffectNoExecute,
}
// updateTaintBackOff contains the maximum retries and the wait interval between two retries.
updateTaintBackOff = wait.Backoff{
Steps: 5,
Duration: 100 * time.Millisecond,
Jitter: 1.0,
}
)
// PodNode is a pod-node pair indicating which node a given pod is running on
type PodNode struct {
// Pod represents pod name
@ -78,7 +110,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
// For NodeReady we need to check if Taints are gone as well
taints := node.Spec.Taints
for _, taint := range taints {
if taint.MatchTaint(nodectlr.UnreachableTaintTemplate) || taint.MatchTaint(nodectlr.NotReadyTaintTemplate) {
if taint.MatchTaint(unreachableTaintTemplate) || taint.MatchTaint(notReadyTaintTemplate) {
hasNodeControllerTaints = true
break
}
@ -160,7 +192,7 @@ func Filter(nodeList *v1.NodeList, fn func(node v1.Node) bool) {
nodeList.Items = l
}
// TotalRegistered returns number of registered Nodes excluding Master Node.
// TotalRegistered returns number of schedulable Nodes.
func TotalRegistered(c clientset.Interface) (int, error) {
nodes, err := waitListSchedulableNodes(c)
if err != nil {
@ -170,7 +202,7 @@ func TotalRegistered(c clientset.Interface) (int, error) {
return len(nodes.Items), nil
}
// TotalReady returns number of ready Nodes excluding Master Node.
// TotalReady returns number of ready schedulable Nodes.
func TotalReady(c clientset.Interface) (int, error) {
nodes, err := waitListSchedulableNodes(c)
if err != nil {
@ -217,6 +249,37 @@ func GetInternalIP(node *v1.Node) (string, error) {
return host, nil
}
// FirstAddressByTypeAndFamily returns the first address in the given node list that matches the given type and family
func FirstAddressByTypeAndFamily(nodelist *v1.NodeList, addrType v1.NodeAddressType, family v1.IPFamily) string {
for _, n := range nodelist.Items {
addresses := GetAddressesByTypeAndFamily(&n, addrType, family)
if len(addresses) > 0 {
return addresses[0]
}
}
return ""
}
// GetAddressesByTypeAndFamily returns a list of addresses of the given addressType for the given node
// and filtered by IPFamily
func GetAddressesByTypeAndFamily(node *v1.Node, addressType v1.NodeAddressType, family v1.IPFamily) (ips []string) {
for _, nodeAddress := range node.Status.Addresses {
if nodeAddress.Type != addressType {
continue
}
if nodeAddress.Address == "" {
continue
}
if family == v1.IPv6Protocol && netutil.IsIPv6String(nodeAddress.Address) {
ips = append(ips, nodeAddress.Address)
}
if family == v1.IPv4Protocol && !netutil.IsIPv6String(nodeAddress.Address) {
ips = append(ips, nodeAddress.Address)
}
}
return
}
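Usage sketch (hypothetical helper, not part of this diff): the two helpers above compose naturally when picking a dial target for a test.
package main
import (
	"context"
	"fmt"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
// firstNodeIPv4 returns the first IPv4 InternalIP found across all nodes,
// or "" when none exists.
func firstNodeIPv4(c clientset.Interface) (string, error) {
	nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return "", fmt.Errorf("listing nodes: %w", err)
	}
	return e2enode.FirstAddressByTypeAndFamily(nodes, v1.NodeInternalIP, v1.IPv4Protocol), nil
}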
// GetAddresses returns a list of addresses of the given addressType for the given node
func GetAddresses(node *v1.Node, addressType v1.NodeAddressType) (ips []string) {
for j := range node.Status.Addresses {
@ -329,24 +392,6 @@ func GetReadyNodesIncludingTainted(c clientset.Interface) (nodes *v1.NodeList, e
return nodes, nil
}
// GetMasterAndWorkerNodes will return a list masters and schedulable worker nodes
func GetMasterAndWorkerNodes(c clientset.Interface) (sets.String, *v1.NodeList, error) {
nodes := &v1.NodeList{}
masters := sets.NewString()
all, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, nil, fmt.Errorf("get nodes error: %s", err)
}
for _, n := range all.Items {
if system.DeprecatedMightBeMasterNode(n.Name) {
masters.Insert(n.Name)
} else if IsNodeSchedulable(&n) && isNodeUntainted(&n) {
nodes.Items = append(nodes.Items, n)
}
}
return masters, nodes, nil
}
// isNodeUntainted tests whether a fake pod can be scheduled on "node", given its current taints.
// TODO: need to discuss whether to return bool and error type
func isNodeUntainted(node *v1.Node) bool {
@ -375,8 +420,6 @@ func isNodeUntaintedWithNonblocking(node *v1.Node, nonblockingTaints string) boo
},
}
nodeInfo := schedulernodeinfo.NewNodeInfo()
// Simple lookup for nonblocking taints based on comma-delimited list.
nonblockingTaintsMap := map[string]struct{}{}
for _, t := range strings.Split(nonblockingTaints, ",") {
@ -385,6 +428,7 @@ func isNodeUntaintedWithNonblocking(node *v1.Node, nonblockingTaints string) boo
}
}
n := node
if len(nonblockingTaintsMap) > 0 {
nodeCopy := node.DeepCopy()
nodeCopy.Spec.Taints = []v1.Taint{}
@ -393,20 +437,36 @@ func isNodeUntaintedWithNonblocking(node *v1.Node, nonblockingTaints string) boo
nodeCopy.Spec.Taints = append(nodeCopy.Spec.Taints, v)
}
}
nodeInfo.SetNode(nodeCopy)
} else {
nodeInfo.SetNode(node)
n = nodeCopy
}
return toleratesTaintsWithNoScheduleNoExecuteEffects(n.Spec.Taints, fakePod.Spec.Tolerations)
}
func toleratesTaintsWithNoScheduleNoExecuteEffects(taints []v1.Taint, tolerations []v1.Toleration) bool {
filteredTaints := []v1.Taint{}
for _, taint := range taints {
if taint.Effect == v1.TaintEffectNoExecute || taint.Effect == v1.TaintEffectNoSchedule {
filteredTaints = append(filteredTaints, taint)
}
}
taints, err := nodeInfo.Taints()
if err != nil {
e2elog.Failf("Can't test predicates for node %s: %v", node.Name, err)
toleratesTaint := func(taint v1.Taint) bool {
for _, toleration := range tolerations {
if toleration.ToleratesTaint(&taint) {
return true
}
}
return false
}
return v1helper.TolerationsTolerateTaintsWithFilter(fakePod.Spec.Tolerations, taints, func(t *v1.Taint) bool {
return t.Effect == v1.TaintEffectNoExecute || t.Effect == v1.TaintEffectNoSchedule
})
for _, taint := range filteredTaints {
if !toleratesTaint(taint) {
return false
}
}
return true
}
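The filter above only requires NoSchedule and NoExecute taints to be tolerated; PreferNoSchedule never blocks. A self-contained sketch of the same logic (taint keys are illustrative):
package main
import (
	"fmt"
	v1 "k8s.io/api/core/v1"
)
func main() {
	taints := []v1.Taint{
		{Key: "node.kubernetes.io/unreachable", Effect: v1.TaintEffectNoExecute},
		{Key: "example.com/report-only", Effect: v1.TaintEffectPreferNoSchedule}, // filtered out below
	}
	tolerations := []v1.Toleration{
		{Key: "node.kubernetes.io/unreachable", Operator: v1.TolerationOpExists, Effect: v1.TaintEffectNoExecute},
	}
	schedulable := true
	for _, taint := range taints {
		// Only NoSchedule and NoExecute taints can block the fake pod.
		if taint.Effect != v1.TaintEffectNoSchedule && taint.Effect != v1.TaintEffectNoExecute {
			continue
		}
		tolerated := false
		for _, tol := range tolerations {
			if tol.ToleratesTaint(&taint) {
				tolerated = true
				break
			}
		}
		schedulable = schedulable && tolerated
	}
	fmt.Println("schedulable:", schedulable) // true: the PreferNoSchedule taint never counts
}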
// IsNodeSchedulable returns true if:
@ -429,6 +489,14 @@ func IsNodeReady(node *v1.Node) bool {
return nodeReady && networkReady
}
// isNodeSchedulableWithoutTaints returns true if the node:
// 1) doesn't have the "unschedulable" field set
// 2) is ready according to IsNodeReady
// 3) is untainted according to isNodeUntainted
func isNodeSchedulableWithoutTaints(node *v1.Node) bool {
return IsNodeSchedulable(node) && isNodeUntainted(node)
}
// hasNonblockingTaint returns true if the node contains at least
// one taint with a key matching the regexp.
func hasNonblockingTaint(node *v1.Node, nonblockingTaints string) bool {
@ -471,3 +539,295 @@ func PodNodePairs(c clientset.Interface, ns string) ([]PodNode, error) {
return result, nil
}
// GetClusterZones returns the values of zone label collected from all nodes.
func GetClusterZones(c clientset.Interface) (sets.String, error) {
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err)
}
// collect values of zone label from all nodes
zones := sets.NewString()
for _, node := range nodes.Items {
if zone, found := node.Labels[v1.LabelFailureDomainBetaZone]; found {
zones.Insert(zone)
}
if zone, found := node.Labels[v1.LabelTopologyZone]; found {
zones.Insert(zone)
}
}
return zones, nil
}
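A short sketch (hypothetical guard, not part of this diff) of how a zone-spread test might consume GetClusterZones:
package main
import (
	"fmt"
	clientset "k8s.io/client-go/kubernetes"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
// requireMultiZone fails fast when the cluster does not span at least two zones.
func requireMultiZone(c clientset.Interface) error {
	zones, err := e2enode.GetClusterZones(c)
	if err != nil {
		return err
	}
	if zones.Len() < 2 {
		return fmt.Errorf("need at least 2 zones, cluster has %d: %v", zones.Len(), zones.List())
	}
	return nil
}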
// CreatePodsPerNodeForSimpleApp creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
nodes, err := GetBoundedReadySchedulableNodes(c, maxCount)
// TODO use wrapper methods in expect.go after removing core e2e dependency on node
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
podLabels := map[string]string{
"app": appName + "-pod",
}
for i, node := range nodes.Items {
e2elog.Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
_, err := c.CoreV1().Pods(namespace).Create(context.TODO(), &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf(appName+"-pod-%v", i),
Labels: podLabels,
},
Spec: podSpec(node),
}, metav1.CreateOptions{})
// TODO use wrapper methods in expect.go after removing core e2e dependency on node
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
}
return podLabels
}
// RemoveTaintOffNode removes the given taint from the given node.
func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint v1.Taint) {
err := removeNodeTaint(c, nodeName, nil, &taint)
// TODO use wrapper methods in expect.go after removing core e2e dependency on node
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
verifyThatTaintIsGone(c, nodeName, &taint)
}
// AddOrUpdateTaintOnNode adds the given taint to the given node or updates taint.
func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint v1.Taint) {
// TODO use wrapper methods in expect.go after removing the dependency on this
// package from the core e2e framework.
err := addOrUpdateTaintOnNode(c, nodeName, &taint)
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
}
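These two exported wrappers pair naturally in tests; a sketch under stated assumptions (helper and taint key are hypothetical; both wrappers assert failures via gomega, so this is meant to run inside a Ginkgo spec):
package main
import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
// withTestTaint taints a node for the duration of a test body and removes
// the taint afterwards.
func withTestTaint(c clientset.Interface, nodeName string, body func()) {
	taint := v1.Taint{
		Key:    "example.com/under-test", // hypothetical taint key
		Value:  "true",
		Effect: v1.TaintEffectNoSchedule,
	}
	e2enode.AddOrUpdateTaintOnNode(c, nodeName, taint)
	defer e2enode.RemoveTaintOffNode(c, nodeName, taint)
	body()
}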
// addOrUpdateTaintOnNode adds taints to the node. If a taint was added, it issues API calls
// to update the node; otherwise, no API calls are made. Returns an error if any.
// copied from pkg/controller/controller_utils.go AddOrUpdateTaintOnNode()
func addOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v1.Taint) error {
if len(taints) == 0 {
return nil
}
firstTry := true
return clientretry.RetryOnConflict(updateTaintBackOff, func() error {
var err error
var oldNode *v1.Node
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
if firstTry {
oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"})
firstTry = false
} else {
oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
}
if err != nil {
return err
}
var newNode *v1.Node
oldNodeCopy := oldNode
updated := false
for _, taint := range taints {
curNewNode, ok, err := addOrUpdateTaint(oldNodeCopy, taint)
if err != nil {
return fmt.Errorf("failed to update taint of node")
}
updated = updated || ok
newNode = curNewNode
oldNodeCopy = curNewNode
}
if !updated {
return nil
}
return patchNodeTaints(c, nodeName, oldNode, newNode)
})
}
// addOrUpdateTaint tries to add a taint to the node's taint list. Returns a new copy of the
// updated Node and true if something was updated, false otherwise.
// copied from pkg/util/taints/taints.go AddOrUpdateTaint()
func addOrUpdateTaint(node *v1.Node, taint *v1.Taint) (*v1.Node, bool, error) {
newNode := node.DeepCopy()
nodeTaints := newNode.Spec.Taints
var newTaints []v1.Taint
updated := false
for i := range nodeTaints {
if taint.MatchTaint(&nodeTaints[i]) {
if semantic.DeepEqual(*taint, nodeTaints[i]) {
return newNode, false, nil
}
newTaints = append(newTaints, *taint)
updated = true
continue
}
newTaints = append(newTaints, nodeTaints[i])
}
if !updated {
newTaints = append(newTaints, *taint)
}
newNode.Spec.Taints = newTaints
return newNode, true, nil
}
// semantic can do semantic deep equality checks for core objects.
// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true
// copied from pkg/apis/core/helper/helpers.go Semantic
var semantic = conversion.EqualitiesOrDie(
func(a, b resource.Quantity) bool {
// Ignore formatting, only care that numeric value stayed the same.
// TODO: if we decide it's important, it should be safe to start comparing the format.
//
// Uninitialized quantities are equivalent to 0 quantities.
return a.Cmp(b) == 0
},
func(a, b metav1.MicroTime) bool {
return a.UTC() == b.UTC()
},
func(a, b metav1.Time) bool {
return a.UTC() == b.UTC()
},
func(a, b labels.Selector) bool {
return a.String() == b.String()
},
func(a, b fields.Selector) bool {
return a.String() == b.String()
},
)
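The resource.Quantity comparator above deliberately ignores formatting; a quick self-contained illustration of that equality, using Cmp directly (which is what the comparator calls):
package main
import (
	"fmt"
	"k8s.io/apimachinery/pkg/api/resource"
)
func main() {
	a := resource.MustParse("1")
	b := resource.MustParse("1000m") // same numeric value, different format
	// Prints "1 1000m true": the cached string forms differ, but the
	// semantic comparison via Cmp treats the quantities as equal.
	fmt.Println(a.String(), b.String(), a.Cmp(b) == 0)
}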
// removeNodeTaint cleans up taints temporarily added to a node; it
// won't fail if the target taint doesn't exist or has already been removed.
// If passed a node, it first checks whether there's anything to be done; if the
// taint is not present it won't issue any API calls.
func removeNodeTaint(c clientset.Interface, nodeName string, node *v1.Node, taints ...*v1.Taint) error {
if len(taints) == 0 {
return nil
}
// Short circuit for limiting amount of API calls.
if node != nil {
match := false
for _, taint := range taints {
if taintExists(node.Spec.Taints, taint) {
match = true
break
}
}
if !match {
return nil
}
}
firstTry := true
return clientretry.RetryOnConflict(updateTaintBackOff, func() error {
var err error
var oldNode *v1.Node
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
if firstTry {
oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"})
firstTry = false
} else {
oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
}
if err != nil {
return err
}
var newNode *v1.Node
oldNodeCopy := oldNode
updated := false
for _, taint := range taints {
curNewNode, ok, err := removeTaint(oldNodeCopy, taint)
if err != nil {
return fmt.Errorf("failed to remove taint of node")
}
updated = updated || ok
newNode = curNewNode
oldNodeCopy = curNewNode
}
if !updated {
return nil
}
return patchNodeTaints(c, nodeName, oldNode, newNode)
})
}
// patchNodeTaints patches node's taints.
func patchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error {
oldData, err := json.Marshal(oldNode)
if err != nil {
return fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err)
}
newTaints := newNode.Spec.Taints
newNodeClone := oldNode.DeepCopy()
newNodeClone.Spec.Taints = newTaints
newData, err := json.Marshal(newNodeClone)
if err != nil {
return fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNodeClone, nodeName, err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
if err != nil {
return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
}
_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
return err
}
// removeTaint tries to remove a taint from the node's taint list. Returns a new copy of the
// updated Node and true if something was updated, false otherwise.
func removeTaint(node *v1.Node, taint *v1.Taint) (*v1.Node, bool, error) {
newNode := node.DeepCopy()
nodeTaints := newNode.Spec.Taints
if len(nodeTaints) == 0 {
return newNode, false, nil
}
if !taintExists(nodeTaints, taint) {
return newNode, false, nil
}
newTaints, _ := deleteTaint(nodeTaints, taint)
newNode.Spec.Taints = newTaints
return newNode, true, nil
}
// deleteTaint removes all the taints that have the same key and effect as the given taintToDelete.
func deleteTaint(taints []v1.Taint, taintToDelete *v1.Taint) ([]v1.Taint, bool) {
var newTaints []v1.Taint
deleted := false
for i := range taints {
if taintToDelete.MatchTaint(&taints[i]) {
deleted = true
continue
}
newTaints = append(newTaints, taints[i])
}
return newTaints, deleted
}
func verifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Taint) {
ginkgo.By("verifying the node doesn't have the taint " + taint.ToString())
nodeUpdated, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
// TODO use wrapper methods in expect.go after removing core e2e dependency on node
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
if taintExists(nodeUpdated.Spec.Taints, taint) {
e2elog.Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
}
}
// taintExists checks if the given taint exists in the list of taints. Returns true if it exists, false otherwise.
func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool {
for _, taint := range taints {
if taint.MatchTaint(taintToFind) {
return true
}
}
return false
}

View File

@ -28,8 +28,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/system"
testutils "k8s.io/kubernetes/test/utils"
)
const sleepTime = 20 * time.Second
@ -41,8 +39,7 @@ var requiredPerNodePods = []*regexp.Regexp{
}
// WaitForReadyNodes waits up to timeout for the cluster to have the desired size and
// there is no not-ready nodes in it. By cluster size we mean number of Nodes
// excluding Master Node.
// there are no not-ready nodes in it. By cluster size we mean the number of schedulable Nodes.
func WaitForReadyNodes(c clientset.Interface, size int, timeout time.Duration) error {
_, err := CheckReady(c, size, timeout)
return err
@ -59,9 +56,6 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
for _, node := range nodes.Items {
@ -84,7 +78,7 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
}
missingPodsPerNode = make(map[string][]string)
for _, node := range nodes.Items {
if !system.DeprecatedMightBeMasterNode(node.Name) {
if isNodeSchedulableWithoutTaints(&node) {
for _, requiredPod := range requiredPerNodePods {
foundRequired := false
for _, presentPod := range systemPodsPerNode[node.Name] {
@ -150,8 +144,7 @@ func WaitForNodeToBeReady(c clientset.Interface, name string, timeout time.Durat
}
// CheckReady waits up to timeout for the cluster to have the desired size and
// there is no not-ready nodes in it. By cluster size we mean number of Nodes
// excluding Master Node.
// there are no not-ready nodes in it. By cluster size we mean the number of schedulable Nodes.
func CheckReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.Node, error) {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(sleepTime) {
nodes, err := waitListSchedulableNodes(c)
@ -187,9 +180,6 @@ func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
return true, nil
@ -208,30 +198,25 @@ func checkWaitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error)
return nodes, nil
}
// CheckReadyForTests returns a method usable in polling methods which will check that the nodes are
// in a testable state based on schedulability.
// CheckReadyForTests returns a function which will return 'true' once the number of not-ready nodes drops to at most the allowedNotReadyNodes threshold (i.e. to be used as a global gate for starting the tests).
func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowedNotReadyNodes, largeClusterThreshold int) func() (bool, error) {
attempt := 0
var notSchedulable []*v1.Node
return func() (bool, error) {
attempt++
notSchedulable = nil
var nodesNotReadyYet []v1.Node
opts := metav1.ListOptions{
ResourceVersion: "0",
FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
// remove cordoned nodes from our calculation; TODO refactor if node v2 API removes that semantic.
FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
}
nodes, err := c.CoreV1().Nodes().List(context.TODO(), opts)
allNodes, err := c.CoreV1().Nodes().List(context.TODO(), opts)
if err != nil {
e2elog.Logf("Unexpected error listing nodes: %v", err)
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
for i := range nodes.Items {
node := &nodes.Items[i]
if !readyForTests(node, nonblockingTaints) {
notSchedulable = append(notSchedulable, node)
for _, node := range allNodes.Items {
if !readyForTests(&node, nonblockingTaints) {
nodesNotReadyYet = append(nodesNotReadyYet, node)
}
}
// Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
@ -240,25 +225,29 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
// provisioned correctly at startup will never become ready (e.g. when something
// won't install correctly), so we can't expect them to be ready at any point.
//
// However, we only allow non-ready nodes with some specific reasons.
if len(notSchedulable) > 0 {
// We log the *reason* why nodes are not schedulable; specifically, it's usually the network not being available.
if len(nodesNotReadyYet) > 0 {
// In large clusters, log them only every 10th pass.
if len(nodes.Items) < largeClusterThreshold || attempt%10 == 0 {
e2elog.Logf("Unschedulable nodes:")
for i := range notSchedulable {
e2elog.Logf("-> %s Ready=%t Network=%t Taints=%v NonblockingTaints:%v",
notSchedulable[i].Name,
IsConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeReady, true),
IsConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeNetworkUnavailable, false),
notSchedulable[i].Spec.Taints,
if len(nodesNotReadyYet) < largeClusterThreshold || attempt%10 == 0 {
e2elog.Logf("Unschedulable nodes= %v, maximum value for starting tests= %v", len(nodesNotReadyYet), allowedNotReadyNodes)
for _, node := range nodesNotReadyYet {
e2elog.Logf(" -> Node %s [[[ Ready=%t, Network(available)=%t, Taints=%v, NonblockingTaints=%v ]]]",
node.Name,
IsConditionSetAsExpectedSilent(&node, v1.NodeReady, true),
IsConditionSetAsExpectedSilent(&node, v1.NodeNetworkUnavailable, false),
node.Spec.Taints,
nonblockingTaints,
)
}
e2elog.Logf("================================")
if len(nodesNotReadyYet) > allowedNotReadyNodes {
ready := len(allNodes.Items) - len(nodesNotReadyYet)
remaining := len(nodesNotReadyYet) - allowedNotReadyNodes
e2elog.Logf("==== node wait: %v out of %v nodes are ready, max notReady allowed %v. Need %v more before starting.", ready, len(allNodes.Items), allowedNotReadyNodes, remaining)
}
}
}
return len(notSchedulable) <= allowedNotReadyNodes, nil
return len(nodesNotReadyYet) <= allowedNotReadyNodes, nil
}
}
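Since the returned closure matches wait.ConditionFunc, a typical caller looks like the sketch below (taint key and thresholds are illustrative values, not framework defaults):
package main
import (
	"time"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)
// waitForTestableCluster polls until at most allowedNotReady nodes are
// still not ready.
func waitForTestableCluster(c clientset.Interface) error {
	const (
		nonblockingTaints     = "node-role.kubernetes.io/master"
		allowedNotReady       = 0
		largeClusterThreshold = 50
	)
	return wait.PollImmediate(10*time.Second, 10*time.Minute,
		e2enode.CheckReadyForTests(c, nonblockingTaints, allowedNotReady, largeClusterThreshold))
}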

View File

@ -20,11 +20,11 @@ import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
"sync"
"time"
"github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
@ -34,7 +34,7 @@ import (
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)
const etcdImage = "3.4.3-0"
const etcdImage = "3.4.13-0"
// EtcdUpgrade upgrades etcd on GCE.
func EtcdUpgrade(targetStorage, targetVersion string) error {
@ -46,20 +46,6 @@ func EtcdUpgrade(targetStorage, targetVersion string) error {
}
}
// MasterUpgrade upgrades master node on GCE/GKE.
func MasterUpgrade(f *Framework, v string) error {
switch TestContext.Provider {
case "gce":
return masterUpgradeGCE(v, false)
case "gke":
return masterUpgradeGKE(f.Namespace.Name, v)
case "kubernetes-anywhere":
return masterUpgradeKubernetesAnywhere(v)
default:
return fmt.Errorf("MasterUpgrade() is not implemented for provider %s", TestContext.Provider)
}
}
func etcdUpgradeGCE(targetStorage, targetVersion string) error {
env := append(
os.Environ(),
@ -67,37 +53,12 @@ func etcdUpgradeGCE(targetStorage, targetVersion string) error {
"STORAGE_BACKEND="+targetStorage,
"TEST_ETCD_IMAGE="+etcdImage)
_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-l", "-M")
_, _, err := RunCmdEnv(env, GCEUpgradeScript(), "-l", "-M")
return err
}
// MasterUpgradeGCEWithKubeProxyDaemonSet upgrades master node on GCE with enabling/disabling the daemon set of kube-proxy.
// TODO(mrhohn): Remove this function when kube-proxy is run as a DaemonSet by default.
func MasterUpgradeGCEWithKubeProxyDaemonSet(v string, enableKubeProxyDaemonSet bool) error {
return masterUpgradeGCE(v, enableKubeProxyDaemonSet)
}
// TODO(mrhohn): Remove 'enableKubeProxyDaemonSet' when kube-proxy is run as a DaemonSet by default.
func masterUpgradeGCE(rawV string, enableKubeProxyDaemonSet bool) error {
env := append(os.Environ(), fmt.Sprintf("KUBE_PROXY_DAEMONSET=%v", enableKubeProxyDaemonSet))
// TODO: Remove these variables when they're no longer needed for downgrades.
if TestContext.EtcdUpgradeVersion != "" && TestContext.EtcdUpgradeStorage != "" {
env = append(env,
"TEST_ETCD_VERSION="+TestContext.EtcdUpgradeVersion,
"STORAGE_BACKEND="+TestContext.EtcdUpgradeStorage,
"TEST_ETCD_IMAGE="+etcdImage)
} else {
// In e2e tests, we skip the confirmation prompt about
// implicit etcd upgrades to simulate the user entering "y".
env = append(env, "TEST_ALLOW_IMPLICIT_ETCD_UPGRADE=true")
}
v := "v" + rawV
_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-M", v)
return err
}
func locationParamGKE() string {
// LocationParamGKE returns the location-related parameter for the gcloud command.
func LocationParamGKE() string {
if TestContext.CloudConfig.MultiMaster {
// GKE Regional Clusters are being tested.
return fmt.Sprintf("--region=%s", TestContext.CloudConfig.Region)
@ -105,7 +66,8 @@ func locationParamGKE() string {
return fmt.Sprintf("--zone=%s", TestContext.CloudConfig.Zone)
}
func appendContainerCommandGroupIfNeeded(args []string) []string {
// AppendContainerCommandGroupIfNeeded returns the container command group parameter if necessary.
func AppendContainerCommandGroupIfNeeded(args []string) []string {
if TestContext.CloudConfig.Region != "" {
// TODO(wojtek-t): Get rid of it once Regional Clusters go to GA.
return append([]string{"beta"}, args...)
@ -113,184 +75,40 @@ func appendContainerCommandGroupIfNeeded(args []string) []string {
return args
}
func masterUpgradeGKE(namespace string, v string) error {
// MasterUpgradeGKE upgrades master node to the specified version on GKE.
func MasterUpgradeGKE(namespace string, v string) error {
Logf("Upgrading master to %q", v)
args := []string{
"container",
"clusters",
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
locationParamGKE(),
LocationParamGKE(),
"upgrade",
TestContext.CloudConfig.Cluster,
"--master",
fmt.Sprintf("--cluster-version=%s", v),
"--quiet",
}
_, _, err := RunCmd("gcloud", appendContainerCommandGroupIfNeeded(args)...)
_, _, err := RunCmd("gcloud", AppendContainerCommandGroupIfNeeded(args)...)
if err != nil {
return err
}
waitForSSHTunnels(namespace)
WaitForSSHTunnels(namespace)
return nil
}
func masterUpgradeKubernetesAnywhere(v string) error {
Logf("Upgrading master to %q", v)
kaPath := TestContext.KubernetesAnywherePath
originalConfigPath := filepath.Join(kaPath, ".config")
backupConfigPath := filepath.Join(kaPath, ".config.bak")
updatedConfigPath := filepath.Join(kaPath, fmt.Sprintf(".config-%s", v))
// modify config with specified k8s version
if _, _, err := RunCmd("sed",
"-i.bak", // writes original to .config.bak
fmt.Sprintf(`s/kubernetes_version=.*$/kubernetes_version=%q/`, v),
originalConfigPath); err != nil {
return err
}
defer func() {
// revert .config.bak to .config
if err := os.Rename(backupConfigPath, originalConfigPath); err != nil {
Logf("Could not rename %s back to %s", backupConfigPath, originalConfigPath)
}
}()
// invoke ka upgrade
if _, _, err := RunCmd("make", "-C", TestContext.KubernetesAnywherePath,
"WAIT_FOR_KUBECONFIG=y", "upgrade-master"); err != nil {
return err
}
// move .config to .config.<version>
if err := os.Rename(originalConfigPath, updatedConfigPath); err != nil {
return err
}
return nil
}
// NodeUpgrade upgrades nodes on GCE/GKE.
func NodeUpgrade(f *Framework, v string, img string) error {
// Perform the upgrade.
var err error
switch TestContext.Provider {
case "gce":
err = nodeUpgradeGCE(v, img, false)
case "gke":
err = nodeUpgradeGKE(f.Namespace.Name, v, img)
default:
err = fmt.Errorf("NodeUpgrade() is not implemented for provider %s", TestContext.Provider)
}
if err != nil {
return err
}
return waitForNodesReadyAfterUpgrade(f)
}
// NodeUpgradeGCEWithKubeProxyDaemonSet upgrades nodes on GCE with enabling/disabling the daemon set of kube-proxy.
// TODO(mrhohn): Remove this function when kube-proxy is run as a DaemonSet by default.
func NodeUpgradeGCEWithKubeProxyDaemonSet(f *Framework, v string, img string, enableKubeProxyDaemonSet bool) error {
// Perform the upgrade.
if err := nodeUpgradeGCE(v, img, enableKubeProxyDaemonSet); err != nil {
return err
}
return waitForNodesReadyAfterUpgrade(f)
}
func waitForNodesReadyAfterUpgrade(f *Framework) error {
// Wait for it to complete and validate nodes are healthy.
//
// TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in
// GKE; the operation shouldn't return until they all are.
numNodes, err := e2enode.TotalRegistered(f.ClientSet)
if err != nil {
return fmt.Errorf("couldn't detect number of nodes")
}
Logf("Waiting up to %v for all %d nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout, numNodes)
if _, err := e2enode.CheckReady(f.ClientSet, numNodes, RestartNodeReadyAgainTimeout); err != nil {
return err
}
return nil
}
// TODO(mrhohn): Remove 'enableKubeProxyDaemonSet' when kube-proxy is run as a DaemonSet by default.
func nodeUpgradeGCE(rawV, img string, enableKubeProxyDaemonSet bool) error {
v := "v" + rawV
env := append(os.Environ(), fmt.Sprintf("KUBE_PROXY_DAEMONSET=%v", enableKubeProxyDaemonSet))
if img != "" {
env = append(env, "KUBE_NODE_OS_DISTRIBUTION="+img)
_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-N", "-o", v)
return err
}
_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-N", v)
return err
}
func nodeUpgradeGKE(namespace string, v string, img string) error {
Logf("Upgrading nodes to version %q and image %q", v, img)
nps, err := nodePoolsGKE()
if err != nil {
return err
}
Logf("Found node pools %v", nps)
for _, np := range nps {
args := []string{
"container",
"clusters",
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
locationParamGKE(),
"upgrade",
TestContext.CloudConfig.Cluster,
fmt.Sprintf("--node-pool=%s", np),
fmt.Sprintf("--cluster-version=%s", v),
"--quiet",
}
if len(img) > 0 {
args = append(args, fmt.Sprintf("--image-type=%s", img))
}
_, _, err = RunCmd("gcloud", appendContainerCommandGroupIfNeeded(args)...)
if err != nil {
return err
}
waitForSSHTunnels(namespace)
}
return nil
}
func nodePoolsGKE() ([]string, error) {
args := []string{
"container",
"node-pools",
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
locationParamGKE(),
"list",
fmt.Sprintf("--cluster=%s", TestContext.CloudConfig.Cluster),
"--format=get(name)",
}
stdout, _, err := RunCmd("gcloud", appendContainerCommandGroupIfNeeded(args)...)
if err != nil {
return nil, err
}
if len(strings.TrimSpace(stdout)) == 0 {
return []string{}, nil
}
return strings.Fields(stdout), nil
}
func gceUpgradeScript() string {
// GCEUpgradeScript returns the path of the script used for upgrading on GCE.
func GCEUpgradeScript() string {
if len(TestContext.GCEUpgradeScript) == 0 {
return path.Join(TestContext.RepoRoot, "cluster/gce/upgrade.sh")
}
return TestContext.GCEUpgradeScript
}
func waitForSSHTunnels(namespace string) {
// WaitForSSHTunnels waits for an SSH tunnel to a busybox pod to be established.
func WaitForSSHTunnels(namespace string) {
Logf("Waiting for SSH tunnels to establish")
RunKubectl(namespace, "run", "ssh-tunnel-test",
"--image=busybox",
@ -345,6 +163,7 @@ func (k *NodeKiller) kill(nodes []v1.Node) {
for _, node := range nodes {
node := node
go func() {
defer ginkgo.GinkgoRecover()
defer wg.Done()
Logf("Stopping docker and kubelet on %q to simulate failure", node.Name)

View File

@ -12,25 +12,21 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/framework/pod",
visibility = ["//visibility:public"],
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/kubectl/pkg/util/podutils:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/resource:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
],
)

View File

@ -19,6 +19,7 @@ package pod
import (
"context"
"fmt"
"runtime"
"time"
v1 "k8s.io/api/core/v1"
@ -33,6 +34,24 @@ var (
BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
)
// Config is a struct containing all arguments for creating a pod.
// SELinux testing requires passing HostIPC and HostPID as boolean arguments.
type Config struct {
NS string
PVCs []*v1.PersistentVolumeClaim
PVCsReadOnly bool
InlineVolumeSources []*v1.VolumeSource
IsPrivileged bool
Command string
HostIPC bool
HostPID bool
SeLinuxLabel *v1.SELinuxOptions
FsGroup *int64
NodeSelection NodeSelection
ImageID int
PodFSGroupChangePolicy *v1.PodFSGroupChangePolicy
}
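A sketch of the new Config-based call (hypothetical namespace and command), which replaces the long positional parameter list of the old CreateSecPod:
package main
import (
	"time"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
// startPrivilegedPod is a hypothetical wrapper: unset Config fields fall back
// to the defaults applied by MakeSecPod (e.g. FsGroup, the pause-like command).
func startPrivilegedPod(c clientset.Interface, ns string) error {
	cfg := &e2epod.Config{
		NS:           ns,
		IsPrivileged: true,
		Command:      "sleep 3600",
	}
	_, err := e2epod.CreateSecPod(c, cfg, 5*time.Minute)
	return err
}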
// CreateUnschedulablePod with given claims based on node selector
func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
@ -79,27 +98,29 @@ func CreatePod(client clientset.Interface, namespace string, nodeSelector map[st
}
// CreateSecPod creates security pod with given claims
func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, timeout time.Duration) (*v1.Pod, error) {
return CreateSecPodWithNodeSelection(client, namespace, pvclaims, inlineVolumeSources, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup, NodeSelection{}, timeout)
func CreateSecPod(client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) {
return CreateSecPodWithNodeSelection(client, podConfig, timeout)
}
// CreateSecPodWithNodeSelection creates security pod with given claims
func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, node NodeSelection, timeout time.Duration) (*v1.Pod, error) {
pod := MakeSecPod(namespace, pvclaims, inlineVolumeSources, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup)
SetNodeSelection(&pod.Spec, node)
func CreateSecPodWithNodeSelection(client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) {
pod, err := MakeSecPod(podConfig)
if err != nil {
return nil, fmt.Errorf("Unable to create pod: %v", err)
}
pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
pod, err = client.CoreV1().Pods(podConfig.NS).Create(context.TODO(), pod, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
}
// Waiting for pod to be running
err = WaitTimeoutForPodRunningInNamespace(client, pod.Name, namespace, timeout)
err = WaitTimeoutForPodRunningInNamespace(client, pod.Name, podConfig.NS, timeout)
if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
pod, err = client.CoreV1().Pods(podConfig.NS).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
}
@ -153,17 +174,23 @@ func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.Persisten
// MakeSecPod returns a pod definition based on the namespace. The pod references the PVC's
// name. A slice of BASH commands can be supplied as args to be run by the pod.
// SELinux testing requires passing HostIPC and HostPID as boolean arguments.
func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64) *v1.Pod {
if len(command) == 0 {
command = "trap exit TERM; while true; do sleep 1; done"
func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
if podConfig.NS == "" {
return nil, fmt.Errorf("Cannot create pod with empty namespace")
}
podName := "security-context-" + string(uuid.NewUUID())
if fsGroup == nil {
fsGroup = func(i int64) *int64 {
if len(podConfig.Command) == 0 {
podConfig.Command = "trap exit TERM; while true; do sleep 1; done"
}
podName := "pod-" + string(uuid.NewUUID())
if podConfig.FsGroup == nil && runtime.GOOS != "windows" {
podConfig.FsGroup = func(i int64) *int64 {
return &i
}(1000)
}
image := imageutils.BusyBox
if podConfig.ImageID != imageutils.None {
image = podConfig.ImageID
}
podSpec := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@ -171,33 +198,37 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSou
},
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: ns,
Namespace: podConfig.NS,
},
Spec: v1.PodSpec{
HostIPC: hostIPC,
HostPID: hostPID,
HostIPC: podConfig.HostIPC,
HostPID: podConfig.HostPID,
SecurityContext: &v1.PodSecurityContext{
FSGroup: fsGroup,
FSGroup: podConfig.FsGroup,
},
Containers: []v1.Container{
{
Name: "write-pod",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Image: imageutils.GetE2EImage(image),
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
Args: []string{"-c", podConfig.Command},
SecurityContext: &v1.SecurityContext{
Privileged: &isPrivileged,
Privileged: &podConfig.IsPrivileged,
},
},
},
RestartPolicy: v1.RestartPolicyOnFailure,
},
}
if podConfig.PodFSGroupChangePolicy != nil {
podSpec.Spec.SecurityContext.FSGroupChangePolicy = podConfig.PodFSGroupChangePolicy
}
var volumeMounts = make([]v1.VolumeMount, 0)
var volumeDevices = make([]v1.VolumeDevice, 0)
var volumes = make([]v1.Volume, len(pvclaims)+len(inlineVolumeSources))
var volumes = make([]v1.Volume, len(podConfig.PVCs)+len(podConfig.InlineVolumeSources))
volumeIndex := 0
for _, pvclaim := range pvclaims {
for _, pvclaim := range podConfig.PVCs {
volumename := fmt.Sprintf("volume%v", volumeIndex+1)
if pvclaim.Spec.VolumeMode != nil && *pvclaim.Spec.VolumeMode == v1.PersistentVolumeBlock {
volumeDevices = append(volumeDevices, v1.VolumeDevice{Name: volumename, DevicePath: "/mnt/" + volumename})
@ -205,10 +236,10 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSou
volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
}
volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: podConfig.PVCsReadOnly}}}
volumeIndex++
}
for _, src := range inlineVolumeSources {
for _, src := range podConfig.InlineVolumeSources {
volumename := fmt.Sprintf("volume%v", volumeIndex+1)
// In-line volumes can be only filesystem, not block.
volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
@ -219,6 +250,10 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSou
podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
podSpec.Spec.Containers[0].VolumeDevices = volumeDevices
podSpec.Spec.Volumes = volumes
podSpec.Spec.SecurityContext.SELinuxOptions = seLinuxLabel
return podSpec
if runtime.GOOS != "windows" {
podSpec.Spec.SecurityContext.SELinuxOptions = podConfig.SeLinuxLabel
}
SetNodeSelection(&podSpec.Spec, podConfig.NodeSelection)
return podSpec, nil
}

View File

@ -35,10 +35,15 @@ const (
PodDeleteTimeout = 5 * time.Minute
)
// DeletePodOrFail deletes the pod of the specified namespace and name.
// DeletePodOrFail deletes the pod of the specified namespace and name. Resilient to the pod
// not existing.
func DeletePodOrFail(c clientset.Interface, ns, name string) {
ginkgo.By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns))
err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
if err != nil && apierrors.IsNotFound(err) {
return
}
expectNoError(err, "failed to delete pod %s in namespace %s", name, ns)
}
@ -69,3 +74,31 @@ func DeletePodWithWaitByName(c clientset.Interface, podName, podNamespace string
}
return nil
}
// DeletePodWithGracePeriod deletes the passed-in pod. Resilient to the pod not existing.
func DeletePodWithGracePeriod(c clientset.Interface, pod *v1.Pod, grace int64) error {
return DeletePodWithGracePeriodByName(c, pod.GetName(), pod.GetNamespace(), grace)
}
// DeletePodsWithGracePeriod deletes the passed-in pods. Resilient to the pods not existing.
func DeletePodsWithGracePeriod(c clientset.Interface, pods []v1.Pod, grace int64) error {
for _, pod := range pods {
if err := DeletePodWithGracePeriod(c, &pod, grace); err != nil {
return err
}
}
return nil
}
// DeletePodWithGracePeriodByName deletes a pod by name and namespace. Resilient to the pod not existing.
func DeletePodWithGracePeriodByName(c clientset.Interface, podName, podNamespace string, grace int64) error {
e2elog.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
err := c.CoreV1().Pods(podNamespace).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(grace))
if err != nil {
if apierrors.IsNotFound(err) {
return nil // assume pod was already deleted
}
return fmt.Errorf("pod Delete API error: %v", err)
}
return nil
}
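Usage sketch (hypothetical helper): a grace period of 0 requests immediate deletion, and a pod that is already gone counts as success.
package main
import (
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
// forceDeletePod deletes a pod immediately by name, tolerating NotFound.
func forceDeletePod(c clientset.Interface, ns, name string) error {
	return e2epod.DeletePodWithGracePeriodByName(c, name, ns, 0)
}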

View File

@ -32,14 +32,17 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/client/conditions"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/klog/v2"
"k8s.io/kubectl/pkg/util/podutils"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
// errPodCompleted is returned by PodRunning or PodContainerRunning to indicate that
// the pod has already reached completed state.
var errPodCompleted = fmt.Errorf("pod ran to completion")
// TODO: Move to its own subpkg.
// expectNoError checks if "err" is set, and if so, fails assertion while logging the error.
func expectNoError(err error, explain ...interface{}) {
@ -155,7 +158,7 @@ func podRunning(c clientset.Interface, podName, namespace string) wait.Condition
case v1.PodRunning:
return true, nil
case v1.PodFailed, v1.PodSucceeded:
return false, conditions.ErrPodCompleted
return false, errPodCompleted
}
return false, nil
}
@ -184,10 +187,10 @@ func podRunningAndReady(c clientset.Interface, podName, namespace string) wait.C
switch pod.Status.Phase {
case v1.PodFailed, v1.PodSucceeded:
e2elog.Logf("The status of Pod %s is %s which is unexpected", podName, pod.Status.Phase)
return false, conditions.ErrPodCompleted
return false, errPodCompleted
case v1.PodRunning:
e2elog.Logf("The status of Pod %s is %s (Ready = %v)", podName, pod.Status.Phase, podutil.IsPodReady(pod))
return podutil.IsPodReady(pod), nil
e2elog.Logf("The status of Pod %s is %s (Ready = %v)", podName, pod.Status.Phase, podutils.IsPodReady(pod))
return podutils.IsPodReady(pod), nil
}
e2elog.Logf("The status of Pod %s is %s, waiting for it to be Running (with Ready = true)", podName, pod.Status.Phase)
return false, nil
@ -295,6 +298,29 @@ func podsRunning(c clientset.Interface, pods *v1.PodList) []error {
return e
}
func podContainerFailed(c clientset.Interface, namespace, podName string, containerIndex int, reason string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case v1.PodPending:
if len(pod.Status.ContainerStatuses) == 0 {
return false, nil
}
containerStatus := pod.Status.ContainerStatuses[containerIndex]
if containerStatus.State.Waiting != nil && containerStatus.State.Waiting.Reason == reason {
return true, nil
}
return false, nil
case v1.PodFailed, v1.PodRunning, v1.PodSucceeded:
return false, fmt.Errorf("pod was expected to be pending, but it is in the state: %s", pod.Status.Phase)
}
return false, nil
}
}
// LogPodStates logs basic info of provided pods for debugging.
func LogPodStates(pods []v1.Pod) {
// Find maximum widths for pod, node, and phase strings for column printing.
@ -376,29 +402,28 @@ func FilterNonRestartablePods(pods []*v1.Pod) []*v1.Pod {
}
func isNotRestartAlwaysMirrorPod(p *v1.Pod) bool {
if !kubetypes.IsMirrorPod(p) {
// Check if the pod is a mirror pod
if _, ok := p.Annotations[v1.MirrorPodAnnotationKey]; !ok {
return false
}
return p.Spec.RestartPolicy != v1.RestartPolicyAlways
}
// NewExecPodSpec returns the pod spec of hostexec pod
func NewExecPodSpec(ns, name string, hostNetwork bool) *v1.Pod {
// NewAgnhostPod returns a pod that uses the agnhost image. The image's binary supports various subcommands
// that behave the same, no matter the underlying OS. If no args are given, it defaults to the pause subcommand.
// For more information about agnhost subcommands, see: https://github.com/kubernetes/kubernetes/tree/master/test/images/agnhost#agnhost
func NewAgnhostPod(ns, podName string, volumes []v1.Volume, mounts []v1.VolumeMount, ports []v1.ContainerPort, args ...string) *v1.Pod {
immediate := int64(0)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Name: podName,
Namespace: ns,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "agnhost",
Image: imageutils.GetE2EImage(imageutils.Agnhost),
ImagePullPolicy: v1.PullIfNotPresent,
},
NewAgnhostContainer("agnhost-container", mounts, ports, args...),
},
HostNetwork: hostNetwork,
Volumes: volumes,
SecurityContext: &v1.PodSecurityContext{},
TerminationGracePeriodSeconds: &immediate,
},
@ -406,25 +431,35 @@ func NewExecPodSpec(ns, name string, hostNetwork bool) *v1.Pod {
return pod
}
// NewAgnhostContainer returns the container Spec of an agnhost container.
func NewAgnhostContainer(containerName string, mounts []v1.VolumeMount, ports []v1.ContainerPort, args ...string) v1.Container {
if len(args) == 0 {
args = []string{"pause"}
}
return v1.Container{
Name: containerName,
Image: imageutils.GetE2EImage(imageutils.Agnhost),
Args: args,
VolumeMounts: mounts,
Ports: ports,
SecurityContext: &v1.SecurityContext{},
ImagePullPolicy: v1.PullIfNotPresent,
}
}
// NewExecPodSpec returns the pod spec of hostexec pod
func NewExecPodSpec(ns, name string, hostNetwork bool) *v1.Pod {
pod := NewAgnhostPod(ns, name, nil, nil, nil)
pod.Spec.HostNetwork = hostNetwork
return pod
}
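A sketch showing NewAgnhostPod with a non-default subcommand (the netexec HTTP server; pod name and port are illustrative):
package main
import (
	v1 "k8s.io/api/core/v1"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
// netexecPod builds an agnhost pod serving HTTP on port 8080 instead of the
// default pause subcommand.
func netexecPod(ns string) *v1.Pod {
	ports := []v1.ContainerPort{{ContainerPort: 8080}}
	return e2epod.NewAgnhostPod(ns, "netexec", nil, nil, ports, "netexec", "--http-port=8080")
}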
// newExecPodSpec returns the pod spec of exec pod
func newExecPodSpec(ns, generateName string) *v1.Pod {
immediate := int64(0)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: generateName,
Namespace: ns,
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &immediate,
Containers: []v1.Container{
{
Name: "agnhost-pause",
Image: imageutils.GetE2EImage(imageutils.Agnhost),
Args: []string{"pause"},
},
},
},
}
// GenerateName is an optional prefix, used by the server,
// to generate a unique name ONLY IF the Name field has not been provided
pod := NewAgnhostPod(ns, "", nil, nil, nil)
pod.ObjectMeta.GenerateName = generateName
return pod
}
@ -441,14 +476,11 @@ func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tw
err = wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) {
retrievedPod, err := client.CoreV1().Pods(execPod.Namespace).Get(context.TODO(), execPod.Name, metav1.GetOptions{})
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
return retrievedPod.Status.Phase == v1.PodRunning, nil
})
expectNoError(err)
expectNoError(err, "failed to create new exec pod in namespace: %s", ns)
return execPod
}
@ -498,25 +530,33 @@ func checkPodsCondition(c clientset.Interface, ns string, podNames []string, tim
// GetPodLogs returns the logs of the specified container (namespace/pod/container).
func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, false)
return getPodLogsInternal(c, namespace, podName, containerName, false, nil)
}
// GetPodLogsSince returns the logs of the specified container (namespace/pod/container) since a timestamp.
func GetPodLogsSince(c clientset.Interface, namespace, podName, containerName string, since time.Time) (string, error) {
sinceTime := metav1.NewTime(since)
return getPodLogsInternal(c, namespace, podName, containerName, false, &sinceTime)
}
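Usage sketch (hypothetical helper): fetch only recent output, which keeps log dumps small for long-running pods.
package main
import (
	"time"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
// recentLogs fetches only the last minute of a container's output.
func recentLogs(c clientset.Interface, ns, pod, container string) (string, error) {
	return e2epod.GetPodLogsSince(c, ns, pod, container, time.Now().Add(-time.Minute))
}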
// GetPreviousPodLogs returns the logs of the previous instance of the
// specified container (namespace/pod/container).
func GetPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, true)
return getPodLogsInternal(c, namespace, podName, containerName, true, nil)
}
// utility function for gomega Eventually
func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool) (string, error) {
logs, err := c.CoreV1().RESTClient().Get().
func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool, sinceTime *metav1.Time) (string, error) {
request := c.CoreV1().RESTClient().Get().
Resource("pods").
Namespace(namespace).
Name(podName).SubResource("log").
Param("container", containerName).
Param("previous", strconv.FormatBool(previous)).
Do(context.TODO()).
Raw()
Param("previous", strconv.FormatBool(previous))
if sinceTime != nil {
request.Param("sinceTime", sinceTime.Format(time.RFC3339))
}
logs, err := request.Do(context.TODO()).Raw()
if err != nil {
return "", err
}
@ -543,3 +583,72 @@ func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[strin
}
return filtered, nil
}
// GetPods returns the pods matching the given labels in the given ns
func GetPods(c clientset.Interface, ns string, matchLabels map[string]string) ([]v1.Pod, error) {
label := labels.SelectorFromSet(matchLabels)
listOpts := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), listOpts)
if err != nil {
return []v1.Pod{}, err
}
return pods.Items, nil
}
// GetPodSecretUpdateTimeout returns the timeout duration for updating pod secret.
func GetPodSecretUpdateTimeout(c clientset.Interface) time.Duration {
// With SecretManager(ConfigMapManager), we may have to wait up to full sync period +
// TTL of secret(configmap) to elapse before the Kubelet projects the update into the
// volume and the container picks it up.
// So this timeout is based on default Kubelet sync period (1 minute) + maximum TTL for
// secret(configmap) that's based on cluster size + additional time as a fudge factor.
secretTTL, err := getNodeTTLAnnotationValue(c)
if err != nil {
e2elog.Logf("Couldn't get node TTL annotation (using default value of 0): %v", err)
}
podLogTimeout := 240*time.Second + secretTTL
return podLogTimeout
}
func getNodeTTLAnnotationValue(c clientset.Interface) (time.Duration, error) {
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil || len(nodes.Items) == 0 {
return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %v", err)
}
// Since the TTL the kubelet uses is stored in the node object, for timeout
// purposes we take it from the first node (all of them should be the same).
node := &nodes.Items[0]
if node.Annotations == nil {
return time.Duration(0), fmt.Errorf("No annotations found on the node")
}
value, ok := node.Annotations[v1.ObjectTTLAnnotationKey]
if !ok {
return time.Duration(0), fmt.Errorf("No TTL annotation found on the node")
}
intValue, err := strconv.Atoi(value)
if err != nil {
return time.Duration(0), fmt.Errorf("Cannot convert TTL annotation from %#v to int", *node)
}
return time.Duration(intValue) * time.Second, nil
}
// FilterActivePods returns pods that have not terminated.
func FilterActivePods(pods []*v1.Pod) []*v1.Pod {
var result []*v1.Pod
for _, p := range pods {
if IsPodActive(p) {
result = append(result, p)
} else {
klog.V(4).Infof("Ignoring inactive pod %v/%v in state %v, deletion time %v",
p.Namespace, p.Name, p.Status.Phase, p.DeletionTimestamp)
}
}
return result
}
// IsPodActive returns true if the pod has not succeeded, has not failed, and is not marked for deletion.
func IsPodActive(p *v1.Pod) bool {
return v1.PodSucceeded != p.Status.Phase &&
v1.PodFailed != p.Status.Phase &&
p.DeletionTimestamp == nil
}
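// Illustrative usage (editor's sketch, not part of the upstream change): counting
// the pods a controller still owns; pods is a stand-in for a real lister result.
//
//	active := FilterActivePods(pods)
//	klog.V(2).Infof("%d of %d pods are still active", len(active), len(pods))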

View File

@ -31,13 +31,10 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubectl/pkg/util/podutils"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
testutils "k8s.io/kubernetes/test/utils"
)
@ -70,8 +67,11 @@ const (
type podCondition func(pod *v1.Pod) (bool, error)
// errorBadPodsStates creates an error message with basic info about the bad pods, for debugging.
func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration) string {
func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration, err error) string {
errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout)
if err != nil {
errStr += fmt.Sprintf("Last error: %s\n", err)
}
// Print bad pods info only if there are at most 10 bad pods
if len(badPods) > 10 {
return errStr + "There are too many bad pods. Please check log for details."
@ -113,6 +113,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
badPods := []v1.Pod{}
desiredPods := 0
notReady := int32(0)
var lastAPIError error
if wait.PollImmediate(poll, timeout, func() (bool, error) {
// We get the new list of pods, replication controllers, and
@ -120,13 +121,13 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
// online during startup and we want to ensure they are also
// checked.
replicas, replicaOk := int32(0), int32(0)
// Clear API error from the last attempt in case the following calls succeed.
lastAPIError = nil
rcList, err := c.CoreV1().ReplicationControllers(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
e2elog.Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
if testutils.IsRetryableAPIError(err) {
return false, nil
}
lastAPIError = err
return false, err
}
for _, rc := range rcList.Items {
@ -136,10 +137,8 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
rsList, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
lastAPIError = err
e2elog.Logf("Error getting replication sets in namespace %q: %v", ns, err)
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
for _, rs := range rsList.Items {
@ -149,10 +148,8 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
lastAPIError = err
e2elog.Logf("Error getting pods in namespace '%s': %v", ns, err)
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
nOk := int32(0)
@ -196,7 +193,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
return false, nil
}) != nil {
if !ignoreNotReady {
return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout))
return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout, lastAPIError))
}
e2elog.Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods)
}
@ -218,7 +215,7 @@ func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeou
}
// log now so that current pod info is reported before calling `condition()`
e2elog.Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v",
podName, pod.Status.Phase, pod.Status.Reason, podutil.IsPodReady(pod), time.Since(start))
podName, pod.Status.Phase, pod.Status.Reason, podutils.IsPodReady(pod), time.Since(start))
if done, err := condition(pod); done {
if err == nil {
e2elog.Logf("Pod %q satisfied condition %q", podName, desc)
@ -304,7 +301,7 @@ func WaitForMatchPodsCondition(c clientset.Interface, opts metav1.ListOptions, d
return fmt.Errorf("Unexpected error: %v", err)
}
if !done {
conditionNotMatch = append(conditionNotMatch, format.Pod(&pod))
conditionNotMatch = append(conditionNotMatch, fmt.Sprintf("%s_%s(%s)", pod.Name, pod.Namespace, pod.UID))
}
}
if len(conditionNotMatch) <= 0 {
@ -400,9 +397,6 @@ func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labe
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
found := false
@ -428,40 +422,6 @@ func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods
return wait.PollImmediate(poll, podRespondingTimeout, NewProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
}
// WaitForControlledPodsRunning waits up to 10 minutes for pods to become Running.
func WaitForControlledPodsRunning(c clientset.Interface, ns, name string, kind schema.GroupKind) error {
rtObject, err := e2eresource.GetRuntimeObjectForKind(c, kind, ns, name)
if err != nil {
return err
}
selector, err := e2eresource.GetSelectorFromRuntimeObject(rtObject)
if err != nil {
return err
}
replicas, err := e2eresource.GetReplicasFromRuntimeObject(rtObject)
if err != nil {
return err
}
err = testutils.WaitForEnoughPodsWithLabelRunning(c, ns, selector, int(replicas))
if err != nil {
return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", name, err)
}
return nil
}
// WaitForControlledPods waits up to podListTimeout for getting pods of the specified controller name and return them.
func WaitForControlledPods(c clientset.Interface, ns, name string, kind schema.GroupKind) (pods *v1.PodList, err error) {
rtObject, err := e2eresource.GetRuntimeObjectForKind(c, kind, ns, name)
if err != nil {
return nil, err
}
selector, err := e2eresource.GetSelectorFromRuntimeObject(rtObject)
if err != nil {
return nil, err
}
return WaitForPodsWithLabel(c, ns, selector)
}
// WaitForPodsWithLabelScheduled waits for all matching pods to become scheduled and at least one
// matching pod exists. Return the list of matching pods.
func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
@ -487,9 +447,6 @@ func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selecto
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err = c.CoreV1().Pods(ns).List(context.TODO(), options)
if err != nil {
if testutils.IsRetryableAPIError(err) {
continue
}
return
}
if len(pods.Items) > 0 {
@ -511,9 +468,6 @@ func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label la
pods, err = WaitForPodsWithLabel(c, ns, label)
if err != nil {
e2elog.Logf("Failed to list pods: %v", err)
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
current = 0
@ -541,7 +495,7 @@ func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds in
return false, nil
}
for _, pod := range pods.Items {
if !podutil.IsPodAvailable(&pod, int32(minReadySeconds), metav1.Now()) {
if !podutils.IsPodAvailable(&pod, int32(minReadySeconds), metav1.Now()) {
return false, nil
}
}
@ -574,3 +528,10 @@ func WaitForNRestartablePods(ps *testutils.PodStore, expect int, timeout time.Du
}
return podNames, nil
}
// WaitForPodContainerToFail waits for the given pod container to fail with the given reason, e.g. due to an
// invalid container configuration. The container is expected to remain in the waiting state with the given
// reason set.
func WaitForPodContainerToFail(c clientset.Interface, namespace, podName string, containerIndex int, reason string, timeout time.Duration) error {
return wait.PollImmediate(poll, timeout, podContainerFailed(c, namespace, podName, containerIndex, reason))
}
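// Illustrative usage (editor's sketch, not part of the upstream change): waiting for
// a container with a bad configuration to surface its waiting reason; the reason
// string and timeout below are examples.
//
//	err := WaitForPodContainerToFail(c, ns, "bad-pod", 0, "CreateContainerConfigError", 30*time.Second)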

View File

@ -31,9 +31,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/sysctl"
"k8s.io/kubectl/pkg/util/podutils"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@ -42,13 +40,27 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
// DefaultPodDeletionTimeout is the default timeout for deleting pod
const DefaultPodDeletionTimeout = 3 * time.Minute
const (
// DefaultPodDeletionTimeout is the default timeout for deleting pod
DefaultPodDeletionTimeout = 3 * time.Minute
// ImageWhiteList is the images used in the current test suite. It should be initialized in test suite and
// the images in the white list should be pre-pulled in the test suite. Currently, this is only used by
// the status of container event, copied from k8s.io/kubernetes/pkg/kubelet/events
killingContainer = "Killing"
// the status of container event, copied from k8s.io/kubernetes/pkg/kubelet/events
failedToCreateContainer = "Failed"
// the status of container event, copied from k8s.io/kubernetes/pkg/kubelet/events
startedContainer = "Started"
// it is copied from k8s.io/kubernetes/pkg/kubelet/sysctl
forbiddenReason = "SysctlForbidden"
)
// ImagePrePullList is the list of images used in the current test suite. It should be initialized in the test suite and
// the images in the list should be pre-pulled before the test suite runs. Currently, this is only used by
// node e2e test.
var ImageWhiteList sets.String
var ImagePrePullList sets.String
// PodClient is a convenience method for getting a pod client interface in the framework's namespace,
// possibly applying test-suite specific transformations to the pod spec, e.g. for
@ -173,10 +185,10 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
// in the test anyway.
continue
}
// If the image policy is not PullAlways, the image must be in the white list and
// If the image policy is not PullAlways, the image must be in the pre-pull list and
// pre-pulled.
gomega.Expect(ImageWhiteList.Has(c.Image)).To(gomega.BeTrue(), "Image %q is not in the white list, consider adding it to CommonImageWhiteList in test/e2e/common/util.go or NodeImageWhiteList in test/e2e_node/image_list.go", c.Image)
// Do not pull images during the tests because the images in white list should have
gomega.Expect(ImagePrePullList.Has(c.Image)).To(gomega.BeTrue(), "Image %q is not in the pre-pull list, consider adding it to PrePulledImages in test/e2e/common/util.go or NodePrePullImageList in test/e2e_node/image_list.go", c.Image)
// Do not pull images during the tests because the images in pre-pull list should have
// been prepulled.
c.ImagePullPolicy = v1.PullNever
}
@ -197,7 +209,7 @@ func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
return false, nil
}
},
)).To(gomega.Succeed(), "wait for pod %q to success", name)
)).To(gomega.Succeed(), "wait for pod %q to succeed", name)
}
// WaitForFinish waits for pod to finish running, regardless of success or failure.
@ -227,10 +239,10 @@ func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) {
}
for _, e := range evnts.Items {
switch e.Reason {
case events.KillingContainer, events.FailedToCreateContainer, sysctl.ForbiddenReason:
case killingContainer, failedToCreateContainer, forbiddenReason:
ev = &e
return true, nil
case events.StartedContainer:
case startedContainer:
return true, nil
default:
// ignore all other errors
@ -262,5 +274,5 @@ func (c *PodClient) MatchContainerOutput(name string, containerName string, expe
func (c *PodClient) PodIsReady(name string) bool {
pod, err := c.Get(context.TODO(), name, metav1.GetOptions{})
ExpectNoError(err)
return podutil.IsPodReady(pod)
return podutils.IsPodReady(pod)
}

vendor/k8s.io/kubernetes/test/e2e/framework/ports.go (generated, vendored, new file, 32 lines)
View File

@ -0,0 +1,32 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
// NOTE: constants in this file are copied from pkg/cluster/ports/ports.go
const (
// KubeletPort is the default port for the kubelet server on each host machine.
// May be overridden by a flag at startup.
KubeletPort = 10250
// InsecureKubeControllerManagerPort is the default port for the controller manager status server.
// May be overridden by a flag at startup.
// Deprecated: use the secure KubeControllerManagerPort instead.
InsecureKubeControllerManagerPort = 10252
// KubeControllerManagerPort is the default port for the controller manager status server.
// May be overridden by a flag at startup.
KubeControllerManagerPort = 10257
)
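// Editor's note (not part of the upstream change): these constants are typically
// joined with a node name to address a component's status endpoint; net and strconv
// imports are assumed.
//
//	host := net.JoinHostPort(nodeName, strconv.Itoa(KubeletPort))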

View File

@ -19,6 +19,7 @@ package framework
import (
"context"
"fmt"
"strings"
"sync"
v1 "k8s.io/api/core/v1"
@ -29,16 +30,22 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
"k8s.io/kubernetes/test/e2e/framework/auth"
e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
)
const (
podSecurityPolicyPrivileged = "e2e-test-privileged-psp"
// allowAny is the wildcard used to allow any profile.
allowAny = "*"
// allowedProfilesAnnotationKey specifies the allowed seccomp profiles.
allowedProfilesAnnotationKey = "seccomp.security.alpha.kubernetes.io/allowedProfileNames"
)
var (
@ -52,7 +59,7 @@ func privilegedPSP(name string) *policyv1beta1.PodSecurityPolicy {
return &policyv1beta1.PodSecurityPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{seccomp.AllowedProfilesAnnotationKey: seccomp.AllowAny},
Annotations: map[string]string{allowedProfilesAnnotationKey: allowAny},
},
Spec: policyv1beta1.PodSecurityPolicySpec{
Privileged: true,
@ -87,14 +94,34 @@ func IsPodSecurityPolicyEnabled(kubeClient clientset.Interface) bool {
psps, err := kubeClient.PolicyV1beta1().PodSecurityPolicies().List(context.TODO(), metav1.ListOptions{})
if err != nil {
Logf("Error listing PodSecurityPolicies; assuming PodSecurityPolicy is disabled: %v", err)
isPSPEnabled = false
} else if psps == nil || len(psps.Items) == 0 {
Logf("No PodSecurityPolicies found; assuming PodSecurityPolicy is disabled.")
isPSPEnabled = false
} else {
Logf("Found PodSecurityPolicies; assuming PodSecurityPolicy is enabled.")
isPSPEnabled = true
return
}
if psps == nil || len(psps.Items) == 0 {
Logf("No PodSecurityPolicies found; assuming PodSecurityPolicy is disabled.")
return
}
Logf("Found PodSecurityPolicies; testing pod creation to see if PodSecurityPolicy is enabled")
testPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{GenerateName: "psp-test-pod-"},
Spec: v1.PodSpec{Containers: []v1.Container{{Name: "test", Image: imageutils.GetPauseImageName()}}},
}
dryRunPod, err := kubeClient.CoreV1().Pods("kube-system").Create(context.TODO(), testPod, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
if err != nil {
if strings.Contains(err.Error(), "PodSecurityPolicy") {
Logf("PodSecurityPolicy error creating dryrun pod; assuming PodSecurityPolicy is enabled: %v", err)
isPSPEnabled = true
} else {
Logf("Error creating dryrun pod; assuming PodSecurityPolicy is disabled: %v", err)
}
return
}
pspAnnotation, pspAnnotationExists := dryRunPod.Annotations["kubernetes.io/psp"]
if !pspAnnotationExists {
Logf("No PSP annotation exists on dry run pod; assuming PodSecurityPolicy is disabled")
return
}
Logf("PSP annotation exists on dry run pod: %q; assuming PodSecurityPolicy is enabled", pspAnnotation)
isPSPEnabled = true
})
return isPSPEnabled
}
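// Illustrative usage (editor's sketch, not part of the upstream change): tests gate
// the privileged PSP binding on this probe.
//
//	if IsPodSecurityPolicyEnabled(kubeClient) {
//	    CreatePrivilegedPSPBinding(kubeClient, namespace)
//	}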
@ -123,7 +150,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged)
}
if auth.IsRBACEnabled(kubeClient.RbacV1()) {
if e2eauth.IsRBACEnabled(kubeClient.RbacV1()) {
// Create the Role to bind it to the namespace.
_, err = kubeClient.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: podSecurityPolicyPrivileged},
@ -140,10 +167,10 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
}
})
if auth.IsRBACEnabled(kubeClient.RbacV1()) {
if e2eauth.IsRBACEnabled(kubeClient.RbacV1()) {
ginkgo.By(fmt.Sprintf("Binding the %s PodSecurityPolicy to the default service account in %s",
podSecurityPolicyPrivileged, namespace))
err := auth.BindClusterRoleInNamespace(kubeClient.RbacV1(),
err := e2eauth.BindClusterRoleInNamespace(kubeClient.RbacV1(),
podSecurityPolicyPrivileged,
namespace,
rbacv1.Subject{
@ -152,7 +179,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
Name: "default",
})
ExpectNoError(err)
ExpectNoError(auth.WaitForNamedAuthorizationUpdate(kubeClient.AuthorizationV1(),
ExpectNoError(e2eauth.WaitForNamedAuthorizationUpdate(kubeClient.AuthorizationV1(),
serviceaccount.MakeUsername(namespace, "default"), namespace, "use", podSecurityPolicyPrivileged,
schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true))
}

View File

@ -6,8 +6,6 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/framework/pv",
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/storage/v1/util:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
@ -17,6 +15,7 @@ go_library(
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/skipper:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
],
)

View File

@ -19,6 +19,7 @@ package framework
import (
"context"
"fmt"
"k8s.io/kubernetes/test/e2e/storage/utils"
"time"
"github.com/onsi/ginkgo"
@ -29,8 +30,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
)
@ -53,6 +52,19 @@ const (
// VolumeSelectorKey is the key for volume selector.
VolumeSelectorKey = "e2e-pv-pool"
// isDefaultStorageClassAnnotation represents a StorageClass annotation that
// marks a class as the default StorageClass
isDefaultStorageClassAnnotation = "storageclass.kubernetes.io/is-default-class"
// betaIsDefaultStorageClassAnnotation is the beta version of isDefaultStorageClassAnnotation.
// TODO: remove Beta when no longer used
betaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class"
// volumeGidAnnotationKey is the key of the annotation on the PersistentVolume
// object that specifies a supplemental GID.
// it is copied from k8s.io/kubernetes/pkg/volume/util VolumeGidAnnotationKey
volumeGidAnnotationKey = "pv.beta.kubernetes.io/gid"
)
var (
@ -117,6 +129,8 @@ type PersistentVolumeConfig struct {
// PersistentVolumeClaimConfig is consumed by MakePersistentVolumeClaim() to
// generate a PVC object.
type PersistentVolumeClaimConfig struct {
// Name of the PVC. If set, overrides NamePrefix
Name string
// NamePrefix defaults to "pvc-" if unspecified
NamePrefix string
// ClaimSize must be specified in the Quantity format. Defaults to 2Gi if
@ -570,7 +584,7 @@ func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume
GenerateName: pvConfig.NamePrefix,
Labels: pvConfig.Labels,
Annotations: map[string]string{
util.VolumeGidAnnotationKey: "777",
volumeGidAnnotationKey: "777",
},
},
Spec: v1.PersistentVolumeSpec{
@ -610,6 +624,7 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P
return &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: cfg.Name,
GenerateName: cfg.NamePrefix,
Namespace: ns,
Annotations: cfg.Annotations,
@ -703,12 +718,12 @@ func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.Persist
}
// WaitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
if err != nil {
framework.Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
framework.Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, poll, err)
continue
}
if pv.Status.Phase == phase {
@ -721,24 +736,25 @@ func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.In
}
// WaitForPersistentVolumeClaimPhase waits for a PersistentVolumeClaim to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error {
return WaitForPersistentVolumeClaimsPhase(phase, c, ns, []string{pvcName}, Poll, timeout, true)
func WaitForPersistentVolumeClaimPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, poll, timeout time.Duration) error {
return WaitForPersistentVolumeClaimsPhase(phase, c, ns, []string{pvcName}, poll, timeout, true)
}
// WaitForPersistentVolumeClaimsPhase waits for any (if matchAny is true) or all (if matchAny is false) PersistentVolumeClaims
// to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcNames []string, Poll, timeout time.Duration, matchAny bool) error {
func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcNames []string, poll, timeout time.Duration, matchAny bool) error {
if len(pvcNames) == 0 {
return fmt.Errorf("Incorrect parameter: Need at least one PVC to track. Found 0")
}
framework.Logf("Waiting up to %v for PersistentVolumeClaims %v to have phase %s", timeout, pvcNames, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
phaseFoundInAllClaims := true
for _, pvcName := range pvcNames {
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvcName, metav1.GetOptions{})
if err != nil {
framework.Logf("Failed to get claim %q, retrying in %v. Error: %v", pvcName, Poll, err)
continue
framework.Logf("Failed to get claim %q, retrying in %v. Error: %v", pvcName, poll, err)
phaseFoundInAllClaims = false
break
}
if pvc.Status.Phase == phase {
framework.Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start))
@ -779,7 +795,7 @@ func GetDefaultStorageClassName(c clientset.Interface) (string, error) {
}
var scName string
for _, sc := range list.Items {
if storageutil.IsDefaultAnnotation(sc.ObjectMeta) {
if isDefaultAnnotation(sc.ObjectMeta) {
if len(scName) != 0 {
return "", fmt.Errorf("Multiple default storage classes found: %q and %q", scName, sc.Name)
}
@ -793,6 +809,20 @@ func GetDefaultStorageClassName(c clientset.Interface) (string, error) {
return scName, nil
}
// isDefaultAnnotation returns true if the default storage class
// annotation is set
// TODO: remove Beta when no longer needed
func isDefaultAnnotation(obj metav1.ObjectMeta) bool {
if obj.Annotations[isDefaultStorageClassAnnotation] == "true" {
return true
}
if obj.Annotations[betaIsDefaultStorageClassAnnotation] == "true" {
return true
}
return false
}
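// Illustrative usage (editor's sketch, not part of the upstream change): a
// StorageClass counts as default when either annotation is "true"; storagev1 is an
// assumed alias for k8s.io/api/storage/v1.
//
//	sc := storagev1.StorageClass{
//	    ObjectMeta: metav1.ObjectMeta{
//	        Annotations: map[string]string{isDefaultStorageClassAnnotation: "true"},
//	    },
//	}
//	_ = isDefaultAnnotation(sc.ObjectMeta) // true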
// SkipIfNoDefaultStorageClass skips tests if no default SC can be found.
func SkipIfNoDefaultStorageClass(c clientset.Interface) {
_, err := GetDefaultStorageClassName(c)
@ -800,3 +830,49 @@ func SkipIfNoDefaultStorageClass(c clientset.Interface) {
e2eskipper.Skipf("error finding default storageClass : %v", err)
}
}
// WaitForPersistentVolumeDeleted waits for a PersistentVolume to get deleted or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
if err == nil {
framework.Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start))
continue
}
if apierrors.IsNotFound(err) {
framework.Logf("PersistentVolume %s was removed", pvName)
return nil
}
framework.Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, poll, err)
}
return fmt.Errorf("PersistentVolume %s still exists within %v", pvName, timeout)
}
// WaitForPVCFinalizer waits for a finalizer to be added to a PVC in a given namespace.
func WaitForPVCFinalizer(ctx context.Context, cs clientset.Interface, name, namespace, finalizer string, poll, timeout time.Duration) error {
var (
err error
pvc *v1.PersistentVolumeClaim
)
framework.Logf("Waiting up to %v for PersistentVolumeClaim %s/%s to contain finalizer %s", timeout, namespace, name, finalizer)
if successful := utils.WaitUntil(poll, timeout, func() bool {
pvc, err = cs.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
framework.Logf("Failed to get PersistentVolumeClaim %s/%s with err: %v. Will retry in %v", name, namespace, err, timeout)
return false
}
for _, f := range pvc.Finalizers {
if f == finalizer {
return true
}
}
return false
}); successful {
return nil
}
if err == nil {
err = fmt.Errorf("finalizer %s not added to pvc %s/%s", finalizer, namespace, name)
}
return err
}
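// Illustrative usage (editor's sketch, not part of the upstream change): waiting for
// the PVC protection finalizer on a freshly created claim; the finalizer string and
// intervals are examples only.
//
//	err := WaitForPVCFinalizer(context.TODO(), cs, "claim-1", "default",
//	    "kubernetes.io/pvc-protection", 2*time.Second, time.Minute)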

View File

@ -1,33 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["runtimeobj.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/resource",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,130 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
"context"
"fmt"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
clientset "k8s.io/client-go/kubernetes"
)
var (
kindReplicationController = schema.GroupKind{Kind: "ReplicationController"}
kindExtensionsReplicaSet = schema.GroupKind{Group: "extensions", Kind: "ReplicaSet"}
kindAppsReplicaSet = schema.GroupKind{Group: "apps", Kind: "ReplicaSet"}
kindExtensionsDeployment = schema.GroupKind{Group: "extensions", Kind: "Deployment"}
kindAppsDeployment = schema.GroupKind{Group: "apps", Kind: "Deployment"}
kindExtensionsDaemonSet = schema.GroupKind{Group: "extensions", Kind: "DaemonSet"}
kindBatchJob = schema.GroupKind{Group: "batch", Kind: "Job"}
)
// GetRuntimeObjectForKind returns a runtime.Object based on its GroupKind,
// namespace and name.
func GetRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, name string) (runtime.Object, error) {
switch kind {
case kindReplicationController:
return c.CoreV1().ReplicationControllers(ns).Get(context.TODO(), name, metav1.GetOptions{})
case kindExtensionsReplicaSet, kindAppsReplicaSet:
return c.AppsV1().ReplicaSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
case kindExtensionsDeployment, kindAppsDeployment:
return c.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
case kindExtensionsDaemonSet:
return c.AppsV1().DaemonSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
case kindBatchJob:
return c.BatchV1().Jobs(ns).Get(context.TODO(), name, metav1.GetOptions{})
default:
return nil, fmt.Errorf("Unsupported kind when getting runtime object: %v", kind)
}
}
// GetSelectorFromRuntimeObject returns the labels for the given object.
func GetSelectorFromRuntimeObject(obj runtime.Object) (labels.Selector, error) {
switch typed := obj.(type) {
case *v1.ReplicationController:
return labels.SelectorFromSet(typed.Spec.Selector), nil
case *extensionsv1beta1.ReplicaSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *appsv1.ReplicaSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *extensionsv1beta1.Deployment:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *appsv1.Deployment:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *extensionsv1beta1.DaemonSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *appsv1.DaemonSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *batchv1.Job:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
default:
return nil, fmt.Errorf("Unsupported kind when getting selector: %v", obj)
}
}
// GetReplicasFromRuntimeObject returns the number of replicas for the given
// object.
func GetReplicasFromRuntimeObject(obj runtime.Object) (int32, error) {
switch typed := obj.(type) {
case *v1.ReplicationController:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *extensionsv1beta1.ReplicaSet:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *appsv1.ReplicaSet:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *extensionsv1beta1.Deployment:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *appsv1.Deployment:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *extensionsv1beta1.DaemonSet:
return 0, nil
case *appsv1.DaemonSet:
return 0, nil
case *batchv1.Job:
// TODO: currently we use pause pods so that's OK. When we'll want to switch to Pods
// that actually finish we need a better way to do this.
if typed.Spec.Parallelism != nil {
return *typed.Spec.Parallelism, nil
}
return 0, nil
default:
return -1, fmt.Errorf("Unsupported kind when getting number of replicas: %v", obj)
}
}

View File

@ -23,6 +23,7 @@ import (
"encoding/json"
"fmt"
"math"
"regexp"
"sort"
"strconv"
"strings"
@ -32,11 +33,10 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientset "k8s.io/client-go/kubernetes"
kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/system"
kubeletstatsv1alpha1 "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
@ -296,7 +296,7 @@ func getStatsSummary(c clientset.Interface, nodeName string) (*kubeletstatsv1alp
data, err := c.CoreV1().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
Name(fmt.Sprintf("%v:%v", nodeName, KubeletPort)).
Suffix("stats/summary").
Do(ctx).Raw()
@ -371,6 +371,29 @@ const (
MasterAndDNSNodes NodesSet = 2
)
// nodeHasControlPlanePods returns true if the specified node has control plane pods
// (kube-scheduler and/or kube-controller-manager).
func nodeHasControlPlanePods(c clientset.Interface, nodeName string) (bool, error) {
regKubeScheduler := regexp.MustCompile("kube-scheduler-.*")
regKubeControllerManager := regexp.MustCompile("kube-controller-manager-.*")
podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), metav1.ListOptions{
FieldSelector: fields.OneTermEqualSelector("spec.nodeName", nodeName).String(),
})
if err != nil {
return false, err
}
if len(podList.Items) < 1 {
Logf("Can't find any pods in namespace %s to grab metrics from", metav1.NamespaceSystem)
}
for _, pod := range podList.Items {
if regKubeScheduler.MatchString(pod.Name) || regKubeControllerManager.MatchString(pod.Name) {
return true, nil
}
}
return false, nil
}
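// Illustrative usage (editor's sketch, not part of the upstream change): probing a
// single node before deciding whether to gather metrics from it; c and nodeName are
// supplied by the caller.
//
//	isControlPlane, err := nodeHasControlPlanePods(c, nodeName)
//	if err == nil && isControlPlane {
//	    Logf("node %s hosts control plane pods", nodeName)
//	}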
// NewResourceUsageGatherer returns a new ContainerResourceGatherer.
func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*ContainerResourceGatherer, error) {
g := ContainerResourceGatherer{
@ -405,11 +428,23 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
}
dnsNodes := make(map[string]bool)
for _, pod := range pods.Items {
if (options.Nodes == MasterNodes) && !system.DeprecatedMightBeMasterNode(pod.Spec.NodeName) {
continue
if options.Nodes == MasterNodes {
isControlPlane, err := nodeHasControlPlanePods(c, pod.Spec.NodeName)
if err != nil {
return nil, err
}
if !isControlPlane {
continue
}
}
if (options.Nodes == MasterAndDNSNodes) && !system.DeprecatedMightBeMasterNode(pod.Spec.NodeName) && pod.Labels["k8s-app"] != "kube-dns" {
continue
if options.Nodes == MasterAndDNSNodes {
isControlPlane, err := nodeHasControlPlanePods(c, pod.Spec.NodeName)
if err != nil {
return nil, err
}
if !isControlPlane && pod.Labels["k8s-app"] != "kube-dns" {
continue
}
}
for _, container := range pod.Status.InitContainerStatuses {
g.containerIDs = append(g.containerIDs, container.Name)
@ -428,7 +463,11 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
}
for _, node := range nodeList.Items {
if options.Nodes == AllNodes || system.DeprecatedMightBeMasterNode(node.Name) || dnsNodes[node.Name] {
isControlPlane, err := nodeHasControlPlanePods(c, node.Name)
if err != nil {
return nil, err
}
if options.Nodes == AllNodes || isControlPlane || dnsNodes[node.Name] {
g.workerWg.Add(1)
g.workers = append(g.workers, resourceGatherWorker{
c: c,
@ -565,7 +604,7 @@ type kubemarkResourceUsage struct {
}
func getMasterUsageByPrefix(prefix string) (string, error) {
sshResult, err := e2essh.SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), GetMasterHost()+":22", TestContext.Provider)
sshResult, err := e2essh.SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), APIAddress()+":22", TestContext.Provider)
if err != nil {
return "", err
}

View File

@ -6,16 +6,18 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/framework/skipper",
visibility = ["//visibility:public"],
deps = [
"//pkg/features:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/component-base/featuregate:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
],

View File

@ -30,19 +30,21 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
utilversion "k8s.io/apimachinery/pkg/util/version"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/features"
"k8s.io/component-base/featuregate"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)
// TestContext should be used by all tests to access common context data.
var TestContext framework.TestContextType
// New local storage types to support local storage capacity isolation
var localStorageCapacityIsolation featuregate.Feature = "LocalStorageCapacityIsolation"
func skipInternalf(caller int, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
@ -130,8 +132,8 @@ func SkipUnlessAtLeast(value int, minValue int, message string) {
// SkipUnlessLocalEphemeralStorageEnabled skips if the LocalStorageCapacityIsolation is not enabled.
func SkipUnlessLocalEphemeralStorageEnabled() {
if !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
skipInternalf(1, "Only supported when %v feature is enabled", features.LocalStorageCapacityIsolation)
if !utilfeature.DefaultFeatureGate.Enabled(localStorageCapacityIsolation) {
skipInternalf(1, "Only supported when %v feature is enabled", localStorageCapacityIsolation)
}
}
@ -150,35 +152,35 @@ func SkipIfMissingResource(dynamicClient dynamic.Interface, gvr schema.GroupVers
// SkipUnlessNodeCountIsAtLeast skips if the number of nodes is less than the minNodeCount.
func SkipUnlessNodeCountIsAtLeast(minNodeCount int) {
if TestContext.CloudConfig.NumNodes < minNodeCount {
skipInternalf(1, "Requires at least %d nodes (not %d)", minNodeCount, TestContext.CloudConfig.NumNodes)
if framework.TestContext.CloudConfig.NumNodes < minNodeCount {
skipInternalf(1, "Requires at least %d nodes (not %d)", minNodeCount, framework.TestContext.CloudConfig.NumNodes)
}
}
// SkipUnlessNodeCountIsAtMost skips if the number of nodes is greater than the maxNodeCount.
func SkipUnlessNodeCountIsAtMost(maxNodeCount int) {
if TestContext.CloudConfig.NumNodes > maxNodeCount {
skipInternalf(1, "Requires at most %d nodes (not %d)", maxNodeCount, TestContext.CloudConfig.NumNodes)
if framework.TestContext.CloudConfig.NumNodes > maxNodeCount {
skipInternalf(1, "Requires at most %d nodes (not %d)", maxNodeCount, framework.TestContext.CloudConfig.NumNodes)
}
}
// SkipIfProviderIs skips if the provider is included in the unsupportedProviders.
func SkipIfProviderIs(unsupportedProviders ...string) {
if framework.ProviderIs(unsupportedProviders...) {
skipInternalf(1, "Not supported for providers %v (found %s)", unsupportedProviders, TestContext.Provider)
skipInternalf(1, "Not supported for providers %v (found %s)", unsupportedProviders, framework.TestContext.Provider)
}
}
// SkipUnlessProviderIs skips if the provider is not included in the supportedProviders.
func SkipUnlessProviderIs(supportedProviders ...string) {
if !framework.ProviderIs(supportedProviders...) {
skipInternalf(1, "Only supported for providers %v (not %s)", supportedProviders, TestContext.Provider)
skipInternalf(1, "Only supported for providers %v (not %s)", supportedProviders, framework.TestContext.Provider)
}
}
// SkipUnlessMultizone skips if the cluster does not have multizone.
func SkipUnlessMultizone(c clientset.Interface) {
zones, err := framework.GetClusterZones(c)
zones, err := e2enode.GetClusterZones(c)
if err != nil {
skipInternalf(1, "Error listing cluster zones")
}
@ -189,7 +191,7 @@ func SkipUnlessMultizone(c clientset.Interface) {
// SkipIfMultizone skips if the cluster has multizone.
func SkipIfMultizone(c clientset.Interface) {
zones, err := framework.GetClusterZones(c)
zones, err := e2enode.GetClusterZones(c)
if err != nil {
skipInternalf(1, "Error listing cluster zones")
}
@ -201,21 +203,28 @@ func SkipIfMultizone(c clientset.Interface) {
// SkipUnlessMasterOSDistroIs skips if the master OS distro is not included in the supportedMasterOsDistros.
func SkipUnlessMasterOSDistroIs(supportedMasterOsDistros ...string) {
if !framework.MasterOSDistroIs(supportedMasterOsDistros...) {
skipInternalf(1, "Only supported for master OS distro %v (not %s)", supportedMasterOsDistros, TestContext.MasterOSDistro)
skipInternalf(1, "Only supported for master OS distro %v (not %s)", supportedMasterOsDistros, framework.TestContext.MasterOSDistro)
}
}
// SkipUnlessNodeOSDistroIs skips if the node OS distro is not included in the supportedNodeOsDistros.
func SkipUnlessNodeOSDistroIs(supportedNodeOsDistros ...string) {
if !framework.NodeOSDistroIs(supportedNodeOsDistros...) {
skipInternalf(1, "Only supported for node OS distro %v (not %s)", supportedNodeOsDistros, TestContext.NodeOSDistro)
skipInternalf(1, "Only supported for node OS distro %v (not %s)", supportedNodeOsDistros, framework.TestContext.NodeOSDistro)
}
}
// SkipUnlessNodeOSArchIs skips if the node OS distro is not included in the supportedNodeOsArchs.
func SkipUnlessNodeOSArchIs(supportedNodeOsArchs ...string) {
if !framework.NodeOSArchIs(supportedNodeOsArchs...) {
skipInternalf(1, "Only supported for node OS arch %v (not %s)", supportedNodeOsArchs, framework.TestContext.NodeOSArch)
}
}
// SkipIfNodeOSDistroIs skips if the node OS distro is included in the unsupportedNodeOsDistros.
func SkipIfNodeOSDistroIs(unsupportedNodeOsDistros ...string) {
if framework.NodeOSDistroIs(unsupportedNodeOsDistros...) {
skipInternalf(1, "Not supported for node OS distro %v (is %s)", unsupportedNodeOsDistros, TestContext.NodeOSDistro)
skipInternalf(1, "Not supported for node OS distro %v (is %s)", unsupportedNodeOsDistros, framework.TestContext.NodeOSDistro)
}
}
@ -232,8 +241,8 @@ func SkipUnlessServerVersionGTE(v *utilversion.Version, c discovery.ServerVersio
// SkipUnlessSSHKeyPresent skips if no SSH key is found.
func SkipUnlessSSHKeyPresent() {
if _, err := e2essh.GetSigner(TestContext.Provider); err != nil {
skipInternalf(1, "No SSH Key for provider %s: '%v'", TestContext.Provider, err)
if _, err := e2essh.GetSigner(framework.TestContext.Provider); err != nil {
skipInternalf(1, "No SSH Key for provider %s: '%v'", framework.TestContext.Provider, err)
}
}
@ -261,19 +270,41 @@ func SkipIfAppArmorNotSupported() {
// RunIfContainerRuntimeIs runs if the container runtime is included in the runtimes.
func RunIfContainerRuntimeIs(runtimes ...string) {
for _, containerRuntime := range runtimes {
if containerRuntime == TestContext.ContainerRuntime {
if containerRuntime == framework.TestContext.ContainerRuntime {
return
}
}
skipInternalf(1, "Skipped because container runtime %q is not in %s", TestContext.ContainerRuntime, runtimes)
skipInternalf(1, "Skipped because container runtime %q is not in %s", framework.TestContext.ContainerRuntime, runtimes)
}
// RunIfSystemSpecNameIs runs if the system spec name is included in the names.
func RunIfSystemSpecNameIs(names ...string) {
for _, name := range names {
if name == TestContext.SystemSpecName {
if name == framework.TestContext.SystemSpecName {
return
}
}
skipInternalf(1, "Skipped because system spec name %q is not in %v", TestContext.SystemSpecName, names)
skipInternalf(1, "Skipped because system spec name %q is not in %v", framework.TestContext.SystemSpecName, names)
}
// SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem skips unless the component runs as pods and the client can delete them
func SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(componentName string, c clientset.Interface, ns string, labelSet labels.Set) {
// verify that the component runs as pods
label := labels.SelectorFromSet(labelSet)
listOpts := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), listOpts)
framework.Logf("SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem: %v, %v", pods, err)
if err != nil {
skipInternalf(1, "Skipped because client failed to get component:%s pod err:%v", componentName, err)
}
if len(pods.Items) == 0 {
skipInternalf(1, "Skipped because component:%s is not running as pod.", componentName)
}
// verify that the client can delete the pod
pod := pods.Items[0]
if err := c.CoreV1().Pods(ns).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{DryRun: []string{metav1.DryRunAll}}); err != nil {
skipInternalf(1, "Skipped because client failed to delete component:%s pod, err:%v", componentName, err)
}
}
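// Illustrative usage (editor's sketch, not part of the upstream change): skipping a
// test unless kube-controller-manager runs as a deletable pod; the label set is an
// example.
//
//	SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem("kube-controller-manager", c,
//	    metav1.NamespaceSystem, labels.Set{"component": "kube-controller-manager"})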

View File

@ -6,14 +6,12 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/framework/ssh",
visibility = ["//visibility:public"],
deps = [
"//pkg/ssh:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/golang.org/x/crypto/ssh:go_default_library",
],

View File

@ -20,26 +20,27 @@ import (
"bytes"
"context"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"time"
"github.com/onsi/gomega"
"golang.org/x/crypto/ssh"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
sshutil "k8s.io/kubernetes/pkg/ssh"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
)
const (
// ssh port
sshPort = "22"
// SSHPort is tcp port number of SSH
SSHPort = "22"
// pollNodeInterval is how often to Poll pods.
pollNodeInterval = 2 * time.Second
@ -54,7 +55,7 @@ const (
func GetSigner(provider string) (ssh.Signer, error) {
// honor a consistent SSH key across all providers
if path := os.Getenv("KUBE_SSH_KEY_PATH"); len(path) > 0 {
return sshutil.MakePrivateKeySignerFromFile(path)
return makePrivateKeySignerFromFile(path)
}
// Select the key itself to use. When implementing more providers here,
@ -93,11 +94,25 @@ func GetSigner(provider string) (ssh.Signer, error) {
keyfile = filepath.Join(keydir, keyfile)
}
return sshutil.MakePrivateKeySignerFromFile(keyfile)
return makePrivateKeySignerFromFile(keyfile)
}
// NodeSSHHosts returns SSH-able host names for all schedulable nodes - this
// excludes master node. If it can't find any external IPs, it falls back to
func makePrivateKeySignerFromFile(key string) (ssh.Signer, error) {
buffer, err := ioutil.ReadFile(key)
if err != nil {
return nil, fmt.Errorf("error reading SSH key %s: '%v'", key, err)
}
signer, err := ssh.ParsePrivateKey(buffer)
if err != nil {
return nil, fmt.Errorf("error parsing SSH key: '%v'", err)
}
return signer, err
}
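// Illustrative usage (editor's sketch, not part of the upstream change): loading a
// signer from an explicit key path, mirroring what GetSigner does when
// KUBE_SSH_KEY_PATH is set.
//
//	signer, err := makePrivateKeySignerFromFile(os.Getenv("KUBE_SSH_KEY_PATH"))
//	if err != nil {
//	    return nil, err
//	}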
// NodeSSHHosts returns SSH-able host names for all schedulable nodes.
// If it can't find any external IPs, it falls back to
// looking for internal IPs. If it can't find an internal IP for every node it
// returns an error, though it still returns all hosts that it found in that
// case.
@ -120,7 +135,7 @@ func NodeSSHHosts(c clientset.Interface) ([]string, error) {
sshHosts := make([]string, 0, len(hosts))
for _, h := range hosts {
sshHosts = append(sshHosts, net.JoinHostPort(h, sshPort))
sshHosts = append(sshHosts, net.JoinHostPort(h, SSHPort))
}
return sshHosts, nil
}
@ -139,7 +154,7 @@ type Result struct {
// eg: the name returned by framework.GetMasterHost(). This is also not guaranteed to work across
// cloud providers since it involves ssh.
func NodeExec(nodeName, cmd, provider string) (Result, error) {
return SSH(cmd, net.JoinHostPort(nodeName, sshPort), provider)
return SSH(cmd, net.JoinHostPort(nodeName, SSHPort), provider)
}
// SSH synchronously SSHs to a node running on provider and runs cmd. If there
@ -169,7 +184,7 @@ func SSH(cmd, host, provider string) (Result, error) {
return result, err
}
stdout, stderr, code, err := sshutil.RunSSHCommand(cmd, result.User, host, signer)
stdout, stderr, code, err := runSSHCommand(cmd, result.User, host, signer)
result.Stdout = stdout
result.Stderr = stderr
result.Code = code
@ -177,6 +192,60 @@ func SSH(cmd, host, provider string) (Result, error) {
return result, err
}
// runSSHCommand returns the stdout, stderr, and exit code from running cmd on
// host as the specified user, along with any SSH-level error.
func runSSHCommand(cmd, user, host string, signer ssh.Signer) (string, string, int, error) {
if user == "" {
user = os.Getenv("USER")
}
// Setup the config, dial the server, and open a session.
config := &ssh.ClientConfig{
User: user,
Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
client, err := ssh.Dial("tcp", host, config)
if err != nil {
err = wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
fmt.Printf("error dialing %s@%s: '%v', retrying\n", user, host, err)
if client, err = ssh.Dial("tcp", host, config); err != nil {
return false, err
}
return true, nil
})
}
if err != nil {
return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: '%v'", user, host, err)
}
defer client.Close()
session, err := client.NewSession()
if err != nil {
return "", "", 0, fmt.Errorf("error creating session to %s@%s: '%v'", user, host, err)
}
defer session.Close()
// Run the command.
code := 0
var bout, berr bytes.Buffer
session.Stdout, session.Stderr = &bout, &berr
if err = session.Run(cmd); err != nil {
// Check whether the command failed to run or didn't complete.
if exiterr, ok := err.(*ssh.ExitError); ok {
// If we got an ExitError and the exit code is nonzero, we'll
// consider the SSH itself successful (just that the command run
// errored on the host).
if code = exiterr.ExitStatus(); code != 0 {
err = nil
}
} else {
// Some other kind of error happened (e.g. an IOError); consider the
// SSH unsuccessful.
err = fmt.Errorf("failed running `%s` on %s@%s: '%v'", cmd, user, host, err)
}
}
return bout.String(), berr.String(), code, err
}
// runSSHCommandViaBastion returns the stdout, stderr, and exit code from running cmd on
// host as the specified user, along with any SSH-level error. It uses an SSH proxy to connect
// to bastion, then via that tunnel connects to the remote host. Similar to
@ -260,7 +329,7 @@ func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, er
host := ""
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeExternalIP && a.Address != "" {
host = net.JoinHostPort(a.Address, sshPort)
host = net.JoinHostPort(a.Address, SSHPort)
break
}
}
@ -269,7 +338,7 @@ func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, er
// No external IPs were found, let's try to use internal as plan B
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeInternalIP && a.Address != "" {
host = net.JoinHostPort(a.Address, sshPort)
host = net.JoinHostPort(a.Address, SSHPort)
break
}
}
@ -323,9 +392,6 @@ func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
return true, nil

View File

@ -17,9 +17,12 @@ limitations under the License.
package framework
import (
"crypto/rand"
"encoding/base64"
"flag"
"fmt"
"io/ioutil"
"math"
"os"
"sort"
"strings"
@ -31,12 +34,13 @@ import (
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/klog"
"k8s.io/klog/v2"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
)
const (
defaultHost = "http://127.0.0.1:8080"
defaultHost = "https://127.0.0.1:6443"
// DefaultNumNodes is the number of nodes. If not specified, then number of nodes is auto-detected
DefaultNumNodes = -1
@ -77,12 +81,16 @@ type TestContextType struct {
KubeVolumeDir string
CertDir string
Host string
BearerToken string `datapolicy:"token"`
// TODO: Deprecating this over time... instead just use gobindata_util.go , see #23987.
RepoRoot string
DockershimCheckpointDir string
// ListImages will list all images that are used, then quit
ListImages bool
// ListConformanceTests will list all available conformance tests, then quit
ListConformanceTests bool
// Provider identifies the infrastructure provider (gce, gke, aws)
Provider string
@ -113,6 +121,7 @@ type TestContextType struct {
ImageServiceEndpoint string
MasterOSDistro string
NodeOSDistro string
NodeOSArch string
VerifyServiceAccount bool
DeleteNamespace bool
DeleteNamespaceOnFailure bool
@ -150,9 +159,6 @@ type TestContextType struct {
// Node e2e specific test context
NodeTestContextType
// Indicates what path the kubernetes-anywhere is installed on
KubernetesAnywherePath string
// The DNS Domain of the cluster.
ClusterDNSDomain string
@ -173,6 +179,9 @@ type TestContextType struct {
// SpecSummaryOutput is the file to write ginkgo.SpecSummary objects to as tests complete. Useful for debugging and test introspection.
SpecSummaryOutput string
// DockerConfigFile is a file that contains credentials which can be used to pull images from certain private registries, needed for a test.
DockerConfigFile string
}
// NodeKillerConfig describes configuration of NodeKiller -- a utility to
@ -282,7 +291,7 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
flags.BoolVar(&TestContext.DeleteNamespaceOnFailure, "delete-namespace-on-failure", true, "If true, framework will delete test namespace on failure. Used only during test debugging.")
flags.IntVar(&TestContext.AllowedNotReadyNodes, "allowed-not-ready-nodes", 0, "If non-zero, framework will allow for that many non-ready nodes when checking for all ready nodes.")
flags.StringVar(&TestContext.Host, "host", "", fmt.Sprintf("The host, or apiserver, to connect to. Will default to %s if this argument and --kubeconfig are not set", defaultHost))
flags.StringVar(&TestContext.Host, "host", "", fmt.Sprintf("The host, or apiserver, to connect to. Will default to %s if this argument and --kubeconfig are not set.", defaultHost))
flags.StringVar(&TestContext.ReportPrefix, "report-prefix", "", "Optional prefix for JUnit XML reports. Default is empty, which doesn't prepend anything to the default name.")
flags.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.")
flags.Var(cliflag.NewMapStringBool(&TestContext.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
@ -294,14 +303,15 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
flags.BoolVar(&TestContext.DumpSystemdJournal, "dump-systemd-journal", false, "Whether to dump the full systemd journal.")
flags.StringVar(&TestContext.ImageServiceEndpoint, "image-service-endpoint", "", "The image service endpoint of cluster VM instances.")
flags.StringVar(&TestContext.DockershimCheckpointDir, "dockershim-checkpoint-dir", "/var/lib/dockershim/sandbox", "The directory for dockershim to store sandbox checkpoints.")
flags.StringVar(&TestContext.KubernetesAnywherePath, "kubernetes-anywhere-path", "/workspace/k8s.io/kubernetes-anywhere", "Which directory kubernetes-anywhere is installed to.")
flags.StringVar(&TestContext.NonblockingTaints, "non-blocking-taints", `node-role.kubernetes.io/master`, "Nodes with taints in this comma-delimited list will not block the test framework from starting tests.")
flags.BoolVar(&TestContext.ListImages, "list-images", false, "If true, will show list of images used for running tests.")
flags.BoolVar(&TestContext.ListConformanceTests, "list-conformance-tests", false, "If true, will show list of conformance tests.")
flags.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.")
flags.StringVar(&TestContext.ProgressReportURL, "progress-report-url", "", "The URL to POST progress updates to as the suite runs to assist in aiding integrations. If empty, no messages sent.")
flags.StringVar(&TestContext.SpecSummaryOutput, "spec-dump", "", "The file to dump all ginkgo.SpecSummary to after tests run. If empty, no objects are saved/printed.")
flags.StringVar(&TestContext.DockerConfigFile, "docker-config-file", "", "A file that contains credentials which can be used to pull images from certain private registries, needed for a test.")
}
// RegisterClusterFlags registers flags specific to the cluster e2e test suite.
@ -320,6 +330,7 @@ func RegisterClusterFlags(flags *flag.FlagSet) {
flags.StringVar(&TestContext.Prefix, "prefix", "e2e", "A prefix to be added to cloud resources created during testing.")
flags.StringVar(&TestContext.MasterOSDistro, "master-os-distro", "debian", "The OS distribution of cluster master (debian, ubuntu, gci, coreos, or custom).")
flags.StringVar(&TestContext.NodeOSDistro, "node-os-distro", "debian", "The OS distribution of cluster VM instances (debian, ubuntu, gci, coreos, or custom).")
flags.StringVar(&TestContext.NodeOSArch, "node-os-arch", "amd64", "The OS architecture of cluster VM instances (amd64, arm64, or custom).")
flags.StringVar(&TestContext.ClusterDNSDomain, "dns-domain", "cluster.local", "The DNS Domain of the cluster.")
// TODO: Flags per provider? Rename gce-project/gce-zone?
@ -396,6 +407,21 @@ func createKubeConfig(clientCfg *restclient.Config) *clientcmdapi.Config {
return configCmd
}
// GenerateSecureToken returns a string of length tokenLen, consisting
// of random bytes encoded as base64 for use as a Bearer Token during
// communication with an APIServer
func GenerateSecureToken(tokenLen int) (string, error) {
// Number of random bytes whose base64 encoding has length tokenLen.
tokenSize := math.Ceil(float64(tokenLen) * 6 / 8)
rawToken := make([]byte, int(tokenSize))
if _, err := rand.Read(rawToken); err != nil {
return "", err
}
encoded := base64.RawURLEncoding.EncodeToString(rawToken)
token := encoded[:tokenLen]
return token, nil
}
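As a sanity check on the math above: for tokenLen = 16, tokenSize = ceil(16 * 6 / 8) = 12 raw bytes, and RawURLEncoding turns 12 bytes into exactly 16 characters. A minimal, hypothetical usage sketch (not part of this file):
token, err := GenerateSecureToken(16)
if err != nil {
klog.Fatalf("token generation failed: %v", err)
}
// len(token) == 16 here; suitable as bearer-token material.
klog.Infof("generated a %d-character token", len(token))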
// AfterReadingAllFlags makes changes to the context after all flags
// have been read.
func AfterReadingAllFlags(t *TestContextType) {
@ -415,6 +441,14 @@ func AfterReadingAllFlags(t *TestContextType) {
t.Host = defaultHost
}
}
if len(t.BearerToken) == 0 {
var err error
t.BearerToken, err = GenerateSecureToken(16)
if err != nil {
klog.Fatalf("Failed to generate bearer token: %v", err)
}
}
// Allow 1% of nodes to be unready (statistically) - relevant for large clusters.
if t.AllowedNotReadyNodes == 0 {
t.AllowedNotReadyNodes = t.CloudConfig.NumNodes / 100

View File

@ -0,0 +1,22 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["testfiles.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/testfiles",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,178 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package testfiles provides a wrapper around various optional ways
// of retrieving additional files needed during a test run:
// - builtin bindata
// - filesystem access
//
// Because it is a self-contained package, it can be used by
// test/e2e/framework and test/e2e/manifest without creating
// a circular dependency.
package testfiles
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"sort"
"strings"
)
var filesources []FileSource
// AddFileSource registers another provider for files that may be
// needed at runtime. Should be called during initialization of a test
// binary.
func AddFileSource(filesource FileSource) {
filesources = append(filesources, filesource)
}
// FileSource implements one way of retrieving test file content. For
// example, one file source could read from the original source code
// file tree, another from bindata compiled into a test executable.
type FileSource interface {
// ReadTestFile retrieves the content of a file that gets maintained
// alongside a test's source code. Files are identified by the
// relative path inside the repository containing the tests, for
// example "cluster/gce/upgrade.sh" inside kubernetes/kubernetes.
//
// When the file is not found, a nil slice is returned. An error is
// returned for all fatal errors.
ReadTestFile(filePath string) ([]byte, error)
// DescribeFiles returns a multi-line description of which
// files are available via this source. It is meant to be
// used as part of the error message when a file cannot be
// found.
DescribeFiles() string
}
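To illustrate the contract (nil data means "not found, try the next source"; an error is only for fatal problems), a minimal in-memory source could look like the following; inMemorySource is hypothetical and not part of the package:
type inMemorySource struct {
files map[string][]byte
}
// ReadTestFile returns the stored bytes, or (nil, nil) when the
// path is unknown so that other sources get a chance.
func (s inMemorySource) ReadTestFile(filePath string) ([]byte, error) {
return s.files[filePath], nil
}
// DescribeFiles summarizes what this source can serve.
func (s inMemorySource) DescribeFiles() string {
return fmt.Sprintf("%d files held in memory", len(s.files))
}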
// Read tries to retrieve the desired file content from
// one of the registered file sources.
func Read(filePath string) ([]byte, error) {
if len(filesources) == 0 {
return nil, fmt.Errorf("no file sources registered (yet?), cannot retrieve test file %s", filePath)
}
for _, filesource := range filesources {
data, err := filesource.ReadTestFile(filePath)
if err != nil {
return nil, fmt.Errorf("fatal error retrieving test file %s: %s", filePath, err)
}
if data != nil {
return data, nil
}
}
// Here we try to generate an error that points test authors
// or users in the right direction for resolving the problem.
msg := fmt.Sprintf("Test file %q was not found.\n", filePath)
for _, filesource := range filesources {
msg += filesource.DescribeFiles()
msg += "\n"
}
return nil, errors.New(msg)
}
// Exists checks whether a file could be read. Unexpected errors
// are handled by calling the fail function, which then should
// abort the current test.
func Exists(filePath string) (bool, error) {
for _, filesource := range filesources {
data, err := filesource.ReadTestFile(filePath)
if err != nil {
return false, err
}
if data != nil {
return true, nil
}
}
return false, nil
}
// RootFileSource looks for files relative to a root directory.
type RootFileSource struct {
Root string
}
// ReadTestFile looks for the file relative to the configured
// root directory. If the path is already absolute, for example
// in a test that has its own method of determining where
// files are, then the path will be used directly.
func (r RootFileSource) ReadTestFile(filePath string) ([]byte, error) {
var fullPath string
if path.IsAbs(filePath) {
fullPath = filePath
} else {
fullPath = filepath.Join(r.Root, filePath)
}
data, err := ioutil.ReadFile(fullPath)
if os.IsNotExist(err) {
// Not an error (yet), some other provider may have the file.
return nil, nil
}
return data, err
}
// DescribeFiles explains that it looks for files inside a certain
// root directory.
func (r RootFileSource) DescribeFiles() string {
description := fmt.Sprintf("Test files are expected in %q", r.Root)
if !path.IsAbs(r.Root) {
// The default in test_context.go is the relative path
// ../../, which doesn't really help locating the
// actual location. Therefore we also add the absolute
// path if necessary.
abs, err := filepath.Abs(r.Root)
if err == nil {
description += fmt.Sprintf(" = %q", abs)
}
}
description += "."
return description
}
// BindataFileSource handles files stored in a package generated with bindata.
type BindataFileSource struct {
Asset func(string) ([]byte, error)
AssetNames func() []string
}
// ReadTestFile looks for an asset with the given path.
func (b BindataFileSource) ReadTestFile(filePath string) ([]byte, error) {
fileBytes, err := b.Asset(filePath)
if err != nil {
// It would be nice to have a better way to detect
// "not found" errors :-/
if strings.HasSuffix(err.Error(), "not found") {
return nil, nil
}
// Propagate unexpected errors instead of silently dropping them.
return nil, err
}
return fileBytes, nil
}
// DescribeFiles explains about gobindata and then lists all available files.
func (b BindataFileSource) DescribeFiles() string {
var lines []string
lines = append(lines, "The following files are built into the test executable via gobindata. For questions on maintaining gobindata, contact the sig-testing group.")
assets := b.AssetNames()
sort.Strings(assets)
lines = append(lines, assets...)
description := strings.Join(lines, "\n ")
return description
}

File diff suppressed because it is too large

62
vendor/k8s.io/kubernetes/test/e2e/storage/utils/BUILD generated vendored Normal file
View File

@ -0,0 +1,62 @@
package(default_visibility = ["//visibility:public"])
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"create.go",
"deployment.go",
"ebs.go",
"framework.go",
"host_exec.go",
"local.go",
"utils.go",
],
importpath = "k8s.io/kubernetes/test/e2e/storage/utils",
deps = [
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/util/exec:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -0,0 +1,637 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"bytes"
"context"
"encoding/json"
"fmt"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/test/e2e/framework"
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
imageutils "k8s.io/kubernetes/test/utils/image"
)
// LoadFromManifests loads .yaml or .json manifest files and returns
// all items that it finds in them. It supports all items for which
// there is a factory registered in factories and .yaml files with
// multiple items separated by "---". Files are accessed via the
// "testfiles" package, which means they can come from a file system
// or be built into the binary.
//
// LoadFromManifests has some limitations:
// - aliases are not supported (i.e. use serviceAccountName instead of the deprecated serviceAccount,
// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podspec-v1-core)
// and are silently ignored
// - the latest stable API version for each item is used, regardless of what
// is specified in the manifest files
func LoadFromManifests(files ...string) ([]interface{}, error) {
var items []interface{}
err := visitManifests(func(data []byte) error {
// Ignore any additional fields for now, just determine what we have.
var what What
if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), data, &what); err != nil {
return errors.Wrap(err, "decode TypeMeta")
}
factory := factories[what]
if factory == nil {
return errors.Errorf("item of type %+v not supported", what)
}
object := factory.New()
if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), data, object); err != nil {
return errors.Wrapf(err, "decode %+v", what)
}
items = append(items, object)
return nil
}, files...)
return items, err
}
func visitManifests(cb func([]byte) error, files ...string) error {
for _, fileName := range files {
data, err := e2etestfiles.Read(fileName)
if err != nil {
framework.Failf("reading manifest file: %v", err)
}
// Split at the "---" separator before working on
// individual item. Only works for .yaml.
//
// We need to split ourselves because we need access
// to each original chunk of data for
// runtime.DecodeInto. kubectl has its own
// infrastructure for this, but that is a lot of code
// with many dependencies.
items := bytes.Split(data, []byte("\n---"))
for _, item := range items {
if err := cb(item); err != nil {
return errors.Wrap(err, fileName)
}
}
}
return nil
}
// PatchItems modifies the given items in place such that each test
// gets its own instances, to avoid conflicts between different tests
// and between tests and normal deployments.
//
// This is done by:
// - creating namespaced items inside the test's namespace
// - changing the name of non-namespaced items like ClusterRole
//
// PatchItems has some limitations:
// - only some common items are supported, unknown ones trigger an error
// - only the latest stable API version for each item is supported
func PatchItems(f *framework.Framework, driverNamespace *v1.Namespace, items ...interface{}) error {
for _, item := range items {
// Uncomment when debugging the loading and patching of items.
// Logf("patching original content of %T:\n%s", item, PrettyPrint(item))
if err := patchItemRecursively(f, driverNamespace, item); err != nil {
return err
}
}
return nil
}
// CreateItems creates the items. Each of them must be an API object
// of a type that is registered in Factory.
//
// It returns either a cleanup function or an error, but never both.
//
// Cleaning up after a test can be triggered in two ways:
// - the test invokes the returned cleanup function,
// usually in an AfterEach
// - the test suite terminates, potentially after
// skipping the test's AfterEach (https://github.com/onsi/ginkgo/issues/222)
//
// PatchItems has the same limitations as LoadFromManifests:
// - only some common items are supported, unknown ones trigger an error
// - only the latest stable API version for each item is supported
func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{}) (func(), error) {
var destructors []func() error
cleanup := func() {
// TODO (?): use same logic as framework.go for determining
// whether we are expected to clean up? This would change the
// meaning of the -delete-namespace and -delete-namespace-on-failure
// command line flags, because they would also start to apply
// to non-namespaced items.
for _, destructor := range destructors {
if err := destructor(); err != nil && !apierrors.IsNotFound(err) {
framework.Logf("deleting failed: %s", err)
}
}
}
var result error
for _, item := range items {
// Each factory knows which item(s) it supports, so try each one.
done := false
description := describeItem(item)
// Uncomment this line to get a full dump of the entire item.
// description = fmt.Sprintf("%s:\n%s", description, PrettyPrint(item))
framework.Logf("creating %s", description)
for _, factory := range factories {
destructor, err := factory.Create(f, ns, item)
if destructor != nil {
destructors = append(destructors, func() error {
framework.Logf("deleting %s", description)
return destructor()
})
}
if err == nil {
done = true
break
} else if errors.Cause(err) != errorItemNotSupported {
result = err
break
}
}
if result == nil && !done {
result = errors.Errorf("item of type %T not supported", item)
break
}
}
if result != nil {
cleanup()
return nil, result
}
return cleanup, nil
}
// CreateFromManifests is a combination of LoadFromManifests,
// PatchItems, patching with an optional custom function,
// and CreateItems.
func CreateFromManifests(f *framework.Framework, driverNamespace *v1.Namespace, patch func(item interface{}) error, files ...string) (func(), error) {
items, err := LoadFromManifests(files...)
if err != nil {
return nil, errors.Wrap(err, "CreateFromManifests")
}
if err := PatchItems(f, driverNamespace, items...); err != nil {
return nil, err
}
if patch != nil {
for _, item := range items {
if err := patch(item); err != nil {
return nil, err
}
}
}
return CreateItems(f, driverNamespace, items...)
}
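Putting these helpers together, a storage test would typically do something like the following; the manifest path and the nil driver namespace (which falls back to the test namespace) are illustrative:
cleanup, err := CreateFromManifests(f, nil, func(item interface{}) error {
// Optional per-item tweaks before creation.
return nil
}, "test/e2e/testing-manifests/storage-csi/controller-role.yaml")
framework.ExpectNoError(err)
defer cleanup()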
// What is a subset of metav1.TypeMeta which (in contrast to
// metav1.TypeMeta itself) satisfies the runtime.Object interface.
type What struct {
Kind string `json:"kind"`
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new What.
func (in *What) DeepCopy() *What {
return &What{Kind: in.Kind}
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out.
func (in *What) DeepCopyInto(out *What) {
out.Kind = in.Kind
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *What) DeepCopyObject() runtime.Object {
return &What{Kind: in.Kind}
}
// GetObjectKind returns the ObjectKind schema; What does not track one, so this returns nil.
func (in *What) GetObjectKind() schema.ObjectKind {
return nil
}
// ItemFactory provides support for creating one particular item.
// The type gets exported because other packages might want to
// extend the set of pre-defined factories.
type ItemFactory interface {
// New returns a new empty item.
New() runtime.Object
// Create is responsible for creating the item. It returns an
// error or a cleanup function for the created item.
// If the item is of an unsupported type, it must return
// an error that has errorItemNotSupported as cause.
Create(f *framework.Framework, ns *v1.Namespace, item interface{}) (func() error, error)
}
// describeItem always returns a string that describes the item,
// usually by calling out to cache.MetaNamespaceKeyFunc which
// concatenates namespace (if set) and name. If that fails, the entire
// item gets converted to a string.
func describeItem(item interface{}) string {
key, err := cache.MetaNamespaceKeyFunc(item)
if err == nil && key != "" {
return fmt.Sprintf("%T: %s", item, key)
}
return fmt.Sprintf("%T: %s", item, item)
}
// errorItemNotSupported is the error that Create methods
// must return or wrap when they don't support the given item.
var errorItemNotSupported = errors.New("not supported")
var factories = map[What]ItemFactory{
{"ClusterRole"}: &clusterRoleFactory{},
{"ClusterRoleBinding"}: &clusterRoleBindingFactory{},
{"CSIDriver"}: &csiDriverFactory{},
{"DaemonSet"}: &daemonSetFactory{},
{"Role"}: &roleFactory{},
{"RoleBinding"}: &roleBindingFactory{},
{"Secret"}: &secretFactory{},
{"Service"}: &serviceFactory{},
{"ServiceAccount"}: &serviceAccountFactory{},
{"StatefulSet"}: &statefulSetFactory{},
{"StorageClass"}: &storageClassFactory{},
}
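Per the ItemFactory comment, adding support for another kind means mirroring one of the factories below and registering it in the map (e.g. {"ConfigMap"}: &configMapFactory{}). A hypothetical sketch, modeled on secretFactory:
type configMapFactory struct{}
func (f *configMapFactory) New() runtime.Object {
return &v1.ConfigMap{}
}
func (*configMapFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*v1.ConfigMap)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.CoreV1().ConfigMaps(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create ConfigMap")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}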
// PatchName makes the name of some item unique by appending the
// generated unique name.
func PatchName(f *framework.Framework, item *string) {
if *item != "" {
*item = *item + "-" + f.UniqueName
}
}
// PatchNamespace moves the item into the test's namespace. Not
// all items can be namespaced. For those, the name also needs to be
// patched.
func PatchNamespace(f *framework.Framework, driverNamespace *v1.Namespace, item *string) {
if driverNamespace != nil {
*item = driverNamespace.GetName()
return
}
if f.Namespace != nil {
*item = f.Namespace.GetName()
}
}
func patchItemRecursively(f *framework.Framework, driverNamespace *v1.Namespace, item interface{}) error {
switch item := item.(type) {
case *rbacv1.Subject:
PatchNamespace(f, driverNamespace, &item.Namespace)
case *rbacv1.RoleRef:
// TODO: avoid hard-coding this special name. Perhaps add a Framework.PredefinedRoles
// which contains all role names that are defined cluster-wide before the test starts?
// All those names are exempt from renaming. That list could be populated by querying
// and get extended by tests.
if item.Name != "e2e-test-privileged-psp" {
PatchName(f, &item.Name)
}
case *rbacv1.ClusterRole:
PatchName(f, &item.Name)
case *rbacv1.Role:
PatchNamespace(f, driverNamespace, &item.Namespace)
// Roles are namespaced, but because the RoleRef handling above
// always renames (we don't know whether the referenced role is a
// ClusterRole or a Role), we have to do the same here.
PatchName(f, &item.Name)
case *storagev1.StorageClass:
PatchName(f, &item.Name)
case *storagev1.CSIDriver:
PatchName(f, &item.Name)
case *v1.ServiceAccount:
PatchNamespace(f, driverNamespace, &item.ObjectMeta.Namespace)
case *v1.Secret:
PatchNamespace(f, driverNamespace, &item.ObjectMeta.Namespace)
case *rbacv1.ClusterRoleBinding:
PatchName(f, &item.Name)
for i := range item.Subjects {
if err := patchItemRecursively(f, driverNamespace, &item.Subjects[i]); err != nil {
return errors.Wrapf(err, "%T", f)
}
}
if err := patchItemRecursively(f, driverNamespace, &item.RoleRef); err != nil {
return errors.Wrapf(err, "%T", f)
}
case *rbacv1.RoleBinding:
PatchNamespace(f, driverNamespace, &item.Namespace)
for i := range item.Subjects {
if err := patchItemRecursively(f, driverNamespace, &item.Subjects[i]); err != nil {
return errors.Wrapf(err, "%T", f)
}
}
if err := patchItemRecursively(f, driverNamespace, &item.RoleRef); err != nil {
return errors.Wrapf(err, "%T", f)
}
case *v1.Service:
PatchNamespace(f, driverNamespace, &item.ObjectMeta.Namespace)
case *appsv1.StatefulSet:
PatchNamespace(f, driverNamespace, &item.ObjectMeta.Namespace)
if err := patchContainerImages(item.Spec.Template.Spec.Containers); err != nil {
return err
}
if err := patchContainerImages(item.Spec.Template.Spec.InitContainers); err != nil {
return err
}
case *appsv1.DaemonSet:
PatchNamespace(f, driverNamespace, &item.ObjectMeta.Namespace)
if err := patchContainerImages(item.Spec.Template.Spec.Containers); err != nil {
return err
}
if err := patchContainerImages(item.Spec.Template.Spec.InitContainers); err != nil {
return err
}
default:
return errors.Errorf("missing support for patching item of type %T", item)
}
return nil
}
// The individual factories all follow the same template, but with
// enough differences in types and functions that copy-and-paste
// looked like the least dirty approach. Perhaps one day Go will have
// generics.
type serviceAccountFactory struct{}
func (f *serviceAccountFactory) New() runtime.Object {
return &v1.ServiceAccount{}
}
func (*serviceAccountFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*v1.ServiceAccount)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.CoreV1().ServiceAccounts(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create ServiceAccount")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
type clusterRoleFactory struct{}
func (f *clusterRoleFactory) New() runtime.Object {
return &rbacv1.ClusterRole{}
}
func (*clusterRoleFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*rbacv1.ClusterRole)
if !ok {
return nil, errorItemNotSupported
}
framework.Logf("Define cluster role %v", item.GetName())
client := f.ClientSet.RbacV1().ClusterRoles()
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create ClusterRole")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
type clusterRoleBindingFactory struct{}
func (f *clusterRoleBindingFactory) New() runtime.Object {
return &rbacv1.ClusterRoleBinding{}
}
func (*clusterRoleBindingFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*rbacv1.ClusterRoleBinding)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.RbacV1().ClusterRoleBindings()
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create ClusterRoleBinding")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
type roleFactory struct{}
func (f *roleFactory) New() runtime.Object {
return &rbacv1.Role{}
}
func (*roleFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*rbacv1.Role)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.RbacV1().Roles(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create Role")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
type roleBindingFactory struct{}
func (f *roleBindingFactory) New() runtime.Object {
return &rbacv1.RoleBinding{}
}
func (*roleBindingFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*rbacv1.RoleBinding)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.RbacV1().RoleBindings(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create RoleBinding")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
type serviceFactory struct{}
func (f *serviceFactory) New() runtime.Object {
return &v1.Service{}
}
func (*serviceFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*v1.Service)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.CoreV1().Services(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create Service")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
type statefulSetFactory struct{}
func (f *statefulSetFactory) New() runtime.Object {
return &appsv1.StatefulSet{}
}
func (*statefulSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*appsv1.StatefulSet)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.AppsV1().StatefulSets(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create StatefulSet")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
type daemonSetFactory struct{}
func (f *daemonSetFactory) New() runtime.Object {
return &appsv1.DaemonSet{}
}
func (*daemonSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*appsv1.DaemonSet)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.AppsV1().DaemonSets(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create DaemonSet")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
type storageClassFactory struct{}
func (f *storageClassFactory) New() runtime.Object {
return &storagev1.StorageClass{}
}
func (*storageClassFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*storagev1.StorageClass)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.StorageV1().StorageClasses()
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create StorageClass")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
type csiDriverFactory struct{}
func (f *csiDriverFactory) New() runtime.Object {
return &storagev1.CSIDriver{}
}
func (*csiDriverFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*storagev1.CSIDriver)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.StorageV1().CSIDrivers()
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create CSIDriver")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
type secretFactory struct{}
func (f *secretFactory) New() runtime.Object {
return &v1.Secret{}
}
func (*secretFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*v1.Secret)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.CoreV1().Secrets(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create Secret")
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}
// PrettyPrint returns a human-readable representation of an item.
func PrettyPrint(item interface{}) string {
data, err := json.MarshalIndent(item, "", " ")
if err == nil {
return string(data)
}
return fmt.Sprintf("%+v", item)
}
// patchContainerImages replaces the specified Container Registry with a custom
// one provided via the KUBE_TEST_REPO_LIST env variable
func patchContainerImages(containers []v1.Container) error {
var err error
for i, c := range containers {
containers[i].Image, err = imageutils.ReplaceRegistryInImageURL(c.Image)
if err != nil {
return err
}
}
return nil
}

View File

@ -0,0 +1,204 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"path"
"strings"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
// PatchCSIDeployment modifies the CSI driver deployment:
// - replaces the provisioner name
// - forces pods onto a specific host
//
// All of that is optional, see PatchCSIOptions. Just beware
// that not renaming the CSI driver deployment can be problematic:
// - when multiple tests deploy the driver, they need
// to run sequentially
// - it might conflict with manual deployments
//
// This function is written so that it works for CSI driver deployments
// that follow these conventions:
// - driver and provisioner names are identical
// - the driver binary accepts a --drivername parameter
// - the provisioner binary accepts a --provisioner parameter
// - the paths inside the container are either fixed
// and don't need to be patched (for example, --csi-address=/csi/csi.sock is
// okay) or are specified directly in a parameter (for example,
// --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock)
//
// Driver deployments that are different will have to do the patching
// without this function, or skip patching entirely.
func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interface{}) error {
rename := o.OldDriverName != "" && o.NewDriverName != "" &&
o.OldDriverName != o.NewDriverName
patchVolumes := func(volumes []v1.Volume) {
if !rename {
return
}
for i := range volumes {
volume := &volumes[i]
if volume.HostPath != nil {
// Update paths like /var/lib/kubelet/plugins/<provisioner>.
p := &volume.HostPath.Path
dir, file := path.Split(*p)
if file == o.OldDriverName {
*p = path.Join(dir, o.NewDriverName)
}
}
}
}
patchContainers := func(containers []v1.Container) {
for i := range containers {
container := &containers[i]
if rename {
for e := range container.Args {
// Inject test-specific provider name into paths like this one:
// --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock
container.Args[e] = strings.Replace(container.Args[e], "/"+o.OldDriverName+"/", "/"+o.NewDriverName+"/", 1)
}
}
// Overwrite the driver name or the provisioner name,
// respectively, by appending a parameter with the
// right value.
switch container.Name {
case o.DriverContainerName:
container.Args = append(container.Args, o.DriverContainerArguments...)
case o.ProvisionerContainerName:
// Driver name is expected to be the same
// as the provisioner here.
container.Args = append(container.Args, "--provisioner="+o.NewDriverName)
}
}
}
patchPodSpec := func(spec *v1.PodSpec) {
patchContainers(spec.Containers)
patchVolumes(spec.Volumes)
if o.NodeName != "" {
e2epod.SetNodeSelection(spec, e2epod.NodeSelection{Name: o.NodeName})
}
}
switch object := object.(type) {
case *appsv1.ReplicaSet:
patchPodSpec(&object.Spec.Template.Spec)
case *appsv1.DaemonSet:
patchPodSpec(&object.Spec.Template.Spec)
case *appsv1.StatefulSet:
patchPodSpec(&object.Spec.Template.Spec)
case *appsv1.Deployment:
patchPodSpec(&object.Spec.Template.Spec)
case *storagev1.StorageClass:
if o.NewDriverName != "" {
// Driver name is expected to be the same
// as the provisioner name here.
object.Provisioner = o.NewDriverName
}
case *storagev1.CSIDriver:
if o.NewDriverName != "" {
object.Name = o.NewDriverName
}
if o.PodInfo != nil {
object.Spec.PodInfoOnMount = o.PodInfo
}
if o.StorageCapacity != nil {
object.Spec.StorageCapacity = o.StorageCapacity
}
if o.CanAttach != nil {
object.Spec.AttachRequired = o.CanAttach
}
if o.VolumeLifecycleModes != nil {
object.Spec.VolumeLifecycleModes = *o.VolumeLifecycleModes
}
if o.TokenRequests != nil {
object.Spec.TokenRequests = o.TokenRequests
}
if o.RequiresRepublish != nil {
object.Spec.RequiresRepublish = o.RequiresRepublish
}
if o.FSGroupPolicy != nil {
object.Spec.FSGroupPolicy = o.FSGroupPolicy
}
}
return nil
}
// PatchCSIOptions controls how PatchCSIDeployment patches the objects.
type PatchCSIOptions struct {
// The original driver name.
OldDriverName string
// The driver name that replaces the original name.
// Can be empty (not used at all) or equal to OldDriverName
// (then it will be added where appropriate without renaming
// in existing fields).
NewDriverName string
// The name of the container which has the CSI driver binary.
// If non-empty, DriverContainerArguments are added to argument
// list in container with that name.
DriverContainerName string
// List of arguments to add to container with
// DriverContainerName.
DriverContainerArguments []string
// The name of the container which has the provisioner binary.
// If non-empty, --provisioner with new name will be appended
// to the argument list.
ProvisionerContainerName string
// The name of the container which has the snapshotter binary.
// If non-empty, --snapshotter with new name will be appended
// to the argument list.
SnapshotterContainerName string
// If non-empty, all pods are forced to run on this node.
NodeName string
// If not nil, the value to use for the CSIDriver.Spec.PodInfo
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
PodInfo *bool
// If not nil, the value to use for the CSIDriver.Spec.CanAttach
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
CanAttach *bool
// If not nil, the value to use for the CSIDriver.Spec.StorageCapacity
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
StorageCapacity *bool
// If not nil, the value to use for the CSIDriver.Spec.VolumeLifecycleModes
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
VolumeLifecycleModes *[]storagev1.VolumeLifecycleMode
// If not nil, the value to use for the CSIDriver.Spec.TokenRequests
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
TokenRequests []storagev1.TokenRequest
// If not nil, the value to use for the CSIDriver.Spec.RequiresRepublish
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
RequiresRepublish *bool
// If not nil, the value to use for the CSIDriver.Spec.FSGroupPolicy
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
FSGroupPolicy *storagev1.FSGroupPolicy
}
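A hedged sketch of how these options feed the patch callback of CreateFromManifests; the driver names, container names, manifest path, and nodeName are all illustrative:
o := PatchCSIOptions{
OldDriverName: "csi-hostpath",
NewDriverName: "csi-hostpath-" + f.UniqueName,
DriverContainerName: "hostpath",
ProvisionerContainerName: "csi-provisioner",
NodeName: nodeName, // assumed to be picked by the test
}
cleanup, err := CreateFromManifests(f, nil, func(item interface{}) error {
return PatchCSIDeployment(f, o, item)
}, "test/e2e/testing-manifests/storage-csi/hostpath/csi-hostpath-plugin.yaml")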

263
vendor/k8s.io/kubernetes/test/e2e/storage/utils/ebs.go generated vendored Normal file
View File

@ -0,0 +1,263 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"fmt"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
)
const (
volumeAttachmentStatusPollDelay = 2 * time.Second
volumeAttachmentStatusFactor = 2
volumeAttachmentStatusSteps = 6
// represents expected attachment status of a volume after attach
volumeAttachedStatus = "attached"
// represents expected attachment status of a volume after detach
volumeDetachedStatus = "detached"
)
// EBSUtil provides functions to interact with EBS volumes
type EBSUtil struct {
client *ec2.EC2
validDevices []string
}
// NewEBSUtil returns an instance of EBSUtil which can be used
// to interact with EBS volumes.
func NewEBSUtil(client *ec2.EC2) *EBSUtil {
ebsUtil := &EBSUtil{client: client}
validDevices := []string{}
for _, firstChar := range []rune{'b', 'c'} {
for i := 'a'; i <= 'z'; i++ {
dev := string([]rune{firstChar, i})
validDevices = append(validDevices, dev)
}
}
ebsUtil.validDevices = validDevices
return ebsUtil
}
// AttachDisk attaches an EBS volume to a node.
func (ebs *EBSUtil) AttachDisk(volumeID string, nodeName string) error {
instance, err := findInstanceByNodeName(nodeName, ebs.client)
if err != nil {
return fmt.Errorf("error finding node %s: %v", nodeName, err)
}
err = ebs.waitForAvailable(volumeID)
if err != nil {
return fmt.Errorf("error waiting volume %s to be available: %v", volumeID, err)
}
device, err := ebs.findFreeDevice(instance)
if err != nil {
return fmt.Errorf("error finding free device on node %s: %v", nodeName, err)
}
hostDevice := "/dev/xvd" + string(device)
attachInput := &ec2.AttachVolumeInput{
VolumeId: &volumeID,
InstanceId: instance.InstanceId,
Device: &hostDevice,
}
_, err = ebs.client.AttachVolume(attachInput)
if err != nil {
return fmt.Errorf("error attaching volume %s to node %s: %v", volumeID, nodeName, err)
}
return ebs.waitForAttach(volumeID)
}
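A hedged usage sketch; constructing the EC2 client is the caller's job (it needs the aws session package in addition to the imports above), and the volume ID and node name are placeholders:
sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
ebs := NewEBSUtil(ec2.New(sess))
if err := ebs.AttachDisk("vol-0123456789abcdef0", "ip-10-0-0-42.us-west-2.compute.internal"); err != nil {
klog.Fatalf("attach failed: %v", err)
}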
func (ebs *EBSUtil) findFreeDevice(instance *ec2.Instance) (string, error) {
deviceMappings := map[string]string{}
for _, blockDevice := range instance.BlockDeviceMappings {
name := aws.StringValue(blockDevice.DeviceName)
name = strings.TrimPrefix(name, "/dev/sd")
name = strings.TrimPrefix(name, "/dev/xvd")
if len(name) < 1 || len(name) > 2 {
klog.Warningf("Unexpected EBS DeviceName: %q", aws.StringValue(blockDevice.DeviceName))
}
deviceMappings[name] = aws.StringValue(blockDevice.Ebs.VolumeId)
}
for _, device := range ebs.validDevices {
if _, found := deviceMappings[device]; !found {
return device, nil
}
}
return "", fmt.Errorf("no available device")
}
func (ebs *EBSUtil) waitForAttach(volumeID string) error {
backoff := wait.Backoff{
Duration: volumeAttachmentStatusPollDelay,
Factor: volumeAttachmentStatusFactor,
Steps: volumeAttachmentStatusSteps,
}
time.Sleep(volumeAttachmentStatusPollDelay)
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
info, err := ebs.describeVolume(volumeID)
if err != nil {
return false, err
}
if len(info.Attachments) > 1 {
// Shouldn't happen; log so we know if it is
klog.Warningf("Found multiple attachments for volume %q: %v", volumeID, info)
}
attachmentStatus := ""
for _, a := range info.Attachments {
if attachmentStatus != "" {
// Shouldn't happen; log so we know if it is
klog.Warningf("Found multiple attachments for volume %q: %v", volumeID, info)
}
if a.State != nil {
attachmentStatus = *a.State
} else {
// Shouldn't happen; log so we know if it is
klog.Warningf("Ignoring nil attachment state for volume %q: %v", volumeID, a)
}
}
if attachmentStatus == "" {
attachmentStatus = volumeDetachedStatus
}
if attachmentStatus == volumeAttachedStatus {
// Attachment is in requested state, finish waiting
return true, nil
}
return false, nil
})
return err
}
func (ebs *EBSUtil) waitForAvailable(volumeID string) error {
backoff := wait.Backoff{
Duration: volumeAttachmentStatusPollDelay,
Factor: volumeAttachmentStatusFactor,
Steps: volumeAttachmentStatusSteps,
}
time.Sleep(volumeAttachmentStatusPollDelay)
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
info, err := ebs.describeVolume(volumeID)
if err != nil {
return false, err
}
volumeState := aws.StringValue(info.State)
if volumeState != ec2.VolumeStateAvailable {
return false, nil
}
return true, nil
})
return err
}
// Gets the full information about this volume from the EC2 API
func (ebs *EBSUtil) describeVolume(volumeID string) (*ec2.Volume, error) {
request := &ec2.DescribeVolumesInput{
VolumeIds: []*string{&volumeID},
}
results := []*ec2.Volume{}
var nextToken *string
for {
response, err := ebs.client.DescribeVolumes(request)
if err != nil {
return nil, err
}
results = append(results, response.Volumes...)
nextToken = response.NextToken
if aws.StringValue(nextToken) == "" {
break
}
request.NextToken = nextToken
}
if len(results) == 0 {
return nil, fmt.Errorf("no volumes found")
}
if len(results) > 1 {
return nil, fmt.Errorf("multiple volumes found")
}
return results[0], nil
}
func newEc2Filter(name string, value string) *ec2.Filter {
filter := &ec2.Filter{
Name: aws.String(name),
Values: []*string{
aws.String(value),
},
}
return filter
}
func findInstanceByNodeName(nodeName string, cloud *ec2.EC2) (*ec2.Instance, error) {
filters := []*ec2.Filter{
newEc2Filter("private-dns-name", nodeName),
}
request := &ec2.DescribeInstancesInput{
Filters: filters,
}
instances, err := describeInstances(request, cloud)
if err != nil {
return nil, err
}
if len(instances) == 0 {
return nil, nil
}
if len(instances) > 1 {
return nil, fmt.Errorf("multiple instances found for name: %s", nodeName)
}
return instances[0], nil
}
func describeInstances(request *ec2.DescribeInstancesInput, cloud *ec2.EC2) ([]*ec2.Instance, error) {
// Instances are paged
results := []*ec2.Instance{}
var nextToken *string
for {
response, err := cloud.DescribeInstances(request)
if err != nil {
return nil, fmt.Errorf("error listing AWS instances: %v", err)
}
for _, reservation := range response.Reservations {
results = append(results, reservation.Instances...)
}
nextToken = response.NextToken
if nextToken == nil || len(*nextToken) == 0 {
break
}
request.NextToken = nextToken
}
return results, nil
}

View File

@ -0,0 +1,24 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import "github.com/onsi/ginkgo"
// SIGDescribe annotates the test with the SIG label.
func SIGDescribe(text string, body func()) bool {
return ginkgo.Describe("[sig-storage] "+text, body)
}
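Usage follows the usual top-level ginkgo pattern, for example:
var _ = SIGDescribe("persistent volumes", func() {
ginkgo.It("should survive kubelet restart", func() {
// test body
})
})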

View File

@ -0,0 +1,190 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/exec"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
// Result holds the execution result of remote execution command.
type Result struct {
Host string
Cmd string
Stdout string
Stderr string
Code int
}
// LogResult logs the given command execution result.
func LogResult(result Result) {
remote := result.Host
framework.Logf("exec %s: command: %s", remote, result.Cmd)
framework.Logf("exec %s: stdout: %q", remote, result.Stdout)
framework.Logf("exec %s: stderr: %q", remote, result.Stderr)
framework.Logf("exec %s: exit code: %d", remote, result.Code)
}
// HostExec represents the interface we require to execute commands on a remote host.
type HostExec interface {
Execute(cmd string, node *v1.Node) (Result, error)
IssueCommandWithResult(cmd string, node *v1.Node) (string, error)
IssueCommand(cmd string, node *v1.Node) error
Cleanup()
}
// hostExecutor implements HostExec
type hostExecutor struct {
*framework.Framework
nodeExecPods map[string]*v1.Pod
}
// NewHostExec returns a HostExec
func NewHostExec(framework *framework.Framework) HostExec {
return &hostExecutor{
Framework: framework,
nodeExecPods: make(map[string]*v1.Pod),
}
}
// launchNodeExecPod launches a hostexec pod for local PV and waits
// until it's Running.
func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod {
f := h.Framework
cs := f.ClientSet
ns := f.Namespace
hostExecPod := e2epod.NewExecPodSpec(ns.Name, "", true)
hostExecPod.GenerateName = fmt.Sprintf("hostexec-%s-", node)
// Use NodeAffinity instead of NodeName so that pods will not
// be immediately Failed by the kubelet if it runs out of space.
// Instead, Pods will stay pending in the scheduler until space
// is freed up.
e2epod.SetNodeAffinity(&hostExecPod.Spec, node)
hostExecPod.Spec.Volumes = []v1.Volume{
{
// Required to enter into host mount namespace via nsenter.
Name: "rootfs",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/",
},
},
},
}
hostExecPod.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
{
Name: "rootfs",
MountPath: "/rootfs",
ReadOnly: true,
},
}
hostExecPod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{
Privileged: func(privileged bool) *bool {
return &privileged
}(true),
}
pod, err := cs.CoreV1().Pods(ns.Name).Create(context.TODO(), hostExecPod, metav1.CreateOptions{})
framework.ExpectNoError(err)
err = e2epod.WaitForPodRunningInNamespace(cs, pod)
framework.ExpectNoError(err)
return pod
}
// Execute executes the command on the given node. If there is no error
// performing the remote command execution, the stdout, stderr and exit code
// are returned.
// This works like ssh.SSH(...) utility.
func (h *hostExecutor) Execute(cmd string, node *v1.Node) (Result, error) {
result, err := h.exec(cmd, node)
if codeExitErr, ok := err.(exec.CodeExitError); ok {
// extract the exit code of remote command and silence the command
// non-zero exit code error
result.Code = codeExitErr.ExitStatus()
err = nil
}
return result, err
}
func (h *hostExecutor) exec(cmd string, node *v1.Node) (Result, error) {
result := Result{
Host: node.Name,
Cmd: cmd,
}
pod, ok := h.nodeExecPods[node.Name]
if !ok {
pod = h.launchNodeExecPod(node.Name)
if pod == nil {
return result, fmt.Errorf("failed to create hostexec pod for node %q", node)
}
h.nodeExecPods[node.Name] = pod
}
args := []string{
"nsenter",
"--mount=/rootfs/proc/1/ns/mnt",
"--",
"sh",
"-c",
cmd,
}
containerName := pod.Spec.Containers[0].Name
var err error
result.Stdout, result.Stderr, err = h.Framework.ExecWithOptions(framework.ExecOptions{
Command: args,
Namespace: pod.Namespace,
PodName: pod.Name,
ContainerName: containerName,
Stdin: nil,
CaptureStdout: true,
CaptureStderr: true,
PreserveWhitespace: true,
})
return result, err
}
// IssueCommandWithResult issues command on the given node and returns stdout as
// result. It returns error if there are some issues executing the command or
// the command exits non-zero.
func (h *hostExecutor) IssueCommandWithResult(cmd string, node *v1.Node) (string, error) {
result, err := h.exec(cmd, node)
if err != nil {
LogResult(result)
}
return result.Stdout, err
}
// IssueCommand works like IssueCommandWithResult, but discards result.
func (h *hostExecutor) IssueCommand(cmd string, node *v1.Node) error {
_, err := h.IssueCommandWithResult(cmd, node)
return err
}
// Cleanup cleans up resources created during the test.
// Note that in most cases it is not necessary to call this because we create
// pods under the test namespace, which will be destroyed in the teardown phase.
func (h *hostExecutor) Cleanup() {
for _, pod := range h.nodeExecPods {
e2epod.DeletePodOrFail(h.Framework.ClientSet, pod.Namespace, pod.Name)
}
h.nodeExecPods = make(map[string]*v1.Pod)
}
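A short, hypothetical end-to-end sketch of the interface; f is the test's *framework.Framework and node is assumed to be a schedulable node chosen elsewhere:
hostExec := NewHostExec(f)
defer hostExec.Cleanup()
out, err := hostExec.IssueCommandWithResult("uname -a", node)
framework.ExpectNoError(err)
framework.Logf("node kernel: %s", out)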

View File

@ -0,0 +1,344 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
/*
* Various local test resource implementations.
*/
import (
"fmt"
"path/filepath"
"strings"
"github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
)
// LocalVolumeType represents type of local volume, e.g. tmpfs, directory,
// block, etc.
type LocalVolumeType string
const (
// LocalVolumeDirectory represents a simple directory as local volume
LocalVolumeDirectory LocalVolumeType = "dir"
// LocalVolumeDirectoryLink is like LocalVolumeDirectory but it's a symbolic link to a directory
LocalVolumeDirectoryLink LocalVolumeType = "dir-link"
// LocalVolumeDirectoryBindMounted is like LocalVolumeDirectory but bind mounted
LocalVolumeDirectoryBindMounted LocalVolumeType = "dir-bindmounted"
// LocalVolumeDirectoryLinkBindMounted is like LocalVolumeDirectory but it's a symbolic link to a self bind-mounted directory
// Note that bind mounting at a symbolic link actually mounts at the
// directory it links to
LocalVolumeDirectoryLinkBindMounted LocalVolumeType = "dir-link-bindmounted"
// LocalVolumeTmpfs represents a temporary filesystem to be used as local volume
LocalVolumeTmpfs LocalVolumeType = "tmpfs"
// LocalVolumeBlock represents a Block device, creates a local file, and maps it as a block device
LocalVolumeBlock LocalVolumeType = "block"
// LocalVolumeBlockFS represents a filesystem backed by a block device
LocalVolumeBlockFS LocalVolumeType = "blockfs"
// LocalVolumeGCELocalSSD represents a Filesystem backed by GCE Local SSD as local volume
LocalVolumeGCELocalSSD LocalVolumeType = "gce-localssd-scsi-fs"
)
// LocalTestResource represents test resource of a local volume.
type LocalTestResource struct {
VolumeType LocalVolumeType
Node *v1.Node
// Volume path, path to filesystem or block device on the node
Path string
// If volume is backed by a loop device, we create loop device storage file
// under this directory.
loopDir string
}
// LocalTestResourceManager represents interface to create/destroy local test resources on node
type LocalTestResourceManager interface {
Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource
Remove(ltr *LocalTestResource)
}
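A sketch of the manager's lifecycle in a test; the prefix, hostBase, node, and hostExec values are illustrative:
ltrMgr := NewLocalResourceManager("local-volume-test", hostExec, "/tmp")
ltr := ltrMgr.Create(node, LocalVolumeTmpfs, nil)
defer ltrMgr.Remove(ltr)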
// ltrMgr implements LocalTestResourceManager
type ltrMgr struct {
prefix string
hostExec HostExec
// hostBase represents a writable directory on the host under which we
// create test directories
hostBase string
}
// NewLocalResourceManager returns an instance of LocalTestResourceManager
func NewLocalResourceManager(prefix string, hostExec HostExec, hostBase string) LocalTestResourceManager {
return &ltrMgr{
prefix: prefix,
hostExec: hostExec,
hostBase: hostBase,
}
}
// getTestDir returns a test dir under `hostBase` directory with a random name.
func (l *ltrMgr) getTestDir() string {
testDirName := fmt.Sprintf("%s-%s", l.prefix, string(uuid.NewUUID()))
return filepath.Join(l.hostBase, testDirName)
}
func (l *ltrMgr) setupLocalVolumeTmpfs(node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
ginkgo.By(fmt.Sprintf("Creating tmpfs mount point on node %q at path %q", node.Name, hostDir))
err := l.hostExec.IssueCommand(fmt.Sprintf("mkdir -p %q && mount -t tmpfs -o size=10m tmpfs-%q %q", hostDir, hostDir, hostDir), node)
framework.ExpectNoError(err)
return &LocalTestResource{
Node: node,
Path: hostDir,
}
}
func (l *ltrMgr) cleanupLocalVolumeTmpfs(ltr *LocalTestResource) {
ginkgo.By(fmt.Sprintf("Unmount tmpfs mount point on node %q at path %q", ltr.Node.Name, ltr.Path))
err := l.hostExec.IssueCommand(fmt.Sprintf("umount %q", ltr.Path), ltr.Node)
framework.ExpectNoError(err)
ginkgo.By("Removing the test directory")
err = l.hostExec.IssueCommand(fmt.Sprintf("rm -r %s", ltr.Path), ltr.Node)
framework.ExpectNoError(err)
}
// createAndSetupLoopDevice creates an empty file and associates a loop device with it.
func (l *ltrMgr) createAndSetupLoopDevice(dir string, node *v1.Node, size int) {
ginkgo.By(fmt.Sprintf("Creating block device on node %q using path %q", node.Name, dir))
mkdirCmd := fmt.Sprintf("mkdir -p %s", dir)
count := size / 4096
// xfs requires at least 4096 blocks
if count < 4096 {
count = 4096
}
ddCmd := fmt.Sprintf("dd if=/dev/zero of=%s/file bs=4096 count=%d", dir, count)
losetupCmd := fmt.Sprintf("losetup -f %s/file", dir)
err := l.hostExec.IssueCommand(fmt.Sprintf("%s && %s && %s", mkdirCmd, ddCmd, losetupCmd), node)
framework.ExpectNoError(err)
}
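// Worked example (illustrative): setupLocalVolumeBlock below calls this with
// size = 20*1024*1024 (20 MiB), so count = 20971520/4096 = 5120 blocks, which
// already exceeds the 4096-block floor required by xfs, so no clamping occurs.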
// findLoopDevice finds the loop device path by its associated storage directory.
func (l *ltrMgr) findLoopDevice(dir string, node *v1.Node) string {
cmd := fmt.Sprintf("E2E_LOOP_DEV=$(losetup | grep %s/file | awk '{ print $1 }') 2>&1 > /dev/null && echo ${E2E_LOOP_DEV}", dir)
loopDevResult, err := l.hostExec.IssueCommandWithResult(cmd, node)
framework.ExpectNoError(err)
return strings.TrimSpace(loopDevResult)
}
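// For example (illustrative), with dir "/tmp/local-xyz" the command above becomes:
// losetup | grep /tmp/local-xyz/file | awk '{ print $1 }'
// which prints the loop device (e.g. "/dev/loop3") backing that storage file.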
func (l *ltrMgr) setupLocalVolumeBlock(node *v1.Node, parameters map[string]string) *LocalTestResource {
loopDir := l.getTestDir()
l.createAndSetupLoopDevice(loopDir, node, 20*1024*1024)
loopDev := l.findLoopDevice(loopDir, node)
return &LocalTestResource{
Node: node,
Path: loopDev,
loopDir: loopDir,
}
}
// teardownLoopDevice tears down the loop device associated with the given storage directory.
func (l *ltrMgr) teardownLoopDevice(dir string, node *v1.Node) {
loopDev := l.findLoopDevice(dir, node)
ginkgo.By(fmt.Sprintf("Tear down block device %q on node %q at path %s/file", loopDev, node.Name, dir))
losetupDeleteCmd := fmt.Sprintf("losetup -d %s", loopDev)
err := l.hostExec.IssueCommand(losetupDeleteCmd, node)
framework.ExpectNoError(err)
}
func (l *ltrMgr) cleanupLocalVolumeBlock(ltr *LocalTestResource) {
l.teardownLoopDevice(ltr.loopDir, ltr.Node)
ginkgo.By(fmt.Sprintf("Removing the test directory %s", ltr.loopDir))
removeCmd := fmt.Sprintf("rm -r %s", ltr.loopDir)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
framework.ExpectNoError(err)
}
func (l *ltrMgr) setupLocalVolumeBlockFS(node *v1.Node, parameters map[string]string) *LocalTestResource {
ltr := l.setupLocalVolumeBlock(node, parameters)
loopDev := ltr.Path
loopDir := ltr.loopDir
// Format and mount at loopDir and give others rwx for read/write testing
cmd := fmt.Sprintf("mkfs -t ext4 %s && mount -t ext4 %s %s && chmod o+rwx %s", loopDev, loopDev, loopDir, loopDir)
err := l.hostExec.IssueCommand(cmd, node)
framework.ExpectNoError(err)
return &LocalTestResource{
Node: node,
Path: loopDir,
loopDir: loopDir,
}
}
func (l *ltrMgr) cleanupLocalVolumeBlockFS(ltr *LocalTestResource) {
umountCmd := fmt.Sprintf("umount %s", ltr.Path)
err := l.hostExec.IssueCommand(umountCmd, ltr.Node)
framework.ExpectNoError(err)
l.cleanupLocalVolumeBlock(ltr)
}
func (l *ltrMgr) setupLocalVolumeDirectory(node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
mkdirCmd := fmt.Sprintf("mkdir -p %s", hostDir)
err := l.hostExec.IssueCommand(mkdirCmd, node)
framework.ExpectNoError(err)
return &LocalTestResource{
Node: node,
Path: hostDir,
}
}
func (l *ltrMgr) cleanupLocalVolumeDirectory(ltr *LocalTestResource) {
ginkgo.By("Removing the test directory")
removeCmd := fmt.Sprintf("rm -r %s", ltr.Path)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
framework.ExpectNoError(err)
}
func (l *ltrMgr) setupLocalVolumeDirectoryLink(node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
hostDirBackend := hostDir + "-backend"
cmd := fmt.Sprintf("mkdir %s && ln -s %s %s", hostDirBackend, hostDirBackend, hostDir)
err := l.hostExec.IssueCommand(cmd, node)
framework.ExpectNoError(err)
return &LocalTestResource{
Node: node,
Path: hostDir,
}
}
func (l *ltrMgr) cleanupLocalVolumeDirectoryLink(ltr *LocalTestResource) {
ginkgo.By("Removing the test directory")
hostDir := ltr.Path
hostDirBackend := hostDir + "-backend"
removeCmd := fmt.Sprintf("rm -r %s && rm -r %s", hostDir, hostDirBackend)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
framework.ExpectNoError(err)
}
func (l *ltrMgr) setupLocalVolumeDirectoryBindMounted(node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
cmd := fmt.Sprintf("mkdir %s && mount --bind %s %s", hostDir, hostDir, hostDir)
err := l.hostExec.IssueCommand(cmd, node)
framework.ExpectNoError(err)
return &LocalTestResource{
Node: node,
Path: hostDir,
}
}
func (l *ltrMgr) cleanupLocalVolumeDirectoryBindMounted(ltr *LocalTestResource) {
ginkgo.By("Removing the test directory")
hostDir := ltr.Path
removeCmd := fmt.Sprintf("umount %s && rm -r %s", hostDir, hostDir)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
framework.ExpectNoError(err)
}
func (l *ltrMgr) setupLocalVolumeDirectoryLinkBindMounted(node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
hostDirBackend := hostDir + "-backend"
cmd := fmt.Sprintf("mkdir %s && mount --bind %s %s && ln -s %s %s", hostDirBackend, hostDirBackend, hostDirBackend, hostDirBackend, hostDir)
err := l.hostExec.IssueCommand(cmd, node)
framework.ExpectNoError(err)
return &LocalTestResource{
Node: node,
Path: hostDir,
}
}
func (l *ltrMgr) cleanupLocalVolumeDirectoryLinkBindMounted(ltr *LocalTestResource) {
ginkgo.By("Removing the test directory")
hostDir := ltr.Path
hostDirBackend := hostDir + "-backend"
removeCmd := fmt.Sprintf("rm %s && umount %s && rm -r %s", hostDir, hostDirBackend, hostDirBackend)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
framework.ExpectNoError(err)
}
func (l *ltrMgr) setupLocalVolumeGCELocalSSD(node *v1.Node, parameters map[string]string) *LocalTestResource {
res, err := l.hostExec.IssueCommandWithResult("ls /mnt/disks/by-uuid/google-local-ssds-scsi-fs/", node)
framework.ExpectNoError(err)
dirName := strings.Fields(res)[0]
hostDir := "/mnt/disks/by-uuid/google-local-ssds-scsi-fs/" + dirName
return &LocalTestResource{
Node: node,
Path: hostDir,
}
}
func (l *ltrMgr) cleanupLocalVolumeGCELocalSSD(ltr *LocalTestResource) {
// This filesystem is attached in cluster initialization, we clean all files to make it reusable.
removeCmd := fmt.Sprintf("find '%s' -mindepth 1 -maxdepth 1 -print0 | xargs -r -0 rm -rf", ltr.Path)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
framework.ExpectNoError(err)
}
func (l *ltrMgr) Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource {
var ltr *LocalTestResource
switch volumeType {
case LocalVolumeDirectory:
ltr = l.setupLocalVolumeDirectory(node, parameters)
case LocalVolumeDirectoryLink:
ltr = l.setupLocalVolumeDirectoryLink(node, parameters)
case LocalVolumeDirectoryBindMounted:
ltr = l.setupLocalVolumeDirectoryBindMounted(node, parameters)
case LocalVolumeDirectoryLinkBindMounted:
ltr = l.setupLocalVolumeDirectoryLinkBindMounted(node, parameters)
case LocalVolumeTmpfs:
ltr = l.setupLocalVolumeTmpfs(node, parameters)
case LocalVolumeBlock:
ltr = l.setupLocalVolumeBlock(node, parameters)
case LocalVolumeBlockFS:
ltr = l.setupLocalVolumeBlockFS(node, parameters)
case LocalVolumeGCELocalSSD:
ltr = l.setupLocalVolumeGCELocalSSD(node, parameters)
default:
framework.Failf("Failed to create local test resource on node %q, unsupported volume type: %v is specified", node.Name, volumeType)
return nil
}
if ltr == nil {
framework.Failf("Failed to create local test resource on node %q, volume type: %v, parameters: %v", node.Name, volumeType, parameters)
}
ltr.VolumeType = volumeType
return ltr
}
func (l *ltrMgr) Remove(ltr *LocalTestResource) {
switch ltr.VolumeType {
case LocalVolumeDirectory:
l.cleanupLocalVolumeDirectory(ltr)
case LocalVolumeDirectoryLink:
l.cleanupLocalVolumeDirectoryLink(ltr)
case LocalVolumeDirectoryBindMounted:
l.cleanupLocalVolumeDirectoryBindMounted(ltr)
case LocalVolumeDirectoryLinkBindMounted:
l.cleanupLocalVolumeDirectoryLinkBindMounted(ltr)
case LocalVolumeTmpfs:
l.cleanupLocalVolumeTmpfs(ltr)
case LocalVolumeBlock:
l.cleanupLocalVolumeBlock(ltr)
case LocalVolumeBlockFS:
l.cleanupLocalVolumeBlockFS(ltr)
case LocalVolumeGCELocalSSD:
l.cleanupLocalVolumeGCELocalSSD(ltr)
default:
framework.Failf("Failed to remove local test resource, unsupported volume type: %v is specified", ltr.VolumeType)
}
}

View File

@ -0,0 +1,883 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"context"
"crypto/sha256"
"encoding/base64"
"fmt"
"math/rand"
"path/filepath"
"strings"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
clientexec "k8s.io/client-go/util/exec"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"
)
// KubeletOpt is the type of kubelet operation to perform
type KubeletOpt string
const (
// NodeStateTimeout is how long to wait for a node to reach the expected state
NodeStateTimeout = 1 * time.Minute
// KStart is the kubelet start operation
KStart KubeletOpt = "start"
// KStop is the kubelet stop operation
KStop KubeletOpt = "stop"
// KRestart is the kubelet restart operation
KRestart KubeletOpt = "restart"
)
const (
// ClusterRole name for the e2e test Privileged Pod Security Policy user
podSecurityPolicyPrivilegedClusterRoleName = "e2e-test-privileged-psp"
)
// PodExec runs f.ExecCommandInContainerWithFullOutput to execute a shell command in the target pod
func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string, error) {
if framework.NodeOSDistroIs("windows") {
return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "powershell", "/c", shExec)
}
return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
}
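// examplePodExec is a hypothetical sketch, not part of the upstream file
// (f and pod are assumed to come from a running e2e test): stdout and stderr
// are returned separately, which the Verify* helpers below rely on when
// composing their failure messages.
func examplePodExec(f *framework.Framework, pod *v1.Pod) {
stdout, stderr, err := PodExec(f, pod, "ls -l /mnt")
framework.ExpectNoError(err, "stdout: %s, stderr: %s", stdout, stderr)
}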
// VerifyExecInPodSucceed verifies that a shell command in the target pod succeeds
func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, shExec string) {
stdout, stderr, err := PodExec(f, pod, shExec)
if err != nil {
if exiterr, ok := err.(uexec.CodeExitError); ok {
exitCode := exiterr.ExitStatus()
framework.ExpectNoError(err,
"%q should succeed, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, exiterr, stdout, stderr)
} else {
framework.ExpectNoError(err,
"%q should succeed, but failed with error message %q\nstdout: %s\nstderr: %s",
shExec, err, stdout, stderr)
}
}
}
// VerifyFSGroupInPod verifies that the given filePath is owned by the expected fsGroup
func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string, pod *v1.Pod) {
cmd := fmt.Sprintf("ls -l %s", filePath)
stdout, stderr, err := PodExec(f, pod, cmd)
framework.ExpectNoError(err)
framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
fsGroupResult := strings.Fields(stdout)[3]
framework.ExpectEqual(expectedFSGroup, fsGroupResult,
"Expected fsGroup of %s, got %s", expectedFSGroup, fsGroupResult)
}
// VerifyExecInPodFail verifies that a shell command in the target pod fails with the given exit code
func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exitCode int) {
stdout, stderr, err := PodExec(f, pod, shExec)
if err != nil {
if exiterr, ok := err.(clientexec.ExitError); ok {
actualExitCode := exiterr.ExitStatus()
framework.ExpectEqual(actualExitCode, exitCode,
"%q should fail with exit code %d, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, actualExitCode, exiterr, stdout, stderr)
} else {
framework.ExpectNoError(err,
"%q should fail with exit code %d, but failed with error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, err, stdout, stderr)
}
}
framework.ExpectError(err, "%q should fail with exit code %d, but exit without error", shExec, exitCode)
}
func isSudoPresent(nodeIP string, provider string) bool {
framework.Logf("Checking if sudo command is present")
sshResult, err := e2essh.SSH("sudo --version", nodeIP, provider)
framework.ExpectNoError(err, "SSH to %q errored.", nodeIP)
if !strings.Contains(sshResult.Stderr, "command not found") {
return true
}
return false
}
// getHostAddress gets the node of a pod and returns its first external
// address, falling back to its first internal address. Returns an error if
// the node the pod runs on has no address.
func getHostAddress(client clientset.Interface, p *v1.Pod) (string, error) {
node, err := client.CoreV1().Nodes().Get(context.TODO(), p.Spec.NodeName, metav1.GetOptions{})
if err != nil {
return "", err
}
// Try externalAddress first
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeExternalIP {
if address.Address != "" {
return address.Address, nil
}
}
}
// If no externalAddress found, try internalAddress
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeInternalIP {
if address.Address != "" {
return address.Address, nil
}
}
}
// If not found, return error
return "", fmt.Errorf("No address for pod %v on node %v",
p.Name, p.Spec.NodeName)
}
// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
// for the desired state.
// - First issues the command via `systemctl`.
// - If `systemctl` returns stderr "command not found", issues the command via `service`.
// - If `service` also returns stderr "command not found", the test is aborted.
// Allowed kubeletOps are `KStart`, `KStop`, and `KRestart`.
func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
command := ""
systemctlPresent := false
kubeletPid := ""
nodeIP, err := getHostAddress(c, pod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"
framework.Logf("Checking if systemctl command is present")
sshResult, err := e2essh.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
if !strings.Contains(sshResult.Stderr, "command not found") {
command = fmt.Sprintf("systemctl %s kubelet", string(kOp))
systemctlPresent = true
} else {
command = fmt.Sprintf("service kubelet %s", string(kOp))
}
sudoPresent := isSudoPresent(nodeIP, framework.TestContext.Provider)
if sudoPresent {
command = fmt.Sprintf("sudo %s", command)
}
if kOp == KRestart {
kubeletPid = getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
}
framework.Logf("Attempting `%s`", command)
sshResult, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
e2essh.LogResult(sshResult)
gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult)
if kOp == KStop {
if ok := e2enode.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName)
}
}
if kOp == KRestart {
// Wait up to a minute for the kubelet PID to change
isPidChanged := false
for start := time.Now(); time.Since(start) < 1*time.Minute; time.Sleep(2 * time.Second) {
kubeletPidAfterRestart := getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
if kubeletPid != kubeletPidAfterRestart {
isPidChanged = true
break
}
}
framework.ExpectEqual(isPidChanged, true, "Kubelet PID remained unchanged after restarting Kubelet")
framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
time.Sleep(30 * time.Second)
}
if kOp == KStart || kOp == KRestart {
// For kubelet start and restart operations, wait until the node becomes Ready
if ok := e2enode.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
}
}
}
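// exampleKubeletStopStart is a hypothetical sketch, not part of the upstream
// file: stop the kubelet hosting pod, do out-of-band checks, then start it
// again. KubeletCommand itself waits for the node to become NotReady/Ready.
func exampleKubeletStopStart(c clientset.Interface, pod *v1.Pod) {
KubeletCommand(KStop, c, pod)
defer KubeletCommand(KStart, c, pod)
// ... inspect node or volume state while the kubelet is down ...
}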
// getKubeletMainPid returns the main PID of the kubelet process
func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) string {
command := ""
if systemctlPresent {
command = "systemctl status kubelet | grep 'Main PID'"
} else {
command = "service kubelet status | grep 'Main PID'"
}
if sudoPresent {
command = fmt.Sprintf("sudo %s", command)
}
framework.Logf("Attempting `%s`", command)
sshResult, err := e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", nodeIP))
e2essh.LogResult(sshResult)
gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to get kubelet PID")
gomega.Expect(sshResult.Stdout).NotTo(gomega.BeEmpty(), "Kubelet Main PID should not be Empty")
return sshResult.Stdout
}
// TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts
func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
path := "/mnt/volume1"
byteLen := 64
seed := time.Now().UTC().UnixNano()
ginkgo.By("Writing to the volume.")
CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)
ginkgo.By("Restarting kubelet")
KubeletCommand(KRestart, c, clientPod)
ginkgo.By("Testing that written file is accessible.")
CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)
framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, path)
}
// TestKubeletRestartsAndRestoresMap tests that a volume mapped to a pod remains mapped after a kubelet restarts
func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
path := "/mnt/volume1"
byteLen := 64
seed := time.Now().UTC().UnixNano()
ginkgo.By("Writing to the volume.")
CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, false, path, byteLen, seed)
ginkgo.By("Restarting kubelet")
KubeletCommand(KRestart, c, clientPod)
ginkgo.By("Testing that written pv is accessible.")
CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, false, path, byteLen, seed)
framework.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, path)
}
// TestVolumeUnmountsFromDeletedPodWithForceOption tests that a volume unmounts if the client pod was deleted while the kubelet was down.
// forceDelete indicates whether the pod is forcefully deleted.
// checkSubpath indicates whether the subpath should be checked.
func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool) {
nodeIP, err := getHostAddress(c, clientPod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"
ginkgo.By("Expecting the volume mount to be found.")
result, err := e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
if checkSubpath {
ginkgo.By("Expecting the volume subpath mount to be found.")
result, err := e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
}
// Make sure the kubelet is started again after the test finishes, whether it fails or not.
defer func() {
KubeletCommand(KStart, c, clientPod)
}()
ginkgo.By("Stopping the kubelet.")
KubeletCommand(KStop, c, clientPod)
ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
if forceDelete {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, *metav1.NewDeleteOptions(0))
} else {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.DeleteOptions{})
}
framework.ExpectNoError(err)
ginkgo.By("Starting the kubelet and waiting for pod to delete.")
KubeletCommand(KStart, c, clientPod)
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, framework.PodDeleteTimeout)
framework.ExpectNoError(err, "Expected pod to be not found.")
if forceDelete {
// With forceDelete, since pods are immediately deleted from the API server, there is no way
// to be sure when volumes are torn down, so wait some time for that to finish
time.Sleep(30 * time.Second)
}
ginkgo.By("Expecting the volume mount not to be found.")
result, err = e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
framework.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName)
if checkSubpath {
ginkgo.By("Expecting the volume subpath mount not to be found.")
result, err = e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
framework.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName)
}
}
// TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false)
}
// TestVolumeUnmountsFromForceDeletedPod tests that a volume unmounts if the client pod was forcefully deleted while the kubelet was down.
func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false)
}
// TestVolumeUnmapsFromDeletedPodWithForceOption tests that a volume unmaps if the client pod was deleted while the kubelet was down.
// forceDelete indicates whether the pod is forcefully deleted.
func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool) {
nodeIP, err := getHostAddress(c, clientPod)
framework.ExpectNoError(err, "Failed to get nodeIP.")
nodeIP = nodeIP + ":22"
// Build the command that checks whether the path exists
podDirectoryCmd := fmt.Sprintf("ls /var/lib/kubelet/pods/%s/volumeDevices/*/ | grep '.'", clientPod.UID)
if isSudoPresent(nodeIP, framework.TestContext.Provider) {
podDirectoryCmd = fmt.Sprintf("sudo sh -c \"%s\"", podDirectoryCmd)
}
// Directories in the global directory have unpredictable names; device symlinks,
// however, share the pod's UID as their name, so just find anything named pod.UID.
globalBlockDirectoryCmd := fmt.Sprintf("find /var/lib/kubelet/plugins -name %s", clientPod.UID)
if isSudoPresent(nodeIP, framework.TestContext.Provider) {
globalBlockDirectoryCmd = fmt.Sprintf("sudo sh -c \"%s\"", globalBlockDirectoryCmd)
}
ginkgo.By("Expecting the symlinks from PodDeviceMapPath to be found.")
result, err := e2essh.SSH(podDirectoryCmd, nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
ginkgo.By("Expecting the symlinks from global map path to be found.")
result, err = e2essh.SSH(globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected find exit code of 0, got %d", result.Code))
// Make sure the kubelet is started again after the test finishes, whether it fails or not.
defer func() {
KubeletCommand(KStart, c, clientPod)
}()
ginkgo.By("Stopping the kubelet.")
KubeletCommand(KStop, c, clientPod)
ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
if forceDelete {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, *metav1.NewDeleteOptions(0))
} else {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.DeleteOptions{})
}
framework.ExpectNoError(err, "Failed to delete pod.")
ginkgo.By("Starting the kubelet and waiting for pod to delete.")
KubeletCommand(KStart, c, clientPod)
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, framework.PodDeleteTimeout)
framework.ExpectNoError(err, "Expected pod to be not found.")
if forceDelete {
// With forceDelete, since pods are immediately deleted from the API server, there is no way
// to be sure when volumes are torn down, so wait some time for that to finish
time.Sleep(30 * time.Second)
}
ginkgo.By("Expecting the symlink from PodDeviceMapPath not to be found.")
result, err = e2essh.SSH(podDirectoryCmd, nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty.")
ginkgo.By("Expecting the symlinks from global map path not to be found.")
result, err = e2essh.SSH(globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected find stdout to be empty.")
framework.Logf("Volume unmaped on node %s", clientPod.Spec.NodeName)
}
// TestVolumeUnmapsFromDeletedPod tests that a volume unmaps if the client pod was deleted while the kubelet was down.
func TestVolumeUnmapsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, false)
}
// TestVolumeUnmapsFromForceDeletedPod tests that a volume unmaps if the client pod was forcefully deleted while the kubelet was down.
func TestVolumeUnmapsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, true)
}
// RunInPodWithVolume runs a command in a pod with the given claim mounted at /mnt/test.
func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-volume-tester-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "volume-tester",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
VolumeMounts: []v1.VolumeMount{
{
Name: "my-volume",
MountPath: "/mnt/test",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "my-volume",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claimName,
ReadOnly: false,
},
},
},
},
},
}
pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create pod: %v", err)
defer func() {
e2epod.DeletePodOrFail(c, ns, pod.Name)
}()
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
}
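// Illustrative call (sketch; the claim name is an assumption): verify a bound
// claim is mountable and readable by listing its mount point from a
// throwaway pod:
//
// RunInPodWithVolume(c, ns, "my-claim", "ls -la /mnt/test")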
// StartExternalProvisioner creates an external provisioner pod
func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginName string) *v1.Pod {
podClient := c.CoreV1().Pods(ns)
provisionerPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "external-provisioner-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nfs-provisioner",
Image: imageutils.GetE2EImage(imageutils.NFSProvisioner),
SecurityContext: &v1.SecurityContext{
Capabilities: &v1.Capabilities{
Add: []v1.Capability{"DAC_READ_SEARCH"},
},
},
Args: []string{
"-provisioner=" + externalPluginName,
"-grace-period=0",
},
Ports: []v1.ContainerPort{
{Name: "nfs", ContainerPort: 2049},
{Name: "mountd", ContainerPort: 20048},
{Name: "rpcbind", ContainerPort: 111},
{Name: "rpcbind-udp", ContainerPort: 111, Protocol: v1.ProtocolUDP},
},
Env: []v1.EnvVar{
{
Name: "POD_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
},
},
ImagePullPolicy: v1.PullIfNotPresent,
VolumeMounts: []v1.VolumeMount{
{
Name: "export-volume",
MountPath: "/export",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "export-volume",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
},
}
provisionerPod, err := podClient.Create(context.TODO(), provisionerPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err)
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, provisionerPod))
ginkgo.By("locating the provisioner pod")
pod, err := podClient.Get(context.TODO(), provisionerPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)
return pod
}
// PrivilegedTestPSPClusterRoleBinding tests Pod Security Policy role bindings
func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface,
namespace string,
teardown bool,
saNames []string) {
bindingString := "Binding"
if teardown {
bindingString = "Unbinding"
}
roleBindingClient := client.RbacV1().RoleBindings(namespace)
for _, saName := range saNames {
ginkgo.By(fmt.Sprintf("%v priviledged Pod Security Policy to the service account %s", bindingString, saName))
binding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "psp-" + saName,
Namespace: namespace,
},
Subjects: []rbacv1.Subject{
{
Kind: rbacv1.ServiceAccountKind,
Name: saName,
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: podSecurityPolicyPrivilegedClusterRoleName,
APIGroup: "rbac.authorization.k8s.io",
},
}
roleBindingClient.Delete(context.TODO(), binding.GetName(), metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) {
_, err := roleBindingClient.Get(context.TODO(), binding.GetName(), metav1.GetOptions{})
return apierrors.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for RBAC binding %s deletion: %v", binding.GetName(), err)
if teardown {
continue
}
_, err = roleBindingClient.Create(context.TODO(), binding, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)
}
}
// CheckVolumeModeOfPath checks the mode of the volume at the given path
func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
if volMode == v1.PersistentVolumeBlock {
// Check that the block device exists
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -b %s", path))
// Double check that it's not a directory
VerifyExecInPodFail(f, pod, fmt.Sprintf("test -d %s", path), 1)
} else {
// Check that the directory exists
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -d %s", path))
// Double check that it's not a block device
VerifyExecInPodFail(f, pod, fmt.Sprintf("test -b %s", path), 1)
}
}
// CheckReadWriteToPath checks that the given path can be read and written
func CheckReadWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
if volMode == v1.PersistentVolumeBlock {
// random -> file1
VerifyExecInPodSucceed(f, pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
// file1 -> dev (write to dev)
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
// dev -> file2 (read from dev)
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
// file1 == file2 (check contents)
VerifyExecInPodSucceed(f, pod, "diff /tmp/file1 /tmp/file2")
// Clean up temp files
VerifyExecInPodSucceed(f, pod, "rm -f /tmp/file1 /tmp/file2")
// Check that writing file to block volume fails
VerifyExecInPodFail(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
} else {
// text -> file1 (write to file)
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
// grep file1 (read from file and check contents)
VerifyExecInPodSucceed(f, pod, readFile("Hello word.", path))
// Check that writing to directory as block volume fails
VerifyExecInPodFail(f, pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
}
}
func readFile(content, path string) string {
if framework.NodeOSDistroIs("windows") {
return fmt.Sprintf("Select-String '%s' %s/file1.txt", content, path)
}
return fmt.Sprintf("grep 'Hello world.' %s/file1.txt", path)
}
// genBinDataFromSeed generates len bytes of pseudo-random data from the given seed
func genBinDataFromSeed(len int, seed int64) []byte {
binData := make([]byte, len)
rand.Seed(seed)
_, err := rand.Read(binData)
if err != nil {
fmt.Printf("Error: %v\n", err)
}
return binData
}
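// exampleSeedDeterminism is an illustrative sketch, not part of the upstream
// file, of the property the checksum-based helpers below depend on: the same
// (len, seed) pair always yields the same bytes, hence the same sha256 digest.
func exampleSeedDeterminism() {
a := sha256.Sum256(genBinDataFromSeed(64, 42))
b := sha256.Sum256(genBinDataFromSeed(64, 42))
fmt.Printf("deterministic: %v\n", a == b) // always prints true
}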
// CheckReadFromPath validates that the file can be properly read.
//
// Note: directIO does not work with (default) BusyBox pods. A requirement for
// directIO to function correctly is to read whole sector(s) for Block-mode
// PVCs (normally a sector is 512 bytes), or whole memory pages for files
// (commonly 4096 bytes).
func CheckReadFromPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, directIO bool, path string, len int, seed int64) {
var pathForVolMode string
var iflag string
if volMode == v1.PersistentVolumeBlock {
pathForVolMode = path
} else {
pathForVolMode = filepath.Join(path, "file1.txt")
}
if directIO {
iflag = "iflag=direct"
}
sum := sha256.Sum256(genBinDataFromSeed(len, seed))
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum", pathForVolMode, iflag, len))
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, iflag, len, sum))
}
// CheckWriteToPath validates that the file can be properly written.
//
// Note: nocache does not work with (default) BusyBox Pods. To read without
// caching, enable directIO with CheckReadFromPath and check the hints about
// the len requirements.
func CheckWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, nocache bool, path string, len int, seed int64) {
var pathForVolMode string
var oflag string
if volMode == v1.PersistentVolumeBlock {
pathForVolMode = path
} else {
pathForVolMode = filepath.Join(path, "file1.txt")
}
if nocache {
oflag = "oflag=nocache"
}
encoded := base64.StdEncoding.EncodeToString(genBinDataFromSeed(len, seed))
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s %s bs=%d count=1", encoded, pathForVolMode, oflag, len))
}
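// exampleUncachedWriteRead is a hypothetical sketch, not part of the upstream
// file, combining the two notes above: write with nocache=true, then read the
// same data back with directIO=true, using a whole 4096-byte page so direct
// I/O satisfies the alignment requirement for Filesystem-mode volumes.
func exampleUncachedWriteRead(f *framework.Framework, pod *v1.Pod) {
seed := time.Now().UTC().UnixNano()
CheckWriteToPath(f, pod, v1.PersistentVolumeFilesystem, true, "/mnt/volume1", 4096, seed)
CheckReadFromPath(f, pod, v1.PersistentVolumeFilesystem, true, "/mnt/volume1", 4096, seed)
}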
// findMountPoints returns all mount points on the given node under the specified directory.
func findMountPoints(hostExec HostExec, node *v1.Node, dir string) []string {
result, err := hostExec.IssueCommandWithResult(fmt.Sprintf(`find %s -type d -exec mountpoint {} \; | grep 'is a mountpoint$' || true`, dir), node)
framework.ExpectNoError(err, "Encountered HostExec error.")
var mountPoints []string
if err == nil {
for _, line := range strings.Split(result, "\n") {
if line == "" {
continue
}
mountPoints = append(mountPoints, strings.TrimSuffix(line, " is a mountpoint"))
}
}
return mountPoints
}
// FindVolumeGlobalMountPoints returns all volume global mount points on the given node.
func FindVolumeGlobalMountPoints(hostExec HostExec, node *v1.Node) sets.String {
return sets.NewString(findMountPoints(hostExec, node, "/var/lib/kubelet/plugins")...)
}
// CreateDriverNamespace creates a namespace for CSI driver installation.
// The namespace is tracked and ensured to be deleted when the test terminates.
func CreateDriverNamespace(f *framework.Framework) *v1.Namespace {
ginkgo.By(fmt.Sprintf("Building a driver namespace object, basename %s", f.Namespace.Name))
// The driver namespace will be bound to the test namespace in the prefix
namespace, err := f.CreateNamespace(f.Namespace.Name, map[string]string{
"e2e-framework": f.BaseName,
"e2e-test-namespace": f.Namespace.Name,
})
framework.ExpectNoError(err)
if framework.TestContext.VerifyServiceAccount {
ginkgo.By("Waiting for a default service account to be provisioned in namespace")
err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
framework.ExpectNoError(err)
} else {
framework.Logf("Skipping waiting for service account")
}
return namespace
}
// WaitForGVRDeletion waits until a non-namespaced object has been deleted
func WaitForGVRDeletion(c dynamic.Interface, gvr schema.GroupVersionResource, objectName string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for %s %s to be deleted", timeout, gvr.Resource, objectName)
if successful := WaitUntil(poll, timeout, func() bool {
_, err := c.Resource(gvr).Get(context.TODO(), objectName, metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) {
framework.Logf("%s %v is not found and has been deleted", gvr.Resource, objectName)
return true
} else if err != nil {
framework.Logf("Get %s returned an error: %v", objectName, err.Error())
} else {
framework.Logf("%s %v has been found and is not deleted", gvr.Resource, objectName)
}
return false
}); successful {
return nil
}
return fmt.Errorf("%s %s is not deleted within %v", gvr.Resource, objectName, timeout)
}
// WaitForNamespacedGVRDeletion waits until a namespaced object has been deleted
func WaitForNamespacedGVRDeletion(c dynamic.Interface, gvr schema.GroupVersionResource, ns, objectName string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for %s %s to be deleted", timeout, gvr.Resource, objectName)
if successful := WaitUntil(poll, timeout, func() bool {
_, err := c.Resource(gvr).Namespace(ns).Get(context.TODO(), objectName, metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) {
framework.Logf("%s %s is not found in namespace %s and has been deleted", gvr.Resource, objectName, ns)
return true
} else if err != nil {
framework.Logf("Get %s in namespace %s returned an error: %v", objectName, ns, err.Error())
} else {
framework.Logf("%s %s has been found in namespace %s and is not deleted", gvr.Resource, objectName, ns)
}
return false
}); successful {
return nil
}
return fmt.Errorf("%s %s in namespace %s is not deleted within %v", gvr.Resource, objectName, ns, timeout)
}
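// exampleWaitForPVCDeletion is a hypothetical sketch, not part of the
// upstream file: the GVR below is the core/v1 PersistentVolumeClaim resource;
// client, ns, and name are assumed to be supplied by the calling test.
func exampleWaitForPVCDeletion(client dynamic.Interface, ns, name string) error {
gvr := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumeclaims"}
return WaitForNamespacedGVRDeletion(client, gvr, ns, name, 2*time.Second, time.Minute)
}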
// WaitUntil runs checkDone every poll interval until it returns true or the timeout is reached, and reports whether it succeeded
func WaitUntil(poll, timeout time.Duration, checkDone func() bool) bool {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
if checkDone() {
framework.Logf("WaitUntil finished successfully after %v", time.Since(start))
return true
}
}
framework.Logf("WaitUntil failed after reaching the timeout %v", timeout)
return false
}
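// exampleWaitUntil is an illustrative sketch, not part of the upstream file:
// poll every two seconds, for at most one minute, until the closure reports done.
func exampleWaitUntil() bool {
return WaitUntil(2*time.Second, time.Minute, func() bool {
// Replace with a real readiness check in an actual test.
return true
})
}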
// WaitForGVRFinalizer waits until an object of a given GVR contains a finalizer.
// If namespace is empty, the object is assumed to be non-namespaced.
func WaitForGVRFinalizer(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, objectName, objectNamespace, finalizer string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for object %s %s of resource %s to contain finalizer %s", timeout, objectNamespace, objectName, gvr.Resource, finalizer)
var (
err error
resource *unstructured.Unstructured
)
if successful := WaitUntil(poll, timeout, func() bool {
switch objectNamespace {
case "":
resource, err = c.Resource(gvr).Get(ctx, objectName, metav1.GetOptions{})
default:
resource, err = c.Resource(gvr).Namespace(objectNamespace).Get(ctx, objectName, metav1.GetOptions{})
}
if err != nil {
framework.Logf("Failed to get object %s %s with err: %v. Will retry in %v", objectNamespace, objectName, err, timeout)
return false
}
for _, f := range resource.GetFinalizers() {
if f == finalizer {
return true
}
}
return false
}); successful {
return nil
}
if err == nil {
err = fmt.Errorf("finalizer %s not added to object %s %s of resource %s", finalizer, objectNamespace, objectName, gvr)
}
return err
}
// VerifyFilePathGidInPod verifies the expected GID of the target filepath
func VerifyFilePathGidInPod(f *framework.Framework, filePath, expectedGid string, pod *v1.Pod) {
cmd := fmt.Sprintf("ls -l %s", filePath)
stdout, stderr, err := PodExec(f, pod, cmd)
framework.ExpectNoError(err)
framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
ll := strings.Fields(stdout)
framework.Logf("stdout split: %v, expected gid: %v", ll, expectedGid)
framework.ExpectEqual(ll[3], expectedGid)
}
// ChangeFilePathGidInPod changes the GID of the target filepath.
func ChangeFilePathGidInPod(f *framework.Framework, filePath, targetGid string, pod *v1.Pod) {
cmd := fmt.Sprintf("chgrp %s %s", targetGid, filePath)
_, _, err := PodExec(f, pod, cmd)
framework.ExpectNoError(err)
VerifyFilePathGidInPod(f, filePath, targetGid, pod)
}

View File

@ -1,32 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["system_utils.go"],
importpath = "k8s.io/kubernetes/test/e2e/system",
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["system_utils_test.go"],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,39 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package system
import (
"strings"
)
// DeprecatedMightBeMasterNode returns true if given node is a registered master.
// This code must not be updated to use node role labels, since node role labels
// may not change behavior of the system.
// DEPRECATED: use a label selector provided by test initialization.
func DeprecatedMightBeMasterNode(nodeName string) bool {
// We are trying to capture "master(-...)?$" regexp.
// However, using regexp.MatchString() results even in more than 35%
// of all space allocations in ControllerManager spent in this function.
// That's why we are trying to be a bit smarter.
if strings.HasSuffix(nodeName, "master") {
return true
}
if len(nodeName) >= 10 {
return strings.HasSuffix(nodeName[:len(nodeName)-3], "master-")
}
return false
}

View File

@ -10,7 +10,6 @@ go_library(
srcs = [
"admission_webhook.go",
"audit.go",
"audit_dynamic.go",
"conditions.go",
"create_resources.go",
"delete_resources.go",
@ -36,7 +35,6 @@ go_library(
"//pkg/util/labels:go_default_library",
"//staging/src/k8s.io/api/admission/v1beta1:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/auditregistration/v1alpha1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
@ -51,7 +49,6 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
@ -59,7 +56,6 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/audit:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/audit/v1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/audit:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",
@ -69,8 +65,7 @@ go_library(
"//staging/src/k8s.io/kubectl/pkg/scale:go_default_library",
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
"//vendor/github.com/stretchr/testify/require:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
],
)

View File

@ -1,191 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
auditregv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
auditinternal "k8s.io/apiserver/pkg/apis/audit"
auditv1 "k8s.io/apiserver/pkg/apis/audit/v1"
"k8s.io/apiserver/pkg/audit"
)
// AuditTestServer is a helper server for dynamic audit testing
type AuditTestServer struct {
Name string
LockedEventList *LockedEventList
Server *httptest.Server
t *testing.T
}
// LockedEventList is an event list with a lock for concurrent access
type LockedEventList struct {
*sync.RWMutex
EventList auditinternal.EventList
}
// NewLockedEventList returns a new LockedEventList
func NewLockedEventList() *LockedEventList {
return &LockedEventList{
RWMutex: &sync.RWMutex{},
EventList: auditinternal.EventList{},
}
}
// NewAuditTestServer returns a new audit test server
func NewAuditTestServer(t *testing.T, name string) *AuditTestServer {
s := &AuditTestServer{
Name: name,
LockedEventList: NewLockedEventList(),
t: t,
}
s.buildServer()
return s
}
// GetEventList safely returns the internal event list
func (a *AuditTestServer) GetEventList() auditinternal.EventList {
a.LockedEventList.RLock()
defer a.LockedEventList.RUnlock()
return a.LockedEventList.EventList
}
// ResetEventList resets the internal event list
func (a *AuditTestServer) ResetEventList() {
a.LockedEventList.Lock()
defer a.LockedEventList.Unlock()
a.LockedEventList.EventList = auditinternal.EventList{}
}
// AppendEvents will add the given events to the internal event list
func (a *AuditTestServer) AppendEvents(events []auditinternal.Event) {
a.LockedEventList.Lock()
defer a.LockedEventList.Unlock()
a.LockedEventList.EventList.Items = append(a.LockedEventList.EventList.Items, events...)
}
// WaitForEvents waits for the given events to arrive in the server or the 30s timeout is reached
func (a *AuditTestServer) WaitForEvents(expected []AuditEvent) ([]AuditEvent, error) {
var missing []AuditEvent
err := wait.PollImmediate(50*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
var err error
el := a.GetEventList()
if len(el.Items) < 1 {
return false, nil
}
missing, err = CheckAuditList(el, expected)
if err != nil {
return false, nil
}
return true, nil
})
return missing, err
}
// WaitForNumEvents checks that at least the given number of events has arrived or the 30s timeout is reached
func (a *AuditTestServer) WaitForNumEvents(numEvents int) error {
err := wait.PollImmediate(50*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
el := a.GetEventList()
if len(el.Items) < numEvents {
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("%v: %d events failed to arrive in %v", err, numEvents, wait.ForeverTestTimeout)
}
return nil
}
// Health polls the server healthcheck until successful or the 30s timeout has been reached
func (a *AuditTestServer) Health() error {
err := wait.PollImmediate(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
resp, err := http.Get(fmt.Sprintf("%s/health", a.Server.URL))
if err != nil {
return false, nil
}
if resp.StatusCode != 200 {
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("server %s permanently failed health check: %v", a.Server.URL, err)
}
return nil
}
// Close the server
func (a *AuditTestServer) Close() {
a.Server.Close()
}
// BuildSinkConfiguration creates a generic audit sink configuration for this server
func (a *AuditTestServer) BuildSinkConfiguration() *auditregv1alpha1.AuditSink {
return &auditregv1alpha1.AuditSink{
ObjectMeta: metav1.ObjectMeta{
Name: a.Name,
},
Spec: auditregv1alpha1.AuditSinkSpec{
Policy: auditregv1alpha1.Policy{
Level: auditregv1alpha1.LevelRequestResponse,
Stages: []auditregv1alpha1.Stage{
auditregv1alpha1.StageResponseStarted,
auditregv1alpha1.StageResponseComplete,
},
},
Webhook: auditregv1alpha1.Webhook{
ClientConfig: auditregv1alpha1.WebhookClientConfig{
URL: &a.Server.URL,
},
},
},
}
}
// buildServer creates an http test server that will update the internal event list
// with the value it receives
func (a *AuditTestServer) buildServer() {
decoder := audit.Codecs.UniversalDecoder(auditv1.SchemeGroupVersion)
mux := http.NewServeMux()
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
body, err := ioutil.ReadAll(r.Body)
require.NoError(a.t, err, "could not read request body")
el := auditinternal.EventList{}
err = runtime.DecodeInto(decoder, body, &el)
r.Body.Close()
require.NoError(a.t, err, "failed decoding buf: %b, apiVersion: %s", body, auditv1.SchemeGroupVersion)
a.AppendEvents(el.Items)
w.WriteHeader(200)
})
mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
})
a.Server = httptest.NewServer(mux)
}

View File

@ -27,10 +27,9 @@ import (
batch "k8s.io/api/batch/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
)
@ -54,19 +53,6 @@ func RetryWithExponentialBackOff(fn wait.ConditionFunc) error {
return wait.ExponentialBackoff(backoff, fn)
}
func IsRetryableAPIError(err error) bool {
// These errors may indicate a transient error that we can retry in tests.
if apierrors.IsInternalError(err) || apierrors.IsTimeout(err) || apierrors.IsServerTimeout(err) ||
apierrors.IsTooManyRequests(err) || utilnet.IsProbableEOF(err) || utilnet.IsConnectionReset(err) {
return true
}
// If the error sends the Retry-After header, we respect it as an explicit confirmation we should retry.
if _, shouldRetry := apierrors.SuggestsClientDelay(err); shouldRetry {
return true
}
return false
}
func CreatePodWithRetries(c clientset.Interface, namespace string, obj *v1.Pod) error {
if obj == nil {
return fmt.Errorf("Object provided to create is empty")
@ -76,9 +62,6 @@ func CreatePodWithRetries(c clientset.Interface, namespace string, obj *v1.Pod)
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
if IsRetryableAPIError(err) {
return false, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
@ -93,9 +76,6 @@ func CreateRCWithRetries(c clientset.Interface, namespace string, obj *v1.Replic
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
if IsRetryableAPIError(err) {
return false, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
@ -110,9 +90,6 @@ func CreateReplicaSetWithRetries(c clientset.Interface, namespace string, obj *a
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
if IsRetryableAPIError(err) {
return false, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
@ -127,9 +104,6 @@ func CreateDeploymentWithRetries(c clientset.Interface, namespace string, obj *a
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
if IsRetryableAPIError(err) {
return false, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
@ -144,9 +118,6 @@ func CreateDaemonSetWithRetries(c clientset.Interface, namespace string, obj *ap
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
if IsRetryableAPIError(err) {
return false, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
@ -161,9 +132,6 @@ func CreateJobWithRetries(c clientset.Interface, namespace string, obj *batch.Jo
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
if IsRetryableAPIError(err) {
return false, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
@ -178,9 +146,6 @@ func CreateSecretWithRetries(c clientset.Interface, namespace string, obj *v1.Se
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
if IsRetryableAPIError(err) {
return false, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
@ -195,9 +160,6 @@ func CreateConfigMapWithRetries(c clientset.Interface, namespace string, obj *v1
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
if IsRetryableAPIError(err) {
return false, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
@ -212,9 +174,6 @@ func CreateServiceWithRetries(c clientset.Interface, namespace string, obj *v1.S
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
if IsRetryableAPIError(err) {
return false, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
@ -229,9 +188,6 @@ func CreateStorageClassWithRetries(c clientset.Interface, obj *storage.StorageCl
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
if IsRetryableAPIError(err) {
return false, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
@ -246,9 +202,6 @@ func CreateResourceQuotaWithRetries(c clientset.Interface, namespace string, obj
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
if IsRetryableAPIError(err) {
return false, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
@ -263,9 +216,6 @@ func CreatePersistentVolumeWithRetries(c clientset.Interface, obj *v1.Persistent
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
if IsRetryableAPIError(err) {
return false, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
@ -280,9 +230,6 @@ func CreatePersistentVolumeClaimWithRetries(c clientset.Interface, namespace str
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
if IsRetryableAPIError(err) {
return false, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)

View File

@ -63,9 +63,6 @@ func DeleteResourceWithRetries(c clientset.Interface, kind schema.GroupKind, nam
if err == nil || apierrors.IsNotFound(err) {
return true, nil
}
if IsRetryableAPIError(err) {
return false, nil
}
return false, fmt.Errorf("Failed to delete object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(deleteFunc)
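
The delete path is the mirror image: IsNotFound plays the role IsAlreadyExists plays on the create path, so deleting an already-absent object counts as success. A fragment in the same shape, reusing the imports from the sketch above and substituting a concrete pod delete for the GroupKind dispatch (name and namespace are hypothetical):

deleteFunc := func() (bool, error) {
	err := c.CoreV1().Pods(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
	if err == nil || apierrors.IsNotFound(err) {
		return true, nil // deleted, or it was never there
	}
	return false, fmt.Errorf("failed to delete object with non-retriable error: %v", err)
}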

View File

@ -27,7 +27,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
"k8s.io/klog/v2"
)
const (
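
The k8s.io/klog to k8s.io/klog/v2 import bump recurs across these vendored test utilities. Existing printf-style call sites compile unchanged under v2; the main addition is the structured logging variants. A small sketch of both styles (the function and its messages are illustrative, not from this file):

package utils

import "k8s.io/klog/v2"

// logCreated shows both call styles after the bump: printf-style calls
// compile unchanged, and InfoS is the structured form v2 adds.
func logCreated(ns, name string) {
	klog.Infof("created pod %s/%s", ns, name)
	klog.InfoS("created pod", "namespace", ns, "pod", name)
}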

View File

@ -22,6 +22,7 @@ import (
"time"
"github.com/davecgh/go-spew/spew"
"github.com/pkg/errors"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
@ -199,7 +200,18 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
return fmt.Errorf("deployment %q failed to create new replica set", deploymentName)
}
if err != nil {
return fmt.Errorf("error waiting for deployment %q (got %s / %s) and new replica set %q (got %s / %s) revision and image to match expectation (expected %s / %s): %v", deploymentName, deployment.Annotations[deploymentutil.RevisionAnnotation], deployment.Spec.Template.Spec.Containers[0].Image, newRS.Name, newRS.Annotations[deploymentutil.RevisionAnnotation], newRS.Spec.Template.Spec.Containers[0].Image, revision, image, err)
if deployment == nil {
return errors.Wrapf(err, "error creating new replica set for deployment %q ", deploymentName)
}
deploymentImage := ""
if len(deployment.Spec.Template.Spec.Containers) > 0 {
deploymentImage = deployment.Spec.Template.Spec.Containers[0].Image
}
newRSImage := ""
if len(newRS.Spec.Template.Spec.Containers) > 0 {
newRSImage = newRS.Spec.Template.Spec.Containers[0].Image
}
return fmt.Errorf("error waiting for deployment %q (got %s / %s) and new replica set %q (got %s / %s) revision and image to match expectation (expected %s / %s): %v", deploymentName, deployment.Annotations[deploymentutil.RevisionAnnotation], deploymentImage, newRS.Name, newRS.Annotations[deploymentutil.RevisionAnnotation], newRSImage, revision, image, err)
}
return nil
}
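
The rewritten failure path guards every dereference before formatting the message: a nil deployment short-circuits through errors.Wrapf, and each Containers slice is length-checked before indexing element 0, which previously could panic on a container-less pod template. A hypothetical helper distilling that guard:

// firstContainerImage is a hypothetical helper distilling the guard above:
// it returns "" rather than panicking when a template has no containers.
func firstContainerImage(spec v1.PodSpec) string {
	if len(spec.Containers) == 0 {
		return ""
	}
	return spec.Containers[0].Image
}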

View File

@ -1,12 +1,13 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- justaugustus
- luxas
- mkumatag
- ixdy
- listx
approvers:
- luxas
- mkumatag
- ixdy
- listx
emeritus_approvers:
- ixdy

View File

@ -31,14 +31,15 @@ type RegistryList struct {
DockerLibraryRegistry string `yaml:"dockerLibraryRegistry"`
DockerGluster string `yaml:"dockerGluster"`
E2eRegistry string `yaml:"e2eRegistry"`
E2eVolumeRegistry string `yaml:"e2eVolumeRegistry"`
PromoterE2eRegistry string `yaml:"promoterE2eRegistry"`
BuildImageRegistry string `yaml:"buildImageRegistry"`
InvalidRegistry string `yaml:"invalidRegistry"`
GcRegistry string `yaml:"gcRegistry"`
SigStorageRegistry string `yaml:"sigStorageRegistry"`
GcrReleaseRegistry string `yaml:"gcrReleaseRegistry"`
GoogleContainerRegistry string `yaml:"googleContainerRegistry"`
PrivateRegistry string `yaml:"privateRegistry"`
SampleRegistry string `yaml:"sampleRegistry"`
K8sCSI string `yaml:"k8sCSI"`
}
// Config holds an images registry, name, and version
@ -69,15 +70,15 @@ func initReg() RegistryList {
DockerLibraryRegistry: "docker.io/library",
DockerGluster: "docker.io/gluster",
E2eRegistry: "gcr.io/kubernetes-e2e-test-images",
// TODO: After the domain flip, this should instead be k8s.gcr.io/k8s-artifacts-prod/e2e-test-images
PromoterE2eRegistry: "us.gcr.io/k8s-artifacts-prod/e2e-test-images",
E2eVolumeRegistry: "gcr.io/kubernetes-e2e-test-images/volume",
PromoterE2eRegistry: "k8s.gcr.io/e2e-test-images",
BuildImageRegistry: "k8s.gcr.io/build-image",
InvalidRegistry: "invalid.com/invalid",
GcRegistry: "k8s.gcr.io",
SigStorageRegistry: "k8s.gcr.io/sig-storage",
GcrReleaseRegistry: "gcr.io/gke-release",
GoogleContainerRegistry: "gcr.io/google-containers",
PrivateRegistry: "gcr.io/k8s-authenticated-test",
SampleRegistry: "gcr.io/google-samples",
K8sCSI: "gcr.io/k8s-staging-csi",
}
repoList := os.Getenv("KUBE_TEST_REPO_LIST")
if repoList == "" {
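
Per the env-var check just above, initReg returns the compiled-in defaults unless KUBE_TEST_REPO_LIST names a YAML file whose fields override them, which is how air-gapped or mirrored test environments redirect every registry at once. A sketch of that override flow, assuming the file unmarshals straight into RegistryList via its yaml tags (the decoder choice and the registry host in the comment are assumptions):

package utils

import (
	"io/ioutil"
	"os"

	"sigs.k8s.io/yaml" // assumed decoder; the real file may use another YAML package
)

// loadRepoListOverride sketches the override flow: fields present in the
// file replace the compiled-in defaults, fields absent keep them.
func loadRepoListOverride(defaults RegistryList) (RegistryList, error) {
	path := os.Getenv("KUBE_TEST_REPO_LIST")
	if path == "" {
		return defaults, nil
	}
	// The file might contain, for example:
	//   gcRegistry: "registry.example.internal/k8s"
	//   sigStorageRegistry: "registry.example.internal/sig-storage"
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return defaults, err
	}
	err = yaml.Unmarshal(data, &defaults)
	return defaults, err
}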
@ -101,13 +102,14 @@ var (
dockerLibraryRegistry = registry.DockerLibraryRegistry
dockerGluster = registry.DockerGluster
e2eRegistry = registry.E2eRegistry
e2eVolumeRegistry = registry.E2eVolumeRegistry
promoterE2eRegistry = registry.PromoterE2eRegistry
buildImageRegistry = registry.BuildImageRegistry
gcAuthenticatedRegistry = registry.GcAuthenticatedRegistry
gcRegistry = registry.GcRegistry
sigStorageRegistry = registry.SigStorageRegistry
gcrReleaseRegistry = registry.GcrReleaseRegistry
googleContainerRegistry = registry.GoogleContainerRegistry
invalidRegistry = registry.InvalidRegistry
k8sCSI = registry.K8sCSI
// PrivateRegistry is an image repository that requires authentication
PrivateRegistry = registry.PrivateRegistry
sampleRegistry = registry.SampleRegistry
@ -117,8 +119,10 @@ var (
)
const (
// None is to be used for unset/default images
None = iota
// Agnhost image
Agnhost = iota
Agnhost
// AgnhostPrivate image
AgnhostPrivate
// APIServer image
@ -137,6 +141,8 @@ const (
CudaVectorAdd
// CudaVectorAdd2 image
CudaVectorAdd2
// DebianIptables Image
DebianIptables
// EchoServer image
EchoServer
// Etcd image
@ -155,10 +161,6 @@ const (
JessieDnsutils
// Kitten image
Kitten
// Mounttest image
Mounttest
// MounttestUser image
MounttestUser
// Nautilus image
Nautilus
// NFSProvisioner image
@ -188,8 +190,6 @@ const (
ResourceConsumer
// SdDummyExporter image
SdDummyExporter
// StartupScript image
StartupScript
// VolumeNFSServer image
VolumeNFSServer
// VolumeISCSIServer image
@ -202,7 +202,7 @@ const (
func initImageConfigs() map[int]Config {
configs := map[int]Config{}
configs[Agnhost] = Config{promoterE2eRegistry, "agnhost", "2.12"}
configs[Agnhost] = Config{promoterE2eRegistry, "agnhost", "2.21"}
configs[AgnhostPrivate] = Config{PrivateRegistry, "agnhost", "2.6"}
configs[AuthenticatedAlpine] = Config{gcAuthenticatedRegistry, "alpine", "3.7"}
configs[AuthenticatedWindowsNanoServer] = Config{gcAuthenticatedRegistry, "windows-nanoserver", "v1"}
@ -212,8 +212,9 @@ func initImageConfigs() map[int]Config {
configs[CheckMetadataConcealment] = Config{e2eRegistry, "metadata-concealment", "1.2"}
configs[CudaVectorAdd] = Config{e2eRegistry, "cuda-vector-add", "1.0"}
configs[CudaVectorAdd2] = Config{e2eRegistry, "cuda-vector-add", "2.0"}
configs[DebianIptables] = Config{buildImageRegistry, "debian-iptables", "buster-v1.3.0"}
configs[EchoServer] = Config{e2eRegistry, "echoserver", "2.2"}
configs[Etcd] = Config{gcRegistry, "etcd", "3.4.3"}
configs[Etcd] = Config{gcRegistry, "etcd", "3.4.13-0"}
configs[GlusterDynamicProvisioner] = Config{dockerGluster, "glusterdynamic-provisioner", "v1.0"}
configs[Httpd] = Config{dockerLibraryRegistry, "httpd", "2.4.38-alpine"}
configs[HttpdNew] = Config{dockerLibraryRegistry, "httpd", "2.4.39-alpine"}
@ -221,10 +222,8 @@ func initImageConfigs() map[int]Config {
configs[IpcUtils] = Config{e2eRegistry, "ipc-utils", "1.0"}
configs[JessieDnsutils] = Config{e2eRegistry, "jessie-dnsutils", "1.0"}
configs[Kitten] = Config{e2eRegistry, "kitten", "1.0"}
configs[Mounttest] = Config{e2eRegistry, "mounttest", "1.0"}
configs[MounttestUser] = Config{e2eRegistry, "mounttest-user", "1.0"}
configs[Nautilus] = Config{e2eRegistry, "nautilus", "1.0"}
configs[NFSProvisioner] = Config{k8sCSI, "nfs-provisioner", "v2.2.2"}
configs[NFSProvisioner] = Config{sigStorageRegistry, "nfs-provisioner", "v2.2.2"}
configs[Nginx] = Config{dockerLibraryRegistry, "nginx", "1.14-alpine"}
configs[NginxNew] = Config{dockerLibraryRegistry, "nginx", "1.15-alpine"}
configs[Nonewprivs] = Config{e2eRegistry, "nonewprivs", "1.0"}
@ -238,11 +237,10 @@ func initImageConfigs() map[int]Config {
configs[RegressionIssue74839] = Config{e2eRegistry, "regression-issue-74839-amd64", "1.0"}
configs[ResourceConsumer] = Config{e2eRegistry, "resource-consumer", "1.5"}
configs[SdDummyExporter] = Config{gcRegistry, "sd-dummy-exporter", "v0.2.0"}
configs[StartupScript] = Config{googleContainerRegistry, "startup-script", "v1"}
configs[VolumeNFSServer] = Config{e2eRegistry, "volume/nfs", "1.0"}
configs[VolumeISCSIServer] = Config{e2eRegistry, "volume/iscsi", "2.0"}
configs[VolumeGlusterServer] = Config{e2eRegistry, "volume/gluster", "1.0"}
configs[VolumeRBDServer] = Config{e2eRegistry, "volume/rbd", "1.0.1"}
configs[VolumeNFSServer] = Config{e2eVolumeRegistry, "nfs", "1.0"}
configs[VolumeISCSIServer] = Config{e2eVolumeRegistry, "iscsi", "2.0"}
configs[VolumeGlusterServer] = Config{e2eVolumeRegistry, "gluster", "1.0"}
configs[VolumeRBDServer] = Config{e2eVolumeRegistry, "rbd", "1.0.1"}
return configs
}
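
Tests resolve these constants to pullable references by joining registry, name, and version, conventionally through the package's GetE2EImage accessor (not shown in this hunk, so treat the exact name as an assumption). For example:

package imagetest

import (
	"fmt"

	imageutils "k8s.io/kubernetes/test/utils/image"
)

func printAgnhost() {
	// With the defaults pinned above and no repo-list override, this
	// prints "k8s.gcr.io/e2e-test-images/agnhost:2.21".
	fmt.Println(imageutils.GetE2EImage(imageutils.Agnhost))
}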
@ -280,8 +278,12 @@ func ReplaceRegistryInImageURL(imageURL string) (string, error) {
switch registryAndUser {
case "gcr.io/kubernetes-e2e-test-images":
registryAndUser = e2eRegistry
case "gcr.io/kubernetes-e2e-test-images/volume":
registryAndUser = e2eVolumeRegistry
case "k8s.gcr.io":
registryAndUser = gcRegistry
case "k8s.gcr.io/sig-storage":
registryAndUser = sigStorageRegistry
case "gcr.io/k8s-authenticated-test":
registryAndUser = PrivateRegistry
case "gcr.io/google-samples":
@ -290,8 +292,6 @@ func ReplaceRegistryInImageURL(imageURL string) (string, error) {
registryAndUser = gcrReleaseRegistry
case "docker.io/library":
registryAndUser = dockerLibraryRegistry
case "gcr.io/k8s-staging-csi":
registryAndUser = k8sCSI
default:
if countParts == 1 {
// We assume we found an image from docker hub library
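
The switch rewrites only the registry/user prefix of a canonical image URL, leaving image name and tag intact, which is how a KUBE_TEST_REPO_LIST override transparently redirects every test image. A hypothetical call under the new defaults (the prefix-splitting behavior is inferred from the cases shown, since the full function body is not in this hunk):

url, err := imageutils.ReplaceRegistryInImageURL("gcr.io/kubernetes-e2e-test-images/volume/nfs:1.0")
if err != nil {
	klog.Fatalf("rewrite failed: %v", err)
}
// The prefix matches the e2eVolumeRegistry case added above, so with the
// default registry list url is unchanged; with an override only the
// registry/user prefix is swapped, keeping "nfs:1.0" intact.
fmt.Println(url)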

View File

@ -50,7 +50,7 @@ import (
api "k8s.io/kubernetes/pkg/apis/core"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/klog"
"k8s.io/klog/v2"
)
const (
@ -635,37 +635,27 @@ func (config *RCConfig) create() error {
}
func (config *RCConfig) applyTo(template *v1.PodTemplateSpec) {
if config.Env != nil {
for k, v := range config.Env {
c := &template.Spec.Containers[0]
c.Env = append(c.Env, v1.EnvVar{Name: k, Value: v})
}
for k, v := range config.Env {
c := &template.Spec.Containers[0]
c.Env = append(c.Env, v1.EnvVar{Name: k, Value: v})
}
if config.Labels != nil {
for k, v := range config.Labels {
template.ObjectMeta.Labels[k] = v
}
for k, v := range config.Labels {
template.ObjectMeta.Labels[k] = v
}
if config.NodeSelector != nil {
template.Spec.NodeSelector = make(map[string]string)
for k, v := range config.NodeSelector {
template.Spec.NodeSelector[k] = v
}
template.Spec.NodeSelector = make(map[string]string)
for k, v := range config.NodeSelector {
template.Spec.NodeSelector[k] = v
}
if config.Tolerations != nil {
template.Spec.Tolerations = append([]v1.Toleration{}, config.Tolerations...)
}
if config.Ports != nil {
for k, v := range config.Ports {
c := &template.Spec.Containers[0]
c.Ports = append(c.Ports, v1.ContainerPort{Name: k, ContainerPort: int32(v)})
}
for k, v := range config.Ports {
c := &template.Spec.Containers[0]
c.Ports = append(c.Ports, v1.ContainerPort{Name: k, ContainerPort: int32(v)})
}
if config.HostPorts != nil {
for k, v := range config.HostPorts {
c := &template.Spec.Containers[0]
c.Ports = append(c.Ports, v1.ContainerPort{Name: k, ContainerPort: int32(v), HostPort: int32(v)})
}
for k, v := range config.HostPorts {
c := &template.Spec.Containers[0]
c.Ports = append(c.Ports, v1.ContainerPort{Name: k, ContainerPort: int32(v), HostPort: int32(v)})
}
if config.CpuLimit > 0 || config.MemLimit > 0 || config.GpuLimit > 0 {
template.Spec.Containers[0].Resources.Limits = v1.ResourceList{}
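
The guard removals in applyTo are safe because ranging over a nil map in Go executes zero iterations, so the guarded and bare loops are equivalent. One behavioral nuance is visible in the hunk, though: template.Spec.NodeSelector is now allocated unconditionally rather than only when config.NodeSelector is set. A minimal illustration of the nil-map property:

package utils

import v1 "k8s.io/api/core/v1"

// applyEnv is safe for env == nil: ranging over a nil map simply runs zero
// iterations, which is why the explicit nil guards above could be dropped.
func applyEnv(c *v1.Container, env map[string]string) {
	for k, v := range env {
		c.Env = append(c.Env, v1.EnvVar{Name: k, Value: v})
	}
}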
@ -941,7 +931,7 @@ type CountToStrategy struct {
}
type TestNodePreparer interface {
PrepareNodes() error
PrepareNodes(nextNodeIndex int) error
CleanupNodes() error
}
@ -980,22 +970,27 @@ func (*TrivialNodePrepareStrategy) CleanupDependentObjects(nodeName string, clie
}
type LabelNodePrepareStrategy struct {
LabelKey string
LabelValue string
LabelKey string
LabelValues []string
roundRobinIdx int
}
var _ PrepareNodeStrategy = &LabelNodePrepareStrategy{}
func NewLabelNodePrepareStrategy(labelKey string, labelValue string) *LabelNodePrepareStrategy {
func NewLabelNodePrepareStrategy(labelKey string, labelValues ...string) *LabelNodePrepareStrategy {
return &LabelNodePrepareStrategy{
LabelKey: labelKey,
LabelValue: labelValue,
LabelKey: labelKey,
LabelValues: labelValues,
}
}
func (s *LabelNodePrepareStrategy) PreparePatch(*v1.Node) []byte {
labelString := fmt.Sprintf("{\"%v\":\"%v\"}", s.LabelKey, s.LabelValue)
labelString := fmt.Sprintf("{\"%v\":\"%v\"}", s.LabelKey, s.LabelValues[s.roundRobinIdx])
patch := fmt.Sprintf(`{"metadata":{"labels":%v}}`, labelString)
s.roundRobinIdx++
if s.roundRobinIdx == len(s.LabelValues) {
s.roundRobinIdx = 0
}
return []byte(patch)
}
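
LabelNodePrepareStrategy now accepts a list of values and rotates through them: each PreparePatch call emits a strategic-merge patch carrying the next value and wraps roundRobinIdx at the end, spreading labels round-robin across the nodes being prepared. A trimmed illustration (the key and values are hypothetical; nil is passed because PreparePatch ignores its node argument in the code above):

s := NewLabelNodePrepareStrategy("zone", "a", "b")
for i := 0; i < 3; i++ {
	fmt.Printf("%s\n", s.PreparePatch(nil))
}
// Prints:
// {"metadata":{"labels":{"zone":"a"}}}
// {"metadata":{"labels":{"zone":"b"}}}
// {"metadata":{"labels":{"zone":"a"}}}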

View File

@ -19,7 +19,7 @@ package utils
import (
"io/ioutil"
"k8s.io/klog"
"k8s.io/klog/v2"
)
func MakeTempDirOrDie(prefix string, baseDir string) string {

View File

@ -38,9 +38,6 @@ const (
func RetryErrorCondition(condition wait.ConditionFunc) wait.ConditionFunc {
return func() (bool, error) {
done, err := condition()
if err != nil && IsRetryableAPIError(err) {
return false, nil
}
return done, err
}
}
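
After this change RetryErrorCondition forwards its condition's result untouched, so errors are no longer downgraded into extra retries; retryability decisions move into the condition itself. Decorators of this shape remain handy for cross-cutting concerns; a hypothetical logging variant under the same wait.ConditionFunc contract:

// logErrorCondition is a hypothetical decorator with the same shape as
// RetryErrorCondition: it forwards (done, err) unchanged but logs failures.
func logErrorCondition(condition wait.ConditionFunc) wait.ConditionFunc {
	return func() (bool, error) {
		done, err := condition()
		if err != nil {
			klog.Warningf("condition poll failed: %v", err)
		}
		return done, err
	}
}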
@ -52,7 +49,7 @@ func ScaleResourceWithRetries(scalesGetter scaleclient.ScalesGetter, namespace,
ResourceVersion: "",
}
waitForReplicas := scale.NewRetryParams(waitRetryInterval, waitRetryTimeout)
cond := RetryErrorCondition(scale.ScaleCondition(scaler, preconditions, namespace, name, size, nil, gvr))
cond := RetryErrorCondition(scale.ScaleCondition(scaler, preconditions, namespace, name, size, nil, gvr, false))
err := wait.PollImmediate(updateRetryInterval, updateRetryTimeout, cond)
if err == nil {
err = scale.WaitForScaleHasDesiredReplicas(scalesGetter, gvr.GroupResource(), name, namespace, size, waitForReplicas)