rebase: bump k8s.io/kubernetes from 1.26.2 to 1.27.2

Bumps [k8s.io/kubernetes](https://github.com/kubernetes/kubernetes) from 1.26.2 to 1.27.2.
- [Release notes](https://github.com/kubernetes/kubernetes/releases)
- [Commits](https://github.com/kubernetes/kubernetes/compare/v1.26.2...v1.27.2)

---
updated-dependencies:
- dependency-name: k8s.io/kubernetes
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Authored by dependabot[bot] on 2023-05-29 21:03:29 +00:00; committed by mergify[bot]
parent 0e79135419
commit 07b05616a0
1072 changed files with 208716 additions and 198880 deletions

View File

@ -1,272 +1,34 @@
rules:
- selectorRegexp: k8s[.]io/kubernetes/pkg/
# The core E2E framework is meant to be a normal Kubernetes client,
# which means that it shouldn't depend on internal
# code. But we are not there yet, so some exceptions
# have to be allowed. Over time the list of allowed
# packages should get shorter, not longer.
- selectorRegexp: ^k8s[.]io/kubernetes/pkg/
allowedPrefixes:
- k8s.io/kubernetes/pkg/api/legacyscheme
- k8s.io/kubernetes/pkg/api/service
- k8s.io/kubernetes/pkg/api/v1/pod
- k8s.io/kubernetes/pkg/api/v1/resource
- k8s.io/kubernetes/pkg/api/v1/service
- k8s.io/kubernetes/pkg/api/pod
- k8s.io/kubernetes/pkg/api/node
- k8s.io/kubernetes/pkg/api/persistentvolumeclaim
- k8s.io/kubernetes/pkg/apis/apps
- k8s.io/kubernetes/pkg/apis/apps/validation
- k8s.io/kubernetes/pkg/apis/autoscaling
- k8s.io/kubernetes/pkg/apis/batch
- k8s.io/kubernetes/pkg/apis/certificates
- k8s.io/kubernetes/pkg/apis/certificates/v1
- k8s.io/kubernetes/pkg/apis/core
- k8s.io/kubernetes/pkg/apis/core/helper
- k8s.io/kubernetes/pkg/apis/core/install
- k8s.io/kubernetes/pkg/apis/core/pods
- k8s.io/kubernetes/pkg/apis/core/v1
- k8s.io/kubernetes/pkg/apis/core/v1/helper
- k8s.io/kubernetes/pkg/apis/core/v1/helper/qos
- k8s.io/kubernetes/pkg/apis/core/validation
- k8s.io/kubernetes/pkg/apis/extensions
- k8s.io/kubernetes/pkg/apis/networking
- k8s.io/kubernetes/pkg/apis/node
- k8s.io/kubernetes/pkg/apis/policy
- k8s.io/kubernetes/pkg/apis/policy/validation
- k8s.io/kubernetes/pkg/apis/scheduling
- k8s.io/kubernetes/pkg/apis/storage/v1/util
- k8s.io/kubernetes/pkg/capabilities
- k8s.io/kubernetes/pkg/client/conditions
- k8s.io/kubernetes/pkg/cloudprovider/providers
- k8s.io/kubernetes/pkg/controller
- k8s.io/kubernetes/pkg/controller/deployment/util
- k8s.io/kubernetes/pkg/controller/nodelifecycle
- k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler
- k8s.io/kubernetes/pkg/controller/service
- k8s.io/kubernetes/pkg/controller/util/node
- k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util
- k8s.io/kubernetes/pkg/credentialprovider
- k8s.io/kubernetes/pkg/credentialprovider/aws
- k8s.io/kubernetes/pkg/credentialprovider/azure
- k8s.io/kubernetes/pkg/credentialprovider/gcp
- k8s.io/kubernetes/pkg/credentialprovider/secrets
- k8s.io/kubernetes/pkg/features
- k8s.io/kubernetes/pkg/fieldpath
- k8s.io/kubernetes/pkg/kubectl
- k8s.io/kubernetes/pkg/kubectl/apps
- k8s.io/kubernetes/pkg/kubectl/describe
- k8s.io/kubernetes/pkg/kubectl/describe/versioned
- k8s.io/kubernetes/pkg/kubectl/scheme
- k8s.io/kubernetes/pkg/kubectl/util
- k8s.io/kubernetes/pkg/kubectl/util/certificate
- k8s.io/kubernetes/pkg/kubectl/util/deployment
- k8s.io/kubernetes/pkg/kubectl/util/event
- k8s.io/kubernetes/pkg/kubectl/util/fieldpath
- k8s.io/kubernetes/pkg/kubectl/util/podutils
- k8s.io/kubernetes/pkg/kubectl/util/qos
- k8s.io/kubernetes/pkg/kubectl/util/rbac
- k8s.io/kubernetes/pkg/kubectl/util/resource
- k8s.io/kubernetes/pkg/kubectl/util/slice
- k8s.io/kubernetes/pkg/kubectl/util/storage
- k8s.io/kubernetes/pkg/kubelet
- k8s.io/kubernetes/pkg/kubelet/apis
- k8s.io/kubernetes/pkg/kubelet/apis/config
- k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1
- k8s.io/kubernetes/pkg/kubelet/cadvisor
- k8s.io/kubernetes/pkg/kubelet/certificate
- k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap
- k8s.io/kubernetes/pkg/kubelet/checkpoint
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors
- k8s.io/kubernetes/pkg/kubelet/cloudresource
- k8s.io/kubernetes/pkg/kubelet/cm
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/containermap
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology
- k8s.io/kubernetes/pkg/kubelet/cm/cpuset
- k8s.io/kubernetes/pkg/kubelet/cm/devicemanager
- k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint
- k8s.io/kubernetes/pkg/kubelet/cm/topologymanager
- k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask
- k8s.io/kubernetes/pkg/kubelet/cm/util
- k8s.io/kubernetes/pkg/kubelet/config
- k8s.io/kubernetes/pkg/kubelet/configmap
- k8s.io/kubernetes/pkg/kubelet/container
- k8s.io/kubernetes/pkg/kubelet/envvars
- k8s.io/kubernetes/pkg/kubelet/eviction
- k8s.io/kubernetes/pkg/kubelet/eviction/api
- k8s.io/kubernetes/pkg/kubelet/events
- k8s.io/kubernetes/pkg/kubelet/images
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic
- k8s.io/kubernetes/pkg/kubelet/kuberuntime
- k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs
- k8s.io/kubernetes/pkg/kubelet/leaky
- k8s.io/kubernetes/pkg/kubelet/lifecycle
- k8s.io/kubernetes/pkg/kubelet/logs
- k8s.io/kubernetes/pkg/kubelet/metrics
- k8s.io/kubernetes/pkg/kubelet/network/dns
- k8s.io/kubernetes/pkg/kubelet/nodelease
- k8s.io/kubernetes/pkg/kubelet/nodestatus
- k8s.io/kubernetes/pkg/kubelet/oom
- k8s.io/kubernetes/pkg/kubelet/pleg
- k8s.io/kubernetes/pkg/kubelet/pluginmanager
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/metrics
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler
- k8s.io/kubernetes/pkg/kubelet/pod
- k8s.io/kubernetes/pkg/kubelet/preemption
- k8s.io/kubernetes/pkg/kubelet/prober
- k8s.io/kubernetes/pkg/kubelet/prober/results
- k8s.io/kubernetes/pkg/kubelet/qos
- k8s.io/kubernetes/pkg/kubelet/remote
- k8s.io/kubernetes/pkg/kubelet/runtimeclass
- k8s.io/kubernetes/pkg/kubelet/server
- k8s.io/kubernetes/pkg/kubelet/server/metrics
- k8s.io/kubernetes/pkg/kubelet/server/portforward
- k8s.io/kubernetes/pkg/kubelet/server/remotecommand
- k8s.io/kubernetes/pkg/kubelet/server/stats
- k8s.io/kubernetes/pkg/kubelet/server/streaming
- k8s.io/kubernetes/pkg/kubelet/stats
- k8s.io/kubernetes/pkg/kubelet/stats/pidlimit
- k8s.io/kubernetes/pkg/kubelet/status
- k8s.io/kubernetes/pkg/kubelet/secret
- k8s.io/kubernetes/pkg/kubelet/sysctl
- k8s.io/kubernetes/pkg/kubelet/types
- k8s.io/kubernetes/pkg/kubelet/token
- k8s.io/kubernetes/pkg/kubelet/util
- k8s.io/kubernetes/pkg/kubelet/util/format
- k8s.io/kubernetes/pkg/kubelet/util/manager
- k8s.io/kubernetes/pkg/kubelet/util/store
- k8s.io/kubernetes/pkg/kubelet/volumemanager
- k8s.io/kubernetes/pkg/kubelet/volumemanager/cache
- k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics
- k8s.io/kubernetes/pkg/kubelet/volumemanager/populator
- k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler
- k8s.io/kubernetes/pkg/kubemark
- k8s.io/kubernetes/pkg/cluster/ports
- k8s.io/kubernetes/pkg/probe
- k8s.io/kubernetes/pkg/probe/exec
- k8s.io/kubernetes/pkg/probe/http
- k8s.io/kubernetes/pkg/probe/tcp
- k8s.io/kubernetes/pkg/proxy
- k8s.io/kubernetes/pkg/proxy/apis
- k8s.io/kubernetes/pkg/proxy/apis/config
- k8s.io/kubernetes/pkg/proxy/apis/config/scheme
- k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1
- k8s.io/kubernetes/pkg/proxy/apis/config/validation
- k8s.io/kubernetes/pkg/proxy/config
- k8s.io/kubernetes/pkg/proxy/healthcheck
- k8s.io/kubernetes/pkg/proxy/iptables
- k8s.io/kubernetes/pkg/proxy/ipvs
- k8s.io/kubernetes/pkg/proxy/metaproxier
- k8s.io/kubernetes/pkg/proxy/metrics
- k8s.io/kubernetes/pkg/proxy/util
- k8s.io/kubernetes/pkg/registry/core/service/allocator
- k8s.io/kubernetes/pkg/registry/core/service/portallocator
- k8s.io/kubernetes/pkg/scheduler/api
- k8s.io/kubernetes/pkg/scheduler/framework
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources
- k8s.io/kubernetes/pkg/scheduler/framework/runtime
- k8s.io/kubernetes/pkg/scheduler/internal/heap
- k8s.io/kubernetes/pkg/scheduler/internal/parallelize
- k8s.io/kubernetes/pkg/scheduler/internal/queue
- k8s.io/kubernetes/pkg/scheduler/listers
- k8s.io/kubernetes/pkg/scheduler/testing
- k8s.io/kubernetes/pkg/scheduler/metrics
- k8s.io/kubernetes/pkg/scheduler/nodeinfo
- k8s.io/kubernetes/pkg/scheduler/util
- k8s.io/kubernetes/pkg/scheduler/volumebinder
- k8s.io/kubernetes/pkg/scheduler
- k8s.io/kubernetes/pkg/scheduler/profile
- k8s.io/kubernetes/pkg/scheduler/testing
- k8s.io/kubernetes/pkg/security/apparmor
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/util
- k8s.io/kubernetes/pkg/securitycontext
- k8s.io/kubernetes/pkg/serviceaccount
- k8s.io/kubernetes/pkg/util/async
- k8s.io/kubernetes/pkg/util/bandwidth
- k8s.io/kubernetes/pkg/util/config
- k8s.io/kubernetes/pkg/util/configz
- k8s.io/kubernetes/pkg/util/conntrack
- k8s.io/kubernetes/pkg/util/env
- k8s.io/kubernetes/pkg/util/filesystem
- k8s.io/kubernetes/pkg/util/flag
- k8s.io/kubernetes/pkg/util/flock
- k8s.io/kubernetes/pkg/util/goroutinemap
- k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff
- k8s.io/kubernetes/pkg/util/hash
- k8s.io/kubernetes/pkg/util/ipset
- k8s.io/kubernetes/pkg/util/iptables
- k8s.io/kubernetes/pkg/util/ipvs
- k8s.io/kubernetes/pkg/util/labels
- k8s.io/kubernetes/pkg/util/node
- k8s.io/kubernetes/pkg/util/oom
- k8s.io/kubernetes/pkg/util/parsers
- k8s.io/kubernetes/pkg/util/pod
- k8s.io/kubernetes/pkg/util/procfs
- k8s.io/kubernetes/pkg/util/removeall
- k8s.io/kubernetes/pkg/util/resizefs
- k8s.io/kubernetes/pkg/util/rlimit
- k8s.io/kubernetes/pkg/util/selinux
- k8s.io/kubernetes/pkg/util/slice
- k8s.io/kubernetes/pkg/util/sysctl
- k8s.io/kubernetes/pkg/util/system
- k8s.io/kubernetes/pkg/util/tail
- k8s.io/kubernetes/pkg/util/taints
- k8s.io/kubernetes/pkg/volume
- k8s.io/kubernetes/pkg/volume/util
- k8s.io/kubernetes/pkg/volume/util/fs
- k8s.io/kubernetes/pkg/volume/util/fsquota
- k8s.io/kubernetes/pkg/volume/util/recyclerclient
- k8s.io/kubernetes/pkg/volume/util/subpath
- k8s.io/kubernetes/pkg/volume/util/types
- k8s.io/kubernetes/pkg/volume/util/volumepathhandler
# TODO: I have no idea why import-boss --include-test-files is yelling about these for k8s.io/kubernetes/test/e2e/framework/providers/kubemark
- k8s.io/kubernetes/pkg/apis/authentication
- k8s.io/kubernetes/pkg/apis/authentication/v1
- k8s.io/kubernetes/pkg/apis/certificates/v1beta1
- k8s.io/kubernetes/pkg/apis/storage/v1
- k8s.io/kubernetes/pkg/scheduler/internal/cache
- selectorRegexp: k8s[.]io/kubernetes/test/
allowedPrefixes:
- k8s.io/kubernetes/test/e2e/common
- k8s.io/kubernetes/test/e2e/framework
- k8s.io/kubernetes/test/e2e/framework/auth
- k8s.io/kubernetes/test/e2e/framework/ginkgowrapper
- k8s.io/kubernetes/test/e2e/framework/kubectl
- k8s.io/kubernetes/test/e2e/framework/log
- k8s.io/kubernetes/test/e2e/framework/metrics
- k8s.io/kubernetes/test/e2e/framework/network
- k8s.io/kubernetes/test/e2e/framework/node
- k8s.io/kubernetes/test/e2e/framework/pod
- k8s.io/kubernetes/test/e2e/framework/rc
- k8s.io/kubernetes/test/e2e/framework/resource
- k8s.io/kubernetes/test/e2e/framework/service
- k8s.io/kubernetes/test/e2e/framework/ssh
- k8s.io/kubernetes/test/e2e/framework/testfiles
- k8s.io/kubernetes/test/e2e/framework/websocket
- k8s.io/kubernetes/test/e2e/manifest
- k8s.io/kubernetes/test/e2e/perftype
- k8s.io/kubernetes/test/e2e/storage/utils
- k8s.io/kubernetes/test/e2e/system
- k8s.io/kubernetes/test/utils
- k8s.io/kubernetes/test/utils/image
# TODO: why is this here?
- selectorRegexp: k8s[.]io/kubernetes/third_party/
allowedPrefixes:
- k8s.io/kubernetes/third_party/forked/golang/expansion
- k8s.io/kubernetes/pkg/kubelet/apis/
# The following packages are okay to use:
#
# public API
- selectorRegexp: ^k8s[.]io/(api|apimachinery|client-go|component-base|klog|pod-security-admission|utils)/|^[a-z]+(/|$)|github.com/onsi/(ginkgo|gomega)|^k8s[.]io/kubernetes/test/(e2e/framework/internal/|utils)
allowedPrefixes: [ "" ]
# stdlib
- selectorRegexp: ^[a-z]+(/|$)
allowedPrefixes: [ "" ]
# Ginkgo + Gomega.
- selectorRegexp: github.com/onsi/(ginkgo|gomega)|^k8s[.]io/kubernetes/test/(e2e/framework/internal/|utils)
allowedPrefixes: [ "" ]
# some of the shared test helpers (but not E2E sub-packages!)
- selectorRegexp: ^k8s[.]io/kubernetes/test/(e2e/framework/internal/|utils)
allowedPrefixes: [ "" ]
# Everything else isn't.
#
# In particular importing any test/e2e/framework/* package would be a
# violation (sub-packages get to use the framework, not the other way
# around).
- selectorRegexp: .

View File

@ -1,7 +1,6 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- sig-testing-approvers
- andrewsykim
- fabriziopandini
- pohly

View File

@ -57,7 +57,7 @@ ginkgo.AfterEach(func() {
# Do something with f.ClientSet.
}
ginkgo.It("test something", func() {
ginkgo.It("test something", func(ctx context.Context) {
# The actual test.
})
```
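The new signature lets Ginkgo inject a per-spec context that tests forward to client-go calls instead of `context.TODO()`. A minimal sketch of a test body using it (the pod listing and log message are illustrative, not taken from the diff):

```go
ginkgo.It("lists pods in the test namespace", func(ctx context.Context) {
	// ctx comes from Ginkgo and is cancelled when the spec times out or is interrupted.
	pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{})
	framework.ExpectNoError(err)
	framework.Logf("found %d pods", len(pods.Items))
})
```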

View File

@ -0,0 +1,9 @@
# This E2E framework sub-package is currently allowed to use arbitrary
# dependencies, therefore we need to override the restrictions from
# the parent .import-restrictions file.
#
# At some point it may become useful to also check this package's
# dependencies more carefully.
rules:
- selectorRegexp: ""
allowedPrefixes: [ "" ]

View File

@ -0,0 +1,9 @@
# This E2E framework sub-package is currently allowed to use arbitrary
# dependencies, therefore we need to override the restrictions from
# the parent .import-restrictions file.
#
# At some point it may become useful to also check this package's
# dependencies more carefully.
rules:
- selectorRegexp: ""
allowedPrefixes: [ "" ]

View File

@ -58,25 +58,25 @@ func dumpEventsInNamespace(eventsLister EventsLister, namespace string) {
}
// DumpAllNamespaceInfo dumps events, pods and nodes information in the given namespace.
func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
func DumpAllNamespaceInfo(ctx context.Context, c clientset.Interface, namespace string) {
dumpEventsInNamespace(func(opts metav1.ListOptions, ns string) (*v1.EventList, error) {
return c.CoreV1().Events(ns).List(context.TODO(), opts)
return c.CoreV1().Events(ns).List(ctx, opts)
}, namespace)
e2epod.DumpAllPodInfoForNamespace(c, namespace, framework.TestContext.ReportDir)
e2epod.DumpAllPodInfoForNamespace(ctx, c, namespace, framework.TestContext.ReportDir)
// If cluster is large, then the following logs are basically useless, because:
// 1. it takes tens of minutes or hours to grab all of them
// 2. there are so many of them that working with them are mostly impossible
// So we dump them only if the cluster is relatively small.
maxNodesForDump := framework.TestContext.MaxNodesToGather
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
framework.Logf("unable to fetch node list: %v", err)
return
}
if len(nodes.Items) <= maxNodesForDump {
dumpAllNodeInfo(c, nodes)
dumpAllNodeInfo(ctx, c, nodes)
} else {
framework.Logf("skipping dumping cluster info - cluster too large")
}
@ -95,31 +95,31 @@ func (o byFirstTimestamp) Less(i, j int) bool {
return o[i].FirstTimestamp.Before(&o[j].FirstTimestamp)
}
func dumpAllNodeInfo(c clientset.Interface, nodes *v1.NodeList) {
func dumpAllNodeInfo(ctx context.Context, c clientset.Interface, nodes *v1.NodeList) {
names := make([]string, len(nodes.Items))
for ix := range nodes.Items {
names[ix] = nodes.Items[ix].Name
}
DumpNodeDebugInfo(c, names, framework.Logf)
DumpNodeDebugInfo(ctx, c, names, framework.Logf)
}
// DumpNodeDebugInfo dumps debug information of the given nodes.
func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) {
func DumpNodeDebugInfo(ctx context.Context, c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) {
for _, n := range nodeNames {
logFunc("\nLogging node info for node %v", n)
node, err := c.CoreV1().Nodes().Get(context.TODO(), n, metav1.GetOptions{})
node, err := c.CoreV1().Nodes().Get(ctx, n, metav1.GetOptions{})
if err != nil {
logFunc("Error getting node info %v", err)
}
logFunc("Node Info: %v", node)
logFunc("\nLogging kubelet events for node %v", n)
for _, e := range getNodeEvents(c, n) {
for _, e := range getNodeEvents(ctx, c, n) {
logFunc("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v",
e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject)
}
logFunc("\nLogging pods the kubelet thinks is on node %v", n)
podList, err := getKubeletPods(c, n)
podList, err := getKubeletPods(ctx, c, n)
if err != nil {
logFunc("Unable to retrieve kubelet pods for node %v: %v", n, err)
continue
@ -135,13 +135,14 @@ func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(f
c.Name, c.Ready, c.RestartCount)
}
}
e2emetrics.HighLatencyKubeletOperations(c, 10*time.Second, n, logFunc)
_, err = e2emetrics.HighLatencyKubeletOperations(ctx, c, 10*time.Second, n, logFunc)
framework.ExpectNoError(err)
// TODO: Log node resource info
}
}
// getKubeletPods retrieves the list of pods on the kubelet.
func getKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) {
func getKubeletPods(ctx context.Context, c clientset.Interface, node string) (*v1.PodList, error) {
var client restclient.Result
finished := make(chan struct{}, 1)
go func() {
@ -151,7 +152,7 @@ func getKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) {
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", node, framework.KubeletPort)).
Suffix("pods").
Do(context.TODO())
Do(ctx)
finished <- struct{}{}
}()
@ -170,7 +171,7 @@ func getKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) {
// logNodeEvents logs kubelet events from the given node. This includes kubelet
// restart and node unhealthy events. Note that listing events like this will mess
// with latency metrics, beware of calling it during a test.
func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
func getNodeEvents(ctx context.Context, c clientset.Interface, nodeName string) []v1.Event {
selector := fields.Set{
"involvedObject.kind": "Node",
"involvedObject.name": nodeName,
@ -178,7 +179,7 @@ func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
"source": "kubelet",
}.AsSelector().String()
options := metav1.ListOptions{FieldSelector: selector}
events, err := c.CoreV1().Events(metav1.NamespaceSystem).List(context.TODO(), options)
events, err := c.CoreV1().Events(metav1.NamespaceSystem).List(ctx, options)
if err != nil {
framework.Logf("Unexpected error retrieving node events %v", err)
return []v1.Event{}

View File

@ -18,6 +18,7 @@ package debug
import (
"bytes"
"context"
"fmt"
"strconv"
"strings"
@ -156,8 +157,8 @@ func (d *LogsSizeData) addNewData(ip, path string, timestamp time.Time, size int
}
// NewLogsVerifier creates a new LogsSizeVerifier which will stop when stopChannel is closed
func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVerifier {
nodeAddresses, err := e2essh.NodeSSHHosts(c)
func NewLogsVerifier(ctx context.Context, c clientset.Interface) *LogsSizeVerifier {
nodeAddresses, err := e2essh.NodeSSHHosts(ctx, c)
framework.ExpectNoError(err)
instanceAddress := framework.APIAddress() + ":22"
@ -166,7 +167,6 @@ func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVeri
verifier := &LogsSizeVerifier{
client: c,
stopChannel: stopChannel,
data: prepareData(instanceAddress, nodeAddresses),
masterAddress: instanceAddress,
nodeAddresses: nodeAddresses,
@ -177,7 +177,6 @@ func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVeri
verifier.wg.Add(workersNo)
for i := 0; i < workersNo; i++ {
workers[i] = &LogSizeGatherer{
stopChannel: stopChannel,
data: verifier.data,
wg: &verifier.wg,
workChannel: workChannel,
@ -207,7 +206,7 @@ func (s *LogsSizeVerifier) GetSummary() *LogsSizeDataSummary {
}
// Run starts log size gathering. It starts a gorouting for every worker and then blocks until stopChannel is closed
func (s *LogsSizeVerifier) Run() {
func (s *LogsSizeVerifier) Run(ctx context.Context) {
s.workChannel <- WorkItem{
ip: s.masterAddress,
paths: masterLogsToCheck,
@ -221,15 +220,15 @@ func (s *LogsSizeVerifier) Run() {
}
}
for _, worker := range s.workers {
go worker.Run()
go worker.Run(ctx)
}
<-s.stopChannel
s.wg.Wait()
}
// Run starts log size gathering.
func (g *LogSizeGatherer) Run() {
for g.Work() {
func (g *LogSizeGatherer) Run(ctx context.Context) {
for g.Work(ctx) {
}
}
@ -245,7 +244,7 @@ func (g *LogSizeGatherer) pushWorkItem(workItem WorkItem) {
// Work does a single unit of work: tries to take out a WorkItem from the queue, ssh-es into a given machine,
// gathers data, writes it to the shared <data> map, and creates a gorouting which reinserts work item into
// the queue with a <pollingPeriod> delay. Returns false if worker should exit.
func (g *LogSizeGatherer) Work() bool {
func (g *LogSizeGatherer) Work(ctx context.Context) bool {
var workItem WorkItem
select {
case <-g.stopChannel:
@ -254,6 +253,7 @@ func (g *LogSizeGatherer) Work() bool {
case workItem = <-g.workChannel:
}
sshResult, err := e2essh.SSH(
ctx,
fmt.Sprintf("ls -l %v | awk '{print $9, $5}' | tr '\n' ' '", strings.Join(workItem.paths, " ")),
workItem.ip,
framework.TestContext.Provider,

View File

@ -181,10 +181,10 @@ type resourceGatherWorker struct {
printVerboseLogs bool
}
func (w *resourceGatherWorker) singleProbe() {
func (w *resourceGatherWorker) singleProbe(ctx context.Context) {
data := make(ResourceUsagePerContainer)
if w.inKubemark {
kubemarkData := getKubemarkMasterComponentsResourceUsage()
kubemarkData := getKubemarkMasterComponentsResourceUsage(ctx)
if kubemarkData == nil {
return
}
@ -319,22 +319,26 @@ func removeUint64Ptr(ptr *uint64) uint64 {
return *ptr
}
func (w *resourceGatherWorker) gather(initialSleep time.Duration) {
func (w *resourceGatherWorker) gather(ctx context.Context, initialSleep time.Duration) {
defer utilruntime.HandleCrash()
defer w.wg.Done()
defer framework.Logf("Closing worker for %v", w.nodeName)
defer func() { w.finished = true }()
select {
case <-time.After(initialSleep):
w.singleProbe()
w.singleProbe(ctx)
for {
select {
case <-time.After(w.resourceDataGatheringPeriod):
w.singleProbe()
w.singleProbe(ctx)
case <-ctx.Done():
return
case <-w.stopCh:
return
}
}
case <-ctx.Done():
return
case <-w.stopCh:
return
}
@ -373,11 +377,11 @@ const (
// nodeHasControlPlanePods returns true if specified node has control plane pods
// (kube-scheduler and/or kube-controller-manager).
func nodeHasControlPlanePods(c clientset.Interface, nodeName string) (bool, error) {
func nodeHasControlPlanePods(ctx context.Context, c clientset.Interface, nodeName string) (bool, error) {
regKubeScheduler := regexp.MustCompile("kube-scheduler-.*")
regKubeControllerManager := regexp.MustCompile("kube-controller-manager-.*")
podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), metav1.ListOptions{
podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(ctx, metav1.ListOptions{
FieldSelector: fields.OneTermEqualSelector("spec.nodeName", nodeName).String(),
})
if err != nil {
@ -395,7 +399,7 @@ func nodeHasControlPlanePods(c clientset.Interface, nodeName string) (bool, erro
}
// NewResourceUsageGatherer returns a new ContainerResourceGatherer.
func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*ContainerResourceGatherer, error) {
func NewResourceUsageGatherer(ctx context.Context, c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*ContainerResourceGatherer, error) {
g := ContainerResourceGatherer{
client: c,
stopCh: make(chan struct{}),
@ -420,7 +424,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
// Tracks kube-system pods if no valid PodList is passed in.
var err error
if pods == nil {
pods, err = c.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{})
pods, err = c.CoreV1().Pods("kube-system").List(ctx, metav1.ListOptions{})
if err != nil {
framework.Logf("Error while listing Pods: %v", err)
return nil, err
@ -429,7 +433,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
dnsNodes := make(map[string]bool)
for _, pod := range pods.Items {
if options.Nodes == MasterNodes {
isControlPlane, err := nodeHasControlPlanePods(c, pod.Spec.NodeName)
isControlPlane, err := nodeHasControlPlanePods(ctx, c, pod.Spec.NodeName)
if err != nil {
return nil, err
}
@ -438,7 +442,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
}
}
if options.Nodes == MasterAndDNSNodes {
isControlPlane, err := nodeHasControlPlanePods(c, pod.Spec.NodeName)
isControlPlane, err := nodeHasControlPlanePods(ctx, c, pod.Spec.NodeName)
if err != nil {
return nil, err
}
@ -456,14 +460,14 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
dnsNodes[pod.Spec.NodeName] = true
}
}
nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
nodeList, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
framework.Logf("Error while listing Nodes: %v", err)
return nil, err
}
for _, node := range nodeList.Items {
isControlPlane, err := nodeHasControlPlanePods(c, node.Name)
isControlPlane, err := nodeHasControlPlanePods(ctx, c, node.Name)
if err != nil {
return nil, err
}
@ -491,14 +495,14 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
// StartGatheringData starts a stat gathering worker blocks for each node to track,
// and blocks until StopAndSummarize is called.
func (g *ContainerResourceGatherer) StartGatheringData() {
func (g *ContainerResourceGatherer) StartGatheringData(ctx context.Context) {
if len(g.workers) == 0 {
return
}
delayPeriod := g.options.ResourceDataGatheringPeriod / time.Duration(len(g.workers))
delay := time.Duration(0)
for i := range g.workers {
go g.workers[i].gather(delay)
go g.workers[i].gather(ctx, delay)
delay += delayPeriod
}
g.workerWg.Wait()
@ -603,8 +607,8 @@ type kubemarkResourceUsage struct {
CPUUsageInCores float64
}
func getMasterUsageByPrefix(prefix string) (string, error) {
sshResult, err := e2essh.SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), framework.APIAddress()+":22", framework.TestContext.Provider)
func getMasterUsageByPrefix(ctx context.Context, prefix string) (string, error) {
sshResult, err := e2essh.SSH(ctx, fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), framework.APIAddress()+":22", framework.TestContext.Provider)
if err != nil {
return "", err
}
@ -612,10 +616,10 @@ func getMasterUsageByPrefix(prefix string) (string, error) {
}
// getKubemarkMasterComponentsResourceUsage returns the resource usage of kubemark which contains multiple combinations of cpu and memory usage for each pod name.
func getKubemarkMasterComponentsResourceUsage() map[string]*kubemarkResourceUsage {
func getKubemarkMasterComponentsResourceUsage(ctx context.Context) map[string]*kubemarkResourceUsage {
result := make(map[string]*kubemarkResourceUsage)
// Get kubernetes component resource usage
sshResult, err := getMasterUsageByPrefix("kube")
sshResult, err := getMasterUsageByPrefix(ctx, "kube")
if err != nil {
framework.Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
return nil
@ -633,7 +637,7 @@ func getKubemarkMasterComponentsResourceUsage() map[string]*kubemarkResourceUsag
}
}
// Get etcd resource usage
sshResult, err = getMasterUsageByPrefix("bin/etcd")
sshResult, err = getMasterUsageByPrefix(ctx, "bin/etcd")
if err != nil {
framework.Logf("Error when trying to SSH to master machine. Skipping probe")
return nil

View File

@ -17,23 +17,300 @@ limitations under the License.
package framework
import (
"context"
"errors"
"fmt"
"strings"
"time"
ginkgotypes "github.com/onsi/ginkgo/v2/types"
"github.com/onsi/gomega"
"github.com/onsi/gomega/format"
"github.com/onsi/gomega/types"
)
// MakeMatcher builds a gomega.Matcher based on a single callback function.
// That function is passed the actual value that is to be checked.
// There are three possible outcomes of the check:
// - An error is returned, which then is converted into a failure
// by Gomega.
// - A non-nil failure function is returned, which then is called
// by Gomega once a failure string is needed. This is useful
// to avoid unnecessarily preparing a failure string for intermediate
// failures in Eventually or Consistently.
// - Both function and error are nil, which means that the check
// succeeded.
func MakeMatcher[T interface{}](match func(actual T) (failure func() string, err error)) types.GomegaMatcher {
return &matcher[T]{
match: match,
}
}
type matcher[T interface{}] struct {
match func(actual T) (func() string, error)
failure func() string
}
func (m *matcher[T]) Match(actual interface{}) (success bool, err error) {
if actual, ok := actual.(T); ok {
failure, err := m.match(actual)
if err != nil {
return false, err
}
m.failure = failure
if failure != nil {
return false, nil
}
return true, nil
}
var empty T
return false, gomega.StopTrying(fmt.Sprintf("internal error: expected %T, got:\n%s", empty, format.Object(actual, 1)))
}
func (m *matcher[T]) FailureMessage(actual interface{}) string {
return m.failure()
}
func (m matcher[T]) NegatedFailureMessage(actual interface{}) string {
return m.failure()
}
var _ types.GomegaMatcher = &matcher[string]{}
// Gomega returns an interface that can be used like gomega to express
// assertions. The difference is that failed assertions are returned as an
// error:
//
// if err := Gomega().Expect(pod.Status.Phase).To(gomega.BeEqual(v1.Running)); err != nil {
// return fmt.Errorf("test pod not running: %w", err)
// }
//
// This error can get wrapped to provide additional context for the
// failure. The test then should use ExpectNoError to turn a non-nil error into
// a failure.
//
// When using this approach, there is no need for call offsets and extra
// descriptions for the Expect call because the call stack will be dumped when
// ExpectNoError is called and the additional description(s) can be added by
// wrapping the error.
//
// Asynchronous assertions use the framework's Poll interval and PodStart timeout
// by default.
func Gomega() GomegaInstance {
return gomegaInstance{}
}
type GomegaInstance interface {
Expect(actual interface{}) Assertion
Eventually(ctx context.Context, args ...interface{}) AsyncAssertion
Consistently(ctx context.Context, args ...interface{}) AsyncAssertion
}
type Assertion interface {
Should(matcher types.GomegaMatcher) error
ShouldNot(matcher types.GomegaMatcher) error
To(matcher types.GomegaMatcher) error
ToNot(matcher types.GomegaMatcher) error
NotTo(matcher types.GomegaMatcher) error
}
type AsyncAssertion interface {
Should(matcher types.GomegaMatcher) error
ShouldNot(matcher types.GomegaMatcher) error
WithTimeout(interval time.Duration) AsyncAssertion
WithPolling(interval time.Duration) AsyncAssertion
}
type gomegaInstance struct{}
var _ GomegaInstance = gomegaInstance{}
func (g gomegaInstance) Expect(actual interface{}) Assertion {
return assertion{actual: actual}
}
func (g gomegaInstance) Eventually(ctx context.Context, args ...interface{}) AsyncAssertion {
return newAsyncAssertion(ctx, args, false)
}
func (g gomegaInstance) Consistently(ctx context.Context, args ...interface{}) AsyncAssertion {
return newAsyncAssertion(ctx, args, true)
}
func newG() (*FailureError, gomega.Gomega) {
var failure FailureError
g := gomega.NewGomega(func(msg string, callerSkip ...int) {
failure = FailureError{
msg: msg,
}
})
return &failure, g
}
type assertion struct {
actual interface{}
}
func (a assertion) Should(matcher types.GomegaMatcher) error {
err, g := newG()
if !g.Expect(a.actual).Should(matcher) {
err.backtrace()
return *err
}
return nil
}
func (a assertion) ShouldNot(matcher types.GomegaMatcher) error {
err, g := newG()
if !g.Expect(a.actual).ShouldNot(matcher) {
err.backtrace()
return *err
}
return nil
}
func (a assertion) To(matcher types.GomegaMatcher) error {
err, g := newG()
if !g.Expect(a.actual).To(matcher) {
err.backtrace()
return *err
}
return nil
}
func (a assertion) ToNot(matcher types.GomegaMatcher) error {
err, g := newG()
if !g.Expect(a.actual).ToNot(matcher) {
err.backtrace()
return *err
}
return nil
}
func (a assertion) NotTo(matcher types.GomegaMatcher) error {
err, g := newG()
if !g.Expect(a.actual).NotTo(matcher) {
err.backtrace()
return *err
}
return nil
}
type asyncAssertion struct {
ctx context.Context
args []interface{}
timeout time.Duration
interval time.Duration
consistently bool
}
func newAsyncAssertion(ctx context.Context, args []interface{}, consistently bool) asyncAssertion {
return asyncAssertion{
ctx: ctx,
args: args,
// PodStart is used as default because waiting for a pod is the
// most common operation.
timeout: TestContext.timeouts.PodStart,
interval: TestContext.timeouts.Poll,
}
}
func (a asyncAssertion) newAsync() (*FailureError, gomega.AsyncAssertion) {
err, g := newG()
var assertion gomega.AsyncAssertion
if a.consistently {
assertion = g.Consistently(a.ctx, a.args...)
} else {
assertion = g.Eventually(a.ctx, a.args...)
}
assertion = assertion.WithTimeout(a.timeout).WithPolling(a.interval)
return err, assertion
}
func (a asyncAssertion) Should(matcher types.GomegaMatcher) error {
err, assertion := a.newAsync()
if !assertion.Should(matcher) {
err.backtrace()
return *err
}
return nil
}
func (a asyncAssertion) ShouldNot(matcher types.GomegaMatcher) error {
err, assertion := a.newAsync()
if !assertion.ShouldNot(matcher) {
err.backtrace()
return *err
}
return nil
}
func (a asyncAssertion) WithTimeout(timeout time.Duration) AsyncAssertion {
a.timeout = timeout
return a
}
func (a asyncAssertion) WithPolling(interval time.Duration) AsyncAssertion {
a.interval = interval
return a
}
// FailureError is an error where the error string is meant to be passed to
// ginkgo.Fail directly, i.e. adding some prefix like "unexpected error" is not
// necessary. It is also not necessary to dump the error struct.
type FailureError struct {
msg string
fullStackTrace string
}
func (f FailureError) Error() string {
return f.msg
}
func (f FailureError) Backtrace() string {
return f.fullStackTrace
}
func (f FailureError) Is(target error) bool {
return target == ErrFailure
}
func (f *FailureError) backtrace() {
f.fullStackTrace = ginkgotypes.NewCodeLocationWithStackTrace(2).FullStackTrace
}
// ErrFailure is an empty error that can be wrapped to indicate that an error
// is a FailureError. It can also be used to test for a FailureError:.
//
// return fmt.Errorf("some problem%w", ErrFailure)
// ...
// err := someOperation()
// if errors.Is(err, ErrFailure) {
// ...
// }
var ErrFailure error = FailureError{}
// ExpectEqual expects the specified two are the same, otherwise an exception raises
//
// Deprecated: use gomega.Expect().To(gomega.BeEqual())
func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) {
gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...)
}
// ExpectNotEqual expects the specified two are not the same, otherwise an exception raises
//
// Deprecated: use gomega.Expect().ToNot(gomega.BeEqual())
func ExpectNotEqual(actual interface{}, extra interface{}, explain ...interface{}) {
gomega.ExpectWithOffset(1, actual).NotTo(gomega.Equal(extra), explain...)
}
// ExpectError expects an error happens, otherwise an exception raises
//
// Deprecated: use gomega.Expect().To(gomega.HaveOccurred()) or (better!) check
// specifically for the error that is expected with
// gomega.Expect().To(gomega.MatchError(gomega.ContainSubstring()))
func ExpectError(err error, explain ...interface{}) {
gomega.ExpectWithOffset(1, err).To(gomega.HaveOccurred(), explain...)
}
@ -72,21 +349,37 @@ func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
// failures at the same code line might not be matched in
// https://go.k8s.io/triage because the error details are too
// different.
Logf("Unexpected error: %s\n%s", prefix, format.Object(err, 1))
//
// Some errors include all relevant information in the Error
// string. For those we can skip the redundant log message.
// For our own failures we only log the additional stack backtrace
// because it is not included in the failure message.
var failure FailureError
if errors.As(err, &failure) && failure.Backtrace() != "" {
Logf("Failed inside E2E framework:\n %s", strings.ReplaceAll(failure.Backtrace(), "\n", "\n "))
} else if !errors.Is(err, ErrFailure) {
Logf("Unexpected error: %s\n%s", prefix, format.Object(err, 1))
}
Fail(prefix+err.Error(), 1+offset)
}
// ExpectConsistOf expects actual contains precisely the extra elements. The ordering of the elements does not matter.
//
// Deprecated: use gomega.Expect().To(gomega.ConsistOf()) instead
func ExpectConsistOf(actual interface{}, extra interface{}, explain ...interface{}) {
gomega.ExpectWithOffset(1, actual).To(gomega.ConsistOf(extra), explain...)
}
// ExpectHaveKey expects the actual map has the key in the keyset
//
// Deprecated: use gomega.Expect().To(gomega.HaveKey()) instead
func ExpectHaveKey(actual interface{}, key interface{}, explain ...interface{}) {
gomega.ExpectWithOffset(1, actual).To(gomega.HaveKey(key), explain...)
}
// ExpectEmpty expects actual is empty
//
// Deprecated: use gomega.Expect().To(gomega.BeEmpty()) instead
func ExpectEmpty(actual interface{}, explain ...interface{}) {
gomega.ExpectWithOffset(1, actual).To(gomega.BeEmpty(), explain...)
}
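The `MakeMatcher` helper introduced above takes a typed callback that reports one of three outcomes: an error, a deferred failure-message function, or success. A hedged sketch of a custom matcher built on it, assuming a pod-phase check that is not part of this diff:

```go
package mytests // hypothetical package

import (
	"fmt"

	"github.com/onsi/gomega/types"
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// BeRunning succeeds when the pod is Running; otherwise it returns a
// lazily constructed failure message, matching the three outcomes
// documented for MakeMatcher.
func BeRunning() types.GomegaMatcher {
	return framework.MakeMatcher(func(pod *v1.Pod) (func() string, error) {
		if pod.Status.Phase == v1.PodRunning {
			return nil, nil // success
		}
		return func() string {
			return fmt.Sprintf("expected pod %q to be Running, got %q", pod.Name, pod.Status.Phase)
		}, nil
	})
}
```

A failed `framework.Gomega().Expect(...)` against such a matcher comes back as a `FailureError` that can be wrapped with more context and later turned into a test failure via `ExpectNoError`.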

View File

@ -27,6 +27,7 @@ import (
"math/rand"
"os"
"path"
"reflect"
"strings"
"time"
@ -38,6 +39,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
v1svc "k8s.io/client-go/applyconfigurations/core/v1"
"k8s.io/client-go/discovery"
cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/dynamic"
@ -54,6 +56,7 @@ import (
const (
// DefaultNamespaceDeletionTimeout is timeout duration for waiting for a namespace deletion.
DefaultNamespaceDeletionTimeout = 5 * time.Minute
defaultServiceAccountName = "default"
)
var (
@ -103,6 +106,7 @@ type Framework struct {
ScalesGetter scaleclient.ScalesGetter
SkipNamespaceCreation bool // Whether to skip creating a namespace
SkipSecretCreation bool // Whether to skip creating secret for a test
Namespace *v1.Namespace // Every test has at least one namespace unless creation is skipped
namespacesToDelete []*v1.Namespace // Some tests have more than one.
NamespaceDeletionTimeout time.Duration
@ -128,7 +132,7 @@ type Framework struct {
// DumpAllNamespaceInfoAction is called after each failed test for namespaces
// created for the test.
type DumpAllNamespaceInfoAction func(f *Framework, namespace string)
type DumpAllNamespaceInfoAction func(ctx context.Context, f *Framework, namespace string)
// TestDataSummary is an interface for managing test data.
type TestDataSummary interface {
@ -144,10 +148,19 @@ type Options struct {
GroupVersion *schema.GroupVersion
}
// NewFrameworkWithCustomTimeouts makes a framework with with custom timeouts.
// NewFrameworkWithCustomTimeouts makes a framework with custom timeouts.
// For timeout values that are zero the normal default value continues to
// be used.
func NewFrameworkWithCustomTimeouts(baseName string, timeouts *TimeoutContext) *Framework {
f := NewDefaultFramework(baseName)
f.Timeouts = timeouts
in := reflect.ValueOf(timeouts).Elem()
out := reflect.ValueOf(f.Timeouts).Elem()
for i := 0; i < in.NumField(); i++ {
value := in.Field(i)
if !value.IsZero() {
out.Field(i).Set(value)
}
}
return f
}
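Because the reflection loop above only copies non-zero fields, a caller can override a single timeout and keep the rest of the defaults. A hedged usage sketch (the suite name and 15-minute value are made up; `PodStart` is the timeout field referenced elsewhere in this diff):

```go
// Hypothetical: only PodStart is overridden; zero-valued fields in the
// passed TimeoutContext keep the framework defaults.
f := framework.NewFrameworkWithCustomTimeouts("slow-storage", &framework.TimeoutContext{
	PodStart: 15 * time.Minute,
})
```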
@ -169,7 +182,7 @@ func NewFramework(baseName string, options Options, client clientset.Interface)
BaseName: baseName,
Options: options,
ClientSet: client,
Timeouts: NewTimeoutContextWithDefaults(),
Timeouts: NewTimeoutContext(),
}
// The order is important here: if the extension calls ginkgo.BeforeEach
@ -184,7 +197,7 @@ func NewFramework(baseName string, options Options, client clientset.Interface)
}
// BeforeEach gets a client and makes a namespace.
func (f *Framework) BeforeEach() {
func (f *Framework) BeforeEach(ctx context.Context) {
// DeferCleanup, in contrast to AfterEach, triggers execution in
// first-in-last-out order. This ensures that the framework instance
// remains valid as long as possible.
@ -235,7 +248,7 @@ func (f *Framework) BeforeEach() {
if !f.SkipNamespaceCreation {
ginkgo.By(fmt.Sprintf("Building a namespace api object, basename %s", f.BaseName))
namespace, err := f.CreateNamespace(f.BaseName, map[string]string{
namespace, err := f.CreateNamespace(ctx, f.BaseName, map[string]string{
"e2e-framework": f.BaseName,
})
ExpectNoError(err)
@ -244,14 +257,15 @@ func (f *Framework) BeforeEach() {
if TestContext.VerifyServiceAccount {
ginkgo.By("Waiting for a default service account to be provisioned in namespace")
err = WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
err = WaitForDefaultServiceAccountInNamespace(ctx, f.ClientSet, namespace.Name)
ExpectNoError(err)
ginkgo.By("Waiting for kube-root-ca.crt to be provisioned in namespace")
err = WaitForKubeRootCAInNamespace(f.ClientSet, namespace.Name)
err = WaitForKubeRootCAInNamespace(ctx, f.ClientSet, namespace.Name)
ExpectNoError(err)
} else {
Logf("Skipping waiting for service account")
}
f.UniqueName = f.Namespace.GetName()
} else {
// not guaranteed to be unique, but very likely
@ -261,17 +275,20 @@ func (f *Framework) BeforeEach() {
f.flakeReport = NewFlakeReport()
}
func (f *Framework) dumpNamespaceInfo() {
func (f *Framework) dumpNamespaceInfo(ctx context.Context) {
if !ginkgo.CurrentSpecReport().Failed() {
return
}
if !TestContext.DumpLogsOnFailure {
return
}
if f.DumpAllNamespaceInfo == nil {
return
}
ginkgo.By("dump namespace information after failure", func() {
if !f.SkipNamespaceCreation {
for _, ns := range f.namespacesToDelete {
f.DumpAllNamespaceInfo(f, ns.Name)
f.DumpAllNamespaceInfo(ctx, f, ns.Name)
}
}
})
@ -315,7 +332,7 @@ func printSummaries(summaries []TestDataSummary, testBaseName string) {
}
// AfterEach deletes the namespace, after reading its events.
func (f *Framework) AfterEach() {
func (f *Framework) AfterEach(ctx context.Context) {
// This should not happen. Given ClientSet is a public field a test must have updated it!
// Error out early before any API calls during cleanup.
if f.ClientSet == nil {
@ -332,13 +349,13 @@ func (f *Framework) AfterEach() {
if TestContext.DeleteNamespace && (TestContext.DeleteNamespaceOnFailure || !ginkgo.CurrentSpecReport().Failed()) {
for _, ns := range f.namespacesToDelete {
ginkgo.By(fmt.Sprintf("Destroying namespace %q for this suite.", ns.Name))
if err := f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), ns.Name, metav1.DeleteOptions{}); err != nil {
if err := f.ClientSet.CoreV1().Namespaces().Delete(ctx, ns.Name, metav1.DeleteOptions{}); err != nil {
if !apierrors.IsNotFound(err) {
nsDeletionErrors[ns.Name] = err
// Dump namespace if we are unable to delete the namespace and the dump was not already performed.
if !ginkgo.CurrentSpecReport().Failed() && TestContext.DumpLogsOnFailure && f.DumpAllNamespaceInfo != nil {
f.DumpAllNamespaceInfo(f, ns.Name)
f.DumpAllNamespaceInfo(ctx, f, ns.Name)
}
} else {
Logf("Namespace %v was already deleted", ns.Name)
@ -385,14 +402,14 @@ func (f *Framework) AfterEach() {
// DeleteNamespace can be used to delete a namespace. Additionally it can be used to
// dump namespace information so as it can be used as an alternative of framework
// deleting the namespace towards the end.
func (f *Framework) DeleteNamespace(name string) {
func (f *Framework) DeleteNamespace(ctx context.Context, name string) {
defer func() {
err := f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), name, metav1.DeleteOptions{})
err := f.ClientSet.CoreV1().Namespaces().Delete(ctx, name, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
Logf("error deleting namespace %s: %v", name, err)
return
}
err = WaitForNamespacesDeleted(f.ClientSet, []string{name}, DefaultNamespaceDeletionTimeout)
err = WaitForNamespacesDeleted(ctx, f.ClientSet, []string{name}, DefaultNamespaceDeletionTimeout)
if err != nil {
Logf("error deleting namespace %s: %v", name, err)
return
@ -409,13 +426,13 @@ func (f *Framework) DeleteNamespace(name string) {
}()
// if current test failed then we should dump namespace information
if !f.SkipNamespaceCreation && ginkgo.CurrentSpecReport().Failed() && TestContext.DumpLogsOnFailure && f.DumpAllNamespaceInfo != nil {
f.DumpAllNamespaceInfo(f, name)
f.DumpAllNamespaceInfo(ctx, f, name)
}
}
// CreateNamespace creates a namespace for e2e testing.
func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (*v1.Namespace, error) {
func (f *Framework) CreateNamespace(ctx context.Context, baseName string, labels map[string]string) (*v1.Namespace, error) {
createTestingNS := TestContext.CreateTestingNS
if createTestingNS == nil {
createTestingNS = CreateTestingNS
@ -437,14 +454,53 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (
}
labels[admissionapi.EnforceLevelLabel] = string(enforceLevel)
ns, err := createTestingNS(baseName, f.ClientSet, labels)
ns, err := createTestingNS(ctx, baseName, f.ClientSet, labels)
// check ns instead of err to see if it's nil as we may
// fail to create serviceAccount in it.
f.AddNamespacesToDelete(ns)
if TestContext.E2EDockerConfigFile != "" && !f.SkipSecretCreation {
// With the Secret created, the default service account (in the new namespace)
// is patched with the secret and can then be referenced by all the pods spawned by E2E process, and repository authentication should be successful.
secret, err := f.createSecretFromDockerConfig(ctx, ns.Name)
if err != nil {
return ns, fmt.Errorf("failed to create secret from docker config file: %v", err)
}
serviceAccountClient := f.ClientSet.CoreV1().ServiceAccounts(ns.Name)
serviceAccountConfig := v1svc.ServiceAccount(defaultServiceAccountName, ns.Name)
serviceAccountConfig.ImagePullSecrets = append(serviceAccountConfig.ImagePullSecrets, v1svc.LocalObjectReferenceApplyConfiguration{Name: &secret.Name})
svc, err := serviceAccountClient.Apply(ctx, serviceAccountConfig, metav1.ApplyOptions{FieldManager: "e2e-framework"})
if err != nil {
return ns, fmt.Errorf("failed to patch imagePullSecret [%s] to service account [%s]: %v", secret.Name, svc.Name, err)
}
}
return ns, err
}
// createSecretFromDockerConfig creates a secret using the private image registry credentials.
// The credentials are provided by --e2e-docker-config-file flag.
func (f *Framework) createSecretFromDockerConfig(ctx context.Context, namespace string) (*v1.Secret, error) {
contents, err := os.ReadFile(TestContext.E2EDockerConfigFile)
if err != nil {
return nil, fmt.Errorf("error reading docker config file: %v", err)
}
secretObject := &v1.Secret{
Data: map[string][]byte{v1.DockerConfigJsonKey: contents},
Type: v1.SecretTypeDockerConfigJson,
}
secretObject.GenerateName = "registry-cred"
Logf("create image pull secret %s", secretObject.Name)
secret, err := f.ClientSet.CoreV1().Secrets(namespace).Create(ctx, secretObject, metav1.CreateOptions{})
return secret, err
}
// RecordFlakeIfError records flakeness info if error happens.
// NOTE: This function is not used at any places yet, but we are in progress for https://github.com/kubernetes/kubernetes/issues/66239 which requires this. Please don't remove this.
func (f *Framework) RecordFlakeIfError(err error, optionalDescription ...interface{}) {
@ -526,11 +582,6 @@ func (kc *KubeConfig) FindCluster(name string) *KubeCluster {
return nil
}
// ConformanceIt is wrapper function for ginkgo It. Adds "[Conformance]" tag and makes static analysis easier.
func ConformanceIt(text string, body interface{}) bool {
return ginkgo.It(text+" [Conformance]", ginkgo.Offset(1), body)
}
// PodStateVerification represents a verification of pod state.
// Any time you have a set of pods that you want to operate against or query,
// this struct can be used to declaratively identify those pods.
@ -596,7 +647,7 @@ func passesPhasesFilter(pod v1.Pod, validPhases []v1.PodPhase) bool {
}
// filterLabels returns a list of pods which have labels.
func filterLabels(selectors map[string]string, cli clientset.Interface, ns string) (*v1.PodList, error) {
func filterLabels(ctx context.Context, selectors map[string]string, cli clientset.Interface, ns string) (*v1.PodList, error) {
var err error
var selector labels.Selector
var pl *v1.PodList
@ -605,9 +656,9 @@ func filterLabels(selectors map[string]string, cli clientset.Interface, ns strin
if len(selectors) > 0 {
selector = labels.SelectorFromSet(labels.Set(selectors))
options := metav1.ListOptions{LabelSelector: selector.String()}
pl, err = cli.CoreV1().Pods(ns).List(context.TODO(), options)
pl, err = cli.CoreV1().Pods(ns).List(ctx, options)
} else {
pl, err = cli.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
pl, err = cli.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
}
return pl, err
}
@ -615,13 +666,13 @@ func filterLabels(selectors map[string]string, cli clientset.Interface, ns strin
// filter filters pods which pass a filter. It can be used to compose
// the more useful abstractions like ForEach, WaitFor, and so on, which
// can be used directly by tests.
func (p *PodStateVerification) filter(c clientset.Interface, namespace *v1.Namespace) ([]v1.Pod, error) {
func (p *PodStateVerification) filter(ctx context.Context, c clientset.Interface, namespace *v1.Namespace) ([]v1.Pod, error) {
if len(p.ValidPhases) == 0 || namespace == nil {
panic(fmt.Errorf("Need to specify a valid pod phases (%v) and namespace (%v). ", p.ValidPhases, namespace))
}
ns := namespace.Name
pl, err := filterLabels(p.Selectors, c, ns) // Build an v1.PodList to operate against.
pl, err := filterLabels(ctx, p.Selectors, c, ns) // Build an v1.PodList to operate against.
Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors)
if len(pl.Items) == 0 || err != nil {
return pl.Items, err
@ -649,12 +700,12 @@ ReturnPodsSoFar:
// WaitFor waits for some minimum number of pods to be verified, according to the PodStateVerification
// definition.
func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1.Pod, error) {
func (cl *ClusterVerification) WaitFor(ctx context.Context, atLeast int, timeout time.Duration) ([]v1.Pod, error) {
pods := []v1.Pod{}
var returnedErr error
err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
pods, returnedErr = cl.podState.filter(cl.client, cl.namespace)
err := wait.PollWithContext(ctx, 1*time.Second, timeout, func(ctx context.Context) (bool, error) {
pods, returnedErr = cl.podState.filter(ctx, cl.client, cl.namespace)
// Failure
if returnedErr != nil {
@ -677,8 +728,8 @@ func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1
}
// WaitForOrFail provides a shorthand WaitFor with failure as an option if anything goes wrong.
func (cl *ClusterVerification) WaitForOrFail(atLeast int, timeout time.Duration) {
pods, err := cl.WaitFor(atLeast, timeout)
func (cl *ClusterVerification) WaitForOrFail(ctx context.Context, atLeast int, timeout time.Duration) {
pods, err := cl.WaitFor(ctx, atLeast, timeout)
if err != nil || len(pods) < atLeast {
Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
}
@ -689,8 +740,8 @@ func (cl *ClusterVerification) WaitForOrFail(atLeast int, timeout time.Duration)
//
// For example, if you require at least 5 pods to be running before your test will pass,
// its smart to first call "clusterVerification.WaitFor(5)" before you call clusterVerification.ForEach.
func (cl *ClusterVerification) ForEach(podFunc func(v1.Pod)) error {
pods, err := cl.podState.filter(cl.client, cl.namespace)
func (cl *ClusterVerification) ForEach(ctx context.Context, podFunc func(v1.Pod)) error {
pods, err := cl.podState.filter(ctx, cl.client, cl.namespace)
if err == nil {
if len(pods) == 0 {
Failf("No pods matched the filter.")

vendor/k8s.io/kubernetes/test/e2e/framework/get.go (new file, 145 lines, generated/vendored)
View File

@ -0,0 +1,145 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"errors"
"fmt"
"time"
"github.com/onsi/gomega"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// GetFunc is a function which retrieves a certain object.
type GetFunc[T any] func(ctx context.Context) (T, error)
// APIGetFunc is a get functions as used in client-go.
type APIGetFunc[T any] func(ctx context.Context, name string, getOptions metav1.GetOptions) (T, error)
// APIListFunc is a list functions as used in client-go.
type APIListFunc[T any] func(ctx context.Context, listOptions metav1.ListOptions) (T, error)
// GetObject takes a get function like clientset.CoreV1().Pods(ns).Get
// and the parameters for it and returns a function that executes that get
// operation in a [gomega.Eventually] or [gomega.Consistently].
//
// Delays and retries are handled by [HandleRetry]. A "not found" error is
// a fatal error that causes polling to stop immediately. If that is not
// desired, then wrap the result with [IgnoreNotFound].
func GetObject[T any](get APIGetFunc[T], name string, getOptions metav1.GetOptions) GetFunc[T] {
return HandleRetry(func(ctx context.Context) (T, error) {
return get(ctx, name, getOptions)
})
}
// ListObjects takes a list function like clientset.CoreV1().Pods(ns).List
// and the parameters for it and returns a function that executes that list
// operation in a [gomega.Eventually] or [gomega.Consistently].
//
// Delays and retries are handled by [HandleRetry].
func ListObjects[T any](list APIListFunc[T], listOptions metav1.ListOptions) GetFunc[T] {
return HandleRetry(func(ctx context.Context) (T, error) {
return list(ctx, listOptions)
})
}
// HandleRetry wraps an arbitrary get function. When the wrapped function
// returns an error, HandleGetError will decide whether the call should be
// retried and if requested, will sleep before doing so.
//
// This is meant to be used inside [gomega.Eventually] or [gomega.Consistently].
func HandleRetry[T any](get GetFunc[T]) GetFunc[T] {
return func(ctx context.Context) (T, error) {
t, err := get(ctx)
if err != nil {
if retry, delay := ShouldRetry(err); retry {
if delay > 0 {
// We could return
// gomega.TryAgainAfter(delay) here,
// but then we need to funnel that
// error through any other
// wrappers. Waiting directly is simpler.
ctx, cancel := context.WithTimeout(ctx, delay)
defer cancel()
<-ctx.Done()
}
return t, err
}
// Give up polling immediately.
var null T
return t, gomega.StopTrying(fmt.Sprintf("Unexpected final error while getting %T", null)).Wrap(err)
}
return t, nil
}
}
// ShouldRetry decides whether to retry an API request. Optionally returns a
// delay to retry after.
func ShouldRetry(err error) (retry bool, retryAfter time.Duration) {
// If the error suggests a client delay (Retry-After), we respect it as an explicit confirmation that we should retry.
if delay, shouldRetry := apierrors.SuggestsClientDelay(err); shouldRetry {
return shouldRetry, time.Duration(delay) * time.Second
}
// these errors indicate a transient error that should be retried.
if apierrors.IsTimeout(err) || apierrors.IsTooManyRequests(err) || errors.As(err, &transientError{}) {
return true, 0
}
return false, 0
}
// RetryNotFound wraps an arbitrary get function. When the wrapped function
// encounters a "not found" error, that error is treated as a transient problem
// and polling continues.
//
// This is meant to be used inside [gomega.Eventually] or [gomega.Consistently].
func RetryNotFound[T any](get GetFunc[T]) GetFunc[T] {
return func(ctx context.Context) (T, error) {
t, err := get(ctx)
if apierrors.IsNotFound(err) {
// If we are wrapping HandleRetry, then the error will
// be gomega.StopTrying. We need to get rid of that,
// otherwise gomega.Eventually will stop.
var stopTryingErr gomega.PollingSignalError
if errors.As(err, &stopTryingErr) {
if wrappedErr := errors.Unwrap(stopTryingErr); wrappedErr != nil {
err = wrappedErr
}
}
// Mark the error as transient in case that we get
// wrapped by HandleRetry.
err = transientError{error: err}
}
return t, err
}
}
// transientError wraps some other error and indicates that the
// wrapped error is something that may go away.
type transientError struct {
error
}
func (err transientError) Unwrap() error {
return err.error
}
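
A usage sketch (hypothetical test code with a made-up clientset, namespace and pod name) showing how these helpers are meant to be dropped into gomega.Eventually:

package example

import (
    "context"
    "time"

    "github.com/onsi/gomega"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/test/e2e/framework"
)

// waitForPodRunning polls a pod with GetObject until it reports Running.
// RetryNotFound keeps polling while the pod does not exist yet; without it,
// a "not found" error would stop the Eventually immediately.
func waitForPodRunning(ctx context.Context, c clientset.Interface, ns, name string) {
    get := framework.RetryNotFound(framework.GetObject(c.CoreV1().Pods(ns).Get, name, metav1.GetOptions{}))
    gomega.Eventually(ctx, get).
        WithTimeout(5 * time.Minute).
        Should(gomega.HaveField("Status.Phase", v1.PodRunning))
}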


@ -18,16 +18,55 @@ package framework
import (
"path"
"reflect"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2/types"
apierrors "k8s.io/apimachinery/pkg/api/errors"
)
var errInterface = reflect.TypeOf((*error)(nil)).Elem()
// IgnoreNotFound can be used to wrap an arbitrary function in a call to
// [ginkgo.DeferCleanup]. When the wrapped function returns an error that
// `apierrors.IsNotFound` considers as "not found", the error is ignored
// instead of failing the test during cleanup. This is useful for cleanup code
// that just needs to ensure that some object does not exist anymore.
func IgnoreNotFound(in any) any {
inType := reflect.TypeOf(in)
inValue := reflect.ValueOf(in)
return reflect.MakeFunc(inType, func(args []reflect.Value) []reflect.Value {
out := inValue.Call(args)
if len(out) > 0 {
lastValue := out[len(out)-1]
last := lastValue.Interface()
if last != nil && lastValue.Type().Implements(errInterface) && apierrors.IsNotFound(last.(error)) {
out[len(out)-1] = reflect.Zero(errInterface)
}
}
return out
}).Interface()
}
// AnnotatedLocation can be used to provide more informative source code
// locations by passing the result as additional parameter to a
// BeforeEach/AfterEach/DeferCleanup/It/etc.
func AnnotatedLocation(annotation string) types.CodeLocation {
codeLocation := types.NewCodeLocation(1)
return AnnotatedLocationWithOffset(annotation, 1)
}
// AnnotatedLocationWithOffset skips additional call stack levels. With 0 as offset
// it is identical to [AnnotatedLocation].
func AnnotatedLocationWithOffset(annotation string, offset int) types.CodeLocation {
codeLocation := types.NewCodeLocation(offset + 1)
codeLocation.FileName = path.Base(codeLocation.FileName)
codeLocation = types.NewCustomCodeLocation(annotation + " | " + codeLocation.String())
return codeLocation
}
// ConformanceIt is a wrapper function for ginkgo.It. Adds the "[Conformance]" tag and makes static analysis easier.
func ConformanceIt(text string, args ...interface{}) bool {
args = append(args, ginkgo.Offset(1))
return ginkgo.It(text+" [Conformance]", args...)
}
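
A sketch of how IgnoreNotFound and AnnotatedLocation are typically combined with Ginkgo nodes; the framework name, pod name and annotation text below are placeholders:

package example

import (
    "github.com/onsi/ginkgo/v2"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("cleanup and annotation example", func() {
    f := framework.NewDefaultFramework("ignore-not-found-example")

    ginkgo.BeforeEach(func() {
        // ... create a pod named "example-pod" in f.Namespace ...
    }, framework.AnnotatedLocation("set up test pod"))

    ginkgo.It("cleans up without failing when the pod is already gone", func() {
        // Delete the pod during cleanup, but ignore "not found" so a test
        // that already deleted it does not fail. DeferCleanup supplies the
        // context argument of Delete automatically.
        ginkgo.DeferCleanup(framework.IgnoreNotFound(f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete), "example-pod", metav1.DeleteOptions{})
    })
})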


@ -0,0 +1,42 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package junit
import (
"github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/ginkgo/v2/types"
)
// WriteJUnitReport generates a JUnit file that is shorter than the one
// normally written by `ginkgo --junit-report`. This is needed because the full
// report can become too large for tools like Spyglass
// (https://github.com/kubernetes/kubernetes/issues/111510).
func WriteJUnitReport(report ginkgo.Report, filename string) error {
config := reporters.JunitReportConfig{
// Remove details for specs where we don't care.
OmitTimelinesForSpecState: types.SpecStatePassed | types.SpecStateSkipped,
// Don't write <failure message="summary">. The same text is
// also in the full text for the failure. If we were to write
// both, then tools like kettle and spyglass would concatenate
// the two strings and thus show duplicated information.
OmitFailureMessageAttr: true,
}
return reporters.GenerateJUnitReportWithConfig(report, filename, config)
}
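
A sketch of wiring this helper into a suite through ginkgo.ReportAfterSuite; the import path and report file name are assumptions rather than something taken from this diff:

package example

import (
    "github.com/onsi/ginkgo/v2"
    "k8s.io/kubernetes/test/e2e/framework"

    // Path assumed for illustration; import from wherever this junit
    // helper package actually lives in the tree.
    "k8s.io/kubernetes/test/e2e/framework/internal/junit"
)

var _ = ginkgo.ReportAfterSuite("write trimmed JUnit report", func(report ginkgo.Report) {
    if err := junit.WriteJUnitReport(report, "junit_e2e.xml"); err != nil {
        framework.Logf("failed to write JUnit report: %v", err)
    }
})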


@ -0,0 +1,9 @@
# This E2E framework sub-package is currently allowed to use arbitrary
# dependencies, therefore we need to override the restrictions from
# the parent .import-restrictions file.
#
# At some point it may become useful to also check this package's
# dependencies more carefully.
rules:
- selectorRegexp: ""
allowedPrefixes: [ "" ]


@ -22,6 +22,7 @@ import (
"io"
"net"
"net/url"
"os"
"os/exec"
"strings"
"syscall"
@ -48,9 +49,12 @@ func NewKubectlCommand(namespace string, args ...string) *KubectlBuilder {
return b
}
// WithEnv sets the given environment and returns itself.
func (b *KubectlBuilder) WithEnv(env []string) *KubectlBuilder {
b.cmd.Env = env
// AppendEnv appends the given environment and returns itself.
func (b *KubectlBuilder) AppendEnv(env []string) *KubectlBuilder {
if b.cmd.Env == nil {
b.cmd.Env = os.Environ()
}
b.cmd.Env = append(b.cmd.Env, env...)
return b
}
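
A sketch of the new behavior: AppendEnv layers extra variables on top of the inherited os.Environ() instead of replacing it. The kubeconfig path is a placeholder and the final Exec call assumes the builder's usual Exec method:

package example

import (
    "k8s.io/kubernetes/test/e2e/framework"
    e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
)

func listPodsWithExtraEnv(ns string) string {
    // AppendEnv starts from os.Environ(), so PATH, HOME, etc. stay intact
    // and only KUBECONFIG is added (or overridden) here.
    out, err := e2ekubectl.NewKubectlCommand(ns, "get", "pods").
        AppendEnv([]string{"KUBECONFIG=/tmp/alternate.kubeconfig"}).
        Exec()
    framework.ExpectNoError(err)
    return out
}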


@ -100,8 +100,8 @@ func (tk *TestKubeconfig) KubectlCmd(args ...string) *exec.Cmd {
}
// LogFailedContainers runs `kubectl logs` on failed containers.
func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) {
podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
func LogFailedContainers(ctx context.Context, c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) {
podList, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
if err != nil {
logFunc("Error getting pods in namespace '%s': %v", ns, err)
return
@ -109,18 +109,18 @@ func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm stri
logFunc("Running kubectl logs on non-ready containers in %v", ns)
for _, pod := range podList.Items {
if res, err := testutils.PodRunningReady(&pod); !res || err != nil {
kubectlLogPod(c, pod, "", framework.Logf)
kubectlLogPod(ctx, c, pod, "", framework.Logf)
}
}
}
func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) {
func kubectlLogPod(ctx context.Context, c clientset.Interface, pod v1.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) {
for _, container := range pod.Spec.Containers {
if strings.Contains(container.Name, containerNameSubstr) {
// Contains() matches all strings if substr is empty
logs, err := e2epod.GetPodLogs(c, pod.Namespace, pod.Name, container.Name)
logs, err := e2epod.GetPodLogs(ctx, c, pod.Namespace, pod.Name, container.Name)
if err != nil {
logs, err = e2epod.GetPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name)
logs, err = e2epod.GetPreviousPodLogs(ctx, c, pod.Namespace, pod.Name, container.Name)
if err != nil {
logFunc("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err)
}


@ -17,14 +17,10 @@ limitations under the License.
package framework
import (
"bytes"
"fmt"
"regexp"
"runtime/debug"
"time"
"github.com/onsi/ginkgo/v2"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
)
func nowStamp() string {
@ -45,61 +41,9 @@ func Logf(format string, args ...interface{}) {
func Failf(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
skip := 1
log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
ginkgo.Fail(nowStamp()+": "+msg, skip)
ginkgo.Fail(msg, skip)
panic("unreachable")
}
// Fail is a replacement for ginkgo.Fail which logs the problem as it occurs
// together with a stack trace and then calls ginkgowrapper.Fail.
func Fail(msg string, callerSkip ...int) {
skip := 1
if len(callerSkip) > 0 {
skip += callerSkip[0]
}
log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
ginkgo.Fail(nowStamp()+": "+msg, skip)
}
var codeFilterRE = regexp.MustCompile(`/github.com/onsi/ginkgo/v2/`)
// PrunedStack is a wrapper around debug.Stack() that removes information
// about the current goroutine and optionally skips some of the initial stack entries.
// With skip == 0, the returned stack will start with the caller of PruneStack.
// From the remaining entries it automatically filters out useless ones like
// entries coming from Ginkgo.
//
// This is a modified copy of PruneStack in https://github.com/onsi/ginkgo/v2/blob/f90f37d87fa6b1dd9625e2b1e83c23ffae3de228/internal/codelocation/code_location.go#L25:
// - simplified API and thus renamed (calls debug.Stack() instead of taking a parameter)
// - source code filtering updated to be specific to Kubernetes
// - optimized to use bytes and in-place slice filtering from
// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
func PrunedStack(skip int) []byte {
fullStackTrace := debug.Stack()
stack := bytes.Split(fullStackTrace, []byte("\n"))
// Ensure that the even entries are the method names and
// the odd entries the source code information.
if len(stack) > 0 && bytes.HasPrefix(stack[0], []byte("goroutine ")) {
// Ignore "goroutine 29 [running]:" line.
stack = stack[1:]
}
// The "+2" is for skipping over:
// - runtime/debug.Stack()
// - PrunedStack()
skip += 2
if len(stack) > 2*skip {
stack = stack[2*skip:]
}
n := 0
for i := 0; i < len(stack)/2; i++ {
// We filter out based on the source code file name.
if !codeFilterRE.Match([]byte(stack[i*2+1])) {
stack[n] = stack[i*2]
stack[n+1] = stack[i*2+1]
n += 2
}
}
stack = stack[:n]
return bytes.Join(stack, []byte("\n"))
}
// Fail is an alias for ginkgo.Fail.
var Fail = ginkgo.Fail


@ -0,0 +1,9 @@
# This E2E framework sub-package is currently allowed to use arbitrary
# dependencies, therefore we need to override the restrictions from
# the parent .import-restrictions file.
#
# At some point it may become useful to also check this package's
# dependencies more carefully.
rules:
- selectorRegexp: ""
allowedPrefixes: [ "" ]


@ -0,0 +1,14 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- sig-instrumentation-approvers
emeritus_approvers:
- fabxc
- piosz
- fgrzadkowski
- kawych
- x13n
reviewers:
- sig-instrumentation-reviewers
labels:
- sig/instrumentation


@ -17,8 +17,6 @@ limitations under the License.
package metrics
import (
"context"
"k8s.io/component-base/metrics/testutil"
)
@ -42,11 +40,3 @@ func parseAPIServerMetrics(data string) (APIServerMetrics, error) {
}
return result, nil
}
func (g *Grabber) getMetricsFromAPIServer() (string, error) {
rawOutput, err := g.client.CoreV1().RESTClient().Get().RequestURI("/metrics").Do(context.TODO()).Raw()
if err != nil {
return "", err
}
return string(rawOutput), nil
}


@ -17,24 +17,26 @@ limitations under the License.
package metrics
import (
"context"
"github.com/onsi/ginkgo/v2"
"k8s.io/kubernetes/test/e2e/framework"
)
func GrabBeforeEach(f *framework.Framework) (result *Collection) {
func GrabBeforeEach(ctx context.Context, f *framework.Framework) (result *Collection) {
gatherMetricsAfterTest := framework.TestContext.GatherMetricsAfterTest == "true" || framework.TestContext.GatherMetricsAfterTest == "master"
if !gatherMetricsAfterTest || !framework.TestContext.IncludeClusterAutoscalerMetrics {
return nil
}
ginkgo.By("Gathering metrics before test", func() {
grabber, err := NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), !framework.ProviderIs("kubemark"), false, false, false, framework.TestContext.IncludeClusterAutoscalerMetrics, false)
grabber, err := NewMetricsGrabber(ctx, f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), !framework.ProviderIs("kubemark"), false, false, false, framework.TestContext.IncludeClusterAutoscalerMetrics, false)
if err != nil {
framework.Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
return
}
metrics, err := grabber.Grab()
metrics, err := grabber.Grab(ctx)
if err != nil {
framework.Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
return
@ -46,7 +48,7 @@ func GrabBeforeEach(f *framework.Framework) (result *Collection) {
return
}
func GrabAfterEach(f *framework.Framework, before *Collection) {
func GrabAfterEach(ctx context.Context, f *framework.Framework, before *Collection) {
if framework.TestContext.GatherMetricsAfterTest == "false" {
return
}
@ -54,12 +56,12 @@ func GrabAfterEach(f *framework.Framework, before *Collection) {
ginkgo.By("Gathering metrics after test", func() {
// Grab apiserver, scheduler, controller-manager metrics and (optionally) nodes' kubelet metrics.
grabMetricsFromKubelets := framework.TestContext.GatherMetricsAfterTest != "master" && !framework.ProviderIs("kubemark")
grabber, err := NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), grabMetricsFromKubelets, true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics, false)
grabber, err := NewMetricsGrabber(ctx, f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), grabMetricsFromKubelets, true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics, false)
if err != nil {
framework.Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
return
}
received, err := grabber.Grab()
received, err := grabber.Grab(ctx)
if err != nil {
framework.Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
return
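
A sketch of bracketing a spec with these two helpers; in practice the framework wires this up itself, so the Describe below is purely illustrative:

package example

import (
    "github.com/onsi/ginkgo/v2"
    "k8s.io/kubernetes/test/e2e/framework"
    e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
)

var _ = ginkgo.Describe("metrics bracketing example", func() {
    f := framework.NewDefaultFramework("metrics-example")

    ginkgo.It("gathers cluster autoscaler metrics around a test", func(ctx ginkgo.SpecContext) {
        before := e2emetrics.GrabBeforeEach(ctx, f)
        ginkgo.DeferCleanup(func(ctx ginkgo.SpecContext) {
            e2emetrics.GrabAfterEach(ctx, f, before)
        })
        // ... test body ...
    })
})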


@ -68,9 +68,12 @@ func NewKubeletMetrics() KubeletMetrics {
}
// GrabKubeletMetricsWithoutProxy retrieves metrics from the kubelet on the given node using a simple GET over http.
// Currently only used in integration tests.
func GrabKubeletMetricsWithoutProxy(nodeName, path string) (KubeletMetrics, error) {
resp, err := http.Get(fmt.Sprintf("http://%s%s", nodeName, path))
func GrabKubeletMetricsWithoutProxy(ctx context.Context, nodeName, path string) (KubeletMetrics, error) {
req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("http://%s%s", nodeName, path), nil)
if err != nil {
return KubeletMetrics{}, err
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return KubeletMetrics{}, err
}
@ -90,31 +93,6 @@ func parseKubeletMetrics(data string) (KubeletMetrics, error) {
return result, nil
}
func (g *Grabber) getMetricsFromNode(nodeName string, kubeletPort int) (string, error) {
// There's a problem with timing out during proxy. Wrapping this in a goroutine to prevent deadlock.
finished := make(chan struct{}, 1)
var err error
var rawOutput []byte
go func() {
rawOutput, err = g.client.CoreV1().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", nodeName, kubeletPort)).
Suffix("metrics").
Do(context.TODO()).Raw()
finished <- struct{}{}
}()
select {
case <-time.After(proxyTimeout):
return "", fmt.Errorf("Timed out when waiting for proxy to gather metrics from %v", nodeName)
case <-finished:
if err != nil {
return "", err
}
return string(rawOutput), nil
}
}
// KubeletLatencyMetric stores metrics scraped from the kubelet server's /metric endpoint.
// TODO: Get some more structure around the metrics and this type
type KubeletLatencyMetric struct {
@ -137,21 +115,21 @@ func (a KubeletLatencyMetrics) Less(i, j int) bool { return a[i].Latency > a[j].
// If an apiserver client is passed in, the function will try to get kubelet metrics from the metrics grabber;
// or else, the function will try to get kubelet metrics directly from the node.
func getKubeletMetricsFromNode(c clientset.Interface, nodeName string) (KubeletMetrics, error) {
func getKubeletMetricsFromNode(ctx context.Context, c clientset.Interface, nodeName string) (KubeletMetrics, error) {
if c == nil {
return GrabKubeletMetricsWithoutProxy(nodeName, "/metrics")
return GrabKubeletMetricsWithoutProxy(ctx, nodeName, "/metrics")
}
grabber, err := NewMetricsGrabber(c, nil, nil, true, false, false, false, false, false)
grabber, err := NewMetricsGrabber(ctx, c, nil, nil, true, false, false, false, false, false)
if err != nil {
return KubeletMetrics{}, err
}
return grabber.GrabFromKubelet(nodeName)
return grabber.GrabFromKubelet(ctx, nodeName)
}
// GetKubeletMetrics gets all metrics in kubelet subsystem from specified node and trims
// the subsystem prefix.
func GetKubeletMetrics(c clientset.Interface, nodeName string) (KubeletMetrics, error) {
ms, err := getKubeletMetricsFromNode(c, nodeName)
func GetKubeletMetrics(ctx context.Context, c clientset.Interface, nodeName string) (KubeletMetrics, error) {
ms, err := getKubeletMetricsFromNode(ctx, c, nodeName)
if err != nil {
return KubeletMetrics{}, err
}
@ -217,8 +195,8 @@ func GetKubeletLatencyMetrics(ms KubeletMetrics, filterMetricNames sets.String)
}
// HighLatencyKubeletOperations logs and counts the high latency metrics exported by the kubelet server via /metrics.
func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration, nodeName string, logFunc func(fmt string, args ...interface{})) (KubeletLatencyMetrics, error) {
ms, err := GetKubeletMetrics(c, nodeName)
func HighLatencyKubeletOperations(ctx context.Context, c clientset.Interface, threshold time.Duration, nodeName string, logFunc func(fmt string, args ...interface{})) (KubeletLatencyMetrics, error) {
ms, err := GetKubeletMetrics(ctx, c, nodeName)
if err != nil {
return KubeletLatencyMetrics{}, err
}
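
A sketch of calling the context-aware kubelet metrics helpers; node name and latency threshold are placeholders:

package example

import (
    "context"
    "time"

    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/test/e2e/framework"
    e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
)

func logSlowKubeletOps(ctx context.Context, c clientset.Interface, nodeName string) {
    // Log kubelet operations on the node that took longer than 10s and
    // collect them for further checks.
    slow, err := e2emetrics.HighLatencyKubeletOperations(ctx, c, 10*time.Second, nodeName, framework.Logf)
    framework.ExpectNoError(err)
    framework.Logf("found %d high-latency kubelet operations", len(slow))
}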


@ -54,6 +54,7 @@ var MetricsGrabbingDisabledError = errors.New("metrics grabbing disabled")
// Collection is a metrics collection of components
type Collection struct {
APIServerMetrics APIServerMetrics
APIServerMetricsSLIs APIServerMetrics
ControllerManagerMetrics ControllerManagerMetrics
SnapshotControllerMetrics SnapshotControllerMetrics
KubeletMetrics map[string]KubeletMetrics
@ -88,7 +89,7 @@ type Grabber struct {
// Collecting metrics data is an optional debug feature. Not all clusters will
// support it. If disabled for a component, the corresponding Grab function
// will immediately return an error derived from MetricsGrabbingDisabledError.
func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, config *rest.Config, kubelets bool, scheduler bool, controllers bool, apiServer bool, clusterAutoscaler bool, snapshotController bool) (*Grabber, error) {
func NewMetricsGrabber(ctx context.Context, c clientset.Interface, ec clientset.Interface, config *rest.Config, kubelets bool, scheduler bool, controllers bool, apiServer bool, clusterAutoscaler bool, snapshotController bool) (*Grabber, error) {
kubeScheduler := ""
kubeControllerManager := ""
@ -102,7 +103,7 @@ func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, config *re
return nil, errors.New("a rest config is required for grabbing kube-controller and kube-controller-manager metrics")
}
podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), metav1.ListOptions{})
podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(ctx, metav1.ListOptions{})
if err != nil {
return nil, err
}
@ -132,18 +133,18 @@ func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, config *re
externalClient: ec,
config: config,
grabFromAPIServer: apiServer,
grabFromControllerManager: checkPodDebugHandlers(c, controllers, "kube-controller-manager", kubeControllerManager),
grabFromControllerManager: checkPodDebugHandlers(ctx, c, controllers, "kube-controller-manager", kubeControllerManager),
grabFromKubelets: kubelets,
grabFromScheduler: checkPodDebugHandlers(c, scheduler, "kube-scheduler", kubeScheduler),
grabFromScheduler: checkPodDebugHandlers(ctx, c, scheduler, "kube-scheduler", kubeScheduler),
grabFromClusterAutoscaler: clusterAutoscaler,
grabFromSnapshotController: checkPodDebugHandlers(c, snapshotController, "snapshot-controller", snapshotControllerManager),
grabFromSnapshotController: checkPodDebugHandlers(ctx, c, snapshotController, "snapshot-controller", snapshotControllerManager),
kubeScheduler: kubeScheduler,
kubeControllerManager: kubeControllerManager,
snapshotController: snapshotControllerManager,
}, nil
}
func checkPodDebugHandlers(c clientset.Interface, requested bool, component, podName string) bool {
func checkPodDebugHandlers(ctx context.Context, c clientset.Interface, requested bool, component, podName string) bool {
if !requested {
return false
}
@ -155,7 +156,7 @@ func checkPodDebugHandlers(c clientset.Interface, requested bool, component, pod
// The debug handlers on the host where the pod runs might be disabled.
// We can check that indirectly by trying to retrieve log output.
limit := int64(1)
if _, err := c.CoreV1().Pods(metav1.NamespaceSystem).GetLogs(podName, &v1.PodLogOptions{LimitBytes: &limit}).DoRaw(context.TODO()); err != nil {
if _, err := c.CoreV1().Pods(metav1.NamespaceSystem).GetLogs(podName, &v1.PodLogOptions{LimitBytes: &limit}).DoRaw(ctx); err != nil {
klog.Warningf("Can't retrieve log output of %s (%q). Debug handlers might be disabled in kubelet. Grabbing metrics from %s is disabled.",
podName, err, component)
return false
@ -171,8 +172,8 @@ func (g *Grabber) HasControlPlanePods() bool {
}
// GrabFromKubelet returns metrics from kubelet
func (g *Grabber) GrabFromKubelet(nodeName string) (KubeletMetrics, error) {
nodes, err := g.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": nodeName}.AsSelector().String()})
func (g *Grabber) GrabFromKubelet(ctx context.Context, nodeName string) (KubeletMetrics, error) {
nodes, err := g.client.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": nodeName}.AsSelector().String()})
if err != nil {
return KubeletMetrics{}, err
}
@ -180,22 +181,47 @@ func (g *Grabber) GrabFromKubelet(nodeName string) (KubeletMetrics, error) {
return KubeletMetrics{}, fmt.Errorf("Error listing nodes with name %v, got %v", nodeName, nodes.Items)
}
kubeletPort := nodes.Items[0].Status.DaemonEndpoints.KubeletEndpoint.Port
return g.grabFromKubeletInternal(nodeName, int(kubeletPort))
return g.grabFromKubeletInternal(ctx, nodeName, int(kubeletPort))
}
func (g *Grabber) grabFromKubeletInternal(nodeName string, kubeletPort int) (KubeletMetrics, error) {
func (g *Grabber) grabFromKubeletInternal(ctx context.Context, nodeName string, kubeletPort int) (KubeletMetrics, error) {
if kubeletPort <= 0 || kubeletPort > 65535 {
return KubeletMetrics{}, fmt.Errorf("Invalid Kubelet port %v. Skipping Kubelet's metrics gathering", kubeletPort)
}
output, err := g.getMetricsFromNode(nodeName, int(kubeletPort))
output, err := g.getMetricsFromNode(ctx, nodeName, int(kubeletPort))
if err != nil {
return KubeletMetrics{}, err
}
return parseKubeletMetrics(output)
}
func (g *Grabber) getMetricsFromNode(ctx context.Context, nodeName string, kubeletPort int) (string, error) {
// There's a problem with timing out during proxy. Wrapping this in a goroutine to prevent deadlock.
finished := make(chan struct{}, 1)
var err error
var rawOutput []byte
go func() {
rawOutput, err = g.client.CoreV1().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", nodeName, kubeletPort)).
Suffix("metrics").
Do(ctx).Raw()
finished <- struct{}{}
}()
select {
case <-time.After(proxyTimeout):
return "", fmt.Errorf("Timed out when waiting for proxy to gather metrics from %v", nodeName)
case <-finished:
if err != nil {
return "", err
}
return string(rawOutput), nil
}
}
// GrabFromScheduler returns metrics from scheduler
func (g *Grabber) GrabFromScheduler() (SchedulerMetrics, error) {
func (g *Grabber) GrabFromScheduler(ctx context.Context) (SchedulerMetrics, error) {
if !g.grabFromScheduler {
return SchedulerMetrics{}, fmt.Errorf("kube-scheduler: %w", MetricsGrabbingDisabledError)
}
@ -203,7 +229,7 @@ func (g *Grabber) GrabFromScheduler() (SchedulerMetrics, error) {
var err error
g.waitForSchedulerReadyOnce.Do(func() {
if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(g.client, g.kubeScheduler, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil {
if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, g.client, g.kubeScheduler, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil {
err = fmt.Errorf("error waiting for kube-scheduler pod to be ready: %w", readyErr)
}
})
@ -213,8 +239,8 @@ func (g *Grabber) GrabFromScheduler() (SchedulerMetrics, error) {
var lastMetricsFetchErr error
var output string
if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
output, lastMetricsFetchErr = g.getSecureMetricsFromPod(g.kubeScheduler, metav1.NamespaceSystem, kubeSchedulerPort)
if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) {
output, lastMetricsFetchErr = g.getSecureMetricsFromPod(ctx, g.kubeScheduler, metav1.NamespaceSystem, kubeSchedulerPort)
return lastMetricsFetchErr == nil, nil
}); metricsWaitErr != nil {
err := fmt.Errorf("error waiting for kube-scheduler pod to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr)
@ -225,7 +251,7 @@ func (g *Grabber) GrabFromScheduler() (SchedulerMetrics, error) {
}
// GrabFromClusterAutoscaler returns metrics from cluster autoscaler
func (g *Grabber) GrabFromClusterAutoscaler() (ClusterAutoscalerMetrics, error) {
func (g *Grabber) GrabFromClusterAutoscaler(ctx context.Context) (ClusterAutoscalerMetrics, error) {
if !g.HasControlPlanePods() && g.externalClient == nil {
return ClusterAutoscalerMetrics{}, fmt.Errorf("ClusterAutoscaler: %w", MetricsGrabbingDisabledError)
}
@ -238,7 +264,7 @@ func (g *Grabber) GrabFromClusterAutoscaler() (ClusterAutoscalerMetrics, error)
client = g.client
namespace = metav1.NamespaceSystem
}
output, err := g.getMetricsFromPod(client, "cluster-autoscaler", namespace, 8085)
output, err := g.getMetricsFromPod(ctx, client, "cluster-autoscaler", namespace, 8085)
if err != nil {
return ClusterAutoscalerMetrics{}, err
}
@ -246,7 +272,7 @@ func (g *Grabber) GrabFromClusterAutoscaler() (ClusterAutoscalerMetrics, error)
}
// GrabFromControllerManager returns metrics from controller manager
func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error) {
func (g *Grabber) GrabFromControllerManager(ctx context.Context) (ControllerManagerMetrics, error) {
if !g.grabFromControllerManager {
return ControllerManagerMetrics{}, fmt.Errorf("kube-controller-manager: %w", MetricsGrabbingDisabledError)
}
@ -254,7 +280,7 @@ func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error)
var err error
g.waitForControllerManagerReadyOnce.Do(func() {
if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(g.client, g.kubeControllerManager, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil {
if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, g.client, g.kubeControllerManager, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil {
err = fmt.Errorf("error waiting for kube-controller-manager pod to be ready: %w", readyErr)
}
})
@ -264,8 +290,8 @@ func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error)
var output string
var lastMetricsFetchErr error
if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
output, lastMetricsFetchErr = g.getSecureMetricsFromPod(g.kubeControllerManager, metav1.NamespaceSystem, kubeControllerManagerPort)
if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) {
output, lastMetricsFetchErr = g.getSecureMetricsFromPod(ctx, g.kubeControllerManager, metav1.NamespaceSystem, kubeControllerManagerPort)
return lastMetricsFetchErr == nil, nil
}); metricsWaitErr != nil {
err := fmt.Errorf("error waiting for kube-controller-manager to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr)
@ -276,7 +302,7 @@ func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error)
}
// GrabFromSnapshotController returns metrics from the snapshot controller
func (g *Grabber) GrabFromSnapshotController(podName string, port int) (SnapshotControllerMetrics, error) {
func (g *Grabber) GrabFromSnapshotController(ctx context.Context, podName string, port int) (SnapshotControllerMetrics, error) {
if !g.grabFromSnapshotController {
return SnapshotControllerMetrics{}, fmt.Errorf("volume-snapshot-controller: %w", MetricsGrabbingDisabledError)
}
@ -293,7 +319,7 @@ func (g *Grabber) GrabFromSnapshotController(podName string, port int) (Snapshot
var err error
g.waitForSnapshotControllerReadyOnce.Do(func() {
if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(g.client, podName, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil {
if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, g.client, podName, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil {
err = fmt.Errorf("error waiting for volume-snapshot-controller pod to be ready: %w", readyErr)
}
})
@ -303,8 +329,8 @@ func (g *Grabber) GrabFromSnapshotController(podName string, port int) (Snapshot
var output string
var lastMetricsFetchErr error
if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
output, lastMetricsFetchErr = g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, port)
if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) {
output, lastMetricsFetchErr = g.getMetricsFromPod(ctx, g.client, podName, metav1.NamespaceSystem, port)
return lastMetricsFetchErr == nil, nil
}); metricsWaitErr != nil {
err = fmt.Errorf("error waiting for volume-snapshot-controller pod to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr)
@ -315,28 +341,59 @@ func (g *Grabber) GrabFromSnapshotController(podName string, port int) (Snapshot
}
// GrabFromAPIServer returns metrics from API server
func (g *Grabber) GrabFromAPIServer() (APIServerMetrics, error) {
output, err := g.getMetricsFromAPIServer()
func (g *Grabber) GrabFromAPIServer(ctx context.Context) (APIServerMetrics, error) {
output, err := g.getMetricsFromAPIServer(ctx)
if err != nil {
return APIServerMetrics{}, err
}
return parseAPIServerMetrics(output)
}
// GrabMetricsSLIsFromAPIServer returns metrics from API server
func (g *Grabber) GrabMetricsSLIsFromAPIServer(ctx context.Context) (APIServerMetrics, error) {
output, err := g.getMetricsSLIsFromAPIServer(ctx)
if err != nil {
return APIServerMetrics{}, err
}
return parseAPIServerMetrics(output)
}
func (g *Grabber) getMetricsFromAPIServer(ctx context.Context) (string, error) {
rawOutput, err := g.client.CoreV1().RESTClient().Get().RequestURI("/metrics").Do(ctx).Raw()
if err != nil {
return "", err
}
return string(rawOutput), nil
}
func (g *Grabber) getMetricsSLIsFromAPIServer(ctx context.Context) (string, error) {
rawOutput, err := g.client.CoreV1().RESTClient().Get().RequestURI("/metrics/slis").Do(ctx).Raw()
if err != nil {
return "", err
}
return string(rawOutput), nil
}
// Grab returns metrics from corresponding component
func (g *Grabber) Grab() (Collection, error) {
func (g *Grabber) Grab(ctx context.Context) (Collection, error) {
result := Collection{}
var errs []error
if g.grabFromAPIServer {
metrics, err := g.GrabFromAPIServer()
metrics, err := g.GrabFromAPIServer(ctx)
if err != nil {
errs = append(errs, err)
} else {
result.APIServerMetrics = metrics
}
metrics, err = g.GrabMetricsSLIsFromAPIServer(ctx)
if err != nil {
errs = append(errs, err)
} else {
result.APIServerMetricsSLIs = metrics
}
}
if g.grabFromScheduler {
metrics, err := g.GrabFromScheduler()
metrics, err := g.GrabFromScheduler(ctx)
if err != nil {
errs = append(errs, err)
} else {
@ -344,7 +401,7 @@ func (g *Grabber) Grab() (Collection, error) {
}
}
if g.grabFromControllerManager {
metrics, err := g.GrabFromControllerManager()
metrics, err := g.GrabFromControllerManager(ctx)
if err != nil {
errs = append(errs, err)
} else {
@ -352,7 +409,7 @@ func (g *Grabber) Grab() (Collection, error) {
}
}
if g.grabFromSnapshotController {
metrics, err := g.GrabFromSnapshotController(g.snapshotController, snapshotControllerPort)
metrics, err := g.GrabFromSnapshotController(ctx, g.snapshotController, snapshotControllerPort)
if err != nil {
errs = append(errs, err)
} else {
@ -360,7 +417,7 @@ func (g *Grabber) Grab() (Collection, error) {
}
}
if g.grabFromClusterAutoscaler {
metrics, err := g.GrabFromClusterAutoscaler()
metrics, err := g.GrabFromClusterAutoscaler(ctx)
if err != nil {
errs = append(errs, err)
} else {
@ -369,13 +426,13 @@ func (g *Grabber) Grab() (Collection, error) {
}
if g.grabFromKubelets {
result.KubeletMetrics = make(map[string]KubeletMetrics)
nodes, err := g.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
nodes, err := g.client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
errs = append(errs, err)
} else {
for _, node := range nodes.Items {
kubeletPort := node.Status.DaemonEndpoints.KubeletEndpoint.Port
metrics, err := g.grabFromKubeletInternal(node.Name, int(kubeletPort))
metrics, err := g.grabFromKubeletInternal(ctx, node.Name, int(kubeletPort))
if err != nil {
errs = append(errs, err)
}
@ -390,14 +447,14 @@ func (g *Grabber) Grab() (Collection, error) {
}
// getMetricsFromPod retrieves metrics data from an insecure port.
func (g *Grabber) getMetricsFromPod(client clientset.Interface, podName string, namespace string, port int) (string, error) {
func (g *Grabber) getMetricsFromPod(ctx context.Context, client clientset.Interface, podName string, namespace string, port int) (string, error) {
rawOutput, err := client.CoreV1().RESTClient().Get().
Namespace(namespace).
Resource("pods").
SubResource("proxy").
Name(fmt.Sprintf("%s:%d", podName, port)).
Suffix("metrics").
Do(context.TODO()).Raw()
Do(ctx).Raw()
if err != nil {
return "", err
}
@ -409,7 +466,7 @@ func (g *Grabber) getMetricsFromPod(client clientset.Interface, podName string,
// similar to "kubectl port-forward" + "kubectl get --raw
// https://localhost:<port>/metrics". It uses the same credentials
// as kubelet.
func (g *Grabber) getSecureMetricsFromPod(podName string, namespace string, port int) (string, error) {
func (g *Grabber) getSecureMetricsFromPod(ctx context.Context, podName string, namespace string, port int) (string, error) {
dialer := e2epod.NewDialer(g.client, g.config)
metricConfig := rest.CopyConfig(g.config)
addr := e2epod.Addr{
@ -444,7 +501,7 @@ func (g *Grabber) getSecureMetricsFromPod(podName string, namespace string, port
rawOutput, err := metricClient.RESTClient().Get().
AbsPath("metrics").
Do(context.TODO()).Raw()
Do(ctx).Raw()
if err != nil {
return "", err
}
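
A sketch of grabbing only API server metrics, including the new /metrics/slis data, with the context-aware grabber; the boolean flags follow the constructor parameters above and everything else is a placeholder:

package example

import (
    "context"

    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/kubernetes/test/e2e/framework"
    e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
)

func grabAPIServerSLIs(ctx context.Context, c clientset.Interface, config *rest.Config) e2emetrics.APIServerMetrics {
    // Only apiServer is enabled; kubelets, scheduler, controller manager,
    // cluster autoscaler and snapshot controller grabbing stay disabled.
    grabber, err := e2emetrics.NewMetricsGrabber(ctx, c, nil, config,
        false /* kubelets */, false /* scheduler */, false /* controllers */,
        true /* apiServer */, false /* clusterAutoscaler */, false /* snapshotController */)
    framework.ExpectNoError(err)

    slis, err := grabber.GrabMetricsSLIsFromAPIServer(ctx)
    framework.ExpectNoError(err)
    return slis
}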


@ -0,0 +1,49 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
// NamespacedName comprises a resource name, with a mandatory namespace,
// rendered as "<namespace>/<name>". It implements NamedObject and thus can be
// used as a function parameter instead of a full API object.
type NamespacedName struct {
Namespace string
Name string
}
var _ NamedObject = NamespacedName{}
// NamedObject is a subset of metav1.Object which provides read-only access
// to name and namespace of an object.
type NamedObject interface {
GetNamespace() string
GetName() string
}
// GetNamespace implements NamedObject.
func (n NamespacedName) GetNamespace() string {
return n.Namespace
}
// GetName implements NamedObject.
func (n NamespacedName) GetName() string {
return n.Name
}
// String returns the general purpose string representation
func (n NamespacedName) String() string {
return n.Namespace + "/" + n.Name
}
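
A sketch of why the NamedObject interface is useful: helpers that only need name and namespace can accept either a full API object or a bare NamespacedName. The describeObject helper below is made up:

package example

import (
    v1 "k8s.io/api/core/v1"
    "k8s.io/kubernetes/test/e2e/framework"
)

// describeObject only needs name and namespace, so it accepts the read-only
// NamedObject interface instead of a concrete API type.
func describeObject(obj framework.NamedObject) string {
    return obj.GetNamespace() + "/" + obj.GetName()
}

func examples(pod *v1.Pod) []string {
    return []string{
        // A real API object satisfies NamedObject via its ObjectMeta.
        describeObject(pod),
        // So does a bare NamespacedName, without fetching anything.
        describeObject(framework.NamespacedName{Namespace: "kube-system", Name: "kube-dns"}),
    }
}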


@ -0,0 +1,9 @@
# This E2E framework sub-package is currently allowed to use arbitrary
# dependencies, therefore we need to override the restrictions from
# the parent .import-restrictions file.
#
# At some point it may become useful to also check this package's
# dependencies more carefully.
rules:
- selectorRegexp: ""
allowedPrefixes: [ "" ]


@ -39,16 +39,17 @@ const (
// WaitForAllNodesSchedulable waits up to timeout for all nodes
// (except up to TestContext.AllowedNotReadyNodes of them) to become schedulable.
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
func WaitForAllNodesSchedulable(ctx context.Context, c clientset.Interface, timeout time.Duration) error {
if framework.TestContext.AllowedNotReadyNodes == -1 {
return nil
}
framework.Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, framework.TestContext.AllowedNotReadyNodes)
return wait.PollImmediate(
return wait.PollImmediateWithContext(
ctx,
30*time.Second,
timeout,
CheckReadyForTests(c, framework.TestContext.NonblockingTaints, framework.TestContext.AllowedNotReadyNodes, largeClusterThreshold),
CheckReadyForTests(ctx, c, framework.TestContext.NonblockingTaints, framework.TestContext.AllowedNotReadyNodes, largeClusterThreshold),
)
}
@ -58,9 +59,9 @@ func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, la
}
// ExpectNodeHasLabel expects that the given node has the given label pair.
func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) {
func ExpectNodeHasLabel(ctx context.Context, c clientset.Interface, nodeName string, labelKey string, labelValue string) {
ginkgo.By("verifying the node has the label " + labelKey + " " + labelValue)
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(node.Labels[labelKey], labelValue)
}
@ -76,17 +77,17 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string)
}
// ExpectNodeHasTaint expects that the node has the given taint.
func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) {
func ExpectNodeHasTaint(ctx context.Context, c clientset.Interface, nodeName string, taint *v1.Taint) {
ginkgo.By("verifying the node has the taint " + taint.ToString())
if has, err := NodeHasTaint(c, nodeName, taint); !has {
if has, err := NodeHasTaint(ctx, c, nodeName, taint); !has {
framework.ExpectNoError(err)
framework.Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName)
}
}
// NodeHasTaint returns true if the node has the given taint, else returns false.
func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) {
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
func NodeHasTaint(ctx context.Context, c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) {
node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
if err != nil {
return false, err
}
@ -104,14 +105,14 @@ func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool
// TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy,
// and figure out how to do it in a configurable way, as we can't expect all setups to run
// default test add-ons.
func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
if err := allNodesReady(c, timeout); err != nil {
return fmt.Errorf("checking for ready nodes: %v", err)
func AllNodesReady(ctx context.Context, c clientset.Interface, timeout time.Duration) error {
if err := allNodesReady(ctx, c, timeout); err != nil {
return fmt.Errorf("checking for ready nodes: %w", err)
}
return nil
}
func allNodesReady(c clientset.Interface, timeout time.Duration) error {
func allNodesReady(ctx context.Context, c clientset.Interface, timeout time.Duration) error {
if framework.TestContext.AllowedNotReadyNodes == -1 {
return nil
}
@ -119,10 +120,10 @@ func allNodesReady(c clientset.Interface, timeout time.Duration) error {
framework.Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, framework.TestContext.AllowedNotReadyNodes)
var notReady []*v1.Node
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
return false, err
}
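
A sketch combining the context-aware node helpers; the taint key/value and timeout are placeholders:

package example

import (
    "context"
    "time"

    v1 "k8s.io/api/core/v1"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/test/e2e/framework"
    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

func checkNodeState(ctx context.Context, c clientset.Interface, nodeName string) {
    // Wait until all nodes (minus the allowed-not-ready budget) can schedule pods.
    framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, 10*time.Minute))

    // Check for a specific taint without failing the test outright.
    taint := v1.Taint{Key: "example.com/dedicated", Value: "gpu", Effect: v1.TaintEffectNoSchedule}
    has, err := e2enode.NodeHasTaint(ctx, c, nodeName, &taint)
    framework.ExpectNoError(err)
    framework.Logf("node %s has taint %s: %v", nodeName, taint.ToString(), has)
}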


@ -17,6 +17,7 @@ limitations under the License.
package node
import (
"context"
"sync"
"time"
@ -39,31 +40,31 @@ type NodeKiller struct {
// NewNodeKiller creates new NodeKiller.
func NewNodeKiller(config framework.NodeKillerConfig, client clientset.Interface, provider string) *NodeKiller {
config.NodeKillerStopCh = make(chan struct{})
config.NodeKillerStopCtx, config.NodeKillerStop = context.WithCancel(context.Background())
return &NodeKiller{config, client, provider}
}
// Run starts NodeKiller until stopCh is closed.
func (k *NodeKiller) Run(stopCh <-chan struct{}) {
func (k *NodeKiller) Run(ctx context.Context) {
// wait.JitterUntil starts work immediately, so wait first.
time.Sleep(wait.Jitter(k.config.Interval, k.config.JitterFactor))
wait.JitterUntil(func() {
nodes := k.pickNodes()
k.kill(nodes)
}, k.config.Interval, k.config.JitterFactor, true, stopCh)
wait.JitterUntilWithContext(ctx, func(ctx context.Context) {
nodes := k.pickNodes(ctx)
k.kill(ctx, nodes)
}, k.config.Interval, k.config.JitterFactor, true)
}
func (k *NodeKiller) pickNodes() []v1.Node {
nodes, err := GetReadySchedulableNodes(k.client)
func (k *NodeKiller) pickNodes(ctx context.Context) []v1.Node {
nodes, err := GetReadySchedulableNodes(ctx, k.client)
framework.ExpectNoError(err)
numNodes := int(k.config.FailureRatio * float64(len(nodes.Items)))
nodes, err = GetBoundedReadySchedulableNodes(k.client, numNodes)
nodes, err = GetBoundedReadySchedulableNodes(ctx, k.client, numNodes)
framework.ExpectNoError(err)
return nodes.Items
}
func (k *NodeKiller) kill(nodes []v1.Node) {
func (k *NodeKiller) kill(ctx context.Context, nodes []v1.Node) {
wg := sync.WaitGroup{}
wg.Add(len(nodes))
for _, node := range nodes {
@ -73,7 +74,7 @@ func (k *NodeKiller) kill(nodes []v1.Node) {
defer wg.Done()
framework.Logf("Stopping docker and kubelet on %q to simulate failure", node.Name)
err := e2essh.IssueSSHCommand("sudo systemctl stop docker kubelet", k.provider, &node)
err := e2essh.IssueSSHCommand(ctx, "sudo systemctl stop docker kubelet", k.provider, &node)
if err != nil {
framework.Logf("ERROR while stopping node %q: %v", node.Name, err)
return
@ -82,7 +83,7 @@ func (k *NodeKiller) kill(nodes []v1.Node) {
time.Sleep(k.config.SimulatedDowntime)
framework.Logf("Rebooting %q to repair the node", node.Name)
err = e2essh.IssueSSHCommand("sudo reboot", k.provider, &node)
err = e2essh.IssueSSHCommand(ctx, "sudo reboot", k.provider, &node)
if err != nil {
framework.Logf("ERROR while rebooting node %q: %v", node.Name, err)
return
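
A sketch of driving NodeKiller from a cancellable context instead of the old stop channel; the configuration values are placeholders for the NodeKillerConfig fields referenced above:

package example

import (
    "context"
    "time"

    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/test/e2e/framework"
    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

func startNodeKiller(ctx context.Context, c clientset.Interface, provider string) context.CancelFunc {
    ctx, cancel := context.WithCancel(ctx)
    killer := e2enode.NewNodeKiller(framework.NodeKillerConfig{
        FailureRatio:      0.1,             // disrupt ~10% of schedulable nodes per round
        Interval:          5 * time.Minute, // one (jittered) round every ~5 minutes
        JitterFactor:      1.0,
        SimulatedDowntime: 2 * time.Minute, // delay between "stop" and "reboot"
    }, c, provider)

    // Run blocks until the context is cancelled, so start it in the background
    // and hand back cancel so the caller can stop the disruption when done.
    go killer.Run(ctx)
    return cancel
}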


@ -193,8 +193,8 @@ func Filter(nodeList *v1.NodeList, fn func(node v1.Node) bool) {
}
// TotalRegistered returns number of schedulable Nodes.
func TotalRegistered(c clientset.Interface) (int, error) {
nodes, err := waitListSchedulableNodes(c)
func TotalRegistered(ctx context.Context, c clientset.Interface) (int, error) {
nodes, err := waitListSchedulableNodes(ctx, c)
if err != nil {
framework.Logf("Failed to list nodes: %v", err)
return 0, err
@ -203,8 +203,8 @@ func TotalRegistered(c clientset.Interface) (int, error) {
}
// TotalReady returns number of ready schedulable Nodes.
func TotalReady(c clientset.Interface) (int, error) {
nodes, err := waitListSchedulableNodes(c)
func TotalReady(ctx context.Context, c clientset.Interface) (int, error) {
nodes, err := waitListSchedulableNodes(ctx, c)
if err != nil {
framework.Logf("Failed to list nodes: %v", err)
return 0, err
@ -217,36 +217,28 @@ func TotalReady(c clientset.Interface) (int, error) {
return len(nodes.Items), nil
}
// GetExternalIP returns node external IP concatenated with port 22 for ssh
// GetSSHExternalIP returns node external IP concatenated with port 22 for ssh
// e.g. 1.2.3.4:22
func GetExternalIP(node *v1.Node) (string, error) {
func GetSSHExternalIP(node *v1.Node) (string, error) {
framework.Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeExternalIP && a.Address != "" {
host = net.JoinHostPort(a.Address, sshPort)
break
return net.JoinHostPort(a.Address, sshPort), nil
}
}
if host == "" {
return "", fmt.Errorf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
}
return host, nil
return "", fmt.Errorf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
}
// GetInternalIP returns node internal IP
func GetInternalIP(node *v1.Node) (string, error) {
host := ""
// GetSSHInternalIP returns node internal IP concatenated with port 22 for ssh
func GetSSHInternalIP(node *v1.Node) (string, error) {
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeInternalIP && address.Address != "" {
host = net.JoinHostPort(address.Address, sshPort)
break
return net.JoinHostPort(address.Address, sshPort), nil
}
}
if host == "" {
return "", fmt.Errorf("Couldn't get the internal IP of host %s with addresses %v", node.Name, node.Status.Addresses)
}
return host, nil
return "", fmt.Errorf("Couldn't get the internal IP of host %s with addresses %v", node.Name, node.Status.Addresses)
}
// FirstAddressByTypeAndFamily returns the first address that matches the given type and family of the list of nodes
@ -301,10 +293,10 @@ func CollectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []stri
}
// PickIP picks one public node IP
func PickIP(c clientset.Interface) (string, error) {
publicIps, err := GetPublicIps(c)
func PickIP(ctx context.Context, c clientset.Interface) (string, error) {
publicIps, err := GetPublicIps(ctx, c)
if err != nil {
return "", fmt.Errorf("get node public IPs error: %s", err)
return "", fmt.Errorf("get node public IPs error: %w", err)
}
if len(publicIps) == 0 {
return "", fmt.Errorf("got unexpected number (%d) of public IPs", len(publicIps))
@ -314,10 +306,10 @@ func PickIP(c clientset.Interface) (string, error) {
}
// GetPublicIps returns a public IP list of nodes.
func GetPublicIps(c clientset.Interface) ([]string, error) {
nodes, err := GetReadySchedulableNodes(c)
func GetPublicIps(ctx context.Context, c clientset.Interface) ([]string, error) {
nodes, err := GetReadySchedulableNodes(ctx, c)
if err != nil {
return nil, fmt.Errorf("get schedulable and ready nodes error: %s", err)
return nil, fmt.Errorf("get schedulable and ready nodes error: %w", err)
}
ips := CollectAddresses(nodes, v1.NodeExternalIP)
if len(ips) == 0 {
@ -332,10 +324,10 @@ func GetPublicIps(c clientset.Interface) ([]string, error) {
// 2) Needs to be ready.
// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
// If there are no nodes that are both ready and schedulable, this will return an error.
func GetReadySchedulableNodes(c clientset.Interface) (nodes *v1.NodeList, err error) {
nodes, err = checkWaitListSchedulableNodes(c)
func GetReadySchedulableNodes(ctx context.Context, c clientset.Interface) (nodes *v1.NodeList, err error) {
nodes, err = checkWaitListSchedulableNodes(ctx, c)
if err != nil {
return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
return nil, fmt.Errorf("listing schedulable nodes error: %w", err)
}
Filter(nodes, func(node v1.Node) bool {
return IsNodeSchedulable(&node) && isNodeUntainted(&node)
@ -349,8 +341,8 @@ func GetReadySchedulableNodes(c clientset.Interface) (nodes *v1.NodeList, err er
// GetBoundedReadySchedulableNodes is like GetReadySchedulableNodes except that it returns
// at most maxNodes nodes. Use this to keep your test case from blowing up when run on a
// large cluster.
func GetBoundedReadySchedulableNodes(c clientset.Interface, maxNodes int) (nodes *v1.NodeList, err error) {
nodes, err = GetReadySchedulableNodes(c)
func GetBoundedReadySchedulableNodes(ctx context.Context, c clientset.Interface, maxNodes int) (nodes *v1.NodeList, err error) {
nodes, err = GetReadySchedulableNodes(ctx, c)
if err != nil {
return nil, err
}
@ -369,58 +361,22 @@ func GetBoundedReadySchedulableNodes(c clientset.Interface, maxNodes int) (nodes
// GetRandomReadySchedulableNode gets a single randomly-selected node which is available for
// running pods on. If there are no available nodes it will return an error.
func GetRandomReadySchedulableNode(c clientset.Interface) (*v1.Node, error) {
nodes, err := GetReadySchedulableNodes(c)
func GetRandomReadySchedulableNode(ctx context.Context, c clientset.Interface) (*v1.Node, error) {
nodes, err := GetReadySchedulableNodes(ctx, c)
if err != nil {
return nil, err
}
return &nodes.Items[rand.Intn(len(nodes.Items))], nil
}
// GetSubnetPrefix gets first 2 number of an IP in the node subnet. [IPv4]
// It assumes that the subnet mask is /16.
func GetSubnetPrefix(c clientset.Interface) ([]string, error) {
node, err := GetReadySchedulableWorkerNode(c)
if err != nil {
return nil, fmt.Errorf("error getting a ready schedulable worker Node, err: %v", err)
}
internalIP, err := GetInternalIP(node)
if err != nil {
return nil, fmt.Errorf("error getting Node internal IP, err: %v", err)
}
splitted := strings.Split(internalIP, ".")
if len(splitted) == 4 {
return splitted[:2], nil
}
return nil, fmt.Errorf("invalid IP address format: %s", internalIP)
}
// GetReadySchedulableWorkerNode gets a single worker node which is available for
// running pods on. If there are no such available nodes it will return an error.
func GetReadySchedulableWorkerNode(c clientset.Interface) (*v1.Node, error) {
nodes, err := GetReadySchedulableNodes(c)
if err != nil {
return nil, err
}
for i := range nodes.Items {
node := nodes.Items[i]
_, isMaster := node.Labels["node-role.kubernetes.io/master"]
_, isControlPlane := node.Labels["node-role.kubernetes.io/control-plane"]
if !isMaster && !isControlPlane {
return &node, nil
}
}
return nil, fmt.Errorf("there are currently no ready, schedulable worker nodes in the cluster")
}
// GetReadyNodesIncludingTainted returns all ready nodes, even those which are tainted.
// There are cases when we care about tainted nodes
// E.g. in tests related to nodes with gpu we care about nodes despite
// presence of nvidia.com/gpu=present:NoSchedule taint
func GetReadyNodesIncludingTainted(c clientset.Interface) (nodes *v1.NodeList, err error) {
nodes, err = checkWaitListSchedulableNodes(c)
func GetReadyNodesIncludingTainted(ctx context.Context, c clientset.Interface) (nodes *v1.NodeList, err error) {
nodes, err = checkWaitListSchedulableNodes(ctx, c)
if err != nil {
return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
return nil, fmt.Errorf("listing schedulable nodes error: %w", err)
}
Filter(nodes, func(node v1.Node) bool {
return IsNodeSchedulable(&node)
@ -437,25 +393,6 @@ func isNodeUntainted(node *v1.Node) bool {
// isNodeUntaintedWithNonblocking tests whether a fake pod can be scheduled on "node"
// but allows for taints in the list of non-blocking taints.
func isNodeUntaintedWithNonblocking(node *v1.Node, nonblockingTaints string) bool {
fakePod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "fake-not-scheduled",
Namespace: "fake-not-scheduled",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-not-scheduled",
Image: "fake-not-scheduled",
},
},
},
}
// Simple lookup for nonblocking taints based on comma-delimited list.
nonblockingTaintsMap := map[string]struct{}{}
for _, t := range strings.Split(nonblockingTaints, ",") {
@ -475,7 +412,8 @@ func isNodeUntaintedWithNonblocking(node *v1.Node, nonblockingTaints string) boo
}
n = nodeCopy
}
return toleratesTaintsWithNoScheduleNoExecuteEffects(n.Spec.Taints, fakePod.Spec.Tolerations)
return toleratesTaintsWithNoScheduleNoExecuteEffects(n.Spec.Taints, nil)
}
func toleratesTaintsWithNoScheduleNoExecuteEffects(taints []v1.Taint, tolerations []v1.Toleration) bool {
@ -558,10 +496,10 @@ func hasNonblockingTaint(node *v1.Node, nonblockingTaints string) bool {
}
// PodNodePairs return podNode pairs for all pods in a namespace
func PodNodePairs(c clientset.Interface, ns string) ([]PodNode, error) {
func PodNodePairs(ctx context.Context, c clientset.Interface, ns string) ([]PodNode, error) {
var result []PodNode
podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
podList, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
if err != nil {
return result, err
}
@ -577,10 +515,10 @@ func PodNodePairs(c clientset.Interface, ns string) ([]PodNode, error) {
}
// GetClusterZones returns the values of zone label collected from all nodes.
func GetClusterZones(c clientset.Interface) (sets.String, error) {
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
func GetClusterZones(ctx context.Context, c clientset.Interface) (sets.String, error) {
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err)
return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %w", err)
}
// collect values of zone label from all nodes
@ -598,11 +536,11 @@ func GetClusterZones(c clientset.Interface) (sets.String, error) {
}
// GetSchedulableClusterZones returns the values of zone label collected from all nodes which are schedulable.
func GetSchedulableClusterZones(c clientset.Interface) (sets.String, error) {
func GetSchedulableClusterZones(ctx context.Context, c clientset.Interface) (sets.String, error) {
// GetReadySchedulableNodes already filters out tainted and unschedulable nodes.
nodes, err := GetReadySchedulableNodes(c)
nodes, err := GetReadySchedulableNodes(ctx, c)
if err != nil {
return nil, fmt.Errorf("error getting nodes while attempting to list cluster zones: %v", err)
return nil, fmt.Errorf("error getting nodes while attempting to list cluster zones: %w", err)
}
// collect values of zone label from all nodes
@ -620,8 +558,8 @@ func GetSchedulableClusterZones(c clientset.Interface) (sets.String, error) {
}
// CreatePodsPerNodeForSimpleApp creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
nodes, err := GetBoundedReadySchedulableNodes(c, maxCount)
func CreatePodsPerNodeForSimpleApp(ctx context.Context, c clientset.Interface, namespace, appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
nodes, err := GetBoundedReadySchedulableNodes(ctx, c, maxCount)
// TODO use wrapper methods in expect.go after removing core e2e dependency on node
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
podLabels := map[string]string{
@ -629,7 +567,7 @@ func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName str
}
for i, node := range nodes.Items {
framework.Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
_, err := c.CoreV1().Pods(namespace).Create(context.TODO(), &v1.Pod{
_, err := c.CoreV1().Pods(namespace).Create(ctx, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf(appName+"-pod-%v", i),
Labels: podLabels,
@ -644,33 +582,33 @@ func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName str
// RemoveTaintsOffNode removes a list of taints from the given node
// It is simply a helper wrapper for RemoveTaintOffNode
func RemoveTaintsOffNode(c clientset.Interface, nodeName string, taints []v1.Taint) {
func RemoveTaintsOffNode(ctx context.Context, c clientset.Interface, nodeName string, taints []v1.Taint) {
for _, taint := range taints {
RemoveTaintOffNode(c, nodeName, taint)
RemoveTaintOffNode(ctx, c, nodeName, taint)
}
}
// RemoveTaintOffNode removes the given taint from the given node.
func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint v1.Taint) {
err := removeNodeTaint(c, nodeName, nil, &taint)
func RemoveTaintOffNode(ctx context.Context, c clientset.Interface, nodeName string, taint v1.Taint) {
err := removeNodeTaint(ctx, c, nodeName, nil, &taint)
// TODO use wrapper methods in expect.go after removing core e2e dependency on node
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
verifyThatTaintIsGone(c, nodeName, &taint)
verifyThatTaintIsGone(ctx, c, nodeName, &taint)
}
// AddOrUpdateTaintOnNode adds the given taint to the given node or updates taint.
func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint v1.Taint) {
func AddOrUpdateTaintOnNode(ctx context.Context, c clientset.Interface, nodeName string, taint v1.Taint) {
// TODO use wrapper methods in expect.go after removing the dependency on this
// package from the core e2e framework.
err := addOrUpdateTaintOnNode(c, nodeName, &taint)
err := addOrUpdateTaintOnNode(ctx, c, nodeName, &taint)
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
}
// addOrUpdateTaintOnNode adds taints to the node. If a taint was added to the node, it'll issue API calls
// to update the node; otherwise, no API calls are made. Returns an error if any occurs.
// copied from pkg/controller/controller_utils.go AddOrUpdateTaintOnNode()
func addOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v1.Taint) error {
func addOrUpdateTaintOnNode(ctx context.Context, c clientset.Interface, nodeName string, taints ...*v1.Taint) error {
if len(taints) == 0 {
return nil
}
@ -681,10 +619,10 @@ func addOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
if firstTry {
oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"})
oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{ResourceVersion: "0"})
firstTry = false
} else {
oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
}
if err != nil {
return err
@ -705,7 +643,7 @@ func addOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v
if !updated {
return nil
}
return patchNodeTaints(c, nodeName, oldNode, newNode)
return patchNodeTaints(ctx, c, nodeName, oldNode, newNode)
})
}
@ -768,7 +706,7 @@ var semantic = conversion.EqualitiesOrDie(
// won't fail if target taint doesn't exist or has been removed.
// If passed a node it'll check if there's anything to be done, if taint is not present it won't issue
// any API calls.
func removeNodeTaint(c clientset.Interface, nodeName string, node *v1.Node, taints ...*v1.Taint) error {
func removeNodeTaint(ctx context.Context, c clientset.Interface, nodeName string, node *v1.Node, taints ...*v1.Taint) error {
if len(taints) == 0 {
return nil
}
@ -793,10 +731,10 @@ func removeNodeTaint(c clientset.Interface, nodeName string, node *v1.Node, tain
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
if firstTry {
oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"})
oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{ResourceVersion: "0"})
firstTry = false
} else {
oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
}
if err != nil {
return err
@ -817,15 +755,15 @@ func removeNodeTaint(c clientset.Interface, nodeName string, node *v1.Node, tain
if !updated {
return nil
}
return patchNodeTaints(c, nodeName, oldNode, newNode)
return patchNodeTaints(ctx, c, nodeName, oldNode, newNode)
})
}
// patchNodeTaints patches node's taints.
func patchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error {
func patchNodeTaints(ctx context.Context, c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error {
oldData, err := json.Marshal(oldNode)
if err != nil {
return fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err)
return fmt.Errorf("failed to marshal old node %#v for node %q: %w", oldNode, nodeName, err)
}
newTaints := newNode.Spec.Taints
@ -833,15 +771,15 @@ func patchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n
newNodeClone.Spec.Taints = newTaints
newData, err := json.Marshal(newNodeClone)
if err != nil {
return fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNodeClone, nodeName, err)
return fmt.Errorf("failed to marshal new node %#v for node %q: %w", newNodeClone, nodeName, err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
if err != nil {
return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
return fmt.Errorf("failed to create patch for node %q: %w", nodeName, err)
}
_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
_, err = c.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
return err
}
@ -877,9 +815,9 @@ func deleteTaint(taints []v1.Taint, taintToDelete *v1.Taint) ([]v1.Taint, bool)
return newTaints, deleted
}
func verifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Taint) {
func verifyThatTaintIsGone(ctx context.Context, c clientset.Interface, nodeName string, taint *v1.Taint) {
ginkgo.By("verifying the node doesn't have the taint " + taint.ToString())
nodeUpdated, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
nodeUpdated, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
// TODO use wrapper methods in expect.go after removing core e2e dependency on node
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
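For illustration, a minimal sketch of the new calling convention for these taint helpers: the context is now supplied by the caller rather than created internally. The e2enode alias, nodeName, and the example taint are assumptions for the sketch, not something this diff defines.

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// applyTestTaint taints a node for the duration of the call and removes the
// taint again before returning; the actual test body is elided.
func applyTestTaint(ctx context.Context, c clientset.Interface, nodeName string) {
	taint := v1.Taint{Key: "example.com/dedicated", Value: "e2e", Effect: v1.TaintEffectNoSchedule}
	e2enode.AddOrUpdateTaintOnNode(ctx, c, nodeName, taint)
	defer e2enode.RemoveTaintOffNode(ctx, c, nodeName, taint)
	// ... exercise scheduling behaviour while the taint is in place ...
}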


@ -1,51 +0,0 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
imageutils "k8s.io/kubernetes/test/utils/image"
utilpointer "k8s.io/utils/pointer"
)
const (
// PreconfiguredRuntimeClassHandler is the name of the runtime handler
// that is expected to be preconfigured in the test environment.
PreconfiguredRuntimeClassHandler = "test-handler"
)
// NewRuntimeClassPod returns a test pod with the given runtimeClassName
func NewRuntimeClassPod(runtimeClassName string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: fmt.Sprintf("test-runtimeclass-%s-", runtimeClassName),
},
Spec: v1.PodSpec{
RuntimeClassName: &runtimeClassName,
Containers: []v1.Container{{
Name: "test",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"true"},
}},
RestartPolicy: v1.RestartPolicyNever,
AutomountServiceAccountToken: utilpointer.BoolPtr(false),
},
}
}


@ -17,6 +17,7 @@ limitations under the License.
package node
import (
"context"
"time"
"k8s.io/apimachinery/pkg/util/wait"
@ -25,7 +26,7 @@ import (
)
// WaitForSSHTunnels waits for establishing SSH tunnel to busybox pod.
func WaitForSSHTunnels(namespace string) {
func WaitForSSHTunnels(ctx context.Context, namespace string) {
framework.Logf("Waiting for SSH tunnels to establish")
e2ekubectl.RunKubectl(namespace, "run", "ssh-tunnel-test",
"--image=busybox",
@ -35,7 +36,7 @@ func WaitForSSHTunnels(namespace string) {
defer e2ekubectl.RunKubectl(namespace, "delete", "pod", "ssh-tunnel-test")
// allow up to a minute for new ssh tunnels to establish
wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, func(ctx context.Context) (bool, error) {
_, err := e2ekubectl.RunKubectl(namespace, "logs", "ssh-tunnel-test")
return err == nil, nil
})
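This is the same mechanical migration applied throughout the bump: wait.PollImmediate(interval, timeout, cond) becomes wait.PollImmediateWithContext(ctx, interval, timeout, cond), and the condition now receives the context so polling stops when the test's context is cancelled. A small sketch of the general shape, with checkTunnel standing in for any condition:

package example

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// pollWithContext polls checkTunnel every 5 seconds for up to a minute,
// aborting early if ctx is cancelled.
func pollWithContext(ctx context.Context, checkTunnel func(ctx context.Context) (bool, error)) error {
	return wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, checkTunnel)
}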


@ -40,21 +40,21 @@ var requiredPerNodePods = []*regexp.Regexp{
// WaitForReadyNodes waits up to timeout for the cluster to reach the desired size and
// to contain no not-ready nodes. By cluster size we mean the number of schedulable Nodes.
func WaitForReadyNodes(c clientset.Interface, size int, timeout time.Duration) error {
_, err := CheckReady(c, size, timeout)
func WaitForReadyNodes(ctx context.Context, c clientset.Interface, size int, timeout time.Duration) error {
_, err := CheckReady(ctx, c, size, timeout)
return err
}
// WaitForTotalHealthy checks whether all registered nodes are ready and all required Pods are running on them.
func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
func WaitForTotalHealthy(ctx context.Context, c clientset.Interface, timeout time.Duration) error {
framework.Logf("Waiting up to %v for all nodes to be ready", timeout)
var notReady []v1.Node
var missingPodsPerNode map[string][]string
err := wait.PollImmediate(poll, timeout, func() (bool, error) {
err := wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"})
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
return false, err
}
@ -63,7 +63,7 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
notReady = append(notReady, node)
}
}
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"})
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(ctx, metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
return false, err
}
@ -114,10 +114,10 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
// within timeout. If wantTrue is true, it will ensure the node condition status
// is ConditionTrue; if it's false, it ensures the node condition is in any state
// other than ConditionTrue (e.g. not true or unknown).
func WaitConditionToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
func WaitConditionToBe(ctx context.Context, c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
framework.Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
node, err := c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
node, err := c.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
if err != nil {
framework.Logf("Couldn't get node %s", name)
continue
@ -134,20 +134,37 @@ func WaitConditionToBe(c clientset.Interface, name string, conditionType v1.Node
// WaitForNodeToBeNotReady returns whether node name is not ready (i.e. the
// readiness condition is anything but ready, e.g. false or unknown) within
// timeout.
func WaitForNodeToBeNotReady(c clientset.Interface, name string, timeout time.Duration) bool {
return WaitConditionToBe(c, name, v1.NodeReady, false, timeout)
func WaitForNodeToBeNotReady(ctx context.Context, c clientset.Interface, name string, timeout time.Duration) bool {
return WaitConditionToBe(ctx, c, name, v1.NodeReady, false, timeout)
}
// WaitForNodeToBeReady returns whether node name is ready within timeout.
func WaitForNodeToBeReady(c clientset.Interface, name string, timeout time.Duration) bool {
return WaitConditionToBe(c, name, v1.NodeReady, true, timeout)
func WaitForNodeToBeReady(ctx context.Context, c clientset.Interface, name string, timeout time.Duration) bool {
return WaitConditionToBe(ctx, c, name, v1.NodeReady, true, timeout)
}
func WaitForNodeSchedulable(ctx context.Context, c clientset.Interface, name string, timeout time.Duration, wantSchedulable bool) bool {
framework.Logf("Waiting up to %v for node %s to be schedulable: %t", timeout, name, wantSchedulable)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
node, err := c.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
if err != nil {
framework.Logf("Couldn't get node %s", name)
continue
}
if IsNodeSchedulable(node) == wantSchedulable {
return true
}
}
framework.Logf("Node %s didn't reach desired schedulable status (%t) within %v", name, wantSchedulable, timeout)
return false
}
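WaitForNodeSchedulable is newly added here. A hedged usage sketch; c, nodeName and the four-minute timeout are placeholders chosen for the example:

package example

import (
	"context"
	"time"

	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// expectSchedulable fails the test if the node does not report the desired
// schedulable state within the timeout.
func expectSchedulable(ctx context.Context, c clientset.Interface, nodeName string) {
	if !e2enode.WaitForNodeSchedulable(ctx, c, nodeName, 4*time.Minute, true) {
		framework.Failf("node %s did not become schedulable", nodeName)
	}
}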
// CheckReady waits up to timeout for the cluster to reach the desired size and
// to contain no not-ready nodes. By cluster size we mean the number of schedulable Nodes.
func CheckReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.Node, error) {
func CheckReady(ctx context.Context, c clientset.Interface, size int, timeout time.Duration) ([]v1.Node, error) {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(sleepTime) {
nodes, err := waitListSchedulableNodes(c)
nodes, err := waitListSchedulableNodes(ctx, c)
if err != nil {
framework.Logf("Failed to list nodes: %v", err)
continue
@ -172,11 +189,11 @@ func CheckReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.No
}
// waitListSchedulableNodes is a wrapper around listing nodes supporting retries.
func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
func waitListSchedulableNodes(ctx context.Context, c clientset.Interface) (*v1.NodeList, error) {
var nodes *v1.NodeList
var err error
if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
nodes, err = c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{
if wait.PollImmediateWithContext(ctx, poll, singleCallTimeout, func(ctx context.Context) (bool, error) {
nodes, err = c.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
@ -190,8 +207,8 @@ func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
}
// checkWaitListSchedulableNodes is a wrapper around listing nodes supporting retries.
func checkWaitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
nodes, err := waitListSchedulableNodes(c)
func checkWaitListSchedulableNodes(ctx context.Context, c clientset.Interface) (*v1.NodeList, error) {
nodes, err := waitListSchedulableNodes(ctx, c)
if err != nil {
return nil, fmt.Errorf("error: %s. Non-retryable failure or timed out while listing nodes for e2e cluster", err)
}
@ -199,9 +216,9 @@ func checkWaitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error)
}
// CheckReadyForTests returns a function which will return 'true' once the number of ready nodes is above the allowedNotReadyNodes threshold (i.e. to be used as a global gate for starting the tests).
func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowedNotReadyNodes, largeClusterThreshold int) func() (bool, error) {
func CheckReadyForTests(ctx context.Context, c clientset.Interface, nonblockingTaints string, allowedNotReadyNodes, largeClusterThreshold int) func(ctx context.Context) (bool, error) {
attempt := 0
return func() (bool, error) {
return func(ctx context.Context) (bool, error) {
if allowedNotReadyNodes == -1 {
return true, nil
}
@ -212,7 +229,7 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
// filter out cordoned (unschedulable) nodes from our calculation, TODO refactor if node v2 API removes that semantic.
FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
}
allNodes, err := c.CoreV1().Nodes().List(context.TODO(), opts)
allNodes, err := c.CoreV1().Nodes().List(ctx, opts)
if err != nil {
var terminalListNodesErr error
framework.Logf("Unexpected error listing nodes: %v", err)


@ -0,0 +1,9 @@
# This E2E framework sub-package is currently allowed to use arbitrary
# dependencies, therefore we need to override the restrictions from
# the parent .import-restrictions file.
#
# At some point it may become useful to also check this package's
# dependencies more careful.
rules:
- selectorRegexp: ""
allowedPrefixes: [ "" ]


@ -52,76 +52,76 @@ type Config struct {
}
// CreateUnschedulablePod with given claims based on node selector
func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
func CreateUnschedulablePod(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
return nil, fmt.Errorf("pod Create API error: %w", err)
}
// Waiting for pod to become Unschedulable
err = WaitForPodNameUnschedulableInNamespace(client, pod.Name, namespace)
err = WaitForPodNameUnschedulableInNamespace(ctx, client, pod.Name, namespace)
if err != nil {
return pod, fmt.Errorf("pod %q is not Unschedulable: %v", pod.Name, err)
return pod, fmt.Errorf("pod %q is not Unschedulable: %w", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
return pod, fmt.Errorf("pod Get API error: %w", err)
}
return pod, nil
}
// CreateClientPod defines and creates a pod with a mounted PV. Pod runs infinite loop until killed.
func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
return CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
func CreateClientPod(ctx context.Context, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
return CreatePod(ctx, c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
}
// CreatePod with given claims based on node selector
func CreatePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
func CreatePod(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
return nil, fmt.Errorf("pod Create API error: %w", err)
}
// Waiting for pod to be running
err = WaitForPodNameRunningInNamespace(client, pod.Name, namespace)
err = WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)
if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
return pod, fmt.Errorf("pod %q is not Running: %w", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
return pod, fmt.Errorf("pod Get API error: %w", err)
}
return pod, nil
}
// CreateSecPod creates security pod with given claims
func CreateSecPod(client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) {
return CreateSecPodWithNodeSelection(client, podConfig, timeout)
func CreateSecPod(ctx context.Context, client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) {
return CreateSecPodWithNodeSelection(ctx, client, podConfig, timeout)
}
// CreateSecPodWithNodeSelection creates security pod with given claims
func CreateSecPodWithNodeSelection(client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) {
func CreateSecPodWithNodeSelection(ctx context.Context, client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) {
pod, err := MakeSecPod(podConfig)
if err != nil {
return nil, fmt.Errorf("Unable to create pod: %v", err)
return nil, fmt.Errorf("Unable to create pod: %w", err)
}
pod, err = client.CoreV1().Pods(podConfig.NS).Create(context.TODO(), pod, metav1.CreateOptions{})
pod, err = client.CoreV1().Pods(podConfig.NS).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
return nil, fmt.Errorf("pod Create API error: %w", err)
}
// Waiting for pod to be running
err = WaitTimeoutForPodRunningInNamespace(client, pod.Name, podConfig.NS, timeout)
err = WaitTimeoutForPodRunningInNamespace(ctx, client, pod.Name, podConfig.NS, timeout)
if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
return pod, fmt.Errorf("pod %q is not Running: %w", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(podConfig.NS).Get(context.TODO(), pod.Name, metav1.GetOptions{})
pod, err = client.CoreV1().Pods(podConfig.NS).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
return pod, fmt.Errorf("pod Get API error: %w", err)
}
return pod, nil
}
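A hedged sketch that strings the updated create helpers together with the matching delete helper from the next file: the caller's context is passed down to every API call and wait. c, ns and pvc are assumed to come from the test.

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// runClientPod mounts the claim in a client pod, then tears the pod down again
// and waits for it to be gone.
func runClientPod(ctx context.Context, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) error {
	pod, err := e2epod.CreateClientPod(ctx, c, ns, pvc)
	if err != nil {
		return err
	}
	return e2epod.DeletePodWithWait(ctx, c, pod)
}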


@ -37,9 +37,9 @@ const (
// DeletePodOrFail deletes the pod of the specified namespace and name. Resilient to the pod
// not existing.
func DeletePodOrFail(c clientset.Interface, ns, name string) {
func DeletePodOrFail(ctx context.Context, c clientset.Interface, ns, name string) {
ginkgo.By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns))
err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
err := c.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
if err != nil && apierrors.IsNotFound(err) {
return
}
@ -49,41 +49,41 @@ func DeletePodOrFail(c clientset.Interface, ns, name string) {
// DeletePodWithWait deletes the passed-in pod and waits for the pod to be terminated. Resilient to the pod
// not existing.
func DeletePodWithWait(c clientset.Interface, pod *v1.Pod) error {
func DeletePodWithWait(ctx context.Context, c clientset.Interface, pod *v1.Pod) error {
if pod == nil {
return nil
}
return DeletePodWithWaitByName(c, pod.GetName(), pod.GetNamespace())
return DeletePodWithWaitByName(ctx, c, pod.GetName(), pod.GetNamespace())
}
// DeletePodWithWaitByName deletes the named and namespaced pod and waits for the pod to be terminated. Resilient to the pod
// not existing.
func DeletePodWithWaitByName(c clientset.Interface, podName, podNamespace string) error {
func DeletePodWithWaitByName(ctx context.Context, c clientset.Interface, podName, podNamespace string) error {
framework.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
err := c.CoreV1().Pods(podNamespace).Delete(context.TODO(), podName, metav1.DeleteOptions{})
err := c.CoreV1().Pods(podNamespace).Delete(ctx, podName, metav1.DeleteOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return nil // assume pod was already deleted
}
return fmt.Errorf("pod Delete API error: %v", err)
return fmt.Errorf("pod Delete API error: %w", err)
}
framework.Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, podName)
err = WaitForPodNotFoundInNamespace(c, podName, podNamespace, PodDeleteTimeout)
err = WaitForPodNotFoundInNamespace(ctx, c, podName, podNamespace, PodDeleteTimeout)
if err != nil {
return fmt.Errorf("pod %q was not deleted: %v", podName, err)
return fmt.Errorf("pod %q was not deleted: %w", podName, err)
}
return nil
}
// DeletePodWithGracePeriod deletes the passed-in pod. Resilient to the pod not existing.
func DeletePodWithGracePeriod(c clientset.Interface, pod *v1.Pod, grace int64) error {
return DeletePodWithGracePeriodByName(c, pod.GetName(), pod.GetNamespace(), grace)
func DeletePodWithGracePeriod(ctx context.Context, c clientset.Interface, pod *v1.Pod, grace int64) error {
return DeletePodWithGracePeriodByName(ctx, c, pod.GetName(), pod.GetNamespace(), grace)
}
// DeletePodsWithGracePeriod deletes the passed-in pods. Resilient to the pods not existing.
func DeletePodsWithGracePeriod(c clientset.Interface, pods []v1.Pod, grace int64) error {
func DeletePodsWithGracePeriod(ctx context.Context, c clientset.Interface, pods []v1.Pod, grace int64) error {
for _, pod := range pods {
if err := DeletePodWithGracePeriod(c, &pod, grace); err != nil {
if err := DeletePodWithGracePeriod(ctx, c, &pod, grace); err != nil {
return err
}
}
@ -91,14 +91,14 @@ func DeletePodsWithGracePeriod(c clientset.Interface, pods []v1.Pod, grace int64
}
// DeletePodWithGracePeriodByName deletes a pod by name and namespace. Resilient to the pod not existing.
func DeletePodWithGracePeriodByName(c clientset.Interface, podName, podNamespace string, grace int64) error {
func DeletePodWithGracePeriodByName(ctx context.Context, c clientset.Interface, podName, podNamespace string, grace int64) error {
framework.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
err := c.CoreV1().Pods(podNamespace).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(grace))
err := c.CoreV1().Pods(podNamespace).Delete(ctx, podName, *metav1.NewDeleteOptions(grace))
if err != nil {
if apierrors.IsNotFound(err) {
return nil // assume pod was already deleted
}
return fmt.Errorf("pod Delete API error: %v", err)
return fmt.Errorf("pod Delete API error: %w", err)
}
return nil
}


@ -87,13 +87,13 @@ func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Con
SubResource("portforward")
transport, upgrader, err := spdy.RoundTripperFor(restConfig)
if err != nil {
return nil, fmt.Errorf("create round tripper: %v", err)
return nil, fmt.Errorf("create round tripper: %w", err)
}
dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL())
streamConn, _, err := dialer.Dial(portforward.PortForwardProtocolV1Name)
if err != nil {
return nil, fmt.Errorf("dialer failed: %v", err)
return nil, fmt.Errorf("dialer failed: %w", err)
}
requestID := "1"
defer func() {
@ -112,7 +112,7 @@ func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Con
// This happens asynchronously.
errorStream, err := streamConn.CreateStream(headers)
if err != nil {
return nil, fmt.Errorf("error creating error stream: %v", err)
return nil, fmt.Errorf("error creating error stream: %w", err)
}
errorStream.Close()
go func() {
@ -129,7 +129,7 @@ func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Con
headers.Set(v1.StreamType, v1.StreamTypeData)
dataStream, err := streamConn.CreateStream(headers)
if err != nil {
return nil, fmt.Errorf("error creating data stream: %v", err)
return nil, fmt.Errorf("error creating data stream: %w", err)
}
return &stream{


@ -87,6 +87,7 @@ func ExecWithOptions(f *framework.Framework, options ExecOptions) (string, strin
// ExecCommandInContainerWithFullOutput executes a command in the
// specified container and returns stdout, stderr and error
func ExecCommandInContainerWithFullOutput(f *framework.Framework, podName, containerName string, cmd ...string) (string, string, error) {
// TODO (pohly): add context support
return ExecWithOptions(f, ExecOptions{
Command: cmd,
Namespace: f.Namespace.Name,
@ -114,28 +115,28 @@ func ExecShellInContainer(f *framework.Framework, podName, containerName string,
return ExecCommandInContainer(f, podName, containerName, "/bin/sh", "-c", cmd)
}
func execCommandInPod(f *framework.Framework, podName string, cmd ...string) string {
pod, err := NewPodClient(f).Get(context.TODO(), podName, metav1.GetOptions{})
func execCommandInPod(ctx context.Context, f *framework.Framework, podName string, cmd ...string) string {
pod, err := NewPodClient(f).Get(ctx, podName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get pod %v", podName)
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
return ExecCommandInContainer(f, podName, pod.Spec.Containers[0].Name, cmd...)
}
func execCommandInPodWithFullOutput(f *framework.Framework, podName string, cmd ...string) (string, string, error) {
pod, err := NewPodClient(f).Get(context.TODO(), podName, metav1.GetOptions{})
func execCommandInPodWithFullOutput(ctx context.Context, f *framework.Framework, podName string, cmd ...string) (string, string, error) {
pod, err := NewPodClient(f).Get(ctx, podName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get pod %v", podName)
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
return ExecCommandInContainerWithFullOutput(f, podName, pod.Spec.Containers[0].Name, cmd...)
}
// ExecShellInPod executes the specified command on the pod.
func ExecShellInPod(f *framework.Framework, podName string, cmd string) string {
return execCommandInPod(f, podName, "/bin/sh", "-c", cmd)
func ExecShellInPod(ctx context.Context, f *framework.Framework, podName string, cmd string) string {
return execCommandInPod(ctx, f, podName, "/bin/sh", "-c", cmd)
}
// ExecShellInPodWithFullOutput executes the specified command on the Pod and returns stdout, stderr and error.
func ExecShellInPodWithFullOutput(f *framework.Framework, podName string, cmd string) (string, string, error) {
return execCommandInPodWithFullOutput(f, podName, "/bin/sh", "-c", cmd)
func ExecShellInPodWithFullOutput(ctx context.Context, f *framework.Framework, podName string, cmd string) (string, string, error) {
return execCommandInPodWithFullOutput(ctx, f, podName, "/bin/sh", "-c", cmd)
}
func execute(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error {
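The exec helpers gain the same leading context argument. A minimal sketch, assuming f is the suite's *framework.Framework, podName names a running pod in its namespace, and the e2epod alias follows the usual convention:

package example

import (
	"context"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// readHostname runs a shell command in the pod's first container and returns
// its stdout.
func readHostname(ctx context.Context, f *framework.Framework, podName string) string {
	return e2epod.ExecShellInPod(ctx, f, podName, "cat /etc/hostname")
}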

vendor/k8s.io/kubernetes/test/e2e/framework/pod/get.go (new generated vendored file, 31 lines added)

@ -0,0 +1,31 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
)
// Get creates a function which retrieves the pod anew each time the function
// is called. Fatal errors are detected by framework.HandleRetry and cause
// polling to stop.
func Get(c clientset.Interface, pod framework.NamedObject) framework.GetFunc[*v1.Pod] {
return framework.HandleRetry(framework.GetObject(c.CoreV1().Pods(pod.GetNamespace()).Get, pod.GetName(), metav1.GetOptions{}))
}
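A sketch of how this new Get helper can be consumed (an assumption based only on the signature above, not something the diff shows): the returned framework.GetFunc re-fetches the pod on every call, which fits gomega's context-aware Eventually polling.

package example

import (
	"context"
	"time"

	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// expectRunning polls the pod until its phase is Running or two minutes pass.
func expectRunning(ctx context.Context, c clientset.Interface, pod *v1.Pod) {
	gomega.Eventually(ctx, e2epod.Get(c, pod)).
		WithTimeout(2 * time.Minute).
		Should(gomega.HaveField("Status.Phase", v1.PodRunning))
}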


@ -107,7 +107,7 @@ func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration
return out, nil
}
if elapsed := time.Since(start); elapsed > timeout {
return out, fmt.Errorf("RunHostCmd still failed after %v: %v", elapsed, err)
return out, fmt.Errorf("RunHostCmd still failed after %v: %w", elapsed, err)
}
framework.Logf("Waiting %v to retry failed RunHostCmd: %v", interval, err)
time.Sleep(interval)
@ -121,6 +121,15 @@ func LookForStringInLog(ns, podName, container, expectedString string, timeout t
})
}
// LookForStringInLogWithoutKubectl looks for the given string in the log of a specific pod container
func LookForStringInLogWithoutKubectl(ctx context.Context, client clientset.Interface, ns string, podName string, container string, expectedString string, timeout time.Duration) (result string, err error) {
return lookForString(expectedString, timeout, func() string {
podLogs, err := e2epod.GetPodLogs(ctx, client, ns, podName, container)
framework.ExpectNoError(err)
return podLogs
})
}
// CreateEmptyFileOnPod creates empty file at given path on the pod.
func CreateEmptyFileOnPod(namespace string, podName string, filePath string) error {
_, err := e2ekubectl.RunKubectl(namespace, "exec", podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath))
@ -128,8 +137,8 @@ func CreateEmptyFileOnPod(namespace string, podName string, filePath string) err
}
// DumpDebugInfo dumps debug info of tests.
func DumpDebugInfo(c clientset.Interface, ns string) {
sl, _ := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
func DumpDebugInfo(ctx context.Context, c clientset.Interface, ns string) {
sl, _ := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()})
for _, s := range sl.Items {
desc, _ := e2ekubectl.RunKubectl(ns, "describe", "po", s.Name)
framework.Logf("\nOutput of kubectl describe %v:\n%v", s.Name, desc)
@ -142,6 +151,7 @@ func DumpDebugInfo(c clientset.Interface, ns string) {
// MatchContainerOutput creates a pod and waits for all its containers to exit with success.
// It then tests that the matcher with each expectedOutput matches the output of the specified container.
func MatchContainerOutput(
ctx context.Context,
f *framework.Framework,
pod *v1.Pod,
containerName string,
@ -153,25 +163,25 @@ func MatchContainerOutput(
}
podClient := e2epod.PodClientNS(f, ns)
createdPod := podClient.Create(pod)
createdPod := podClient.Create(ctx, pod)
defer func() {
ginkgo.By("delete the pod")
podClient.DeleteSync(createdPod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
podClient.DeleteSync(ctx, createdPod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
}()
// Wait for client pod to complete.
podErr := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, createdPod.Name, ns, f.Timeouts.PodStart)
podErr := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, createdPod.Name, ns, f.Timeouts.PodStart)
// Grab its logs. Get host first.
podStatus, err := podClient.Get(context.TODO(), createdPod.Name, metav1.GetOptions{})
podStatus, err := podClient.Get(ctx, createdPod.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get pod status: %v", err)
return fmt.Errorf("failed to get pod status: %w", err)
}
if podErr != nil {
// Pod failed. Dump all logs from all containers to see what's wrong
_ = apiv1pod.VisitContainers(&podStatus.Spec, apiv1pod.AllFeatureEnabledContainers(), func(c *v1.Container, containerType apiv1pod.ContainerType) bool {
logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podStatus.Name, c.Name)
logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, ns, podStatus.Name, c.Name)
if err != nil {
framework.Logf("Failed to get logs from node %q pod %q container %q: %v",
podStatus.Spec.NodeName, podStatus.Name, c.Name, err)
@ -187,18 +197,18 @@ func MatchContainerOutput(
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
// Sometimes the actual containers take a second to get started, try to get logs for 60s
logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podStatus.Name, containerName)
logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, ns, podStatus.Name, containerName)
if err != nil {
framework.Logf("Failed to get logs from node %q pod %q container %q. %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
return fmt.Errorf("failed to get logs from %s for %s: %v", podStatus.Name, containerName, err)
return fmt.Errorf("failed to get logs from %s for %s: %w", podStatus.Name, containerName, err)
}
for _, expected := range expectedOutput {
m := matcher(expected)
matches, err := m.Match(logs)
if err != nil {
return fmt.Errorf("expected %q in container output: %v", expected, err)
return fmt.Errorf("expected %q in container output: %w", expected, err)
} else if !matches {
return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
}
@ -210,21 +220,21 @@ func MatchContainerOutput(
// TestContainerOutput runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a substring matcher.
func TestContainerOutput(f *framework.Framework, scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
TestContainerOutputMatcher(f, scenarioName, pod, containerIndex, expectedOutput, gomega.ContainSubstring)
func TestContainerOutput(ctx context.Context, f *framework.Framework, scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
TestContainerOutputMatcher(ctx, f, scenarioName, pod, containerIndex, expectedOutput, gomega.ContainSubstring)
}
// TestContainerOutputRegexp runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a regexp matcher.
func TestContainerOutputRegexp(f *framework.Framework, scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
TestContainerOutputMatcher(f, scenarioName, pod, containerIndex, expectedOutput, gomega.MatchRegexp)
func TestContainerOutputRegexp(ctx context.Context, f *framework.Framework, scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
TestContainerOutputMatcher(ctx, f, scenarioName, pod, containerIndex, expectedOutput, gomega.MatchRegexp)
}
// TestContainerOutputMatcher runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using the given matcher.
func TestContainerOutputMatcher(f *framework.Framework,
func TestContainerOutputMatcher(ctx context.Context, f *framework.Framework,
scenarioName string,
pod *v1.Pod,
containerIndex int,
@ -234,5 +244,5 @@ func TestContainerOutputMatcher(f *framework.Framework,
if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) {
framework.Failf("Invalid container index: %d", containerIndex)
}
framework.ExpectNoError(MatchContainerOutput(f, pod, pod.Spec.Containers[containerIndex].Name, expectedOutput, matcher))
framework.ExpectNoError(MatchContainerOutput(ctx, f, pod, pod.Spec.Containers[containerIndex].Name, expectedOutput, matcher))
}
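Callers of the output helpers now pass their context first. A short sketch; the pod construction is elided and the e2eoutput alias for the pod/output package is an assumption:

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)

// checkEchoOutput waits for the pod's first container to succeed and asserts
// that its log contains the expected substring.
func checkEchoOutput(ctx context.Context, f *framework.Framework, pod *v1.Pod) {
	e2eoutput.TestContainerOutput(ctx, f, "echo scenario", pod, 0, []string{"hello world"})
}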


@ -27,7 +27,6 @@ import (
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
@ -93,26 +92,26 @@ type PodClient struct {
}
// Create creates a new pod according to the framework specifications (don't wait for it to start).
func (c *PodClient) Create(pod *v1.Pod) *v1.Pod {
func (c *PodClient) Create(ctx context.Context, pod *v1.Pod) *v1.Pod {
c.mungeSpec(pod)
p, err := c.PodInterface.Create(context.TODO(), pod, metav1.CreateOptions{})
p, err := c.PodInterface.Create(ctx, pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating Pod")
return p
}
// CreateSync creates a new pod according to the framework specifications, and wait for it to start and be running and ready.
func (c *PodClient) CreateSync(pod *v1.Pod) *v1.Pod {
func (c *PodClient) CreateSync(ctx context.Context, pod *v1.Pod) *v1.Pod {
namespace := c.f.Namespace.Name
p := c.Create(pod)
framework.ExpectNoError(WaitTimeoutForPodReadyInNamespace(c.f.ClientSet, p.Name, namespace, framework.PodStartTimeout))
p := c.Create(ctx, pod)
framework.ExpectNoError(WaitTimeoutForPodReadyInNamespace(ctx, c.f.ClientSet, p.Name, namespace, framework.PodStartTimeout))
// Get the newest pod after it becomes running and ready, some status may change after pod created, such as pod ip.
p, err := c.Get(context.TODO(), p.Name, metav1.GetOptions{})
p, err := c.Get(ctx, p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
return p
}
// CreateBatch create a batch of pods. All pods are created before waiting.
func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod {
func (c *PodClient) CreateBatch(ctx context.Context, pods []*v1.Pod) []*v1.Pod {
ps := make([]*v1.Pod, len(pods))
var wg sync.WaitGroup
for i, pod := range pods {
@ -120,7 +119,7 @@ func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod {
go func(i int, pod *v1.Pod) {
defer wg.Done()
defer ginkgo.GinkgoRecover()
ps[i] = c.CreateSync(pod)
ps[i] = c.CreateSync(ctx, pod)
}(i, pod)
}
wg.Wait()
@ -130,14 +129,14 @@ func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod {
// Update updates the pod object. It retries on conflict and returns an error if
// any other API error occurs. name is the pod name, updateFn is the function updating the
// pod object.
func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
pod, err := c.PodInterface.Get(context.TODO(), name, metav1.GetOptions{})
func (c *PodClient) Update(ctx context.Context, name string, updateFn func(pod *v1.Pod)) {
framework.ExpectNoError(wait.PollWithContext(ctx, time.Millisecond*500, time.Second*30, func(ctx context.Context) (bool, error) {
pod, err := c.PodInterface.Get(ctx, name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get pod %q: %v", name, err)
return false, fmt.Errorf("failed to get pod %q: %w", name, err)
}
updateFn(pod)
_, err = c.PodInterface.Update(context.TODO(), pod, metav1.UpdateOptions{})
_, err = c.PodInterface.Update(ctx, pod, metav1.UpdateOptions{})
if err == nil {
framework.Logf("Successfully updated pod %q", name)
return true, nil
@ -146,12 +145,12 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
framework.Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err)
return false, nil
}
return false, fmt.Errorf("failed to update pod %q: %v", name, err)
return false, fmt.Errorf("failed to update pod %q: %w", name, err)
}))
}
// AddEphemeralContainerSync adds an EphemeralContainer to a pod and waits for it to be running.
func (c *PodClient) AddEphemeralContainerSync(pod *v1.Pod, ec *v1.EphemeralContainer, timeout time.Duration) error {
func (c *PodClient) AddEphemeralContainerSync(ctx context.Context, pod *v1.Pod, ec *v1.EphemeralContainer, timeout time.Duration) error {
namespace := c.f.Namespace.Name
podJS, err := json.Marshal(pod)
@ -166,24 +165,23 @@ func (c *PodClient) AddEphemeralContainerSync(pod *v1.Pod, ec *v1.EphemeralConta
framework.ExpectNoError(err, "error creating patch to add ephemeral container %q", format.Pod(pod))
// Clients may optimistically attempt to add an ephemeral container to determine whether the EphemeralContainers feature is enabled.
if _, err := c.Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "ephemeralcontainers"); err != nil {
if _, err := c.Patch(ctx, pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "ephemeralcontainers"); err != nil {
return err
}
framework.ExpectNoError(WaitForContainerRunning(c.f.ClientSet, namespace, pod.Name, ec.Name, timeout))
framework.ExpectNoError(WaitForContainerRunning(ctx, c.f.ClientSet, namespace, pod.Name, ec.Name, timeout))
return nil
}
// DeleteSync deletes the pod and waits for the pod to disappear for `timeout`. If the pod doesn't
// disappear before the timeout, it will fail the test.
func (c *PodClient) DeleteSync(name string, options metav1.DeleteOptions, timeout time.Duration) {
func (c *PodClient) DeleteSync(ctx context.Context, name string, options metav1.DeleteOptions, timeout time.Duration) {
namespace := c.f.Namespace.Name
err := c.Delete(context.TODO(), name, options)
err := c.Delete(ctx, name, options)
if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Failed to delete pod %q: %v", name, err)
}
gomega.Expect(WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
2*time.Second, timeout)).To(gomega.Succeed(), "wait for pod %q to disappear", name)
framework.ExpectNoError(WaitForPodNotFoundInNamespace(ctx, c.f.ClientSet, name, namespace, timeout), "wait for pod %q to disappear", name)
}
// mungeSpec apply test-suite specific transformations to the pod spec.
@ -224,9 +222,9 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
// WaitForSuccess waits for pod to succeed.
// TODO(random-liu): Move pod wait function into this file
func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
func (c *PodClient) WaitForSuccess(ctx context.Context, name string, timeout time.Duration) {
f := c.f
gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
gomega.Expect(WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase {
case v1.PodFailed:
@ -241,9 +239,9 @@ func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
}
// WaitForFinish waits for pod to finish running, regardless of success or failure.
func (c *PodClient) WaitForFinish(name string, timeout time.Duration) {
func (c *PodClient) WaitForFinish(ctx context.Context, name string, timeout time.Duration) {
f := c.f
gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
gomega.Expect(WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase {
case v1.PodFailed:
@ -258,12 +256,12 @@ func (c *PodClient) WaitForFinish(name string, timeout time.Duration) {
}
// WaitForErrorEventOrSuccess waits for pod to succeed or an error event for that pod.
func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) {
func (c *PodClient) WaitForErrorEventOrSuccess(ctx context.Context, pod *v1.Pod) (*v1.Event, error) {
var ev *v1.Event
err := wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
err := wait.PollWithContext(ctx, framework.Poll, framework.PodStartTimeout, func(ctx context.Context) (bool, error) {
evnts, err := c.f.ClientSet.CoreV1().Events(pod.Namespace).Search(scheme.Scheme, pod)
if err != nil {
return false, fmt.Errorf("error in listing events: %s", err)
return false, fmt.Errorf("error in listing events: %w", err)
}
for _, e := range evnts.Items {
switch e.Reason {
@ -282,15 +280,15 @@ func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) {
}
// MatchContainerOutput gets output of a container and match expected regexp in the output.
func (c *PodClient) MatchContainerOutput(name string, containerName string, expectedRegexp string) error {
func (c *PodClient) MatchContainerOutput(ctx context.Context, name string, containerName string, expectedRegexp string) error {
f := c.f
output, err := GetPodLogs(f.ClientSet, f.Namespace.Name, name, containerName)
output, err := GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, name, containerName)
if err != nil {
return fmt.Errorf("failed to get output for container %q of pod %q", containerName, name)
}
regex, err := regexp.Compile(expectedRegexp)
if err != nil {
return fmt.Errorf("failed to compile regexp %q: %v", expectedRegexp, err)
return fmt.Errorf("failed to compile regexp %q: %w", expectedRegexp, err)
}
if !regex.MatchString(output) {
return fmt.Errorf("failed to match regexp %q in output %q", expectedRegexp, output)
@ -299,16 +297,16 @@ func (c *PodClient) MatchContainerOutput(name string, containerName string, expe
}
// PodIsReady returns true if the specified pod is ready. Otherwise false.
func (c *PodClient) PodIsReady(name string) bool {
pod, err := c.Get(context.TODO(), name, metav1.GetOptions{})
func (c *PodClient) PodIsReady(ctx context.Context, name string) bool {
pod, err := c.Get(ctx, name, metav1.GetOptions{})
framework.ExpectNoError(err)
return podutils.IsPodReady(pod)
}
// RemovePodFinalizer removes the pod's finalizer
func (c *PodClient) RemoveFinalizer(podName string, finalizerName string) {
func (c *PodClient) RemoveFinalizer(ctx context.Context, podName string, finalizerName string) {
framework.Logf("Removing pod's %q finalizer: %q", podName, finalizerName)
c.Update(podName, func(pod *v1.Pod) {
c.Update(ctx, podName, func(pod *v1.Pod) {
pod.ObjectMeta.Finalizers = slice.RemoveString(pod.ObjectMeta.Finalizers, finalizerName, nil)
})
}
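Put together, the PodClient flow now looks roughly like this from a caller's perspective (a sketch with the pod spec elided and the usual e2epod alias assumed):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// createAndCleanup starts the pod, waits for it to be running and ready, and
// deletes it synchronously when the caller is done.
func createAndCleanup(ctx context.Context, f *framework.Framework, pod *v1.Pod) {
	pc := e2epod.NewPodClient(f)
	created := pc.CreateSync(ctx, pod)
	defer pc.DeleteSync(ctx, created.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
	// ... assertions against the running pod go here ...
}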


@ -18,7 +18,6 @@ package pod
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
@ -31,7 +30,6 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
@ -40,14 +38,6 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
)
// errPodCompleted is returned by PodRunning or PodContainerRunning to indicate that
// the pod has already reached completed state.
var errPodCompleted = FinalError(errors.New("pod ran to completion successfully"))
// errPodFailed is returned by PodRunning or PodContainerRunning to indicate that
// the pod has already reached a permanent failure state.
var errPodFailed = FinalError(errors.New("pod failed permanently"))
// LabelLogOnPodFailure can be used to mark which Pods will have their logs logged in the case of
// a test failure. By default, if there are no Pods with this label, only the first 5 Pods will
// have their logs fetched.
@ -69,109 +59,20 @@ func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}
func isElementOf(podUID types.UID, pods *v1.PodList) bool {
for _, pod := range pods.Items {
if pod.UID == podUID {
return true
}
}
return false
}
// ProxyResponseChecker is a context for checking pods responses by issuing GETs to them (via the API
// proxy) and verifying that they answer with their own pod name.
type ProxyResponseChecker struct {
c clientset.Interface
ns string
label labels.Selector
controllerName string
respondName bool // Whether the pod should respond with its own name.
pods *v1.PodList
}
// NewProxyResponseChecker returns a context for checking pods responses.
func NewProxyResponseChecker(c clientset.Interface, ns string, label labels.Selector, controllerName string, respondName bool, pods *v1.PodList) ProxyResponseChecker {
return ProxyResponseChecker{c, ns, label, controllerName, respondName, pods}
}
// CheckAllResponses issues GETs to all pods in the context and verify they
// reply with their own pod name.
func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) {
successes := 0
options := metav1.ListOptions{LabelSelector: r.label.String()}
currentPods, err := r.c.CoreV1().Pods(r.ns).List(context.TODO(), options)
expectNoError(err, "Failed to get list of currentPods in namespace: %s", r.ns)
for i, pod := range r.pods.Items {
// Check that the replica list remains unchanged, otherwise we have problems.
if !isElementOf(pod.UID, currentPods) {
return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods)
}
ctx, cancel := context.WithTimeout(context.Background(), singleCallTimeout)
defer cancel()
body, err := r.c.CoreV1().RESTClient().Get().
Namespace(r.ns).
Resource("pods").
SubResource("proxy").
Name(string(pod.Name)).
Do(ctx).
Raw()
if err != nil {
if ctx.Err() != nil {
// We may encounter errors here because of a race between the pod readiness and apiserver
// proxy. So, we log the error and retry if this occurs.
framework.Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
return false, nil
}
framework.Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
continue
}
// The response checker expects the pod's name unless !respondName, in
// which case it just checks for a non-empty response.
got := string(body)
what := ""
if r.respondName {
what = "expected"
want := pod.Name
if got != want {
framework.Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
r.controllerName, i+1, pod.Name, want, got)
continue
}
} else {
what = "non-empty"
if len(got) == 0 {
framework.Logf("Controller %s: Replica %d [%s] expected non-empty response",
r.controllerName, i+1, pod.Name)
continue
}
}
successes++
framework.Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items))
}
if successes < len(r.pods.Items) {
return false, nil
}
return true, nil
}
// PodsCreated returns a pod list matched by the given name.
func PodsCreated(c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) {
func PodsCreated(ctx context.Context, c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
return PodsCreatedByLabel(c, ns, name, replicas, label)
return PodsCreatedByLabel(ctx, c, ns, name, replicas, label)
}
// PodsCreatedByLabel returns a created pod list matched by the given label.
func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) {
func PodsCreatedByLabel(ctx context.Context, c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) {
timeout := 2 * time.Minute
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
options := metav1.ListOptions{LabelSelector: label.String()}
// List the pods, making sure we observe all the replicas.
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
pods, err := c.CoreV1().Pods(ns).List(ctx, options)
if err != nil {
return nil, err
}
@ -194,34 +95,31 @@ func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32,
}
// VerifyPods checks if the specified pod is responding.
func VerifyPods(c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
return podRunningMaybeResponding(c, ns, name, wantName, replicas, true)
func VerifyPods(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
return podRunningMaybeResponding(ctx, c, ns, name, wantName, replicas, true)
}
// VerifyPodsRunning checks if the specified pod is running.
func VerifyPodsRunning(c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
return podRunningMaybeResponding(c, ns, name, wantName, replicas, false)
func VerifyPodsRunning(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
return podRunningMaybeResponding(ctx, c, ns, name, wantName, replicas, false)
}
func podRunningMaybeResponding(c clientset.Interface, ns, name string, wantName bool, replicas int32, checkResponding bool) error {
pods, err := PodsCreated(c, ns, name, replicas)
func podRunningMaybeResponding(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, replicas int32, checkResponding bool) error {
pods, err := PodsCreated(ctx, c, ns, name, replicas)
if err != nil {
return err
}
e := podsRunning(c, pods)
e := podsRunning(ctx, c, pods)
if len(e) > 0 {
return fmt.Errorf("failed to wait for pods running: %v", e)
}
if checkResponding {
err = PodsResponding(c, ns, name, wantName, pods)
if err != nil {
return fmt.Errorf("failed to wait for pods responding: %v", err)
}
return WaitForPodsResponding(ctx, c, ns, name, wantName, podRespondingTimeout, pods)
}
return nil
}
func podsRunning(c clientset.Interface, pods *v1.PodList) []error {
func podsRunning(ctx context.Context, c clientset.Interface, pods *v1.PodList) []error {
// Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test.
ginkgo.By("ensuring each pod is running")
@ -230,7 +128,7 @@ func podsRunning(c clientset.Interface, pods *v1.PodList) []error {
for _, pod := range pods.Items {
go func(p v1.Pod) {
errorChan <- WaitForPodRunningInNamespace(c, &p)
errorChan <- WaitForPodRunningInNamespace(ctx, c, &p)
}(pod)
}
@ -302,7 +200,7 @@ func logPodTerminationMessages(pods []v1.Pod) {
// We will log the Pods that have the LabelLogOnPodFailure label. If there aren't any, we default to
// logging only the first 5 Pods. This requires the reportDir to be set, and the pods are logged into:
// {report_dir}/pods/{namespace}/{pod}/{container_name}/logs.txt
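// For example (hypothetical values, added for illustration): with report_dir set to
// /tmp/artifacts, logs for container "app" of pod "my-pod" in namespace "e2e-ns" land in
// /tmp/artifacts/pods/e2e-ns/my-pod/app/logs.txt.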
func logPodLogs(c clientset.Interface, namespace string, pods []v1.Pod, reportDir string) {
func logPodLogs(ctx context.Context, c clientset.Interface, namespace string, pods []v1.Pod, reportDir string) {
if reportDir == "" {
return
}
@ -328,7 +226,7 @@ func logPodLogs(c clientset.Interface, namespace string, pods []v1.Pod, reportDi
for i := 0; i < maxPods; i++ {
pod := logPods[i]
for _, container := range pod.Spec.Containers {
logs, err := getPodLogsInternal(c, namespace, pod.Name, container.Name, false, nil, &tailLen)
logs, err := getPodLogsInternal(ctx, c, namespace, pod.Name, container.Name, false, nil, &tailLen)
if err != nil {
framework.Logf("Unable to fetch %s/%s/%s logs: %v", pod.Namespace, pod.Name, container.Name, err)
continue
@ -351,14 +249,14 @@ func logPodLogs(c clientset.Interface, namespace string, pods []v1.Pod, reportDi
}
// DumpAllPodInfoForNamespace logs all pod information for a given namespace.
func DumpAllPodInfoForNamespace(c clientset.Interface, namespace, reportDir string) {
pods, err := c.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
func DumpAllPodInfoForNamespace(ctx context.Context, c clientset.Interface, namespace, reportDir string) {
pods, err := c.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
if err != nil {
framework.Logf("unable to fetch pod debug info: %v", err)
}
LogPodStates(pods.Items)
logPodTerminationMessages(pods.Items)
logPodLogs(c, namespace, pods.Items, reportDir)
logPodLogs(ctx, c, namespace, pods.Items, reportDir)
}
// FilterNonRestartablePods filters out pods that will never get recreated if
@ -459,15 +357,15 @@ func newExecPodSpec(ns, generateName string) *v1.Pod {
// CreateExecPodOrFail creates an agnhost pause pod used as a vessel for kubectl exec commands.
// Pod name is uniquely generated.
func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tweak func(*v1.Pod)) *v1.Pod {
func CreateExecPodOrFail(ctx context.Context, client clientset.Interface, ns, generateName string, tweak func(*v1.Pod)) *v1.Pod {
framework.Logf("Creating new exec pod")
pod := newExecPodSpec(ns, generateName)
if tweak != nil {
tweak(pod)
}
execPod, err := client.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
execPod, err := client.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
expectNoError(err, "failed to create new exec pod in namespace: %s", ns)
err = WaitForPodNameRunningInNamespace(client, execPod.Name, execPod.Namespace)
err = WaitForPodNameRunningInNamespace(ctx, client, execPod.Name, execPod.Namespace)
expectNoError(err, "failed to create new exec pod in namespace: %s", ns)
return execPod
}
@ -497,20 +395,20 @@ func WithWindowsHostProcess(pod *v1.Pod, username string) {
// CheckPodsRunningReady returns whether all pods whose names are listed in
// podNames in namespace ns are running and ready, using c and waiting at most
// timeout.
func CheckPodsRunningReady(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
return checkPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReady, "running and ready")
func CheckPodsRunningReady(ctx context.Context, c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
return checkPodsCondition(ctx, c, ns, podNames, timeout, testutils.PodRunningReady, "running and ready")
}
// CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are
// listed in podNames in namespace ns are running and ready, or succeeded, using
// c and waiting at most timeout.
func CheckPodsRunningReadyOrSucceeded(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
return checkPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReadyOrSucceeded, "running and ready, or succeeded")
func CheckPodsRunningReadyOrSucceeded(ctx context.Context, c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
return checkPodsCondition(ctx, c, ns, podNames, timeout, testutils.PodRunningReadyOrSucceeded, "running and ready, or succeeded")
}
// checkPodsCondition returns whether all pods whose names are listed in podNames
// in namespace ns are in the condition, using c and waiting at most timeout.
func checkPodsCondition(c clientset.Interface, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool {
func checkPodsCondition(ctx context.Context, c clientset.Interface, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool {
np := len(podNames)
framework.Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
type waitPodResult struct {
@ -521,7 +419,7 @@ func checkPodsCondition(c clientset.Interface, ns string, podNames []string, tim
for _, podName := range podNames {
// Launch off pod readiness checkers.
go func(name string) {
err := WaitForPodCondition(c, ns, name, desc, timeout, condition)
err := WaitForPodCondition(ctx, c, ns, name, desc, timeout, condition)
result <- waitPodResult{err == nil, name}
}(podName)
}
@ -539,24 +437,24 @@ func checkPodsCondition(c clientset.Interface, ns string, podNames []string, tim
}
// GetPodLogs returns the logs of the specified container (namespace/pod/container).
func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, false, nil, nil)
func GetPodLogs(ctx context.Context, c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(ctx, c, namespace, podName, containerName, false, nil, nil)
}
// GetPodLogsSince returns the logs of the specified container (namespace/pod/container) since a timestamp.
func GetPodLogsSince(c clientset.Interface, namespace, podName, containerName string, since time.Time) (string, error) {
func GetPodLogsSince(ctx context.Context, c clientset.Interface, namespace, podName, containerName string, since time.Time) (string, error) {
sinceTime := metav1.NewTime(since)
return getPodLogsInternal(c, namespace, podName, containerName, false, &sinceTime, nil)
return getPodLogsInternal(ctx, c, namespace, podName, containerName, false, &sinceTime, nil)
}
// GetPreviousPodLogs returns the logs of the previous instance of the
// specified container (namespace/pod/container).
func GetPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, true, nil, nil)
func GetPreviousPodLogs(ctx context.Context, c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(ctx, c, namespace, podName, containerName, true, nil, nil)
}
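A short usage sketch for the log helpers above, assuming a test only wants output produced after some reference time; pod and container names are placeholders:

	start := time.Now()
	// ... trigger the behaviour under test ...
	logs, err := GetPodLogsSince(ctx, c, ns, "my-pod", "app", start)
	if err != nil {
		framework.Failf("could not fetch logs: %v", err)
	}
	framework.Logf("pod produced: %s", logs)
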
// utility function for gomega Eventually
func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool, sinceTime *metav1.Time, tailLines *int) (string, error) {
func getPodLogsInternal(ctx context.Context, c clientset.Interface, namespace, podName, containerName string, previous bool, sinceTime *metav1.Time, tailLines *int) (string, error) {
request := c.CoreV1().RESTClient().Get().
Resource("pods").
Namespace(namespace).
@ -569,7 +467,7 @@ func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName
if tailLines != nil {
request.Param("tailLines", strconv.Itoa(*tailLines))
}
logs, err := request.Do(context.TODO()).Raw()
logs, err := request.Do(ctx).Raw()
if err != nil {
return "", err
}
@ -580,8 +478,8 @@ func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName
}
// GetPodsInNamespace returns the pods in the given namespace.
func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) {
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
func GetPodsInNamespace(ctx context.Context, c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) {
pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
if err != nil {
return []*v1.Pod{}, err
}
@ -598,10 +496,10 @@ func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[strin
}
// GetPods returns the pods matching the given labels in the given namespace.
func GetPods(c clientset.Interface, ns string, matchLabels map[string]string) ([]v1.Pod, error) {
func GetPods(ctx context.Context, c clientset.Interface, ns string, matchLabels map[string]string) ([]v1.Pod, error) {
label := labels.SelectorFromSet(matchLabels)
listOpts := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), listOpts)
pods, err := c.CoreV1().Pods(ns).List(ctx, listOpts)
if err != nil {
return []v1.Pod{}, err
}
@ -609,13 +507,13 @@ func GetPods(c clientset.Interface, ns string, matchLabels map[string]string) ([
}
// GetPodSecretUpdateTimeout returns the timeout duration for updating pod secret.
func GetPodSecretUpdateTimeout(c clientset.Interface) time.Duration {
func GetPodSecretUpdateTimeout(ctx context.Context, c clientset.Interface) time.Duration {
// With SecretManager(ConfigMapManager), we may have to wait up to full sync period +
// TTL of secret(configmap) to elapse before the Kubelet projects the update into the
// volume and the container picks it up.
// So this timeout is based on default Kubelet sync period (1 minute) + maximum TTL for
// secret(configmap) that's based on cluster size + additional time as a fudge factor.
secretTTL, err := getNodeTTLAnnotationValue(c)
secretTTL, err := getNodeTTLAnnotationValue(ctx, c)
if err != nil {
framework.Logf("Couldn't get node TTL annotation (using default value of 0): %v", err)
}
@ -624,18 +522,18 @@ func GetPodSecretUpdateTimeout(c clientset.Interface) time.Duration {
}
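As a rough illustration of the arithmetic described in the comment above; the concrete TTL and fudge factor live outside this hunk, so the values below are assumptions:

	syncPeriod := 1 * time.Minute             // default kubelet sync period
	secretTTL := 30 * time.Second             // assumed TTL taken from the node annotation
	fudge := 3 * time.Minute                  // assumed additional slack
	timeout := syncPeriod + secretTTL + fudge // 4m30s with these assumed values
	framework.Logf("waiting %v for secret propagation", timeout)
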
// VerifyPodHasConditionWithType verifies the pod has the expected condition by type
func VerifyPodHasConditionWithType(f *framework.Framework, pod *v1.Pod, cType v1.PodConditionType) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
func VerifyPodHasConditionWithType(ctx context.Context, f *framework.Framework, pod *v1.Pod, cType v1.PodConditionType) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get the recent pod object for name: %q", pod.Name)
if condition := FindPodConditionByType(&pod.Status, cType); condition == nil {
framework.Failf("pod %q should have the condition: %q, pod status: %v", pod.Name, cType, pod.Status)
}
}
func getNodeTTLAnnotationValue(c clientset.Interface) (time.Duration, error) {
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
func getNodeTTLAnnotationValue(ctx context.Context, c clientset.Interface) (time.Duration, error) {
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil || len(nodes.Items) == 0 {
return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %v", err)
return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %w", err)
}
// Since the TTL the kubelet uses is stored in the node object, for timeout
// purposes we take it from the first node (all of them should be the same).
@ -674,15 +572,3 @@ func IsPodActive(p *v1.Pod) bool {
v1.PodFailed != p.Status.Phase &&
p.DeletionTimestamp == nil
}
func podIdentifier(namespace, name string) string {
return fmt.Sprintf("%s/%s", namespace, name)
}
func identifier(pod *v1.Pod) string {
id := podIdentifier(pod.Namespace, pod.Name)
if pod.UID != "" {
id += fmt.Sprintf("(%s)", pod.UID)
}
return id
}

View File

@ -141,14 +141,14 @@ const DefaultNonRootUserName = "ContainerUser"
// Tests that require a specific user ID should override this.
func GetRestrictedPodSecurityContext() *v1.PodSecurityContext {
psc := &v1.PodSecurityContext{
RunAsNonRoot: pointer.BoolPtr(true),
RunAsNonRoot: pointer.Bool(true),
RunAsUser: GetDefaultNonRootUser(),
SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault},
}
if NodeOSDistroIs("windows") {
psc.WindowsOptions = &v1.WindowsSecurityContextOptions{}
psc.WindowsOptions.RunAsUserName = pointer.StringPtr(DefaultNonRootUserName)
psc.WindowsOptions.RunAsUserName = pointer.String(DefaultNonRootUserName)
}
return psc
@ -157,7 +157,7 @@ func GetRestrictedPodSecurityContext() *v1.PodSecurityContext {
// GetRestrictedContainerSecurityContext returns a minimal restricted container security context.
func GetRestrictedContainerSecurityContext() *v1.SecurityContext {
return &v1.SecurityContext{
AllowPrivilegeEscalation: pointer.BoolPtr(false),
AllowPrivilegeEscalation: pointer.Bool(false),
Capabilities: &v1.Capabilities{Drop: []v1.Capability{"ALL"}},
}
}
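A minimal sketch of wiring the two helpers above into a pod spec; the pod name and image are placeholders:

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "restricted-pod"},
		Spec: v1.PodSpec{
			SecurityContext: GetRestrictedPodSecurityContext(),
			Containers: []v1.Container{{
				Name:            "app",
				Image:           "registry.k8s.io/pause:3.9", // placeholder image
				SecurityContext: GetRestrictedContainerSecurityContext(),
			}},
		},
	}
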
@ -181,7 +181,7 @@ func MixinRestrictedPodSecurity(pod *v1.Pod) error {
pod.Spec.SecurityContext = GetRestrictedPodSecurityContext()
} else {
if pod.Spec.SecurityContext.RunAsNonRoot == nil {
pod.Spec.SecurityContext.RunAsNonRoot = pointer.BoolPtr(true)
pod.Spec.SecurityContext.RunAsNonRoot = pointer.Bool(true)
}
if pod.Spec.SecurityContext.RunAsUser == nil {
pod.Spec.SecurityContext.RunAsUser = GetDefaultNonRootUser()
@ -191,7 +191,7 @@ func MixinRestrictedPodSecurity(pod *v1.Pod) error {
}
if NodeOSDistroIs("windows") && pod.Spec.SecurityContext.WindowsOptions == nil {
pod.Spec.SecurityContext.WindowsOptions = &v1.WindowsSecurityContextOptions{}
pod.Spec.SecurityContext.WindowsOptions.RunAsUserName = pointer.StringPtr(DefaultNonRootUserName)
pod.Spec.SecurityContext.WindowsOptions.RunAsUserName = pointer.String(DefaultNonRootUserName)
}
}
for i := range pod.Spec.Containers {
@ -241,3 +241,23 @@ func FindPodConditionByType(podStatus *v1.PodStatus, conditionType v1.PodConditi
}
return nil
}
// FindContainerStatusInPod finds a container status by its name in the provided pod
func FindContainerStatusInPod(pod *v1.Pod, containerName string) *v1.ContainerStatus {
for _, containerStatus := range pod.Status.InitContainerStatuses {
if containerStatus.Name == containerName {
return &containerStatus
}
}
for _, containerStatus := range pod.Status.ContainerStatuses {
if containerStatus.Name == containerName {
return &containerStatus
}
}
for _, containerStatus := range pod.Status.EphemeralContainerStatuses {
if containerStatus.Name == containerName {
return &containerStatus
}
}
return nil
}
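A usage sketch for the new helper, assuming the test already holds a freshly fetched pod object with a container named "app":

	if status := FindContainerStatusInPod(pod, "app"); status != nil && status.RestartCount > 0 {
		framework.Logf("container %q restarted %d times", status.Name, status.RestartCount)
	}
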

File diff suppressed because it is too large

View File

@ -17,6 +17,7 @@ limitations under the License.
package framework
import (
"context"
"fmt"
"os"
"sync"
@ -100,12 +101,12 @@ type ProviderInterface interface {
CreateShare() (string, string, string, error)
DeleteShare(accountName, shareName string) error
CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error)
DeletePVSource(pvSource *v1.PersistentVolumeSource) error
CreatePVSource(ctx context.Context, zone, diskName string) (*v1.PersistentVolumeSource, error)
DeletePVSource(ctx context.Context, pvSource *v1.PersistentVolumeSource) error
CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string)
CleanupServiceResources(ctx context.Context, c clientset.Interface, loadBalancerName, region, zone string)
EnsureLoadBalancerResourcesDeleted(ip, portRange string) error
EnsureLoadBalancerResourcesDeleted(ctx context.Context, ip, portRange string) error
LoadBalancerSrcRanges() []string
EnableAndDisableInternalLB() (enable, disable func(svc *v1.Service))
}
@ -159,21 +160,21 @@ func (n NullProvider) DeletePD(pdName string) error {
}
// CreatePVSource is a base implementation which creates PV source.
func (n NullProvider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error) {
func (n NullProvider) CreatePVSource(ctx context.Context, zone, diskName string) (*v1.PersistentVolumeSource, error) {
return nil, fmt.Errorf("Provider not supported")
}
// DeletePVSource is a base implementation which deletes PV source.
func (n NullProvider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
func (n NullProvider) DeletePVSource(ctx context.Context, pvSource *v1.PersistentVolumeSource) error {
return fmt.Errorf("Provider not supported")
}
// CleanupServiceResources is a base implementation which cleans up service resources.
func (n NullProvider) CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) {
func (n NullProvider) CleanupServiceResources(ctx context.Context, c clientset.Interface, loadBalancerName, region, zone string) {
}
// EnsureLoadBalancerResourcesDeleted is a base implementation which ensures load balancer is deleted.
func (n NullProvider) EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
func (n NullProvider) EnsureLoadBalancerResourcesDeleted(ctx context.Context, ip, portRange string) error {
return nil
}

View File

@ -0,0 +1,9 @@
# This E2E framework sub-package is currently allowed to use arbitrary
# dependencies, therefore we need to override the restrictions from
# the parent .import-restrictions file.
#
# At some point it may become useful to also check this package's
# dependencies more carefully.
rules:
- selectorRegexp: ""
allowedPrefixes: [ "" ]

View File

@ -135,21 +135,21 @@ type PersistentVolumeClaimConfig struct {
// PVPVCCleanup cleans up a pv and pvc in a single pv/pvc test case.
// Note: delete errors are appended to []error so that we can attempt to delete both the pvc and pv.
func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) []error {
func PVPVCCleanup(ctx context.Context, c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) []error {
var errs []error
if pvc != nil {
err := DeletePersistentVolumeClaim(c, pvc.Name, ns)
err := DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns)
if err != nil {
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvc.Name, err))
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %w", pvc.Name, err))
}
} else {
framework.Logf("pvc is nil")
}
if pv != nil {
err := DeletePersistentVolume(c, pv.Name)
err := DeletePersistentVolume(ctx, c, pv.Name)
if err != nil {
errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pv.Name, err))
errs = append(errs, fmt.Errorf("failed to delete PV %q: %w", pv.Name, err))
}
} else {
framework.Logf("pv is nil")
@ -160,22 +160,22 @@ func PVPVCCleanup(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc
// PVPVCMapCleanup cleans up pvs and pvcs in multi-pv-pvc test cases. Entries found in the pv and claim maps are
// deleted as long as the Delete api call succeeds.
// Note: delete errors are appended to []error so that as many pvcs and pvs as possible are deleted.
func PVPVCMapCleanup(c clientset.Interface, ns string, pvols PVMap, claims PVCMap) []error {
func PVPVCMapCleanup(ctx context.Context, c clientset.Interface, ns string, pvols PVMap, claims PVCMap) []error {
var errs []error
for pvcKey := range claims {
err := DeletePersistentVolumeClaim(c, pvcKey.Name, ns)
err := DeletePersistentVolumeClaim(ctx, c, pvcKey.Name, ns)
if err != nil {
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvcKey.Name, err))
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %w", pvcKey.Name, err))
} else {
delete(claims, pvcKey)
}
}
for pvKey := range pvols {
err := DeletePersistentVolume(c, pvKey)
err := DeletePersistentVolume(ctx, c, pvKey)
if err != nil {
errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pvKey, err))
errs = append(errs, fmt.Errorf("failed to delete PV %q: %w", pvKey, err))
} else {
delete(pvols, pvKey)
}
@ -184,24 +184,24 @@ func PVPVCMapCleanup(c clientset.Interface, ns string, pvols PVMap, claims PVCMa
}
// DeletePersistentVolume deletes the PV.
func DeletePersistentVolume(c clientset.Interface, pvName string) error {
func DeletePersistentVolume(ctx context.Context, c clientset.Interface, pvName string) error {
if c != nil && len(pvName) > 0 {
framework.Logf("Deleting PersistentVolume %q", pvName)
err := c.CoreV1().PersistentVolumes().Delete(context.TODO(), pvName, metav1.DeleteOptions{})
err := c.CoreV1().PersistentVolumes().Delete(ctx, pvName, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return fmt.Errorf("PV Delete API error: %v", err)
return fmt.Errorf("PV Delete API error: %w", err)
}
}
return nil
}
// DeletePersistentVolumeClaim deletes the Claim.
func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns string) error {
func DeletePersistentVolumeClaim(ctx context.Context, c clientset.Interface, pvcName string, ns string) error {
if c != nil && len(pvcName) > 0 {
framework.Logf("Deleting PersistentVolumeClaim %q", pvcName)
err := c.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), pvcName, metav1.DeleteOptions{})
err := c.CoreV1().PersistentVolumeClaims(ns).Delete(ctx, pvcName, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return fmt.Errorf("PVC Delete API error: %v", err)
return fmt.Errorf("PVC Delete API error: %w", err)
}
}
return nil
@ -210,25 +210,25 @@ func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns strin
// DeletePVCandValidatePV deletes the PVC and waits for the PV to enter its expected phase. Validate that the PV
// has been reclaimed (assumption here about reclaimPolicy). Caller tells this func which
// phase value to expect for the pv bound to the to-be-deleted claim.
func DeletePVCandValidatePV(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error {
func DeletePVCandValidatePV(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error {
pvname := pvc.Spec.VolumeName
framework.Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname)
err := DeletePersistentVolumeClaim(c, pvc.Name, ns)
err := DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns)
if err != nil {
return err
}
// Wait for the PV's phase to return to be `expectPVPhase`
framework.Logf("Waiting for reclaim process to complete.")
err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, framework.Poll, timeouts.PVReclaim)
err = WaitForPersistentVolumePhase(ctx, expectPVPhase, c, pv.Name, framework.Poll, timeouts.PVReclaim)
if err != nil {
return fmt.Errorf("pv %q phase did not become %v: %v", pv.Name, expectPVPhase, err)
return fmt.Errorf("pv %q phase did not become %v: %w", pv.Name, expectPVPhase, err)
}
// examine the pv's ClaimRef and UID and compare to expected values
pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
pv, err = c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PV Get API error: %v", err)
return fmt.Errorf("PV Get API error: %w", err)
}
cr := pv.Spec.ClaimRef
if expectPVPhase == v1.VolumeAvailable {
@ -254,13 +254,13 @@ func DeletePVCandValidatePV(c clientset.Interface, timeouts *framework.TimeoutCo
// Note: if there are more claims than pvs then some of the remaining claims may bind to just made
//
// available pvs.
func DeletePVCandValidatePVGroup(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvols PVMap, claims PVCMap, expectPVPhase v1.PersistentVolumePhase) error {
func DeletePVCandValidatePVGroup(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvols PVMap, claims PVCMap, expectPVPhase v1.PersistentVolumePhase) error {
var boundPVs, deletedPVCs int
for pvName := range pvols {
pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PV Get API error: %v", err)
return fmt.Errorf("PV Get API error: %w", err)
}
cr := pv.Spec.ClaimRef
// if pv is bound then delete the pvc it is bound to
@ -273,13 +273,13 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, timeouts *framework.Time
return fmt.Errorf("internal: claims map is missing pvc %q", pvcKey)
}
// get the pvc for the delete call below
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), cr.Name, metav1.GetOptions{})
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, cr.Name, metav1.GetOptions{})
if err == nil {
if err = DeletePVCandValidatePV(c, timeouts, ns, pvc, pv, expectPVPhase); err != nil {
if err = DeletePVCandValidatePV(ctx, c, timeouts, ns, pvc, pv, expectPVPhase); err != nil {
return err
}
} else if !apierrors.IsNotFound(err) {
return fmt.Errorf("PVC Get API error: %v", err)
return fmt.Errorf("PVC Get API error: %w", err)
}
// delete pvckey from map even if apierrors.IsNotFound above is true and thus the
// claim was not actually deleted here
@ -294,11 +294,11 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, timeouts *framework.Time
}
// create the PV resource. Fails test on error.
func createPV(c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
func createPV(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
var resultPV *v1.PersistentVolume
var lastCreateErr error
err := wait.PollImmediate(29*time.Second, timeouts.PVCreate, func() (done bool, err error) {
resultPV, lastCreateErr = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
err := wait.PollImmediateWithContext(ctx, 29*time.Second, timeouts.PVCreate, func(ctx context.Context) (done bool, err error) {
resultPV, lastCreateErr = c.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{})
if lastCreateErr != nil {
// If we hit a quota problem, we are not done and should retry again. This happens to be the quota failure string for GCP.
// If quota failure strings are found for other platforms, they can be added to improve reliability when running
@ -316,25 +316,25 @@ func createPV(c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.
})
// if we have an error from creating the PV, use that instead of a timeout error
if lastCreateErr != nil {
return nil, fmt.Errorf("PV Create API error: %v", err)
return nil, fmt.Errorf("PV Create API error: %w", err)
}
if err != nil {
return nil, fmt.Errorf("PV Create API error: %v", err)
return nil, fmt.Errorf("PV Create API error: %w", err)
}
return resultPV, nil
}
// CreatePV creates the PV resource. Fails test on error.
func CreatePV(c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
return createPV(c, timeouts, pv)
func CreatePV(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
return createPV(ctx, c, timeouts, pv)
}
// CreatePVC creates the PVC resource. Fails test on error.
func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc, metav1.CreateOptions{})
func CreatePVC(ctx context.Context, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(ctx, pvc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("PVC Create API error: %v", err)
return nil, fmt.Errorf("PVC Create API error: %w", err)
}
return pvc, nil
}
@ -346,7 +346,7 @@ func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim)
//
// known until after the PVC is instantiated. This is why the pvc is created
// before the pv.
func CreatePVCPV(c clientset.Interface, timeouts *framework.TimeoutContext, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
func CreatePVCPV(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
// make the pvc spec
pvc := MakePersistentVolumeClaim(pvcConfig, ns)
preBindMsg := ""
@ -358,7 +358,7 @@ func CreatePVCPV(c clientset.Interface, timeouts *framework.TimeoutContext, pvCo
pv := MakePersistentVolume(pvConfig)
ginkgo.By(fmt.Sprintf("Creating a PVC followed by a%s PV", preBindMsg))
pvc, err := CreatePVC(c, ns, pvc)
pvc, err := CreatePVC(ctx, c, ns, pvc)
if err != nil {
return nil, nil, err
}
@ -367,7 +367,7 @@ func CreatePVCPV(c clientset.Interface, timeouts *framework.TimeoutContext, pvCo
if preBind {
pv.Spec.ClaimRef.Name = pvc.Name
}
pv, err = createPV(c, timeouts, pv)
pv, err = createPV(ctx, c, timeouts, pv)
if err != nil {
return nil, pvc, err
}
@ -382,7 +382,7 @@ func CreatePVCPV(c clientset.Interface, timeouts *framework.TimeoutContext, pvCo
//
// known until after the PV is instantiated. This is why the pv is created
// before the pvc.
func CreatePVPVC(c clientset.Interface, timeouts *framework.TimeoutContext, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
func CreatePVPVC(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
preBindMsg := ""
if preBind {
preBindMsg = " pre-bound"
@ -394,7 +394,7 @@ func CreatePVPVC(c clientset.Interface, timeouts *framework.TimeoutContext, pvCo
pvc := MakePersistentVolumeClaim(pvcConfig, ns)
// instantiate the pv
pv, err := createPV(c, timeouts, pv)
pv, err := createPV(ctx, c, timeouts, pv)
if err != nil {
return nil, nil, err
}
@ -402,7 +402,7 @@ func CreatePVPVC(c clientset.Interface, timeouts *framework.TimeoutContext, pvCo
if preBind {
pvc.Spec.VolumeName = pv.Name
}
pvc, err = CreatePVC(c, ns, pvc)
pvc, err = CreatePVC(ctx, c, ns, pvc)
if err != nil {
return pv, nil, err
}
@ -417,7 +417,7 @@ func CreatePVPVC(c clientset.Interface, timeouts *framework.TimeoutContext, pvCo
// Note: when the test suite deletes the namespace, orphaned pvcs and pods are deleted. However,
//
// orphaned pvs are not deleted and will remain after the suite completes.
func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig) (PVMap, PVCMap, error) {
func CreatePVsPVCs(ctx context.Context, numpvs, numpvcs int, c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig) (PVMap, PVCMap, error) {
pvMap := make(PVMap, numpvs)
pvcMap := make(PVCMap, numpvcs)
extraPVCs := 0
@ -430,7 +430,7 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, timeouts *framewo
// create pvs and pvcs
for i := 0; i < pvsToCreate; i++ {
pv, pvc, err := CreatePVPVC(c, timeouts, pvConfig, pvcConfig, ns, false)
pv, pvc, err := CreatePVPVC(ctx, c, timeouts, pvConfig, pvcConfig, ns, false)
if err != nil {
return pvMap, pvcMap, err
}
@ -441,7 +441,7 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, timeouts *framewo
// create extra pvs or pvcs as needed
for i := 0; i < extraPVs; i++ {
pv := MakePersistentVolume(pvConfig)
pv, err := createPV(c, timeouts, pv)
pv, err := createPV(ctx, c, timeouts, pv)
if err != nil {
return pvMap, pvcMap, err
}
@ -449,7 +449,7 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, timeouts *framewo
}
for i := 0; i < extraPVCs; i++ {
pvc := MakePersistentVolumeClaim(pvcConfig, ns)
pvc, err := CreatePVC(c, ns, pvc)
pvc, err := CreatePVC(ctx, c, ns, pvc)
if err != nil {
return pvMap, pvcMap, err
}
@ -459,29 +459,29 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, timeouts *framewo
}
// WaitOnPVandPVC waits for the pv and pvc to bind to each other.
func WaitOnPVandPVC(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
func WaitOnPVandPVC(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
// Wait for newly created PVC to bind to the PV
framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, framework.Poll, timeouts.ClaimBound)
err := WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, pvc.Name, framework.Poll, timeouts.ClaimBound)
if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", pvc.Name, err)
return fmt.Errorf("PVC %q did not become Bound: %w", pvc.Name, err)
}
// Wait for PersistentVolume.Status.Phase to be Bound, which it should be
// since the PVC is already bound.
err = WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, framework.Poll, timeouts.PVBound)
err = WaitForPersistentVolumePhase(ctx, v1.VolumeBound, c, pv.Name, framework.Poll, timeouts.PVBound)
if err != nil {
return fmt.Errorf("PV %q did not become Bound: %v", pv.Name, err)
return fmt.Errorf("PV %q did not become Bound: %w", pv.Name, err)
}
// Re-get the pv and pvc objects
pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
pv, err = c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PV Get API error: %v", err)
return fmt.Errorf("PV Get API error: %w", err)
}
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvc.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PVC Get API error: %v", err)
return fmt.Errorf("PVC Get API error: %w", err)
}
// The pv and pvc are both bound, but to each other?
@ -508,7 +508,7 @@ func WaitOnPVandPVC(c clientset.Interface, timeouts *framework.TimeoutContext, n
// to situations where the maximum wait times are reached several times in succession,
// extending test time. Thus, it is recommended to keep the delta between PVs and PVCs
// small.
func WaitAndVerifyBinds(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvols PVMap, claims PVCMap, testExpected bool) error {
func WaitAndVerifyBinds(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvols PVMap, claims PVCMap, testExpected bool) error {
var actualBinds int
expectedBinds := len(pvols)
if expectedBinds > len(claims) { // want the min of # pvs or #pvcs
@ -516,19 +516,19 @@ func WaitAndVerifyBinds(c clientset.Interface, timeouts *framework.TimeoutContex
}
for pvName := range pvols {
err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, framework.Poll, timeouts.PVBound)
err := WaitForPersistentVolumePhase(ctx, v1.VolumeBound, c, pvName, framework.Poll, timeouts.PVBound)
if err != nil && len(pvols) > len(claims) {
framework.Logf("WARN: pv %v is not bound after max wait", pvName)
framework.Logf(" This may be ok since there are more pvs than pvcs")
continue
}
if err != nil {
return fmt.Errorf("PV %q did not become Bound: %v", pvName, err)
return fmt.Errorf("PV %q did not become Bound: %w", pvName, err)
}
pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PV Get API error: %v", err)
return fmt.Errorf("PV Get API error: %w", err)
}
cr := pv.Spec.ClaimRef
if cr != nil && len(cr.Name) > 0 {
@ -539,9 +539,9 @@ func WaitAndVerifyBinds(c clientset.Interface, timeouts *framework.TimeoutContex
return fmt.Errorf("internal: claims map is missing pvc %q", pvcKey)
}
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, framework.Poll, timeouts.ClaimBound)
err := WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, cr.Name, framework.Poll, timeouts.ClaimBound)
if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", cr.Name, err)
return fmt.Errorf("PVC %q did not become Bound: %w", cr.Name, err)
}
actualBinds++
}
@ -659,10 +659,15 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P
}
}
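For orientation, a condensed sketch of the typical create-then-bind flow with the context-aware helpers above; the client, timeouts, configs and namespace are assumed to have been prepared by the test:

	pv, pvc, err := CreatePVPVC(ctx, c, timeouts, pvConfig, pvcConfig, ns, false /* preBind */)
	framework.ExpectNoError(err, "creating PV and PVC")
	framework.ExpectNoError(WaitOnPVandPVC(ctx, c, timeouts, ns, pv, pvc), "waiting for PV/PVC to bind")
	defer func() {
		for _, e := range PVPVCCleanup(ctx, c, ns, pv, pvc) {
			framework.Logf("cleanup error: %v", e)
		}
	}()
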
func createPDWithRetry(zone string) (string, error) {
func createPDWithRetry(ctx context.Context, zone string) (string, error) {
var err error
var newDiskName string
for start := time.Now(); time.Since(start) < pdRetryTimeout; time.Sleep(pdRetryPollTime) {
for start := time.Now(); ; time.Sleep(pdRetryPollTime) {
if time.Since(start) >= pdRetryTimeout ||
ctx.Err() != nil {
return "", fmt.Errorf("timed out while trying to create PD in zone %q, last error: %w", zone, err)
}
newDiskName, err = createPD(zone)
if err != nil {
framework.Logf("Couldn't create a new PD in zone %q, sleeping 5 seconds: %v", zone, err)
@ -671,7 +676,6 @@ func createPDWithRetry(zone string) (string, error) {
framework.Logf("Successfully created a new PD in zone %q: %q.", zone, newDiskName)
return newDiskName, nil
}
return "", err
}
func CreateShare() (string, string, string, error) {
@ -683,19 +687,23 @@ func DeleteShare(accountName, shareName string) error {
}
// CreatePDWithRetry creates PD with retry.
func CreatePDWithRetry() (string, error) {
return createPDWithRetry("")
func CreatePDWithRetry(ctx context.Context) (string, error) {
return createPDWithRetry(ctx, "")
}
// CreatePDWithRetryAndZone creates PD on zone with retry.
func CreatePDWithRetryAndZone(zone string) (string, error) {
return createPDWithRetry(zone)
func CreatePDWithRetryAndZone(ctx context.Context, zone string) (string, error) {
return createPDWithRetry(ctx, zone)
}
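A sketch of pairing the PD helpers with cleanup now that both honour the caller's context; the zone string is a placeholder:

	diskName, err := CreatePDWithRetryAndZone(ctx, "us-central1-a") // placeholder zone
	framework.ExpectNoError(err, "provisioning PD")
	defer func() {
		framework.ExpectNoError(DeletePDWithRetry(ctx, diskName), "deleting PD")
	}()
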
// DeletePDWithRetry deletes PD with retry.
func DeletePDWithRetry(diskName string) error {
func DeletePDWithRetry(ctx context.Context, diskName string) error {
var err error
for start := time.Now(); time.Since(start) < pdRetryTimeout; time.Sleep(pdRetryPollTime) {
for start := time.Now(); ; time.Sleep(pdRetryPollTime) {
if time.Since(start) >= pdRetryTimeout ||
ctx.Err() != nil {
return fmt.Errorf("timed out while trying to delete PD %q, last error: %w", diskName, err)
}
err = deletePD(diskName)
if err != nil {
framework.Logf("Couldn't delete PD %q, sleeping %v: %v", diskName, pdRetryPollTime, err)
@ -704,7 +712,6 @@ func DeletePDWithRetry(diskName string) error {
framework.Logf("Successfully deleted PD %q.", diskName)
return nil
}
return fmt.Errorf("unable to delete PD %q: %v", diskName, err)
}
func createPD(zone string) (string, error) {
@ -719,33 +726,33 @@ func deletePD(pdName string) error {
}
// WaitForPVClaimBoundPhase waits until all of the given PVCs have reached the Bound phase.
func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim, timeout time.Duration) ([]*v1.PersistentVolume, error) {
func WaitForPVClaimBoundPhase(ctx context.Context, client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim, timeout time.Duration) ([]*v1.PersistentVolume, error) {
persistentvolumes := make([]*v1.PersistentVolume, len(pvclaims))
for index, claim := range pvclaims {
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, timeout)
err := WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, timeout)
if err != nil {
return persistentvolumes, err
}
// Get new copy of the claim
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{})
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
if err != nil {
return persistentvolumes, fmt.Errorf("PVC Get API error: %v", err)
return persistentvolumes, fmt.Errorf("PVC Get API error: %w", err)
}
// Get the bounded PV
persistentvolumes[index], err = client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{})
persistentvolumes[index], err = client.CoreV1().PersistentVolumes().Get(ctx, claim.Spec.VolumeName, metav1.GetOptions{})
if err != nil {
return persistentvolumes, fmt.Errorf("PV Get API error: %v", err)
return persistentvolumes, fmt.Errorf("PV Get API error: %w", err)
}
}
return persistentvolumes, nil
}
// WaitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, poll, timeout time.Duration) error {
func WaitForPersistentVolumePhase(ctx context.Context, phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err != nil {
framework.Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, poll, err)
continue
@ -760,13 +767,13 @@ func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.In
}
// WaitForPersistentVolumeClaimPhase waits for a PersistentVolumeClaim to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, poll, timeout time.Duration) error {
return WaitForPersistentVolumeClaimsPhase(phase, c, ns, []string{pvcName}, poll, timeout, true)
func WaitForPersistentVolumeClaimPhase(ctx context.Context, phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, poll, timeout time.Duration) error {
return WaitForPersistentVolumeClaimsPhase(ctx, phase, c, ns, []string{pvcName}, poll, timeout, true)
}
// WaitForPersistentVolumeClaimsPhase waits for any (if matchAny is true) or all (if matchAny is false) PersistentVolumeClaims
// to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcNames []string, poll, timeout time.Duration, matchAny bool) error {
func WaitForPersistentVolumeClaimsPhase(ctx context.Context, phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcNames []string, poll, timeout time.Duration, matchAny bool) error {
if len(pvcNames) == 0 {
return fmt.Errorf("Incorrect parameter: Need at least one PVC to track. Found 0")
}
@ -774,7 +781,7 @@ func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c c
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
phaseFoundInAllClaims := true
for _, pvcName := range pvcNames {
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvcName, metav1.GetOptions{})
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvcName, metav1.GetOptions{})
if err != nil {
framework.Logf("Failed to get claim %q, retrying in %v. Error: %v", pvcName, poll, err)
phaseFoundInAllClaims = false
@ -798,24 +805,24 @@ func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c c
}
// CreatePVSource creates a PV source.
func CreatePVSource(zone string) (*v1.PersistentVolumeSource, error) {
diskName, err := CreatePDWithRetryAndZone(zone)
func CreatePVSource(ctx context.Context, zone string) (*v1.PersistentVolumeSource, error) {
diskName, err := CreatePDWithRetryAndZone(ctx, zone)
if err != nil {
return nil, err
}
return framework.TestContext.CloudConfig.Provider.CreatePVSource(zone, diskName)
return framework.TestContext.CloudConfig.Provider.CreatePVSource(ctx, zone, diskName)
}
// DeletePVSource deletes a PV source.
func DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
return framework.TestContext.CloudConfig.Provider.DeletePVSource(pvSource)
func DeletePVSource(ctx context.Context, pvSource *v1.PersistentVolumeSource) error {
return framework.TestContext.CloudConfig.Provider.DeletePVSource(ctx, pvSource)
}
// GetDefaultStorageClassName returns the name of the default StorageClass, or an error if none can be determined.
func GetDefaultStorageClassName(c clientset.Interface) (string, error) {
list, err := c.StorageV1().StorageClasses().List(context.TODO(), metav1.ListOptions{})
func GetDefaultStorageClassName(ctx context.Context, c clientset.Interface) (string, error) {
list, err := c.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
if err != nil {
return "", fmt.Errorf("Error listing storage classes: %v", err)
return "", fmt.Errorf("Error listing storage classes: %w", err)
}
var scName string
for _, sc := range list.Items {
@ -834,18 +841,18 @@ func GetDefaultStorageClassName(c clientset.Interface) (string, error) {
}
// SkipIfNoDefaultStorageClass skips tests if no default SC can be found.
func SkipIfNoDefaultStorageClass(c clientset.Interface) {
_, err := GetDefaultStorageClassName(c)
func SkipIfNoDefaultStorageClass(ctx context.Context, c clientset.Interface) {
_, err := GetDefaultStorageClassName(ctx, c)
if err != nil {
e2eskipper.Skipf("error finding default storageClass : %v", err)
}
}
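A sketch of how a storage test might gate itself on the helper above; f is an assumed *framework.Framework and the surrounding ginkgo scaffolding is not shown:

	ginkgo.BeforeEach(func(ctx context.Context) {
		// Skip early if the cluster has no default StorageClass to bind against.
		SkipIfNoDefaultStorageClass(ctx, f.ClientSet)
	})
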
// WaitForPersistentVolumeDeleted waits for a PersistentVolume to get deleted or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, poll, timeout time.Duration) error {
func WaitForPersistentVolumeDeleted(ctx context.Context, c clientset.Interface, pvName string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err == nil {
framework.Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start))
continue

View File

@ -0,0 +1,9 @@
# This E2E framework sub-package is currently allowed to use arbitrary
# dependencies, therefore we need to override the restrictions from
# the parent .import-restrictions file.
#
# At some point it may become useful to also check this package's
# dependencies more carefully.
rules:
- selectorRegexp: ""
allowedPrefixes: [ "" ]

View File

@ -38,8 +38,6 @@ import (
func skipInternalf(caller int, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
// Long term this should get replaced with https://github.com/onsi/ginkgo/issues/1069.
framework.Logf(msg)
ginkgo.Skip(msg, caller+1)
panic("unreachable")
}
@ -103,9 +101,9 @@ func SkipIfFeatureGateEnabled(gate featuregate.Feature) {
}
// SkipIfMissingResource skips if the gvr resource is missing.
func SkipIfMissingResource(dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, namespace string) {
func SkipIfMissingResource(ctx context.Context, dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, namespace string) {
resourceClient := dynamicClient.Resource(gvr).Namespace(namespace)
_, err := resourceClient.List(context.TODO(), metav1.ListOptions{})
_, err := resourceClient.List(ctx, metav1.ListOptions{})
if err != nil {
// not all resources support list, so we ignore those
if apierrors.IsMethodNotSupported(err) || apierrors.IsNotFound(err) || apierrors.IsForbidden(err) {
@ -144,8 +142,8 @@ func SkipUnlessProviderIs(supportedProviders ...string) {
}
// SkipUnlessMultizone skips if the cluster does not have multizone.
func SkipUnlessMultizone(c clientset.Interface) {
zones, err := e2enode.GetClusterZones(c)
func SkipUnlessMultizone(ctx context.Context, c clientset.Interface) {
zones, err := e2enode.GetClusterZones(ctx, c)
if err != nil {
skipInternalf(1, "Error listing cluster zones")
}
@ -155,8 +153,8 @@ func SkipUnlessMultizone(c clientset.Interface) {
}
// SkipIfMultizone skips if the cluster has multizone.
func SkipIfMultizone(c clientset.Interface) {
zones, err := e2enode.GetClusterZones(c)
func SkipIfMultizone(ctx context.Context, c clientset.Interface) {
zones, err := e2enode.GetClusterZones(ctx, c)
if err != nil {
skipInternalf(1, "Error listing cluster zones")
}
@ -215,11 +213,11 @@ func SkipUnlessSSHKeyPresent() {
func serverVersionGTE(v *utilversion.Version, c discovery.ServerVersionInterface) (bool, error) {
serverVersion, err := c.ServerVersion()
if err != nil {
return false, fmt.Errorf("Unable to get server version: %v", err)
return false, fmt.Errorf("Unable to get server version: %w", err)
}
sv, err := utilversion.ParseSemantic(serverVersion.GitVersion)
if err != nil {
return false, fmt.Errorf("Unable to parse server version %q: %v", serverVersion.GitVersion, err)
return false, fmt.Errorf("Unable to parse server version %q: %w", serverVersion.GitVersion, err)
}
return sv.AtLeast(v), nil
}
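For illustration, the same comparison done directly with the version utilities used above; the literal versions are placeholders:

	sv, err := utilversion.ParseSemantic("v1.27.2") // e.g. the GitVersion reported by the server
	if err != nil {
		framework.Failf("cannot parse server version: %v", err)
	}
	if sv.AtLeast(utilversion.MustParseSemantic("1.26.0")) {
		framework.Logf("server is at least 1.26")
	}
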
@ -243,11 +241,11 @@ func RunIfSystemSpecNameIs(names ...string) {
}
// SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem skips unless the component runs as pods and the client can delete them.
func SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(componentName string, c clientset.Interface, ns string, labelSet labels.Set) {
func SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(ctx context.Context, componentName string, c clientset.Interface, ns string, labelSet labels.Set) {
// verify that the component runs as a pod
label := labels.SelectorFromSet(labelSet)
listOpts := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), listOpts)
pods, err := c.CoreV1().Pods(ns).List(ctx, listOpts)
framework.Logf("SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem: %v, %v", pods, err)
if err != nil {
skipInternalf(1, "Skipped because client failed to get component:%s pod err:%v", componentName, err)
@ -259,7 +257,7 @@ func SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(componentName string, c
// verify if client can delete pod
pod := pods.Items[0]
if err := c.CoreV1().Pods(ns).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{DryRun: []string{metav1.DryRunAll}}); err != nil {
if err := c.CoreV1().Pods(ns).Delete(ctx, pod.Name, metav1.DeleteOptions{DryRun: []string{metav1.DryRunAll}}); err != nil {
skipInternalf(1, "Skipped because client failed to delete component:%s pod, err:%v", componentName, err)
}
}

View File

@ -0,0 +1,9 @@
# This E2E framework sub-package is currently allowed to use arbitrary
# dependencies, therefore we need to override the restrictions from
# the parent .import-restrictions file.
#
# At some point it may become useful to also check this package's
# dependencies more carefully.
rules:
- selectorRegexp: ""
allowedPrefixes: [ "" ]

View File

@ -103,12 +103,12 @@ func GetSigner(provider string) (ssh.Signer, error) {
func makePrivateKeySignerFromFile(key string) (ssh.Signer, error) {
buffer, err := os.ReadFile(key)
if err != nil {
return nil, fmt.Errorf("error reading SSH key %s: '%v'", key, err)
return nil, fmt.Errorf("error reading SSH key %s: %w", key, err)
}
signer, err := ssh.ParsePrivateKey(buffer)
if err != nil {
return nil, fmt.Errorf("error parsing SSH key: '%v'", err)
return nil, fmt.Errorf("error parsing SSH key: %w", err)
}
return signer, err
@ -119,8 +119,8 @@ func makePrivateKeySignerFromFile(key string) (ssh.Signer, error) {
// looking for internal IPs. If it can't find an internal IP for every node it
// returns an error, though it still returns all hosts that it found in that
// case.
func NodeSSHHosts(c clientset.Interface) ([]string, error) {
nodelist := waitListSchedulableNodesOrDie(c)
func NodeSSHHosts(ctx context.Context, c clientset.Interface) ([]string, error) {
nodelist := waitListSchedulableNodesOrDie(ctx, c)
hosts := nodeAddresses(nodelist, v1.NodeExternalIP)
// If ExternalIPs aren't available for all nodes, try falling back to the InternalIPs.
@ -188,20 +188,20 @@ type Result struct {
// NodeExec execs the given cmd on node via SSH. Note that the nodeName is an sshable name,
// eg: the name returned by framework.GetMasterHost(). This is also not guaranteed to work across
// cloud providers since it involves ssh.
func NodeExec(nodeName, cmd, provider string) (Result, error) {
return SSH(cmd, net.JoinHostPort(nodeName, SSHPort), provider)
func NodeExec(ctx context.Context, nodeName, cmd, provider string) (Result, error) {
return SSH(ctx, cmd, net.JoinHostPort(nodeName, SSHPort), provider)
}
// SSH synchronously SSHs to a node running on provider and runs cmd. If there
// is no error performing the SSH, the stdout, stderr, and exit code are
// returned.
func SSH(cmd, host, provider string) (Result, error) {
func SSH(ctx context.Context, cmd, host, provider string) (Result, error) {
result := Result{Host: host, Cmd: cmd}
// Get a signer for the provider.
signer, err := GetSigner(provider)
if err != nil {
return result, fmt.Errorf("error getting signer for provider %s: '%v'", provider, err)
return result, fmt.Errorf("error getting signer for provider %s: %w", provider, err)
}
// RunSSHCommand will default to Getenv("USER") if user == "", but we're
@ -212,14 +212,14 @@ func SSH(cmd, host, provider string) (Result, error) {
}
if bastion := os.Getenv(sshBastionEnvKey); len(bastion) > 0 {
stdout, stderr, code, err := runSSHCommandViaBastion(cmd, result.User, bastion, host, signer)
stdout, stderr, code, err := runSSHCommandViaBastion(ctx, cmd, result.User, bastion, host, signer)
result.Stdout = stdout
result.Stderr = stderr
result.Code = code
return result, err
}
stdout, stderr, code, err := runSSHCommand(cmd, result.User, host, signer)
stdout, stderr, code, err := runSSHCommand(ctx, cmd, result.User, host, signer)
result.Stdout = stdout
result.Stderr = stderr
result.Code = code
@ -229,7 +229,7 @@ func SSH(cmd, host, provider string) (Result, error) {
// runSSHCommand returns the stdout, stderr, and exit code from running cmd on
// host as specific user, along with any SSH-level error.
func runSSHCommand(cmd, user, host string, signer ssh.Signer) (string, string, int, error) {
func runSSHCommand(ctx context.Context, cmd, user, host string, signer ssh.Signer) (string, string, int, error) {
if user == "" {
user = os.Getenv("USER")
}
@ -241,7 +241,7 @@ func runSSHCommand(cmd, user, host string, signer ssh.Signer) (string, string, i
}
client, err := ssh.Dial("tcp", host, config)
if err != nil {
err = wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err = wait.PollWithContext(ctx, 5*time.Second, 20*time.Second, func(ctx context.Context) (bool, error) {
fmt.Printf("error dialing %s@%s: '%v', retrying\n", user, host, err)
if client, err = ssh.Dial("tcp", host, config); err != nil {
return false, nil // retrying, error will be logged above
@ -250,12 +250,12 @@ func runSSHCommand(cmd, user, host string, signer ssh.Signer) (string, string, i
})
}
if err != nil {
return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: '%v'", user, host, err)
return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: %w", user, host, err)
}
defer client.Close()
session, err := client.NewSession()
if err != nil {
return "", "", 0, fmt.Errorf("error creating session to %s@%s: '%v'", user, host, err)
return "", "", 0, fmt.Errorf("error creating session to %s@%s: %w", user, host, err)
}
defer session.Close()
@ -275,7 +275,7 @@ func runSSHCommand(cmd, user, host string, signer ssh.Signer) (string, string, i
} else {
// Some other kind of error happened (e.g. an IOError); consider the
// SSH unsuccessful.
err = fmt.Errorf("failed running `%s` on %s@%s: '%v'", cmd, user, host, err)
err = fmt.Errorf("failed running `%s` on %s@%s: %w", cmd, user, host, err)
}
}
return bout.String(), berr.String(), code, err
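// Sketch of the wait.Poll -> wait.PollWithContext migration applied above: the
// condition callback now receives the context, so the retry loop stops as soon
// as the test's context is cancelled. checkReady is a hypothetical helper
// returning (bool, error); error handling is omitted for brevity.
err := wait.PollWithContext(ctx, 5*time.Second, 20*time.Second, func(ctx context.Context) (bool, error) {
	return checkReady(ctx)
})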
@ -285,7 +285,7 @@ func runSSHCommand(cmd, user, host string, signer ssh.Signer) (string, string, i
// host as specific user, along with any SSH-level error. It uses an SSH proxy to connect
// to bastion, then via that tunnel connects to the remote host. Similar to
// sshutil.RunSSHCommand but scoped to the needs of the test infrastructure.
func runSSHCommandViaBastion(cmd, user, bastion, host string, signer ssh.Signer) (string, string, int, error) {
func runSSHCommandViaBastion(ctx context.Context, cmd, user, bastion, host string, signer ssh.Signer) (string, string, int, error) {
// Setup the config, dial the server, and open a session.
config := &ssh.ClientConfig{
User: user,
@ -295,7 +295,7 @@ func runSSHCommandViaBastion(cmd, user, bastion, host string, signer ssh.Signer)
}
bastionClient, err := ssh.Dial("tcp", bastion, config)
if err != nil {
err = wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) {
err = wait.PollWithContext(ctx, 5*time.Second, 20*time.Second, func(ctx context.Context) (bool, error) {
fmt.Printf("error dialing %s@%s: '%v', retrying\n", user, bastion, err)
if bastionClient, err = ssh.Dial("tcp", bastion, config); err != nil {
return false, err
@ -304,26 +304,26 @@ func runSSHCommandViaBastion(cmd, user, bastion, host string, signer ssh.Signer)
})
}
if err != nil {
return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: %v", user, bastion, err)
return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: %w", user, bastion, err)
}
defer bastionClient.Close()
conn, err := bastionClient.Dial("tcp", host)
if err != nil {
return "", "", 0, fmt.Errorf("error dialing %s from bastion: %v", host, err)
return "", "", 0, fmt.Errorf("error dialing %s from bastion: %w", host, err)
}
defer conn.Close()
ncc, chans, reqs, err := ssh.NewClientConn(conn, host, config)
if err != nil {
return "", "", 0, fmt.Errorf("error creating forwarding connection %s from bastion: %v", host, err)
return "", "", 0, fmt.Errorf("error creating forwarding connection %s from bastion: %w", host, err)
}
client := ssh.NewClient(ncc, chans, reqs)
defer client.Close()
session, err := client.NewSession()
if err != nil {
return "", "", 0, fmt.Errorf("error creating session to %s@%s from bastion: '%v'", user, host, err)
return "", "", 0, fmt.Errorf("error creating session to %s@%s from bastion: %w", user, host, err)
}
defer session.Close()
@ -343,7 +343,7 @@ func runSSHCommandViaBastion(cmd, user, bastion, host string, signer ssh.Signer)
} else {
// Some other kind of error happened (e.g. an IOError); consider the
// SSH unsuccessful.
err = fmt.Errorf("failed running `%s` on %s@%s: '%v'", cmd, user, host, err)
err = fmt.Errorf("failed running `%s` on %s@%s: %w", cmd, user, host, err)
}
}
return bout.String(), berr.String(), code, err
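// Sketch illustrating the '%v' -> '%w' change above: wrapping with %w keeps
// the underlying error reachable via errors.Is / errors.As, whereas %v
// flattens it into a plain string. The error values here are made up.
base := errors.New("connection refused")
wrapped := fmt.Errorf("error dialing %s from bastion: %w", "10.0.0.1:22", base)
fmt.Println(errors.Is(wrapped, base)) // true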
@ -359,7 +359,7 @@ func LogResult(result Result) {
}
// IssueSSHCommandWithResult tries to execute an SSH command and returns the execution result
func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, error) {
func IssueSSHCommandWithResult(ctx context.Context, cmd, provider string, node *v1.Node) (*Result, error) {
framework.Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
@ -384,7 +384,7 @@ func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, er
}
framework.Logf("SSH %q on %s(%s)", cmd, node.Name, host)
result, err := SSH(cmd, host, provider)
result, err := SSH(ctx, cmd, host, provider)
LogResult(result)
if result.Code != 0 || err != nil {
@ -396,8 +396,8 @@ func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, er
}
// IssueSSHCommand tries to execute an SSH command
func IssueSSHCommand(cmd, provider string, node *v1.Node) error {
_, err := IssueSSHCommandWithResult(cmd, provider, node)
func IssueSSHCommand(ctx context.Context, cmd, provider string, node *v1.Node) error {
_, err := IssueSSHCommandWithResult(ctx, cmd, provider, node)
if err != nil {
return err
}
@ -419,11 +419,11 @@ func nodeAddresses(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string
}
// waitListSchedulableNodes is a wrapper around listing nodes supporting retries.
func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
func waitListSchedulableNodes(ctx context.Context, c clientset.Interface) (*v1.NodeList, error) {
var nodes *v1.NodeList
var err error
if wait.PollImmediate(pollNodeInterval, singleCallTimeout, func() (bool, error) {
nodes, err = c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{
if wait.PollImmediateWithContext(ctx, pollNodeInterval, singleCallTimeout, func(ctx context.Context) (bool, error) {
nodes, err = c.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
@ -437,8 +437,8 @@ func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
}
// waitListSchedulableNodesOrDie is a wrapper around listing nodes supporting retries.
func waitListSchedulableNodesOrDie(c clientset.Interface) *v1.NodeList {
nodes, err := waitListSchedulableNodes(c)
func waitListSchedulableNodesOrDie(ctx context.Context, c clientset.Interface) *v1.NodeList {
nodes, err := waitListSchedulableNodes(ctx, c)
if err != nil {
expectNoError(err, "Non-retryable failure or timed out while listing nodes for e2e cluster.")
}

View File

@ -17,6 +17,7 @@ limitations under the License.
package framework
import (
"context"
"crypto/rand"
"encoding/base64"
"errors"
@ -32,6 +33,7 @@ import (
"github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/ginkgo/v2/types"
"github.com/onsi/gomega"
gomegaformat "github.com/onsi/gomega/format"
restclient "k8s.io/client-go/rest"
@ -40,6 +42,8 @@ import (
"k8s.io/klog/v2"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/test/e2e/framework/internal/junit"
"k8s.io/kubernetes/test/utils/image"
"k8s.io/kubernetes/test/utils/kubeconfig"
)
@ -100,15 +104,22 @@ type TestContextType struct {
// Tooling is the tooling in use (e.g. kops, gke). Provider is the cloud provider and might not uniquely identify the tooling.
Tooling string
CloudConfig CloudConfig
KubectlPath string
OutputDir string
ReportDir string
ReportPrefix string
Prefix string
MinStartupPods int
// Timeout for waiting for system pods to be running
SystemPodsStartupTimeout time.Duration
// timeouts contains user-configurable timeouts for various operations.
// Individual Framework instance also have such timeouts which may be
// different from these here. To avoid confusion, this field is not
// exported. Its values can be accessed through
// NewTimeoutContext.
timeouts TimeoutContext
CloudConfig CloudConfig
KubectlPath string
OutputDir string
ReportDir string
ReportPrefix string
ReportCompleteGinkgo bool
ReportCompleteJUnit bool
Prefix string
MinStartupPods int
EtcdUpgradeStorage string
EtcdUpgradeVersion string
GCEUpgradeScript string
@ -141,10 +152,6 @@ type TestContextType struct {
IncludeClusterAutoscalerMetrics bool
// Currently supported values are 'hr' for human-readable and 'json'. It's a comma separated list.
OutputPrintType string
// NodeSchedulableTimeout is the timeout for waiting for all nodes to be schedulable.
NodeSchedulableTimeout time.Duration
// SystemDaemonsetStartupTimeout is the timeout for waiting for all system daemonsets to be ready.
SystemDaemonsetStartupTimeout time.Duration
// CreateTestingNS is responsible for creating a namespace used for executing e2e tests.
// It accepts a namespace base name (which will be given an e2e prefix), a kube client,
// and labels to be applied to the namespace.
@ -182,6 +189,13 @@ type TestContextType struct {
// DockerConfigFile is a file that contains credentials which can be used to pull images from certain private registries, needed for a test.
DockerConfigFile string
// E2EDockerConfigFile is a Docker credentials configuration file that contains an authorization token which can be used to pull images from certain private registries provided by the users.
// For more details refer https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#log-in-to-docker-hub
E2EDockerConfigFile string
// KubeTestRepoList is a yaml file used for overriding registries for test images.
KubeTestRepoList string
// SnapshotControllerPodName is the name used for identifying the snapshot controller pod.
SnapshotControllerPodName string
@ -214,8 +228,10 @@ type NodeKillerConfig struct {
JitterFactor float64
// SimulatedDowntime is the duration between when a node is killed and when it is recreated.
SimulatedDowntime time.Duration
// NodeKillerStopCh is a channel that is used to notify NodeKiller to stop killing nodes.
NodeKillerStopCh chan struct{}
// NodeKillerStopCtx is a context that is used to notify NodeKiller to stop killing nodes.
NodeKillerStopCtx context.Context
// NodeKillerStop is the cancel function for NodeKillerStopCtx.
NodeKillerStop func()
}
// NodeTestContextType is part of TestContextType, it is shared by all node e2e test.
@ -242,6 +258,8 @@ type NodeTestContextType struct {
RestartKubelet bool
// ExtraEnvs is a map of environment names to values.
ExtraEnvs map[string]string
// StandaloneMode indicates whether the test is running kubelet in a standalone mode.
StandaloneMode bool
}
// CloudConfig holds the cloud configuration for e2e test suites.
@ -268,7 +286,9 @@ type CloudConfig struct {
}
// TestContext should be used by all tests to access common context data.
var TestContext TestContextType
var TestContext = TestContextType{
timeouts: defaultTimeouts,
}
// stringArrayValue is used with flag.Var for a comma-separated list of strings placed into a string array.
type stringArrayValue struct {
@ -327,7 +347,9 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
flags.StringVar(&TestContext.Host, "host", "", fmt.Sprintf("The host, or apiserver, to connect to. Will default to %s if this argument and --kubeconfig are not set.", defaultHost))
flags.StringVar(&TestContext.ReportPrefix, "report-prefix", "", "Optional prefix for JUnit XML reports. Default is empty, which doesn't prepend anything to the default name.")
flags.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the JUnit XML reports and other tests results should be saved. Default is empty, which doesn't generate these reports. If ginkgo's -junit-report parameter is used, that parameter instead of -report-dir determines the location of a single JUnit report.")
flags.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the simplified JUnit XML reports and other tests results should be saved. Default is empty, which doesn't generate these reports. If ginkgo's -junit-report parameter is used, that parameter instead of -report-dir determines the location of a single JUnit report.")
flags.BoolVar(&TestContext.ReportCompleteGinkgo, "report-complete-ginkgo", false, "Enables writing a complete test report as Ginkgo JSON to <report dir>/ginkgo/report.json. Ignored if --report-dir is not set.")
flags.BoolVar(&TestContext.ReportCompleteJUnit, "report-complete-junit", false, "Enables writing a complete test report as JUnit XML to <report dir>/ginkgo/report.json. Ignored if --report-dir is not set.")
flags.StringVar(&TestContext.ContainerRuntimeEndpoint, "container-runtime-endpoint", "unix:///var/run/containerd/containerd.sock", "The container runtime endpoint of cluster VM instances.")
flags.StringVar(&TestContext.ContainerRuntimeProcessName, "container-runtime-process-name", "dockerd", "The name of the container runtime process.")
flags.StringVar(&TestContext.ContainerRuntimePidFile, "container-runtime-pid-file", "/var/run/docker.pid", "The pid file of the container runtime.")
@ -342,12 +364,14 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
flags.StringVar(&TestContext.NonblockingTaints, "non-blocking-taints", `node-role.kubernetes.io/control-plane,node-role.kubernetes.io/master`, "Nodes with taints in this comma-delimited list will not block the test framework from starting tests. The default taint 'node-role.kubernetes.io/master' is DEPRECATED and will be removed from the list in a future release.")
flags.BoolVar(&TestContext.ListImages, "list-images", false, "If true, will show list of images used for running tests.")
flags.BoolVar(&TestContext.ListConformanceTests, "list-conformance-tests", false, "If true, will show list of conformance tests.")
flags.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.")
flags.StringVar(&TestContext.ProgressReportURL, "progress-report-url", "", "The URL to POST progress updates to as the suite runs to assist in aiding integrations. If empty, no messages sent.")
flags.StringVar(&TestContext.SpecSummaryOutput, "spec-dump", "", "The file to dump all ginkgo.SpecSummary to after tests run. If empty, no objects are saved/printed.")
flags.StringVar(&TestContext.DockerConfigFile, "docker-config-file", "", "A file that contains credentials which can be used to pull images from certain private registries, needed for a test.")
flags.StringVar(&TestContext.DockerConfigFile, "docker-config-file", "", "A docker credential file which contains authorization token that is used to perform image pull tests from an authenticated registry. For more details regarding the content of the file refer https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#log-in-to-docker-hub")
flags.StringVar(&TestContext.E2EDockerConfigFile, "e2e-docker-config-file", "", "A docker credentials configuration file used which contains authorization token that can be used to pull images from certain private registries provided by the users. For more details refer https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#log-in-to-docker-hub")
flags.StringVar(&TestContext.KubeTestRepoList, "kube-test-repo-list", "", "A yaml file used for overriding registries for test images. Alternatively, the KUBE_TEST_REPO_LIST env variable can be set.")
flags.StringVar(&TestContext.SnapshotControllerPodName, "snapshot-controller-pod-name", "", "The pod name to use for identifying the snapshot controller in the kube-system namespace.")
flags.IntVar(&TestContext.SnapshotControllerHTTPPort, "snapshot-controller-http-port", 0, "The port to use for snapshot controller HTTP communication.")
@ -358,12 +382,8 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
func CreateGinkgoConfig() (types.SuiteConfig, types.ReporterConfig) {
// fetch the current config
suiteConfig, reporterConfig := ginkgo.GinkgoConfiguration()
// Turn on EmitSpecProgress to get spec progress (especially on interrupt)
suiteConfig.EmitSpecProgress = true
// Randomize specs as well as suites
suiteConfig.RandomizeAllSpecs = true
// Turn on verbose by default to get spec names
reporterConfig.Verbose = true
// Disable skipped tests unless they are explicitly requested.
if len(suiteConfig.FocusStrings) == 0 && len(suiteConfig.SkipStrings) == 0 {
suiteConfig.SkipStrings = []string{`\[Flaky\]|\[Feature:.+\]`}
@ -415,9 +435,9 @@ func RegisterClusterFlags(flags *flag.FlagSet) {
flags.StringVar(&cloudConfig.ClusterTag, "cluster-tag", "", "Tag used to identify resources. Only required if provider is aws.")
flags.StringVar(&cloudConfig.ConfigFile, "cloud-config-file", "", "Cloud config file. Only required if provider is azure or vsphere.")
flags.IntVar(&TestContext.MinStartupPods, "minStartupPods", 0, "The number of pods which we need to see in 'Running' state with a 'Ready' condition of true, before we try running tests. This is useful in any cluster which needs some base pod-based services running before it can be used. If set to -1, no pods are checked and tests run straight away.")
flags.DurationVar(&TestContext.SystemPodsStartupTimeout, "system-pods-startup-timeout", 10*time.Minute, "Timeout for waiting for all system pods to be running before starting tests.")
flags.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 30*time.Minute, "Timeout for waiting for all nodes to be schedulable.")
flags.DurationVar(&TestContext.SystemDaemonsetStartupTimeout, "system-daemonsets-startup-timeout", 5*time.Minute, "Timeout for waiting for all system daemonsets to be ready.")
flags.DurationVar(&TestContext.timeouts.SystemPodsStartup, "system-pods-startup-timeout", TestContext.timeouts.SystemPodsStartup, "Timeout for waiting for all system pods to be running before starting tests.")
flags.DurationVar(&TestContext.timeouts.NodeSchedulable, "node-schedulable-timeout", TestContext.timeouts.NodeSchedulable, "Timeout for waiting for all nodes to be schedulable.")
flags.DurationVar(&TestContext.timeouts.SystemDaemonsetStartup, "system-daemonsets-startup-timeout", TestContext.timeouts.SystemDaemonsetStartup, "Timeout for waiting for all system daemonsets to be ready.")
flags.StringVar(&TestContext.EtcdUpgradeStorage, "etcd-upgrade-storage", "", "The storage version to upgrade to (either 'etcdv2' or 'etcdv3') if doing an etcd upgrade test.")
flags.StringVar(&TestContext.EtcdUpgradeVersion, "etcd-upgrade-version", "", "The etcd binary version to upgrade to (e.g., '3.0.14', '2.3.7') if doing an etcd upgrade test.")
flags.StringVar(&TestContext.GCEUpgradeScript, "gce-upgrade-script", "", "Script to use to upgrade a GCE cluster.")
@ -455,6 +475,9 @@ func AfterReadingAllFlags(t *TestContextType) {
// These flags are not exposed via the normal command line flag set,
// therefore we have to use our own private one here.
if t.KubeTestRepoList != "" {
image.Init(t.KubeTestRepoList)
}
var fs flag.FlagSet
klog.InitFlags(&fs)
fs.Set("logtostderr", "false")
@ -463,6 +486,22 @@ func AfterReadingAllFlags(t *TestContextType) {
fs.Set("stderrthreshold", "10" /* higher than any of the severities -> none pass the threshold */)
klog.SetOutput(ginkgo.GinkgoWriter)
if t.ListImages {
for _, v := range image.GetImageConfigs() {
fmt.Println(v.GetE2EImage())
}
os.Exit(0)
}
// Reconfigure gomega defaults. The poll interval should be suitable
// for most tests. The timeouts are more subjective and tests may want
// to override them, but these defaults are still better for E2E than the
// ones from Gomega (1s timeout, 10ms interval).
gomega.SetDefaultEventuallyPollingInterval(t.timeouts.Poll)
gomega.SetDefaultConsistentlyPollingInterval(t.timeouts.Poll)
gomega.SetDefaultEventuallyTimeout(t.timeouts.PodStart)
gomega.SetDefaultConsistentlyDuration(t.timeouts.PodStartShort)
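// Sketch, not part of the diff: with the defaults above, asynchronous
// assertions can rely on E2E-appropriate timeouts and still override them per
// assertion. podIsRunning is a hypothetical func() bool.
gomega.Eventually(podIsRunning).Should(gomega.BeTrue())
gomega.Eventually(podIsRunning).WithTimeout(10 * time.Minute).Should(gomega.BeTrue())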
// Only set a default host if one won't be supplied via kubeconfig
if len(t.Host) == 0 && len(t.KubeConfig) == 0 {
// Check if we can use the in-cluster config
@ -526,72 +565,46 @@ func AfterReadingAllFlags(t *TestContextType) {
}
if TestContext.ReportDir != "" {
ginkgo.ReportAfterSuite("Kubernetes e2e JUnit report", writeJUnitReport)
}
}
const (
// This is the traditional gomega.Format default of 4000 for an object
// dump plus some extra room for the message.
maxFailureMessageSize = 5000
truncatedMsg = "\n[... see output for full dump ...]\n"
)
// writeJUnitReport generates a JUnit file in the e2e report directory that is
// shorter than the one normally written by `ginkgo --junit-report`. This is
// needed because the full report can become too large for tools like Spyglass
// (https://github.com/kubernetes/kubernetes/issues/111510).
//
// Users who want the full report can use `--junit-report`.
func writeJUnitReport(report ginkgo.Report) {
trimmedReport := report
trimmedReport.SpecReports = nil
for _, specReport := range report.SpecReports {
// Remove details for any spec that hasn't failed. In Prow,
// the test output captured in build-log.txt has all of this
// information, so we don't need it in the XML.
if specReport.State != types.SpecStateFailed {
specReport.CapturedGinkgoWriterOutput = ""
specReport.CapturedStdOutErr = ""
} else {
// Truncate the failure message if it is too large.
msgLen := len(specReport.Failure.Message)
if msgLen > maxFailureMessageSize {
// Insert full message at the beginning where it is easy to find.
specReport.CapturedGinkgoWriterOutput =
"Full failure message:\n" +
specReport.Failure.Message + "\n\n" +
strings.Repeat("=", 70) + "\n\n" +
specReport.CapturedGinkgoWriterOutput
specReport.Failure.Message = specReport.Failure.Message[0:maxFailureMessageSize/2] + truncatedMsg + specReport.Failure.Message[msgLen-maxFailureMessageSize/2:msgLen]
// Create the directory before running the suite. If
// --report-dir is unusable, we should report
// that as soon as possible. This will be done by each worker
// in parallel, so we will get an "exists" error in most of them.
if err := os.MkdirAll(TestContext.ReportDir, 0777); err != nil && !os.IsExist(err) {
klog.Errorf("Create report dir: %v", err)
os.Exit(1)
}
ginkgoDir := path.Join(TestContext.ReportDir, "ginkgo")
if TestContext.ReportCompleteGinkgo || TestContext.ReportCompleteJUnit {
if err := os.MkdirAll(ginkgoDir, 0777); err != nil && !os.IsExist(err) {
klog.Errorf("Create <report-dir>/ginkgo: %v", err)
os.Exit(1)
}
}
// Remove report entries generated by ginkgo.By("doing
// something") because those are not useful (just have the
// start time) and cause Spyglass to show an additional "open
// stdout" button with a summary of the steps, which usually
// doesn't help. We don't remove all entries because other
// measurements also get reported this way.
//
// Removing the report entries is okay because message text was
// already added to the test output when ginkgo.By was called.
reportEntries := specReport.ReportEntries
specReport.ReportEntries = nil
for _, reportEntry := range reportEntries {
if reportEntry.Name != "By Step" {
specReport.ReportEntries = append(specReport.ReportEntries, reportEntry)
}
if TestContext.ReportCompleteGinkgo {
ginkgo.ReportAfterSuite("Ginkgo JSON report", func(report ginkgo.Report) {
ExpectNoError(reporters.GenerateJSONReport(report, path.Join(ginkgoDir, "report.json")))
})
ginkgo.ReportAfterSuite("JUnit XML report", func(report ginkgo.Report) {
ExpectNoError(reporters.GenerateJUnitReport(report, path.Join(ginkgoDir, "report.xml")))
})
}
trimmedReport.SpecReports = append(trimmedReport.SpecReports, specReport)
}
ginkgo.ReportAfterSuite("Kubernetes e2e JUnit report", func(report ginkgo.Report) {
// With Ginkgo v1, we used to write one file per
// parallel node. Now Ginkgo v2 automatically merges
// all results into a report for us. The 01 suffix is
// kept in case that users expect files to be called
// "junit_<prefix><number>.xml".
junitReport := path.Join(TestContext.ReportDir, "junit_"+TestContext.ReportPrefix+"01.xml")
// With Ginkgo v1, we used to write one file per parallel node. Now
// Ginkgo v2 automatically merges all results into a report for us. The
// 01 suffix is kept in case that users expect files to be called
// "junit_<prefix><number>.xml".
junitReport := path.Join(TestContext.ReportDir, "junit_"+TestContext.ReportPrefix+"01.xml")
reporters.GenerateJUnitReport(trimmedReport, junitReport)
// writeJUnitReport generates a JUnit file in the e2e
// report directory that is shorter than the one
// normally written by `ginkgo --junit-report`. This is
// needed because the full report can become too large
// for tools like Spyglass
// (https://github.com/kubernetes/kubernetes/issues/111510).
ExpectNoError(junit.WriteJUnitReport(report, junitReport))
})
}
}
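// Sketch of how a consuming suite typically wires the flags and hooks defined
// in this file; this mirrors the common pattern in test/e2e and is not part of
// the diff itself (standard flag/os/testing imports assumed):
func TestMain(m *testing.M) {
	framework.RegisterCommonFlags(flag.CommandLine)
	framework.RegisterClusterFlags(flag.CommandLine)
	flag.Parse()
	framework.AfterReadingAllFlags(&framework.TestContext)
	os.Exit(m.Run())
}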

View File

@ -0,0 +1,9 @@
# This E2E framework sub-package is currently allowed to use arbitrary
# dependencies, therefore we need to override the restrictions from
# the parent .import-restrictions file.
#
# At some point it may become useful to also check this package's
# dependencies more carefully.
rules:
- selectorRegexp: ""
allowedPrefixes: [ "" ]

View File

@ -73,7 +73,7 @@ func Read(filePath string) ([]byte, error) {
for _, filesource := range filesources {
data, err := filesource.ReadTestFile(filePath)
if err != nil {
return nil, fmt.Errorf("fatal error retrieving test file %s: %s", filePath, err)
return nil, fmt.Errorf("fatal error retrieving test file %s: %w", filePath, err)
}
if data != nil {
return data, nil

View File

@ -18,33 +18,41 @@ package framework
import "time"
const (
// Default timeouts to be used in TimeoutContext
podStartTimeout = 5 * time.Minute
podStartShortTimeout = 2 * time.Minute
podStartSlowTimeout = 15 * time.Minute
podDeleteTimeout = 5 * time.Minute
claimProvisionTimeout = 5 * time.Minute
claimProvisionShortTimeout = 1 * time.Minute
dataSourceProvisionTimeout = 5 * time.Minute
claimBoundTimeout = 3 * time.Minute
pvReclaimTimeout = 3 * time.Minute
pvBoundTimeout = 3 * time.Minute
pvCreateTimeout = 3 * time.Minute
pvDeleteTimeout = 5 * time.Minute
pvDeleteSlowTimeout = 20 * time.Minute
snapshotCreateTimeout = 5 * time.Minute
snapshotDeleteTimeout = 5 * time.Minute
snapshotControllerMetricsTimeout = 5 * time.Minute
)
var defaultTimeouts = TimeoutContext{
Poll: 2 * time.Second, // from the former e2e/framework/pod poll interval
PodStart: 5 * time.Minute,
PodStartShort: 2 * time.Minute,
PodStartSlow: 15 * time.Minute,
PodDelete: 5 * time.Minute,
ClaimProvision: 5 * time.Minute,
ClaimProvisionShort: 1 * time.Minute,
DataSourceProvision: 5 * time.Minute,
ClaimBound: 3 * time.Minute,
PVReclaim: 3 * time.Minute,
PVBound: 3 * time.Minute,
PVCreate: 3 * time.Minute,
PVDelete: 5 * time.Minute,
PVDeleteSlow: 20 * time.Minute,
SnapshotCreate: 5 * time.Minute,
SnapshotDelete: 5 * time.Minute,
SnapshotControllerMetrics: 5 * time.Minute,
SystemPodsStartup: 10 * time.Minute,
NodeSchedulable: 30 * time.Minute,
SystemDaemonsetStartup: 5 * time.Minute,
}
// TimeoutContext contains timeout settings for several actions.
type TimeoutContext struct {
// Poll is how long to wait between API calls when waiting for some condition.
Poll time.Duration
// PodStart is how long to wait for the pod to be started.
// This value is the default for gomega.Eventually.
PodStart time.Duration
// PodStartShort is the same as `PodStart`, but shorter.
// Use it on a case-by-case basis, mostly when you are sure pod start will not be delayed.
// This value is the default for gomega.Consistently.
PodStartShort time.Duration
// PodStartSlow is the same as `PodStart`, but longer.
@ -89,26 +97,31 @@ type TimeoutContext struct {
// SnapshotControllerMetrics is how long to wait for snapshot controller metrics.
SnapshotControllerMetrics time.Duration
// SystemPodsStartup is how long to wait for system pods to be running.
SystemPodsStartup time.Duration
// NodeSchedulable is how long to wait for all nodes to be schedulable.
NodeSchedulable time.Duration
// SystemDaemonsetStartup is how long to wait for all system daemonsets to be ready.
SystemDaemonsetStartup time.Duration
}
// NewTimeoutContextWithDefaults returns a TimeoutContext with default values.
func NewTimeoutContextWithDefaults() *TimeoutContext {
return &TimeoutContext{
PodStart: podStartTimeout,
PodStartShort: podStartShortTimeout,
PodStartSlow: podStartSlowTimeout,
PodDelete: podDeleteTimeout,
ClaimProvision: claimProvisionTimeout,
ClaimProvisionShort: claimProvisionShortTimeout,
DataSourceProvision: dataSourceProvisionTimeout,
ClaimBound: claimBoundTimeout,
PVReclaim: pvReclaimTimeout,
PVBound: pvBoundTimeout,
PVCreate: pvCreateTimeout,
PVDelete: pvDeleteTimeout,
PVDeleteSlow: pvDeleteSlowTimeout,
SnapshotCreate: snapshotCreateTimeout,
SnapshotDelete: snapshotDeleteTimeout,
SnapshotControllerMetrics: snapshotControllerMetricsTimeout,
}
// NewTimeoutContext returns a TimeoutContext with all values set either to
// hard-coded defaults or a value that was configured when running the E2E
// suite. Should be called after command line parsing.
func NewTimeoutContext() *TimeoutContext {
// Make a copy, otherwise the caller would have the ability to modify
// the original values.
copy := TestContext.timeouts
return &copy
}
// PollInterval defines how long to wait between API server queries while
// waiting for some condition.
//
// This value is the default for gomega.Eventually and gomega.Consistently.
func PollInterval() time.Duration {
return TestContext.timeouts.Poll
}
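// Sketch, not part of the diff: code outside a Framework instance can take a
// snapshot of the configured timeouts after flag parsing. The client, pod name
// and namespace are hypothetical; e2epod is the usual alias for
// k8s.io/kubernetes/test/e2e/framework/pod.
timeouts := framework.NewTimeoutContext()
err := e2epod.WaitTimeoutForPodRunningInNamespace(ctx, client, "demo-pod", "demo-ns", timeouts.PodStart)
framework.ExpectNoError(err)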

View File

@ -146,7 +146,7 @@ var (
var RunID = uuid.NewUUID()
// CreateTestingNSFn is a func that is responsible for creating a namespace used for executing e2e tests.
type CreateTestingNSFn func(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error)
type CreateTestingNSFn func(ctx context.Context, baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error)
// APIAddress returns an address of an instance.
func APIAddress() string {
@ -198,9 +198,9 @@ func NodeOSArchIs(supportedNodeOsArchs ...string) bool {
// DeleteNamespaces deletes all namespaces that match the given delete and skip filters.
// Filter is by simple strings.Contains; first skip filter, then delete filter.
// Returns the list of deleted namespaces or an error.
func DeleteNamespaces(c clientset.Interface, deleteFilter, skipFilter []string) ([]string, error) {
func DeleteNamespaces(ctx context.Context, c clientset.Interface, deleteFilter, skipFilter []string) ([]string, error) {
ginkgo.By("Deleting namespaces")
nsList, err := c.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
nsList, err := c.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
ExpectNoError(err, "Failed to get namespace list")
var deleted []string
var wg sync.WaitGroup
@ -228,7 +228,7 @@ OUTER:
go func(nsName string) {
defer wg.Done()
defer ginkgo.GinkgoRecover()
gomega.Expect(c.CoreV1().Namespaces().Delete(context.TODO(), nsName, metav1.DeleteOptions{})).To(gomega.Succeed())
gomega.Expect(c.CoreV1().Namespaces().Delete(ctx, nsName, metav1.DeleteOptions{})).To(gomega.Succeed())
Logf("namespace : %v api call to delete is complete ", nsName)
}(item.Name)
}
@ -237,16 +237,16 @@ OUTER:
}
// WaitForNamespacesDeleted waits for the namespaces to be deleted.
func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeout time.Duration) error {
func WaitForNamespacesDeleted(ctx context.Context, c clientset.Interface, namespaces []string, timeout time.Duration) error {
ginkgo.By(fmt.Sprintf("Waiting for namespaces %+v to vanish", namespaces))
nsMap := map[string]bool{}
for _, ns := range namespaces {
nsMap[ns] = true
}
// Now poll until all namespaces have been eradicated.
return wait.Poll(2*time.Second, timeout,
func() (bool, error) {
nsList, err := c.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
return wait.PollWithContext(ctx, 2*time.Second, timeout,
func(ctx context.Context) (bool, error) {
nsList, err := c.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
if err != nil {
return false, err
}
@ -259,20 +259,20 @@ func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeou
})
}
func waitForConfigMapInNamespace(c clientset.Interface, ns, name string, timeout time.Duration) error {
func waitForConfigMapInNamespace(ctx context.Context, c clientset.Interface, ns, name string, timeout time.Duration) error {
fieldSelector := fields.OneTermEqualSelector("metadata.name", name).String()
ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, timeout)
defer cancel()
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (object runtime.Object, e error) {
options.FieldSelector = fieldSelector
return c.CoreV1().ConfigMaps(ns).List(context.TODO(), options)
return c.CoreV1().ConfigMaps(ns).List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
options.FieldSelector = fieldSelector
return c.CoreV1().ConfigMaps(ns).Watch(context.TODO(), options)
return c.CoreV1().ConfigMaps(ns).Watch(ctx, options)
},
}
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
defer cancel()
_, err := watchtools.UntilWithSync(ctx, lw, &v1.ConfigMap{}, nil, func(event watch.Event) (bool, error) {
switch event.Type {
case watch.Deleted:
@ -285,20 +285,20 @@ func waitForConfigMapInNamespace(c clientset.Interface, ns, name string, timeout
return err
}
func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountName string, timeout time.Duration) error {
func waitForServiceAccountInNamespace(ctx context.Context, c clientset.Interface, ns, serviceAccountName string, timeout time.Duration) error {
fieldSelector := fields.OneTermEqualSelector("metadata.name", serviceAccountName).String()
ctx, cancel := watchtools.ContextWithOptionalTimeout(ctx, timeout)
defer cancel()
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (object runtime.Object, e error) {
options.FieldSelector = fieldSelector
return c.CoreV1().ServiceAccounts(ns).List(context.TODO(), options)
return c.CoreV1().ServiceAccounts(ns).List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
options.FieldSelector = fieldSelector
return c.CoreV1().ServiceAccounts(ns).Watch(context.TODO(), options)
return c.CoreV1().ServiceAccounts(ns).Watch(ctx, options)
},
}
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
defer cancel()
_, err := watchtools.UntilWithSync(ctx, lw, &v1.ServiceAccount{}, nil, func(event watch.Event) (bool, error) {
switch event.Type {
case watch.Deleted:
@ -317,20 +317,20 @@ func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountN
// WaitForDefaultServiceAccountInNamespace waits for the default service account to be provisioned.
// The default service account is what is associated with pods when they do not specify a service account;
// as a result, pods are not able to be provisioned in a namespace until the service account is provisioned.
func WaitForDefaultServiceAccountInNamespace(c clientset.Interface, namespace string) error {
return waitForServiceAccountInNamespace(c, namespace, "default", ServiceAccountProvisionTimeout)
func WaitForDefaultServiceAccountInNamespace(ctx context.Context, c clientset.Interface, namespace string) error {
return waitForServiceAccountInNamespace(ctx, c, namespace, defaultServiceAccountName, ServiceAccountProvisionTimeout)
}
// WaitForKubeRootCAInNamespace waits for the configmap kube-root-ca.crt containing the service account
// CA trust bundle to be provisioned in the specified namespace so that pods do not have to retry mounting
// the config map (which creates noise that hides other issues in the Kubelet).
func WaitForKubeRootCAInNamespace(c clientset.Interface, namespace string) error {
return waitForConfigMapInNamespace(c, namespace, "kube-root-ca.crt", ServiceAccountProvisionTimeout)
func WaitForKubeRootCAInNamespace(ctx context.Context, c clientset.Interface, namespace string) error {
return waitForConfigMapInNamespace(ctx, c, namespace, "kube-root-ca.crt", ServiceAccountProvisionTimeout)
}
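// Sketch of a typical call site, not part of the diff; c is a
// clientset.Interface and ns is a namespace created earlier (for example via
// CreateTestingNS, shown just below):
ExpectNoError(WaitForDefaultServiceAccountInNamespace(ctx, c, ns.Name))
ExpectNoError(WaitForKubeRootCAInNamespace(ctx, c, ns.Name))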
// CreateTestingNS should be used by every test; note that we append a common prefix to the provided test name.
// Please see NewFramework instead of using this directly.
func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error) {
func CreateTestingNS(ctx context.Context, baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error) {
if labels == nil {
labels = map[string]string{}
}
@ -351,9 +351,9 @@ func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]s
}
// Be robust about making the namespace creation call.
var got *v1.Namespace
if err := wait.PollImmediate(Poll, 30*time.Second, func() (bool, error) {
if err := wait.PollImmediateWithContext(ctx, Poll, 30*time.Second, func(ctx context.Context) (bool, error) {
var err error
got, err = c.CoreV1().Namespaces().Create(context.TODO(), namespaceObj, metav1.CreateOptions{})
got, err = c.CoreV1().Namespaces().Create(ctx, namespaceObj, metav1.CreateOptions{})
if err != nil {
if apierrors.IsAlreadyExists(err) {
// regenerate on conflict
@ -370,7 +370,7 @@ func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]s
}
if TestContext.VerifyServiceAccount {
if err := WaitForDefaultServiceAccountInNamespace(c, got.Name); err != nil {
if err := WaitForDefaultServiceAccountInNamespace(ctx, c, got.Name); err != nil {
// Even if we fail to create serviceAccount in the namespace,
// we have successfully created a namespace.
// So, return the created namespace.
@ -382,7 +382,7 @@ func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]s
// CheckTestingNSDeletedExcept checks whether all existing e2e-based namespaces are in the Terminating state
// and waits until they are finally deleted. It ignores the namespace given by skip.
func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error {
func CheckTestingNSDeletedExcept(ctx context.Context, c clientset.Interface, skip string) error {
// TODO: Since we don't have support for bulk resource deletion in the API,
// while deleting a namespace we are deleting all objects from that namespace
// one by one (one deletion == one API call). This basically exposes us to
@ -398,7 +398,7 @@ func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error {
Logf("Waiting for terminating namespaces to be deleted...")
for start := time.Now(); time.Since(start) < timeout; time.Sleep(15 * time.Second) {
namespaces, err := c.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
namespaces, err := c.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
if err != nil {
Logf("Listing namespaces failed: %v", err)
continue
@ -420,10 +420,10 @@ func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error {
}
// WaitForServiceEndpointsNum waits until the number of endpoints that implement the service reaches expectNum.
func WaitForServiceEndpointsNum(c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
return wait.Poll(interval, timeout, func() (bool, error) {
func WaitForServiceEndpointsNum(ctx context.Context, c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
return wait.PollWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum)
list, err := c.CoreV1().Endpoints(namespace).List(context.TODO(), metav1.ListOptions{})
list, err := c.CoreV1().Endpoints(namespace).List(ctx, metav1.ListOptions{})
if err != nil {
return false, err
}
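// Sketch, not part of the diff: waiting until a Service is backed by the
// expected number of endpoints; the service name and counts are hypothetical.
err := WaitForServiceEndpointsNum(ctx, c, ns.Name, "my-svc", 3, 2*time.Second, 2*time.Minute)
ExpectNoError(err)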
@ -547,8 +547,8 @@ func TryKill(cmd *exec.Cmd) {
// EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created
// are actually cleaned up. Currently only implemented for GCE/GKE.
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
return TestContext.CloudConfig.Provider.EnsureLoadBalancerResourcesDeleted(ip, portRange)
func EnsureLoadBalancerResourcesDeleted(ctx context.Context, ip, portRange string) error {
return TestContext.CloudConfig.Provider.EnsureLoadBalancerResourcesDeleted(ctx, ip, portRange)
}
// CoreDump SSHs to the master and all nodes and dumps their logs into dir.
@ -613,11 +613,11 @@ func RunCmdEnv(env []string, command string, args ...string) (string, string, er
// getControlPlaneAddresses returns the externalIP, internalIP and hostname fields of control plane nodes.
// If any of these is unavailable, empty slices are returned.
func getControlPlaneAddresses(c clientset.Interface) ([]string, []string, []string) {
func getControlPlaneAddresses(ctx context.Context, c clientset.Interface) ([]string, []string, []string) {
var externalIPs, internalIPs, hostnames []string
// Populate the internal IPs.
eps, err := c.CoreV1().Endpoints(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
eps, err := c.CoreV1().Endpoints(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
if err != nil {
Failf("Failed to get kubernetes endpoints: %v", err)
}
@ -647,8 +647,8 @@ func getControlPlaneAddresses(c clientset.Interface) ([]string, []string, []stri
// It may return internal and external IPs, even if we expect only
// e.g. internal IPs to be used (issue #56787), so that we can be
// sure to block the control plane fully during tests.
func GetControlPlaneAddresses(c clientset.Interface) []string {
externalIPs, internalIPs, _ := getControlPlaneAddresses(c)
func GetControlPlaneAddresses(ctx context.Context, c clientset.Interface) []string {
externalIPs, internalIPs, _ := getControlPlaneAddresses(ctx, c)
ips := sets.NewString()
switch TestContext.Provider {
@ -685,7 +685,7 @@ func PrettyPrintJSON(metrics interface{}) string {
// WatchEventSequenceVerifier ...
// manages a watch for a given resource, ensures that events take place in a given order, retries the test on failure
//
// testContext cancellation signal across API boundaries, e.g: context.TODO()
// ctx cancellation signal across API boundaries, e.g: context from Ginkgo
// dc sets up a client to the API
// resourceType specify the type of resource
// namespace select a namespace

View File

@ -0,0 +1,9 @@
# This E2E framework sub-package is currently allowed to use arbitrary
# dependencies, therefore we need to override the restrictions from
# the parent .import-restrictions file.
#
# At some point it may become useful to also check this package's
# dependencies more carefully.
rules:
- selectorRegexp: ""
allowedPrefixes: [ "" ]

View File

@ -51,7 +51,6 @@ import (
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
clientexec "k8s.io/client-go/util/exec"
@ -149,7 +148,11 @@ type Test struct {
}
// NewNFSServer is an NFS-specific wrapper for CreateStorageServer.
func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config TestConfig, pod *v1.Pod, host string) {
func NewNFSServer(ctx context.Context, cs clientset.Interface, namespace string, args []string) (config TestConfig, pod *v1.Pod, host string) {
return NewNFSServerWithNodeName(ctx, cs, namespace, args, "")
}
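// NewNFSServerWithNodeName is like NewNFSServer, but when nodeName is non-empty
// the server pod is pinned to that node via config.ClientNodeSelection.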
func NewNFSServerWithNodeName(ctx context.Context, cs clientset.Interface, namespace string, args []string, nodeName string) (config TestConfig, pod *v1.Pod, host string) {
config = TestConfig{
Namespace: namespace,
Prefix: "nfs",
@ -158,10 +161,14 @@ func NewNFSServer(cs clientset.Interface, namespace string, args []string) (conf
ServerVolumes: map[string]string{"": "/exports"},
ServerReadyMessage: "NFS started",
}
if nodeName != "" {
config.ClientNodeSelection = e2epod.NodeSelection{Name: nodeName}
}
if len(args) > 0 {
config.ServerArgs = args
}
pod, host = CreateStorageServer(cs, config)
pod, host = CreateStorageServer(ctx, cs, config)
if strings.Contains(host, ":") {
host = "[" + host + "]"
}
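// Sketch, not part of the diff: a test bringing up the NFS server pod; cs, ns
// and f are the usual clientset, namespace and framework fixtures, and
// e2evolume is the conventional alias for this package.
config, serverPod, host := e2evolume.NewNFSServer(ctx, cs, ns.Name, []string{})
defer e2evolume.TestServerCleanup(ctx, f, config)
framework.Logf("NFS server pod %s exporting at %s", serverPod.Name, host)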
@ -171,18 +178,18 @@ func NewNFSServer(cs clientset.Interface, namespace string, args []string) (conf
// CreateStorageServer is a wrapper for startVolumeServer(). A storage server config is passed in, and a pod pointer
// and ip address string are returned.
// Note: Expect() is called so no error is returned.
func CreateStorageServer(cs clientset.Interface, config TestConfig) (pod *v1.Pod, ip string) {
pod = startVolumeServer(cs, config)
func CreateStorageServer(ctx context.Context, cs clientset.Interface, config TestConfig) (pod *v1.Pod, ip string) {
pod = startVolumeServer(ctx, cs, config)
gomega.Expect(pod).NotTo(gomega.BeNil(), "storage server pod should not be nil")
ip = pod.Status.PodIP
gomega.Expect(len(ip)).NotTo(gomega.BeZero(), fmt.Sprintf("pod %s's IP should not be empty", pod.Name))
gomega.Expect(ip).NotTo(gomega.BeEmpty(), fmt.Sprintf("pod %s's IP should not be empty", pod.Name))
framework.Logf("%s server pod IP address: %s", config.Prefix, ip)
return pod, ip
}
// GetVolumeAttachmentName returns the name of the VolumeAttachment of the PV that is bound to the PVC
// with the passed in claimName and claimNamespace; the name is a hash of the volume handle, the provisioner, and the node name.
func GetVolumeAttachmentName(cs clientset.Interface, config TestConfig, provisioner string, claimName string, claimNamespace string) string {
func GetVolumeAttachmentName(ctx context.Context, cs clientset.Interface, config TestConfig, provisioner string, claimName string, claimNamespace string) string {
var nodeName string
// For provisioning tests, ClientNodeSelection is not set so we do not know the NodeName of the VolumeAttachment of the PV that is
// bound to the PVC with the passed in claimName and claimNamespace. We need this NodeName because it is used to generate the
@ -190,9 +197,9 @@ func GetVolumeAttachmentName(cs clientset.Interface, config TestConfig, provisio
// To get the nodeName of the VolumeAttachment, we get all the VolumeAttachments, look for the VolumeAttachment with a
// PersistentVolumeName equal to the PV that is bound to the passed in PVC, and then we get the NodeName from that VolumeAttachment.
if config.ClientNodeSelection.Name == "" {
claim, _ := cs.CoreV1().PersistentVolumeClaims(claimNamespace).Get(context.TODO(), claimName, metav1.GetOptions{})
claim, _ := cs.CoreV1().PersistentVolumeClaims(claimNamespace).Get(ctx, claimName, metav1.GetOptions{})
pvName := claim.Spec.VolumeName
volumeAttachments, _ := cs.StorageV1().VolumeAttachments().List(context.TODO(), metav1.ListOptions{})
volumeAttachments, _ := cs.StorageV1().VolumeAttachments().List(ctx, metav1.ListOptions{})
for _, volumeAttachment := range volumeAttachments.Items {
if *volumeAttachment.Spec.Source.PersistentVolumeName == pvName {
nodeName = volumeAttachment.Spec.NodeName
@ -202,21 +209,21 @@ func GetVolumeAttachmentName(cs clientset.Interface, config TestConfig, provisio
} else {
nodeName = config.ClientNodeSelection.Name
}
handle := getVolumeHandle(cs, claimName, claimNamespace)
handle := getVolumeHandle(ctx, cs, claimName, claimNamespace)
attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, provisioner, nodeName)))
return fmt.Sprintf("csi-%x", attachmentHash)
}
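// Sketch, not part of the diff: resolving the VolumeAttachment created for a
// bound claim and waiting for it to disappear after teardown; the provisioner
// and claim names are hypothetical.
attachment := GetVolumeAttachmentName(ctx, cs, config, "csi.example.com", "my-pvc", ns.Name)
framework.ExpectNoError(WaitForVolumeAttachmentTerminated(ctx, attachment, cs, f.Timeouts.PodDelete))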
// getVolumeHandle returns the VolumeHandle of the PV that is bound to the PVC with the passed in claimName and claimNamespace.
func getVolumeHandle(cs clientset.Interface, claimName string, claimNamespace string) string {
func getVolumeHandle(ctx context.Context, cs clientset.Interface, claimName string, claimNamespace string) string {
// re-get the claim to the latest state with bound volume
claim, err := cs.CoreV1().PersistentVolumeClaims(claimNamespace).Get(context.TODO(), claimName, metav1.GetOptions{})
claim, err := cs.CoreV1().PersistentVolumeClaims(claimNamespace).Get(ctx, claimName, metav1.GetOptions{})
if err != nil {
framework.ExpectNoError(err, "Cannot get PVC")
return ""
}
pvName := claim.Spec.VolumeName
pv, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
pv, err := cs.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err != nil {
framework.ExpectNoError(err, "Cannot get PV")
return ""
@ -229,9 +236,9 @@ func getVolumeHandle(cs clientset.Interface, claimName string, claimNamespace st
}
// WaitForVolumeAttachmentTerminated waits for the VolumeAttachment with the passed in attachmentName to be terminated.
func WaitForVolumeAttachmentTerminated(attachmentName string, cs clientset.Interface, timeout time.Duration) error {
waitErr := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
_, err := cs.StorageV1().VolumeAttachments().Get(context.TODO(), attachmentName, metav1.GetOptions{})
func WaitForVolumeAttachmentTerminated(ctx context.Context, attachmentName string, cs clientset.Interface, timeout time.Duration) error {
waitErr := wait.PollImmediateWithContext(ctx, 10*time.Second, timeout, func(ctx context.Context) (bool, error) {
_, err := cs.StorageV1().VolumeAttachments().Get(ctx, attachmentName, metav1.GetOptions{})
if err != nil {
// if the volumeattachment object is not found, it means it has been terminated.
if apierrors.IsNotFound(err) {
@ -250,7 +257,7 @@ func WaitForVolumeAttachmentTerminated(attachmentName string, cs clientset.Inter
// startVolumeServer starts a container specified by config.serverImage and exports all
// config.serverPorts from it. The returned pod should be used to get the server
// IP address and create appropriate VolumeSource.
func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
func startVolumeServer(ctx context.Context, client clientset.Interface, config TestConfig) *v1.Pod {
podClient := client.CoreV1().Pods(config.Namespace)
portCount := len(config.ServerPorts)
@ -330,14 +337,18 @@ func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
},
}
if config.ClientNodeSelection.Name != "" {
serverPod.Spec.NodeName = config.ClientNodeSelection.Name
}
var pod *v1.Pod
serverPod, err := podClient.Create(context.TODO(), serverPod, metav1.CreateOptions{})
serverPod, err := podClient.Create(ctx, serverPod, metav1.CreateOptions{})
// ok if the server pod already exists. TODO: make this controllable by callers
if err != nil {
if apierrors.IsAlreadyExists(err) {
framework.Logf("Ignore \"already-exists\" error, re-get pod...")
ginkgo.By(fmt.Sprintf("re-getting the %q server pod", serverPodName))
serverPod, err = podClient.Get(context.TODO(), serverPodName, metav1.GetOptions{})
serverPod, err = podClient.Get(ctx, serverPodName, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot re-get the server pod %q: %v", serverPodName, err)
pod = serverPod
} else {
@ -345,25 +356,25 @@ func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
}
}
if config.WaitForCompletion {
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(client, serverPod.Name, serverPod.Namespace))
framework.ExpectNoError(podClient.Delete(context.TODO(), serverPod.Name, metav1.DeleteOptions{}))
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(ctx, client, serverPod.Name, serverPod.Namespace))
framework.ExpectNoError(podClient.Delete(ctx, serverPod.Name, metav1.DeleteOptions{}))
} else {
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(client, serverPod))
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, client, serverPod))
if pod == nil {
ginkgo.By(fmt.Sprintf("locating the %q server pod", serverPodName))
pod, err = podClient.Get(context.TODO(), serverPodName, metav1.GetOptions{})
pod, err = podClient.Get(ctx, serverPodName, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err)
}
}
if config.ServerReadyMessage != "" {
_, err := e2epodoutput.LookForStringInLog(pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout)
_, err := e2epodoutput.LookForStringInLogWithoutKubectl(ctx, client, pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout)
framework.ExpectNoError(err, "Failed to find %q in pod logs: %s", config.ServerReadyMessage, err)
}
return pod
}
// TestServerCleanup cleans up the server pod.
func TestServerCleanup(f *framework.Framework, config TestConfig) {
func TestServerCleanup(ctx context.Context, f *framework.Framework, config TestConfig) {
ginkgo.By(fmt.Sprint("cleaning the environment after ", config.Prefix))
defer ginkgo.GinkgoRecover()
@ -371,11 +382,11 @@ func TestServerCleanup(f *framework.Framework, config TestConfig) {
return
}
err := e2epod.DeletePodWithWaitByName(f.ClientSet, config.Prefix+"-server", config.Namespace)
gomega.Expect(err).To(gomega.BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-server", config.Namespace)
err := e2epod.DeletePodWithWaitByName(ctx, f.ClientSet, config.Prefix+"-server", config.Namespace)
framework.ExpectNoError(err, "delete pod %v in namespace %v", config.Prefix+"-server", config.Namespace)
}
func runVolumeTesterPod(client clientset.Interface, timeouts *framework.TimeoutContext, config TestConfig, podSuffix string, privileged bool, fsGroup *int64, tests []Test, slow bool) (*v1.Pod, error) {
func runVolumeTesterPod(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, config TestConfig, podSuffix string, privileged bool, fsGroup *int64, tests []Test, slow bool) (*v1.Pod, error) {
ginkgo.By(fmt.Sprint("starting ", config.Prefix, "-", podSuffix))
var gracePeriod int64 = 1
var command string
@ -453,18 +464,18 @@ func runVolumeTesterPod(client clientset.Interface, timeouts *framework.TimeoutC
})
}
podsNamespacer := client.CoreV1().Pods(config.Namespace)
clientPod, err := podsNamespacer.Create(context.TODO(), clientPod, metav1.CreateOptions{})
clientPod, err := podsNamespacer.Create(ctx, clientPod, metav1.CreateOptions{})
if err != nil {
return nil, err
}
if slow {
err = e2epod.WaitTimeoutForPodRunningInNamespace(client, clientPod.Name, clientPod.Namespace, timeouts.PodStartSlow)
err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, client, clientPod.Name, clientPod.Namespace, timeouts.PodStartSlow)
} else {
err = e2epod.WaitTimeoutForPodRunningInNamespace(client, clientPod.Name, clientPod.Namespace, timeouts.PodStart)
err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, client, clientPod.Name, clientPod.Namespace, timeouts.PodStart)
}
if err != nil {
e2epod.DeletePodOrFail(client, clientPod.Namespace, clientPod.Name)
e2epod.WaitForPodToDisappear(client, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
e2epod.DeletePodOrFail(ctx, client, clientPod.Namespace, clientPod.Name)
_ = e2epod.WaitForPodNotFoundInNamespace(ctx, client, clientPod.Name, clientPod.Namespace, timeouts.PodDelete)
return nil, err
}
return clientPod, nil
@ -519,8 +530,8 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string
// Timeout for dynamic provisioning (if "WaitForFirstConsumer" is set && provided PVC is not bound yet),
// pod creation, scheduling and complete pod startup (incl. volume attach & mount) is pod.podStartTimeout.
// It should be used for cases where "regular" dynamic provisioning of an empty volume is requested.
func TestVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
testVolumeClient(f, config, fsGroup, fsType, tests, false)
func TestVolumeClient(ctx context.Context, f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
testVolumeClient(ctx, f, config, fsGroup, fsType, tests, false)
}
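// Sketch, not part of the diff, of the flow these helpers support in a test.
// The Test field names (Volume, File, ExpectedContent) follow the upstream
// volume.Test type and are an assumption here; cs, ns and f are the usual
// fixtures.
config, _, host := NewNFSServer(ctx, cs, ns.Name, []string{})
defer TestServerCleanup(ctx, f, config)
tests := []Test{{
	Volume:          v1.VolumeSource{NFS: &v1.NFSVolumeSource{Server: host, Path: "/"}},
	File:            "index.html",
	ExpectedContent: "Hello from NFS!",
}}
InjectContent(ctx, f, config, nil, "", tests)
TestVolumeClient(ctx, f, config, nil, "", tests)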
// TestVolumeClientSlow is the same as TestVolumeClient except for its timeout.
@ -528,19 +539,21 @@ func TestVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64,
// pod creation, scheduling and complete pod startup (incl. volume attach & mount) is pod.slowPodStartTimeout.
// It should be used for cases where "special" dynamic provisioning is requested, such as volume cloning
// or snapshot restore.
func TestVolumeClientSlow(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
testVolumeClient(f, config, fsGroup, fsType, tests, true)
func TestVolumeClientSlow(ctx context.Context, f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
testVolumeClient(ctx, f, config, fsGroup, fsType, tests, true)
}
func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test, slow bool) {
func testVolumeClient(ctx context.Context, f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test, slow bool) {
timeouts := f.Timeouts
clientPod, err := runVolumeTesterPod(f.ClientSet, timeouts, config, "client", false, fsGroup, tests, slow)
clientPod, err := runVolumeTesterPod(ctx, f.ClientSet, timeouts, config, "client", false, fsGroup, tests, slow)
if err != nil {
framework.Failf("Failed to create client pod: %v", err)
}
defer func() {
e2epod.DeletePodOrFail(f.ClientSet, clientPod.Namespace, clientPod.Name)
e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
// testVolumeClient might get used more than once per test, therefore
// we have to clean up before returning.
e2epod.DeletePodOrFail(ctx, f.ClientSet, clientPod.Namespace, clientPod.Name)
framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, clientPod.Name, clientPod.Namespace, timeouts.PodDelete))
}()
testVolumeContent(f, clientPod, "", fsGroup, fsType, tests)
@ -551,7 +564,7 @@ func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64,
}
ec.Resources = v1.ResourceRequirements{}
ec.Name = "volume-ephemeral-container"
err = e2epod.NewPodClient(f).AddEphemeralContainerSync(clientPod, ec, timeouts.PodStart)
err = e2epod.NewPodClient(f).AddEphemeralContainerSync(ctx, clientPod, ec, timeouts.PodStart)
// The API server will return NotFound for the subresource when the feature is disabled
framework.ExpectNoError(err, "failed to add ephemeral container for re-test")
testVolumeContent(f, clientPod, ec.Name, fsGroup, fsType, tests)
@ -560,20 +573,22 @@ func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64,
// InjectContent inserts index.html with given content into given volume. It does so by
// starting an auxiliary pod which writes the file there.
// The volume must be writable.
func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
func InjectContent(ctx context.Context, f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
privileged := true
timeouts := f.Timeouts
if framework.NodeOSDistroIs("windows") {
privileged = false
}
injectorPod, err := runVolumeTesterPod(f.ClientSet, timeouts, config, "injector", privileged, fsGroup, tests, false /*slow*/)
injectorPod, err := runVolumeTesterPod(ctx, f.ClientSet, timeouts, config, "injector", privileged, fsGroup, tests, false /*slow*/)
if err != nil {
framework.Failf("Failed to create injector pod: %v", err)
return
}
defer func() {
e2epod.DeletePodOrFail(f.ClientSet, injectorPod.Namespace, injectorPod.Name)
e2epod.WaitForPodToDisappear(f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
// This pod must get deleted before the function returns because the test relies on
// the volume not being in use.
e2epod.DeletePodOrFail(ctx, f.ClientSet, injectorPod.Namespace, injectorPod.Name)
framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, injectorPod.Name, injectorPod.Namespace, timeouts.PodDelete))
}()
ginkgo.By("Writing text file contents in the container.")

View File

@ -23,12 +23,13 @@ import (
"errors"
"fmt"
"github.com/onsi/ginkgo/v2"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
storagev1 "k8s.io/api/storage/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
@ -49,7 +50,7 @@ import (
//
// LoadFromManifests has some limitations:
// - aliases are not supported (i.e. use serviceAccountName instead of the deprecated serviceAccount,
// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podspec-v1-core)
// https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1)
// and silently ignored
// - the latest stable API version for each item is used, regardless of what
// is specified in the manifest files
@ -140,21 +141,7 @@ func PatchItems(f *framework.Framework, driverNamespace *v1.Namespace, items ...
// PatchItems has the same limitations as LoadFromManifests:
// - only some common items are supported, unknown ones trigger an error
// - only the latest stable API version for each item is supported
func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{}) (func(), error) {
var destructors []func() error
cleanup := func() {
// TODO (?): use same logic as framework.go for determining
// whether we are expected to clean up? This would change the
// meaning of the -delete-namespace and -delete-namespace-on-failure
// command line flags, because they would also start to apply
// to non-namespaced items.
for _, destructor := range destructors {
if err := destructor(); err != nil && !apierrors.IsNotFound(err) {
framework.Logf("deleting failed: %s", err)
}
}
}
func CreateItems(ctx context.Context, f *framework.Framework, ns *v1.Namespace, items ...interface{}) error {
var result error
for _, item := range items {
// Each factory knows which item(s) it supports, so try each one.
@ -164,12 +151,9 @@ func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{})
// description = fmt.Sprintf("%s:\n%s", description, PrettyPrint(item))
framework.Logf("creating %s", description)
for _, factory := range factories {
destructor, err := factory.Create(f, ns, item)
destructor, err := factory.Create(ctx, f, ns, item)
if destructor != nil {
destructors = append(destructors, func() error {
framework.Logf("deleting %s", description)
return destructor()
})
ginkgo.DeferCleanup(framework.IgnoreNotFound(destructor), framework.AnnotatedLocation(fmt.Sprintf("deleting %s", description)))
}
if err == nil {
done = true
@ -185,33 +169,28 @@ func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{})
}
}
if result != nil {
cleanup()
return nil, result
}
return cleanup, nil
return result
}
// CreateFromManifests is a combination of LoadFromManifests,
// PatchItems, patching with an optional custom function,
// and CreateItems.
func CreateFromManifests(f *framework.Framework, driverNamespace *v1.Namespace, patch func(item interface{}) error, files ...string) (func(), error) {
func CreateFromManifests(ctx context.Context, f *framework.Framework, driverNamespace *v1.Namespace, patch func(item interface{}) error, files ...string) error {
items, err := LoadFromManifests(files...)
if err != nil {
return nil, fmt.Errorf("CreateFromManifests: %w", err)
return fmt.Errorf("CreateFromManifests: %w", err)
}
if err := PatchItems(f, driverNamespace, items...); err != nil {
return nil, err
return err
}
if patch != nil {
for _, item := range items {
if err := patch(item); err != nil {
return nil, err
return err
}
}
}
return CreateItems(f, driverNamespace, items...)
return CreateItems(ctx, f, driverNamespace, items...)
}
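With this change CreateFromManifests returns only an error; deletion of every created item is registered through ginkgo.DeferCleanup inside CreateItems, so callers no longer manage a cleanup function. A hedged caller sketch, assuming the storage utils package is imported as storageutils and using an illustrative manifest path:
// Sketch only. Assumed imports: "context", "k8s.io/kubernetes/test/e2e/framework",
// v1 "k8s.io/api/core/v1", and this package as storageutils.
func deployExampleDriver(ctx context.Context, f *framework.Framework, ns *v1.Namespace) {
	err := storageutils.CreateFromManifests(ctx, f, ns,
		func(item interface{}) error {
			// Optional per-item patch hook (e.g. overriding an image); nil also works.
			return nil
		},
		"storage-csi/example/driver.yaml", // illustrative manifest path
	)
	framework.ExpectNoError(err, "deploy example driver manifests")
	// No explicit cleanup: each item was wired to ginkgo.DeferCleanup with an
	// IgnoreNotFound destructor when it was created.
}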
// What is a subset of metav1.TypeMeta which (in contrast to
@ -251,7 +230,7 @@ type ItemFactory interface {
// error or a cleanup function for the created item.
// If the item is of an unsupported type, it must return
// an error that has errorItemNotSupported as cause.
Create(f *framework.Framework, ns *v1.Namespace, item interface{}) (func() error, error)
Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, item interface{}) (func(ctx context.Context) error, error)
}
// describeItem always returns a string that describes the item,
@ -410,17 +389,17 @@ func (f *serviceAccountFactory) New() runtime.Object {
return &v1.ServiceAccount{}
}
func (*serviceAccountFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
func (*serviceAccountFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
item, ok := i.(*v1.ServiceAccount)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.CoreV1().ServiceAccounts(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("create ServiceAccount: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
return func(ctx context.Context) error {
return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
}, nil
}
@ -430,7 +409,7 @@ func (f *clusterRoleFactory) New() runtime.Object {
return &rbacv1.ClusterRole{}
}
func (*clusterRoleFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
func (*clusterRoleFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
item, ok := i.(*rbacv1.ClusterRole)
if !ok {
return nil, errorItemNotSupported
@ -438,11 +417,11 @@ func (*clusterRoleFactory) Create(f *framework.Framework, ns *v1.Namespace, i in
framework.Logf("Define cluster role %v", item.GetName())
client := f.ClientSet.RbacV1().ClusterRoles()
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("create ClusterRole: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
return func(ctx context.Context) error {
return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
}, nil
}
@ -452,18 +431,18 @@ func (f *clusterRoleBindingFactory) New() runtime.Object {
return &rbacv1.ClusterRoleBinding{}
}
func (*clusterRoleBindingFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
func (*clusterRoleBindingFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
item, ok := i.(*rbacv1.ClusterRoleBinding)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.RbacV1().ClusterRoleBindings()
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("create ClusterRoleBinding: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
return func(ctx context.Context) error {
return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
}, nil
}
@ -473,18 +452,18 @@ func (f *roleFactory) New() runtime.Object {
return &rbacv1.Role{}
}
func (*roleFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
func (*roleFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
item, ok := i.(*rbacv1.Role)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.RbacV1().Roles(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("create Role: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
return func(ctx context.Context) error {
return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
}, nil
}
@ -494,18 +473,18 @@ func (f *roleBindingFactory) New() runtime.Object {
return &rbacv1.RoleBinding{}
}
func (*roleBindingFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
func (*roleBindingFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
item, ok := i.(*rbacv1.RoleBinding)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.RbacV1().RoleBindings(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("create RoleBinding: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
return func(ctx context.Context) error {
return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
}, nil
}
@ -515,18 +494,18 @@ func (f *serviceFactory) New() runtime.Object {
return &v1.Service{}
}
func (*serviceFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
func (*serviceFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
item, ok := i.(*v1.Service)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.CoreV1().Services(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("create Service: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
return func(ctx context.Context) error {
return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
}, nil
}
@ -536,18 +515,18 @@ func (f *statefulSetFactory) New() runtime.Object {
return &appsv1.StatefulSet{}
}
func (*statefulSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
func (*statefulSetFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
item, ok := i.(*appsv1.StatefulSet)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.AppsV1().StatefulSets(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("create StatefulSet: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
return func(ctx context.Context) error {
return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
}, nil
}
@ -557,18 +536,18 @@ func (f *deploymentFactory) New() runtime.Object {
return &appsv1.Deployment{}
}
func (*deploymentFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
func (*deploymentFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
item, ok := i.(*appsv1.Deployment)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.AppsV1().Deployments(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("create Deployment: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
return func(ctx context.Context) error {
return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
}, nil
}
@ -578,18 +557,18 @@ func (f *daemonSetFactory) New() runtime.Object {
return &appsv1.DaemonSet{}
}
func (*daemonSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
func (*daemonSetFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
item, ok := i.(*appsv1.DaemonSet)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.AppsV1().DaemonSets(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("create DaemonSet: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
return func(ctx context.Context) error {
return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
}, nil
}
@ -599,18 +578,18 @@ func (f *replicaSetFactory) New() runtime.Object {
return &appsv1.ReplicaSet{}
}
func (*replicaSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
func (*replicaSetFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
item, ok := i.(*appsv1.ReplicaSet)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.AppsV1().ReplicaSets(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("create ReplicaSet: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
return func(ctx context.Context) error {
return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
}, nil
}
@ -620,18 +599,18 @@ func (f *storageClassFactory) New() runtime.Object {
return &storagev1.StorageClass{}
}
func (*storageClassFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
func (*storageClassFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
item, ok := i.(*storagev1.StorageClass)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.StorageV1().StorageClasses()
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("create StorageClass: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
return func(ctx context.Context) error {
return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
}, nil
}
@ -641,18 +620,18 @@ func (f *csiDriverFactory) New() runtime.Object {
return &storagev1.CSIDriver{}
}
func (*csiDriverFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
func (*csiDriverFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
item, ok := i.(*storagev1.CSIDriver)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.StorageV1().CSIDrivers()
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("create CSIDriver: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
return func(ctx context.Context) error {
return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
}, nil
}
@ -662,18 +641,18 @@ func (f *secretFactory) New() runtime.Object {
return &v1.Secret{}
}
func (*secretFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
func (*secretFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
item, ok := i.(*v1.Secret)
if !ok {
return nil, errorItemNotSupported
}
client := f.ClientSet.CoreV1().Secrets(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("create Secret: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
return func(ctx context.Context) error {
return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
}, nil
}
@ -683,7 +662,7 @@ func (f *customResourceDefinitionFactory) New() runtime.Object {
return &apiextensionsv1.CustomResourceDefinition{}
}
func (*customResourceDefinitionFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
func (*customResourceDefinitionFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
var err error
unstructCRD := &unstructured.Unstructured{}
gvr := schema.GroupVersionResource{Group: "apiextensions.k8s.io", Version: "v1", Resource: "customresourcedefinitions"}
@ -698,11 +677,11 @@ func (*customResourceDefinitionFactory) Create(f *framework.Framework, ns *v1.Na
return nil, err
}
if _, err = f.DynamicClient.Resource(gvr).Create(context.TODO(), unstructCRD, metav1.CreateOptions{}); err != nil {
if _, err = f.DynamicClient.Resource(gvr).Create(ctx, unstructCRD, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("create CustomResourceDefinition: %w", err)
}
return func() error {
return f.DynamicClient.Resource(gvr).Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
return func(ctx context.Context) error {
return f.DynamicClient.Resource(gvr).Delete(ctx, item.GetName(), metav1.DeleteOptions{})
}, nil
}

View File

@ -17,6 +17,7 @@ limitations under the License.
package utils
import (
"fmt"
"path"
"strings"
@ -94,6 +95,11 @@ func PatchCSIDeployment(f *e2eframework.Framework, o PatchCSIOptions, object int
container.VolumeMounts[e].MountPath = substKubeletRootDir(container.VolumeMounts[e].MountPath)
}
if len(o.Features) > 0 && len(o.Features[container.Name]) > 0 {
featuregateString := strings.Join(o.Features[container.Name], ",")
container.Args = append(container.Args, fmt.Sprintf("--feature-gates=%s", featuregateString))
}
// Overwrite driver name resp. provider name
// by appending a parameter with the right
// value.
@ -218,4 +224,10 @@ type PatchCSIOptions struct {
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
SELinuxMount *bool
// If not nil, the values will be used for setting feature arguments to
// specific sidecar.
// Feature is a map - where key is sidecar name such as:
// -- key: resizer
// -- value: []string{feature-gates}
Features map[string][]string
}
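The new Features field maps a sidecar container name to feature gates; PatchCSIDeployment joins them with commas and appends a single --feature-gates argument to that container (see the hunk above). A sketch of populating it, with the container name and gate chosen only for illustration:
// Sketch only; "csi-resizer" and the gate below are illustrative values.
o := storageutils.PatchCSIOptions{
	OldDriverName: "hostpath.csi.k8s.io",
	NewDriverName: "example.csi.k8s.io",
	Features: map[string][]string{
		// Results in "--feature-gates=RecoverVolumeExpansionFailure=true"
		// being appended to the args of the container named "csi-resizer".
		"csi-resizer": {"RecoverVolumeExpansionFailure=true"},
	},
}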

View File

@ -1,263 +0,0 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"fmt"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
)
const (
volumeAttachmentStatusPollDelay = 2 * time.Second
volumeAttachmentStatusFactor = 2
volumeAttachmentStatusSteps = 6
// represents expected attachment status of a volume after attach
volumeAttachedStatus = "attached"
// represents expected attachment status of a volume after detach
volumeDetachedStatus = "detached"
)
// EBSUtil provides functions to interact with EBS volumes
type EBSUtil struct {
client *ec2.EC2
validDevices []string
}
// NewEBSUtil returns an instance of EBSUtil which can be used
// to interact with EBS volumes
func NewEBSUtil(client *ec2.EC2) *EBSUtil {
ebsUtil := &EBSUtil{client: client}
validDevices := []string{}
for _, firstChar := range []rune{'b', 'c'} {
for i := 'a'; i <= 'z'; i++ {
dev := string([]rune{firstChar, i})
validDevices = append(validDevices, dev)
}
}
ebsUtil.validDevices = validDevices
return ebsUtil
}
// AttachDisk attaches an EBS volume to a node.
func (ebs *EBSUtil) AttachDisk(volumeID string, nodeName string) error {
instance, err := findInstanceByNodeName(nodeName, ebs.client)
if err != nil {
return fmt.Errorf("error finding node %s: %v", nodeName, err)
}
err = ebs.waitForAvailable(volumeID)
if err != nil {
return fmt.Errorf("error waiting volume %s to be available: %v", volumeID, err)
}
device, err := ebs.findFreeDevice(instance)
if err != nil {
return fmt.Errorf("error finding free device on node %s: %v", nodeName, err)
}
hostDevice := "/dev/xvd" + string(device)
attachInput := &ec2.AttachVolumeInput{
VolumeId: &volumeID,
InstanceId: instance.InstanceId,
Device: &hostDevice,
}
_, err = ebs.client.AttachVolume(attachInput)
if err != nil {
return fmt.Errorf("error attaching volume %s to node %s: %v", volumeID, nodeName, err)
}
return ebs.waitForAttach(volumeID)
}
func (ebs *EBSUtil) findFreeDevice(instance *ec2.Instance) (string, error) {
deviceMappings := map[string]string{}
for _, blockDevice := range instance.BlockDeviceMappings {
name := aws.StringValue(blockDevice.DeviceName)
name = strings.TrimPrefix(name, "/dev/sd")
name = strings.TrimPrefix(name, "/dev/xvd")
if len(name) < 1 || len(name) > 2 {
klog.Warningf("Unexpected EBS DeviceName: %q", aws.StringValue(blockDevice.DeviceName))
}
deviceMappings[name] = aws.StringValue(blockDevice.Ebs.VolumeId)
}
for _, device := range ebs.validDevices {
if _, found := deviceMappings[device]; !found {
return device, nil
}
}
return "", fmt.Errorf("no available device")
}
func (ebs *EBSUtil) waitForAttach(volumeID string) error {
backoff := wait.Backoff{
Duration: volumeAttachmentStatusPollDelay,
Factor: volumeAttachmentStatusFactor,
Steps: volumeAttachmentStatusSteps,
}
time.Sleep(volumeAttachmentStatusPollDelay)
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
info, err := ebs.describeVolume(volumeID)
if err != nil {
return false, err
}
if len(info.Attachments) > 1 {
// Shouldn't happen; log so we know if it is
klog.Warningf("Found multiple attachments for volume %q: %v", volumeID, info)
}
attachmentStatus := ""
for _, a := range info.Attachments {
if attachmentStatus != "" {
// Shouldn't happen; log so we know if it is
klog.Warningf("Found multiple attachments for volume %q: %v", volumeID, info)
}
if a.State != nil {
attachmentStatus = *a.State
} else {
// Shouldn't happen; log so we know if it is
klog.Warningf("Ignoring nil attachment state for volume %q: %v", volumeID, a)
}
}
if attachmentStatus == "" {
attachmentStatus = volumeDetachedStatus
}
if attachmentStatus == volumeAttachedStatus {
// Attachment is in requested state, finish waiting
return true, nil
}
return false, nil
})
return err
}
func (ebs *EBSUtil) waitForAvailable(volumeID string) error {
backoff := wait.Backoff{
Duration: volumeAttachmentStatusPollDelay,
Factor: volumeAttachmentStatusFactor,
Steps: volumeAttachmentStatusSteps,
}
time.Sleep(volumeAttachmentStatusPollDelay)
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
info, err := ebs.describeVolume(volumeID)
if err != nil {
return false, err
}
volumeState := aws.StringValue(info.State)
if volumeState != ec2.VolumeStateAvailable {
return false, nil
}
return true, nil
})
return err
}
// Gets the full information about this volume from the EC2 API
func (ebs *EBSUtil) describeVolume(volumeID string) (*ec2.Volume, error) {
request := &ec2.DescribeVolumesInput{
VolumeIds: []*string{&volumeID},
}
results := []*ec2.Volume{}
var nextToken *string
for {
response, err := ebs.client.DescribeVolumes(request)
if err != nil {
return nil, err
}
results = append(results, response.Volumes...)
nextToken = response.NextToken
if aws.StringValue(nextToken) == "" {
break
}
request.NextToken = nextToken
}
if len(results) == 0 {
return nil, fmt.Errorf("no volumes found")
}
if len(results) > 1 {
return nil, fmt.Errorf("multiple volumes found")
}
return results[0], nil
}
func newEc2Filter(name string, value string) *ec2.Filter {
filter := &ec2.Filter{
Name: aws.String(name),
Values: []*string{
aws.String(value),
},
}
return filter
}
func findInstanceByNodeName(nodeName string, cloud *ec2.EC2) (*ec2.Instance, error) {
filters := []*ec2.Filter{
newEc2Filter("private-dns-name", nodeName),
}
request := &ec2.DescribeInstancesInput{
Filters: filters,
}
instances, err := describeInstances(request, cloud)
if err != nil {
return nil, err
}
if len(instances) == 0 {
return nil, nil
}
if len(instances) > 1 {
return nil, fmt.Errorf("multiple instances found for name: %s", nodeName)
}
return instances[0], nil
}
func describeInstances(request *ec2.DescribeInstancesInput, cloud *ec2.EC2) ([]*ec2.Instance, error) {
// Instances are paged
results := []*ec2.Instance{}
var nextToken *string
for {
response, err := cloud.DescribeInstances(request)
if err != nil {
return nil, fmt.Errorf("error listing AWS instances: %v", err)
}
for _, reservation := range response.Reservations {
results = append(results, reservation.Instances...)
}
nextToken = response.NextToken
if nextToken == nil || len(*nextToken) == 0 {
break
}
request.NextToken = nextToken
}
return results, nil
}

View File

@ -47,10 +47,10 @@ func LogResult(result Result) {
// HostExec represents the interface we require to execute commands on a remote host.
type HostExec interface {
Execute(cmd string, node *v1.Node) (Result, error)
IssueCommandWithResult(cmd string, node *v1.Node) (string, error)
IssueCommand(cmd string, node *v1.Node) error
Cleanup()
Execute(ctx context.Context, cmd string, node *v1.Node) (Result, error)
IssueCommandWithResult(ctx context.Context, cmd string, node *v1.Node) (string, error)
IssueCommand(ctx context.Context, cmd string, node *v1.Node) error
Cleanup(ctx context.Context)
}
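A short sketch of the context-aware HostExec methods; the node object, the framework handle f and the spec context are assumed to come from the enclosing test, and this package is assumed to be imported as storageutils.
// Sketch only. Assumed imports: "strings", "github.com/onsi/ginkgo/v2",
// "k8s.io/kubernetes/test/e2e/framework", and this package as storageutils.
hostExec := storageutils.NewHostExec(f)
ginkgo.DeferCleanup(hostExec.Cleanup)
out, err := hostExec.IssueCommandWithResult(ctx, "uname -r", node)
framework.ExpectNoError(err)
framework.Logf("kernel on node %s: %s", node.Name, strings.TrimSpace(out))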
// hostExecutor implements HostExec
@ -69,18 +69,25 @@ func NewHostExec(framework *framework.Framework) HostExec {
// launchNodeExecPod launches a hostexec pod for local PV and waits
// until it's Running.
func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod {
func (h *hostExecutor) launchNodeExecPod(ctx context.Context, node string) *v1.Pod {
f := h.Framework
cs := f.ClientSet
ns := f.Namespace
hostExecPod := e2epod.NewExecPodSpec(ns.Name, "", true)
hostExecPod.GenerateName = fmt.Sprintf("hostexec-%s-", node)
// Use NodeAffinity instead of NodeName so that pods will not
// be immediately Failed by kubelet if it's out of space. Instead
// Pods will be pending in the scheduler until there is space freed
// up.
e2epod.SetNodeAffinity(&hostExecPod.Spec, node)
if framework.TestContext.NodeE2E {
// E2E node tests do not run a scheduler, so set the node name directly
hostExecPod.Spec.NodeName = node
} else {
// Use NodeAffinity instead of NodeName so that pods will not
// be immediately Failed by kubelet if it's out of space. Instead
// Pods will be pending in the scheduler until there is space freed
// up.
e2epod.SetNodeAffinity(&hostExecPod.Spec, node)
}
hostExecPod.Spec.Volumes = []v1.Volume{
{
// Required to enter into host mount namespace via nsenter.
@ -104,9 +111,9 @@ func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod {
return &privileged
}(true),
}
pod, err := cs.CoreV1().Pods(ns.Name).Create(context.TODO(), hostExecPod, metav1.CreateOptions{})
pod, err := cs.CoreV1().Pods(ns.Name).Create(ctx, hostExecPod, metav1.CreateOptions{})
framework.ExpectNoError(err)
err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, pod.Name, pod.Namespace, f.Timeouts.PodStart)
err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, cs, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err)
return pod
}
@ -115,8 +122,8 @@ func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod {
// performing the remote command execution, the stdout, stderr and exit code
// are returned.
// This works like ssh.SSH(...) utility.
func (h *hostExecutor) Execute(cmd string, node *v1.Node) (Result, error) {
result, err := h.exec(cmd, node)
func (h *hostExecutor) Execute(ctx context.Context, cmd string, node *v1.Node) (Result, error) {
result, err := h.exec(ctx, cmd, node)
if codeExitErr, ok := err.(exec.CodeExitError); ok {
// extract the exit code of remote command and silence the command
// non-zero exit code error
@ -126,14 +133,14 @@ func (h *hostExecutor) Execute(cmd string, node *v1.Node) (Result, error) {
return result, err
}
func (h *hostExecutor) exec(cmd string, node *v1.Node) (Result, error) {
func (h *hostExecutor) exec(ctx context.Context, cmd string, node *v1.Node) (Result, error) {
result := Result{
Host: node.Name,
Cmd: cmd,
}
pod, ok := h.nodeExecPods[node.Name]
if !ok {
pod = h.launchNodeExecPod(node.Name)
pod = h.launchNodeExecPod(ctx, node.Name)
if pod == nil {
return result, fmt.Errorf("failed to create hostexec pod for node %q", node)
}
@ -165,8 +172,8 @@ func (h *hostExecutor) exec(cmd string, node *v1.Node) (Result, error) {
// IssueCommandWithResult issues command on the given node and returns stdout as
// result. It returns error if there are some issues executing the command or
// the command exits non-zero.
func (h *hostExecutor) IssueCommandWithResult(cmd string, node *v1.Node) (string, error) {
result, err := h.exec(cmd, node)
func (h *hostExecutor) IssueCommandWithResult(ctx context.Context, cmd string, node *v1.Node) (string, error) {
result, err := h.exec(ctx, cmd, node)
if err != nil {
LogResult(result)
}
@ -174,17 +181,17 @@ func (h *hostExecutor) IssueCommandWithResult(cmd string, node *v1.Node) (string
}
// IssueCommand works like IssueCommandWithResult, but discards result.
func (h *hostExecutor) IssueCommand(cmd string, node *v1.Node) error {
_, err := h.IssueCommandWithResult(cmd, node)
func (h *hostExecutor) IssueCommand(ctx context.Context, cmd string, node *v1.Node) error {
_, err := h.IssueCommandWithResult(ctx, cmd, node)
return err
}
// Cleanup cleans up resources it created during the test.
// Note that in most cases it is not necessary to call this because we create
// pods under the test namespace, which will be destroyed in the teardown phase.
func (h *hostExecutor) Cleanup() {
func (h *hostExecutor) Cleanup(ctx context.Context) {
for _, pod := range h.nodeExecPods {
e2epod.DeletePodOrFail(h.Framework.ClientSet, pod.Namespace, pod.Name)
e2epod.DeletePodOrFail(ctx, h.Framework.ClientSet, pod.Namespace, pod.Name)
}
h.nodeExecPods = make(map[string]*v1.Pod)
}

View File

@ -21,6 +21,7 @@ package utils
*/
import (
"context"
"fmt"
"path/filepath"
"strings"
@ -69,9 +70,9 @@ type LocalTestResource struct {
// LocalTestResourceManager represents an interface to create/destroy local test resources on a node
type LocalTestResourceManager interface {
Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource
ExpandBlockDevice(ltr *LocalTestResource, mbToAdd int) error
Remove(ltr *LocalTestResource)
Create(ctx context.Context, node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource
ExpandBlockDevice(ctx context.Context, ltr *LocalTestResource, mbToAdd int) error
Remove(ctx context.Context, ltr *LocalTestResource)
}
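A hedged sketch of the Create/Remove life cycle with the new context parameter; the hostExec helper, the target node and the spec context are assumed to be set up by the surrounding test.
// Sketch only: provision a tmpfs-backed local volume on a node and register teardown.
ltrMgr := storageutils.NewLocalResourceManager("local-volume-test", hostExec, "/tmp")
ltr := ltrMgr.Create(ctx, node, storageutils.LocalVolumeTmpfs, nil /* parameters */)
ginkgo.DeferCleanup(ltrMgr.Remove, ltr)
// ltr.Path now points at the tmpfs mount on ltr.Node and can be wired into a local PV.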
// ltrMgr implements LocalTestResourceManager
@ -98,10 +99,10 @@ func (l *ltrMgr) getTestDir() string {
return filepath.Join(l.hostBase, testDirName)
}
func (l *ltrMgr) setupLocalVolumeTmpfs(node *v1.Node, parameters map[string]string) *LocalTestResource {
func (l *ltrMgr) setupLocalVolumeTmpfs(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
ginkgo.By(fmt.Sprintf("Creating tmpfs mount point on node %q at path %q", node.Name, hostDir))
err := l.hostExec.IssueCommand(fmt.Sprintf("mkdir -p %q && mount -t tmpfs -o size=10m tmpfs-%q %q", hostDir, hostDir, hostDir), node)
err := l.hostExec.IssueCommand(ctx, fmt.Sprintf("mkdir -p %q && mount -t tmpfs -o size=10m tmpfs-%q %q", hostDir, hostDir, hostDir), node)
framework.ExpectNoError(err)
return &LocalTestResource{
Node: node,
@ -109,18 +110,18 @@ func (l *ltrMgr) setupLocalVolumeTmpfs(node *v1.Node, parameters map[string]stri
}
}
func (l *ltrMgr) cleanupLocalVolumeTmpfs(ltr *LocalTestResource) {
func (l *ltrMgr) cleanupLocalVolumeTmpfs(ctx context.Context, ltr *LocalTestResource) {
ginkgo.By(fmt.Sprintf("Unmount tmpfs mount point on node %q at path %q", ltr.Node.Name, ltr.Path))
err := l.hostExec.IssueCommand(fmt.Sprintf("umount %q", ltr.Path), ltr.Node)
err := l.hostExec.IssueCommand(ctx, fmt.Sprintf("umount %q", ltr.Path), ltr.Node)
framework.ExpectNoError(err)
ginkgo.By("Removing the test directory")
err = l.hostExec.IssueCommand(fmt.Sprintf("rm -r %s", ltr.Path), ltr.Node)
err = l.hostExec.IssueCommand(ctx, fmt.Sprintf("rm -r %s", ltr.Path), ltr.Node)
framework.ExpectNoError(err)
}
// createAndSetupLoopDevice creates an empty file and associates a loop device with it.
func (l *ltrMgr) createAndSetupLoopDevice(dir string, node *v1.Node, size int) {
func (l *ltrMgr) createAndSetupLoopDevice(ctx context.Context, dir string, node *v1.Node, size int) {
ginkgo.By(fmt.Sprintf("Creating block device on node %q using path %q", node.Name, dir))
mkdirCmd := fmt.Sprintf("mkdir -p %s", dir)
count := size / 4096
@ -130,22 +131,22 @@ func (l *ltrMgr) createAndSetupLoopDevice(dir string, node *v1.Node, size int) {
}
ddCmd := fmt.Sprintf("dd if=/dev/zero of=%s/file bs=4096 count=%d", dir, count)
losetupCmd := fmt.Sprintf("losetup -f %s/file", dir)
err := l.hostExec.IssueCommand(fmt.Sprintf("%s && %s && %s", mkdirCmd, ddCmd, losetupCmd), node)
err := l.hostExec.IssueCommand(ctx, fmt.Sprintf("%s && %s && %s", mkdirCmd, ddCmd, losetupCmd), node)
framework.ExpectNoError(err)
}
// findLoopDevice finds loop device path by its associated storage directory.
func (l *ltrMgr) findLoopDevice(dir string, node *v1.Node) string {
func (l *ltrMgr) findLoopDevice(ctx context.Context, dir string, node *v1.Node) string {
cmd := fmt.Sprintf("E2E_LOOP_DEV=$(losetup | grep %s/file | awk '{ print $1 }') 2>&1 > /dev/null && echo ${E2E_LOOP_DEV}", dir)
loopDevResult, err := l.hostExec.IssueCommandWithResult(cmd, node)
loopDevResult, err := l.hostExec.IssueCommandWithResult(ctx, cmd, node)
framework.ExpectNoError(err)
return strings.TrimSpace(loopDevResult)
}
func (l *ltrMgr) setupLocalVolumeBlock(node *v1.Node, parameters map[string]string) *LocalTestResource {
func (l *ltrMgr) setupLocalVolumeBlock(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource {
loopDir := l.getTestDir()
l.createAndSetupLoopDevice(loopDir, node, 20*1024*1024)
loopDev := l.findLoopDevice(loopDir, node)
l.createAndSetupLoopDevice(ctx, loopDir, node, 20*1024*1024)
loopDev := l.findLoopDevice(ctx, loopDir, node)
return &LocalTestResource{
Node: node,
Path: loopDev,
@ -154,30 +155,30 @@ func (l *ltrMgr) setupLocalVolumeBlock(node *v1.Node, parameters map[string]stri
}
// teardownLoopDevice tears down loop device by its associated storage directory.
func (l *ltrMgr) teardownLoopDevice(dir string, node *v1.Node) {
loopDev := l.findLoopDevice(dir, node)
func (l *ltrMgr) teardownLoopDevice(ctx context.Context, dir string, node *v1.Node) {
loopDev := l.findLoopDevice(ctx, dir, node)
ginkgo.By(fmt.Sprintf("Tear down block device %q on node %q at path %s/file", loopDev, node.Name, dir))
losetupDeleteCmd := fmt.Sprintf("losetup -d %s", loopDev)
err := l.hostExec.IssueCommand(losetupDeleteCmd, node)
err := l.hostExec.IssueCommand(ctx, losetupDeleteCmd, node)
framework.ExpectNoError(err)
return
}
func (l *ltrMgr) cleanupLocalVolumeBlock(ltr *LocalTestResource) {
l.teardownLoopDevice(ltr.loopDir, ltr.Node)
func (l *ltrMgr) cleanupLocalVolumeBlock(ctx context.Context, ltr *LocalTestResource) {
l.teardownLoopDevice(ctx, ltr.loopDir, ltr.Node)
ginkgo.By(fmt.Sprintf("Removing the test directory %s", ltr.loopDir))
removeCmd := fmt.Sprintf("rm -r %s", ltr.loopDir)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
err := l.hostExec.IssueCommand(ctx, removeCmd, ltr.Node)
framework.ExpectNoError(err)
}
func (l *ltrMgr) setupLocalVolumeBlockFS(node *v1.Node, parameters map[string]string) *LocalTestResource {
ltr := l.setupLocalVolumeBlock(node, parameters)
func (l *ltrMgr) setupLocalVolumeBlockFS(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource {
ltr := l.setupLocalVolumeBlock(ctx, node, parameters)
loopDev := ltr.Path
loopDir := ltr.loopDir
// Format and mount at loopDir and give others rwx for read/write testing
cmd := fmt.Sprintf("mkfs -t ext4 %s && mount -t ext4 %s %s && chmod o+rwx %s", loopDev, loopDev, loopDir, loopDir)
err := l.hostExec.IssueCommand(cmd, node)
err := l.hostExec.IssueCommand(ctx, cmd, node)
framework.ExpectNoError(err)
return &LocalTestResource{
Node: node,
@ -186,17 +187,17 @@ func (l *ltrMgr) setupLocalVolumeBlockFS(node *v1.Node, parameters map[string]st
}
}
func (l *ltrMgr) cleanupLocalVolumeBlockFS(ltr *LocalTestResource) {
func (l *ltrMgr) cleanupLocalVolumeBlockFS(ctx context.Context, ltr *LocalTestResource) {
umountCmd := fmt.Sprintf("umount %s", ltr.Path)
err := l.hostExec.IssueCommand(umountCmd, ltr.Node)
err := l.hostExec.IssueCommand(ctx, umountCmd, ltr.Node)
framework.ExpectNoError(err)
l.cleanupLocalVolumeBlock(ltr)
l.cleanupLocalVolumeBlock(ctx, ltr)
}
func (l *ltrMgr) setupLocalVolumeDirectory(node *v1.Node, parameters map[string]string) *LocalTestResource {
func (l *ltrMgr) setupLocalVolumeDirectory(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
mkdirCmd := fmt.Sprintf("mkdir -p %s", hostDir)
err := l.hostExec.IssueCommand(mkdirCmd, node)
err := l.hostExec.IssueCommand(ctx, mkdirCmd, node)
framework.ExpectNoError(err)
return &LocalTestResource{
Node: node,
@ -204,18 +205,18 @@ func (l *ltrMgr) setupLocalVolumeDirectory(node *v1.Node, parameters map[string]
}
}
func (l *ltrMgr) cleanupLocalVolumeDirectory(ltr *LocalTestResource) {
func (l *ltrMgr) cleanupLocalVolumeDirectory(ctx context.Context, ltr *LocalTestResource) {
ginkgo.By("Removing the test directory")
removeCmd := fmt.Sprintf("rm -r %s", ltr.Path)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
err := l.hostExec.IssueCommand(ctx, removeCmd, ltr.Node)
framework.ExpectNoError(err)
}
func (l *ltrMgr) setupLocalVolumeDirectoryLink(node *v1.Node, parameters map[string]string) *LocalTestResource {
func (l *ltrMgr) setupLocalVolumeDirectoryLink(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
hostDirBackend := hostDir + "-backend"
cmd := fmt.Sprintf("mkdir %s && ln -s %s %s", hostDirBackend, hostDirBackend, hostDir)
err := l.hostExec.IssueCommand(cmd, node)
err := l.hostExec.IssueCommand(ctx, cmd, node)
framework.ExpectNoError(err)
return &LocalTestResource{
Node: node,
@ -223,19 +224,19 @@ func (l *ltrMgr) setupLocalVolumeDirectoryLink(node *v1.Node, parameters map[str
}
}
func (l *ltrMgr) cleanupLocalVolumeDirectoryLink(ltr *LocalTestResource) {
func (l *ltrMgr) cleanupLocalVolumeDirectoryLink(ctx context.Context, ltr *LocalTestResource) {
ginkgo.By("Removing the test directory")
hostDir := ltr.Path
hostDirBackend := hostDir + "-backend"
removeCmd := fmt.Sprintf("rm -r %s && rm -r %s", hostDir, hostDirBackend)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
err := l.hostExec.IssueCommand(ctx, removeCmd, ltr.Node)
framework.ExpectNoError(err)
}
func (l *ltrMgr) setupLocalVolumeDirectoryBindMounted(node *v1.Node, parameters map[string]string) *LocalTestResource {
func (l *ltrMgr) setupLocalVolumeDirectoryBindMounted(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
cmd := fmt.Sprintf("mkdir %s && mount --bind %s %s", hostDir, hostDir, hostDir)
err := l.hostExec.IssueCommand(cmd, node)
err := l.hostExec.IssueCommand(ctx, cmd, node)
framework.ExpectNoError(err)
return &LocalTestResource{
Node: node,
@ -243,19 +244,19 @@ func (l *ltrMgr) setupLocalVolumeDirectoryBindMounted(node *v1.Node, parameters
}
}
func (l *ltrMgr) cleanupLocalVolumeDirectoryBindMounted(ltr *LocalTestResource) {
func (l *ltrMgr) cleanupLocalVolumeDirectoryBindMounted(ctx context.Context, ltr *LocalTestResource) {
ginkgo.By("Removing the test directory")
hostDir := ltr.Path
removeCmd := fmt.Sprintf("umount %s && rm -r %s", hostDir, hostDir)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
err := l.hostExec.IssueCommand(ctx, removeCmd, ltr.Node)
framework.ExpectNoError(err)
}
func (l *ltrMgr) setupLocalVolumeDirectoryLinkBindMounted(node *v1.Node, parameters map[string]string) *LocalTestResource {
func (l *ltrMgr) setupLocalVolumeDirectoryLinkBindMounted(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
hostDirBackend := hostDir + "-backend"
cmd := fmt.Sprintf("mkdir %s && mount --bind %s %s && ln -s %s %s", hostDirBackend, hostDirBackend, hostDirBackend, hostDirBackend, hostDir)
err := l.hostExec.IssueCommand(cmd, node)
err := l.hostExec.IssueCommand(ctx, cmd, node)
framework.ExpectNoError(err)
return &LocalTestResource{
Node: node,
@ -263,17 +264,17 @@ func (l *ltrMgr) setupLocalVolumeDirectoryLinkBindMounted(node *v1.Node, paramet
}
}
func (l *ltrMgr) cleanupLocalVolumeDirectoryLinkBindMounted(ltr *LocalTestResource) {
func (l *ltrMgr) cleanupLocalVolumeDirectoryLinkBindMounted(ctx context.Context, ltr *LocalTestResource) {
ginkgo.By("Removing the test directory")
hostDir := ltr.Path
hostDirBackend := hostDir + "-backend"
removeCmd := fmt.Sprintf("rm %s && umount %s && rm -r %s", hostDir, hostDirBackend, hostDirBackend)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
err := l.hostExec.IssueCommand(ctx, removeCmd, ltr.Node)
framework.ExpectNoError(err)
}
func (l *ltrMgr) setupLocalVolumeGCELocalSSD(node *v1.Node, parameters map[string]string) *LocalTestResource {
res, err := l.hostExec.IssueCommandWithResult("ls /mnt/disks/by-uuid/google-local-ssds-scsi-fs/", node)
func (l *ltrMgr) setupLocalVolumeGCELocalSSD(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource {
res, err := l.hostExec.IssueCommandWithResult(ctx, "ls /mnt/disks/by-uuid/google-local-ssds-scsi-fs/", node)
framework.ExpectNoError(err)
dirName := strings.Fields(res)[0]
hostDir := "/mnt/disks/by-uuid/google-local-ssds-scsi-fs/" + dirName
@ -283,47 +284,47 @@ func (l *ltrMgr) setupLocalVolumeGCELocalSSD(node *v1.Node, parameters map[strin
}
}
func (l *ltrMgr) cleanupLocalVolumeGCELocalSSD(ltr *LocalTestResource) {
func (l *ltrMgr) cleanupLocalVolumeGCELocalSSD(ctx context.Context, ltr *LocalTestResource) {
// This filesystem is attached during cluster initialization; we clean all files to make it reusable.
removeCmd := fmt.Sprintf("find '%s' -mindepth 1 -maxdepth 1 -print0 | xargs -r -0 rm -rf", ltr.Path)
err := l.hostExec.IssueCommand(removeCmd, ltr.Node)
err := l.hostExec.IssueCommand(ctx, removeCmd, ltr.Node)
framework.ExpectNoError(err)
}
func (l *ltrMgr) expandLocalVolumeBlockFS(ltr *LocalTestResource, mbToAdd int) error {
func (l *ltrMgr) expandLocalVolumeBlockFS(ctx context.Context, ltr *LocalTestResource, mbToAdd int) error {
ddCmd := fmt.Sprintf("dd if=/dev/zero of=%s/file conv=notrunc oflag=append bs=1M count=%d", ltr.loopDir, mbToAdd)
loopDev := l.findLoopDevice(ltr.loopDir, ltr.Node)
loopDev := l.findLoopDevice(ctx, ltr.loopDir, ltr.Node)
losetupCmd := fmt.Sprintf("losetup -c %s", loopDev)
return l.hostExec.IssueCommand(fmt.Sprintf("%s && %s", ddCmd, losetupCmd), ltr.Node)
return l.hostExec.IssueCommand(ctx, fmt.Sprintf("%s && %s", ddCmd, losetupCmd), ltr.Node)
}
func (l *ltrMgr) ExpandBlockDevice(ltr *LocalTestResource, mbtoAdd int) error {
func (l *ltrMgr) ExpandBlockDevice(ctx context.Context, ltr *LocalTestResource, mbtoAdd int) error {
switch ltr.VolumeType {
case LocalVolumeBlockFS:
return l.expandLocalVolumeBlockFS(ltr, mbtoAdd)
return l.expandLocalVolumeBlockFS(ctx, ltr, mbtoAdd)
}
return fmt.Errorf("Failed to expand local test resource, unsupported volume type: %s", ltr.VolumeType)
}
func (l *ltrMgr) Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource {
func (l *ltrMgr) Create(ctx context.Context, node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource {
var ltr *LocalTestResource
switch volumeType {
case LocalVolumeDirectory:
ltr = l.setupLocalVolumeDirectory(node, parameters)
ltr = l.setupLocalVolumeDirectory(ctx, node, parameters)
case LocalVolumeDirectoryLink:
ltr = l.setupLocalVolumeDirectoryLink(node, parameters)
ltr = l.setupLocalVolumeDirectoryLink(ctx, node, parameters)
case LocalVolumeDirectoryBindMounted:
ltr = l.setupLocalVolumeDirectoryBindMounted(node, parameters)
ltr = l.setupLocalVolumeDirectoryBindMounted(ctx, node, parameters)
case LocalVolumeDirectoryLinkBindMounted:
ltr = l.setupLocalVolumeDirectoryLinkBindMounted(node, parameters)
ltr = l.setupLocalVolumeDirectoryLinkBindMounted(ctx, node, parameters)
case LocalVolumeTmpfs:
ltr = l.setupLocalVolumeTmpfs(node, parameters)
ltr = l.setupLocalVolumeTmpfs(ctx, node, parameters)
case LocalVolumeBlock:
ltr = l.setupLocalVolumeBlock(node, parameters)
ltr = l.setupLocalVolumeBlock(ctx, node, parameters)
case LocalVolumeBlockFS:
ltr = l.setupLocalVolumeBlockFS(node, parameters)
ltr = l.setupLocalVolumeBlockFS(ctx, node, parameters)
case LocalVolumeGCELocalSSD:
ltr = l.setupLocalVolumeGCELocalSSD(node, parameters)
ltr = l.setupLocalVolumeGCELocalSSD(ctx, node, parameters)
default:
framework.Failf("Failed to create local test resource on node %q, unsupported volume type: %v is specified", node.Name, volumeType)
return nil
@ -335,24 +336,24 @@ func (l *ltrMgr) Create(node *v1.Node, volumeType LocalVolumeType, parameters ma
return ltr
}
func (l *ltrMgr) Remove(ltr *LocalTestResource) {
func (l *ltrMgr) Remove(ctx context.Context, ltr *LocalTestResource) {
switch ltr.VolumeType {
case LocalVolumeDirectory:
l.cleanupLocalVolumeDirectory(ltr)
l.cleanupLocalVolumeDirectory(ctx, ltr)
case LocalVolumeDirectoryLink:
l.cleanupLocalVolumeDirectoryLink(ltr)
l.cleanupLocalVolumeDirectoryLink(ctx, ltr)
case LocalVolumeDirectoryBindMounted:
l.cleanupLocalVolumeDirectoryBindMounted(ltr)
l.cleanupLocalVolumeDirectoryBindMounted(ctx, ltr)
case LocalVolumeDirectoryLinkBindMounted:
l.cleanupLocalVolumeDirectoryLinkBindMounted(ltr)
l.cleanupLocalVolumeDirectoryLinkBindMounted(ctx, ltr)
case LocalVolumeTmpfs:
l.cleanupLocalVolumeTmpfs(ltr)
l.cleanupLocalVolumeTmpfs(ctx, ltr)
case LocalVolumeBlock:
l.cleanupLocalVolumeBlock(ltr)
l.cleanupLocalVolumeBlock(ctx, ltr)
case LocalVolumeBlockFS:
l.cleanupLocalVolumeBlockFS(ltr)
l.cleanupLocalVolumeBlockFS(ctx, ltr)
case LocalVolumeGCELocalSSD:
l.cleanupLocalVolumeGCELocalSSD(ltr)
l.cleanupLocalVolumeGCELocalSSD(ctx, ltr)
default:
framework.Failf("Failed to remove local test resource, unsupported volume type: %v is specified", ltr.VolumeType)
}

View File

@ -43,8 +43,8 @@ import (
//
// The output goes to log files (when using --report-dir, as in the
// CI) or the output stream (otherwise).
func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func() {
ctx, cancel := context.WithCancel(context.Background())
func StartPodLogs(ctx context.Context, f *framework.Framework, driverNamespace *v1.Namespace) func() {
ctx, cancel := context.WithCancel(ctx)
cs := f.ClientSet
ns := driverNamespace.Name
@ -103,17 +103,17 @@ func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func()
// - If `systemctl` returns stderr "command not found", issues the command via `service`
// - If `service` also returns stderr "command not found", the test is aborted.
// Allowed kubeletOps are `KStart`, `KStop`, and `KRestart`
func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
func KubeletCommand(ctx context.Context, kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
command := ""
systemctlPresent := false
kubeletPid := ""
nodeIP, err := getHostAddress(c, pod)
nodeIP, err := getHostAddress(ctx, c, pod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"
framework.Logf("Checking if systemctl command is present")
sshResult, err := e2essh.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
sshResult, err := e2essh.SSH(ctx, "systemctl --version", nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
if !strings.Contains(sshResult.Stderr, "command not found") {
command = fmt.Sprintf("systemctl %s kubelet", string(kOp))
@ -122,23 +122,23 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
command = fmt.Sprintf("service kubelet %s", string(kOp))
}
sudoPresent := isSudoPresent(nodeIP, framework.TestContext.Provider)
sudoPresent := isSudoPresent(ctx, nodeIP, framework.TestContext.Provider)
if sudoPresent {
command = fmt.Sprintf("sudo %s", command)
}
if kOp == KRestart {
kubeletPid = getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
kubeletPid = getKubeletMainPid(ctx, nodeIP, sudoPresent, systemctlPresent)
}
framework.Logf("Attempting `%s`", command)
sshResult, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
sshResult, err = e2essh.SSH(ctx, command, nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
e2essh.LogResult(sshResult)
gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult)
if kOp == KStop {
if ok := e2enode.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
if ok := e2enode.WaitForNodeToBeNotReady(ctx, c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName)
}
}
@ -146,7 +146,10 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
// Wait for a minute to check if kubelet Pid is getting changed
isPidChanged := false
for start := time.Now(); time.Since(start) < 1*time.Minute; time.Sleep(2 * time.Second) {
kubeletPidAfterRestart := getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
if ctx.Err() != nil {
framework.Fail("timed out waiting for kubelet PID change")
}
kubeletPidAfterRestart := getKubeletMainPid(ctx, nodeIP, sudoPresent, systemctlPresent)
if kubeletPid != kubeletPidAfterRestart {
isPidChanged = true
break
@ -161,7 +164,7 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
}
if kOp == KStart || kOp == KRestart {
// For kubelet start and restart operations, Wait until Node becomes Ready
if ok := e2enode.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
if ok := e2enode.WaitForNodeToBeReady(ctx, c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
}
}
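For reference, a minimal sketch of driving a kubelet restart through the new signature; the clientset and the pod whose node is being disrupted are assumed to exist in the calling test, with this package imported as storageutils.
// Sketch only: restart the kubelet on clientPod's node. KubeletCommand itself waits
// for the PID to change and for the node to become Ready again before returning.
storageutils.KubeletCommand(ctx, storageutils.KRestart, f.ClientSet, clientPod)
// For stop/start pairs, registering the start up front keeps the node usable on failure:
//   storageutils.KubeletCommand(ctx, storageutils.KStop, f.ClientSet, clientPod)
//   ginkgo.DeferCleanup(storageutils.KubeletCommand, storageutils.KStart, f.ClientSet, clientPod)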
@ -170,8 +173,8 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
// getHostAddress gets the node for a pod and returns the first
// address. Returns an error if the node the pod is on doesn't have an
// address.
func getHostAddress(client clientset.Interface, p *v1.Pod) (string, error) {
node, err := client.CoreV1().Nodes().Get(context.TODO(), p.Spec.NodeName, metav1.GetOptions{})
func getHostAddress(ctx context.Context, client clientset.Interface, p *v1.Pod) (string, error) {
node, err := client.CoreV1().Nodes().Get(ctx, p.Spec.NodeName, metav1.GetOptions{})
if err != nil {
return "", err
}

View File

@ -48,11 +48,11 @@ var (
)
// WaitForSnapshotReady waits for a VolumeSnapshot to be ready to use or until timeout occurs, whichever comes first.
func WaitForSnapshotReady(c dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error {
func WaitForSnapshotReady(ctx context.Context, c dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for VolumeSnapshot %s to become ready", timeout, snapshotName)
if successful := WaitUntil(poll, timeout, func() bool {
snapshot, err := c.Resource(SnapshotGVR).Namespace(ns).Get(context.TODO(), snapshotName, metav1.GetOptions{})
snapshot, err := c.Resource(SnapshotGVR).Namespace(ns).Get(ctx, snapshotName, metav1.GetOptions{})
if err != nil {
framework.Logf("Failed to get snapshot %q, retrying in %v. Error: %v", snapshotName, poll, err)
return false
@ -80,12 +80,12 @@ func WaitForSnapshotReady(c dynamic.Interface, ns string, snapshotName string, p
// GetSnapshotContentFromSnapshot returns the VolumeSnapshotContent object Bound to a
// given VolumeSnapshot
func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured.Unstructured, timeout time.Duration) *unstructured.Unstructured {
func GetSnapshotContentFromSnapshot(ctx context.Context, dc dynamic.Interface, snapshot *unstructured.Unstructured, timeout time.Duration) *unstructured.Unstructured {
defer ginkgo.GinkgoRecover()
err := WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, timeout)
err := WaitForSnapshotReady(ctx, dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, timeout)
framework.ExpectNoError(err)
vs, err := dc.Resource(SnapshotGVR).Namespace(snapshot.GetNamespace()).Get(context.TODO(), snapshot.GetName(), metav1.GetOptions{})
vs, err := dc.Resource(SnapshotGVR).Namespace(snapshot.GetNamespace()).Get(ctx, snapshot.GetName(), metav1.GetOptions{})
snapshotStatus := vs.Object["status"].(map[string]interface{})
snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string)
@ -93,7 +93,7 @@ func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured
framework.Logf("snapshotContentName %s", snapshotContentName)
framework.ExpectNoError(err)
vscontent, err := dc.Resource(SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
vscontent, err := dc.Resource(SnapshotContentGVR).Get(ctx, snapshotContentName, metav1.GetOptions{})
framework.ExpectNoError(err)
return vscontent
@ -101,9 +101,9 @@ func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured
}
// DeleteSnapshotWithoutWaiting deletes a VolumeSnapshot and returns directly without waiting
func DeleteSnapshotWithoutWaiting(dc dynamic.Interface, ns string, snapshotName string) error {
func DeleteSnapshotWithoutWaiting(ctx context.Context, dc dynamic.Interface, ns string, snapshotName string) error {
ginkgo.By("deleting the snapshot")
err := dc.Resource(SnapshotGVR).Namespace(ns).Delete(context.TODO(), snapshotName, metav1.DeleteOptions{})
err := dc.Resource(SnapshotGVR).Namespace(ns).Delete(ctx, snapshotName, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return err
}
@ -111,15 +111,15 @@ func DeleteSnapshotWithoutWaiting(dc dynamic.Interface, ns string, snapshotName
}
// DeleteAndWaitSnapshot deletes a VolumeSnapshot and waits for it to be deleted or until timeout occurs, whichever comes first
func DeleteAndWaitSnapshot(dc dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error {
func DeleteAndWaitSnapshot(ctx context.Context, dc dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error {
var err error
err = DeleteSnapshotWithoutWaiting(dc, ns, snapshotName)
err = DeleteSnapshotWithoutWaiting(ctx, dc, ns, snapshotName)
if err != nil {
return err
}
ginkgo.By("checking the Snapshot has been deleted")
err = WaitForNamespacedGVRDeletion(dc, SnapshotGVR, ns, snapshotName, poll, timeout)
err = WaitForNamespacedGVRDeletion(ctx, dc, SnapshotGVR, ns, snapshotName, poll, timeout)
return err
}
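
The practical effect of these signature changes is that callers now thread the test's context into every snapshot helper instead of each helper creating its own context.TODO(). A minimal sketch of the updated call pattern, assuming these helpers live in k8s.io/kubernetes/test/e2e/storage/utils and that dc, ns and snapshotName are set up elsewhere in the test; the poll/timeout values are illustrative only:

package storage_test // hypothetical caller; names below are assumptions of this sketch

import (
	"context"
	"time"

	"k8s.io/client-go/dynamic"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

// deleteSnapshotExample shows the post-bump call pattern: the test context is
// passed explicitly rather than the helpers calling context.TODO() internally.
func deleteSnapshotExample(ctx context.Context, dc dynamic.Interface, ns, snapshotName string) {
	// Wait for the snapshot to become ready, then delete it and wait for the
	// object to disappear; both helpers now honour ctx cancellation.
	err := utils.WaitForSnapshotReady(ctx, dc, ns, snapshotName, framework.Poll, 5*time.Minute)
	framework.ExpectNoError(err)
	framework.ExpectNoError(utils.DeleteAndWaitSnapshot(ctx, dc, ns, snapshotName, framework.Poll, 5*time.Minute))
}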


@ -75,7 +75,7 @@ func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string
}
// getKubeletMainPid returns the main PID of the kubelet process
func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) string {
func getKubeletMainPid(ctx context.Context, nodeIP string, sudoPresent bool, systemctlPresent bool) string {
command := ""
if systemctlPresent {
command = "systemctl status kubelet | grep 'Main PID'"
@ -86,7 +86,7 @@ func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) s
command = fmt.Sprintf("sudo %s", command)
}
framework.Logf("Attempting `%s`", command)
sshResult, err := e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
sshResult, err := e2essh.SSH(ctx, command, nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", nodeIP))
e2essh.LogResult(sshResult)
gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to get kubelet PID")
@ -95,7 +95,7 @@ func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) s
}
// TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts
func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
func TestKubeletRestartsAndRestoresMount(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
byteLen := 64
seed := time.Now().UTC().UnixNano()
@ -103,7 +103,7 @@ func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Fra
CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
ginkgo.By("Restarting kubelet")
KubeletCommand(KRestart, c, clientPod)
KubeletCommand(ctx, KRestart, c, clientPod)
ginkgo.By("Testing that written file is accessible.")
CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
@ -112,7 +112,7 @@ func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Fra
}
// TestKubeletRestartsAndRestoresMap tests that a volume mapped to a pod remains mapped after a kubelet restarts
func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
func TestKubeletRestartsAndRestoresMap(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
byteLen := 64
seed := time.Now().UTC().UnixNano()
@ -120,7 +120,7 @@ func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Frame
CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)
ginkgo.By("Restarting kubelet")
KubeletCommand(KRestart, c, clientPod)
KubeletCommand(ctx, KRestart, c, clientPod)
ginkgo.By("Testing that written pv is accessible.")
CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)
@ -132,20 +132,20 @@ func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Frame
// forceDelete indicates whether the pod is forcefully deleted.
// checkSubpath indicates whether the subpath should be checked.
// If secondPod is set, it is started when kubelet is down to check that the volume is usable while the old pod is being deleted and the new pod is starting.
func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool, secondPod *v1.Pod, volumePath string) {
nodeIP, err := getHostAddress(c, clientPod)
func TestVolumeUnmountsFromDeletedPodWithForceOption(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool, secondPod *v1.Pod, volumePath string) {
nodeIP, err := getHostAddress(ctx, c, clientPod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"
ginkgo.By("Expecting the volume mount to be found.")
result, err := e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
result, err := e2essh.SSH(ctx, fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
if checkSubpath {
ginkgo.By("Expecting the volume subpath mount to be found.")
result, err := e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
result, err := e2essh.SSH(ctx, fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
@ -157,11 +157,9 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
// This makes sure the kubelet is started again after the test finishes, whether it fails or not.
defer func() {
KubeletCommand(KStart, c, clientPod)
}()
ginkgo.DeferCleanup(KubeletCommand, KStart, c, clientPod)
ginkgo.By("Stopping the kubelet.")
KubeletCommand(KStop, c, clientPod)
KubeletCommand(ctx, KStop, c, clientPod)
if secondPod != nil {
ginkgo.By("Starting the second pod")
@ -171,15 +169,15 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
if forceDelete {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, *metav1.NewDeleteOptions(0))
err = c.CoreV1().Pods(clientPod.Namespace).Delete(ctx, clientPod.Name, *metav1.NewDeleteOptions(0))
} else {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.DeleteOptions{})
err = c.CoreV1().Pods(clientPod.Namespace).Delete(ctx, clientPod.Name, metav1.DeleteOptions{})
}
framework.ExpectNoError(err)
ginkgo.By("Starting the kubelet and waiting for pod to delete.")
KubeletCommand(KStart, c, clientPod)
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
KubeletCommand(ctx, KStart, c, clientPod)
err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
if err != nil {
framework.ExpectNoError(err, "Expected pod to be not found.")
}
@ -192,7 +190,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
if secondPod != nil {
ginkgo.By("Waiting for the second pod.")
err = e2epod.WaitForPodRunningInNamespace(c, secondPod)
err = e2epod.WaitForPodRunningInNamespace(ctx, c, secondPod)
framework.ExpectNoError(err, "while waiting for the second pod Running")
ginkgo.By("Getting the second pod uuid.")
@ -200,7 +198,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
framework.ExpectNoError(err, "getting the second UID")
ginkgo.By("Expecting the volume mount to be found in the second pod.")
result, err := e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", secondPod.UID), nodeIP, framework.TestContext.Provider)
result, err := e2essh.SSH(ctx, fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", secondPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error when checking the second pod.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
@ -209,12 +207,12 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
CheckReadFromPath(f, secondPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
err = c.CoreV1().Pods(secondPod.Namespace).Delete(context.TODO(), secondPod.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "when deleting the second pod")
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, secondPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, secondPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
framework.ExpectNoError(err, "when waiting for the second pod to disappear")
}
ginkgo.By("Expecting the volume mount not to be found.")
result, err = e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
result, err = e2essh.SSH(ctx, fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
@ -222,7 +220,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
if checkSubpath {
ginkgo.By("Expecting the volume subpath mount not to be found.")
result, err = e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
result, err = e2essh.SSH(ctx, fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
@ -232,64 +230,62 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
}
// TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false, nil, volumePath)
func TestVolumeUnmountsFromDeletedPod(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, c, f, clientPod, false, false, nil, volumePath)
}
// TestVolumeUnmountsFromForceDeletedPod tests that a volume unmounts if the client pod was forcefully deleted while the kubelet was down.
func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false, nil, volumePath)
func TestVolumeUnmountsFromForceDeletedPod(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, c, f, clientPod, true, false, nil, volumePath)
}
// TestVolumeUnmapsFromDeletedPodWithForceOption tests that a volume unmaps if the client pod was deleted while the kubelet was down.
// forceDelete indicates whether the pod is forcefully deleted.
func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, devicePath string) {
nodeIP, err := getHostAddress(c, clientPod)
func TestVolumeUnmapsFromDeletedPodWithForceOption(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, devicePath string) {
nodeIP, err := getHostAddress(ctx, c, clientPod)
framework.ExpectNoError(err, "Failed to get nodeIP.")
nodeIP = nodeIP + ":22"
// Creating command to check whether path exists
podDirectoryCmd := fmt.Sprintf("ls /var/lib/kubelet/pods/%s/volumeDevices/*/ | grep '.'", clientPod.UID)
if isSudoPresent(nodeIP, framework.TestContext.Provider) {
if isSudoPresent(ctx, nodeIP, framework.TestContext.Provider) {
podDirectoryCmd = fmt.Sprintf("sudo sh -c \"%s\"", podDirectoryCmd)
}
// Directories in the global directory have unpredictable names; however, device symlinks
// have the same name as pod.UID. So just find anything named after pod.UID.
globalBlockDirectoryCmd := fmt.Sprintf("find /var/lib/kubelet/plugins -name %s", clientPod.UID)
if isSudoPresent(nodeIP, framework.TestContext.Provider) {
if isSudoPresent(ctx, nodeIP, framework.TestContext.Provider) {
globalBlockDirectoryCmd = fmt.Sprintf("sudo sh -c \"%s\"", globalBlockDirectoryCmd)
}
ginkgo.By("Expecting the symlinks from PodDeviceMapPath to be found.")
result, err := e2essh.SSH(podDirectoryCmd, nodeIP, framework.TestContext.Provider)
result, err := e2essh.SSH(ctx, podDirectoryCmd, nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
ginkgo.By("Expecting the symlinks from global map path to be found.")
result, err = e2essh.SSH(globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider)
result, err = e2essh.SSH(ctx, globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected find exit code of 0, got %d", result.Code))
// This makes sure the kubelet is started again after the test finishes, whether it fails or not.
defer func() {
KubeletCommand(KStart, c, clientPod)
}()
ginkgo.DeferCleanup(KubeletCommand, KStart, c, clientPod)
ginkgo.By("Stopping the kubelet.")
KubeletCommand(KStop, c, clientPod)
KubeletCommand(ctx, KStop, c, clientPod)
ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
if forceDelete {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, *metav1.NewDeleteOptions(0))
err = c.CoreV1().Pods(clientPod.Namespace).Delete(ctx, clientPod.Name, *metav1.NewDeleteOptions(0))
} else {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, metav1.DeleteOptions{})
err = c.CoreV1().Pods(clientPod.Namespace).Delete(ctx, clientPod.Name, metav1.DeleteOptions{})
}
framework.ExpectNoError(err, "Failed to delete pod.")
ginkgo.By("Starting the kubelet and waiting for pod to delete.")
KubeletCommand(KStart, c, clientPod)
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
KubeletCommand(ctx, KStart, c, clientPod)
err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
framework.ExpectNoError(err, "Expected pod to be not found.")
if forceDelete {
@ -299,13 +295,13 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra
}
ginkgo.By("Expecting the symlink from PodDeviceMapPath not to be found.")
result, err = e2essh.SSH(podDirectoryCmd, nodeIP, framework.TestContext.Provider)
result, err = e2essh.SSH(ctx, podDirectoryCmd, nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty.")
ginkgo.By("Expecting the symlinks from global map path not to be found.")
result, err = e2essh.SSH(globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider)
result, err = e2essh.SSH(ctx, globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected find stdout to be empty.")
@ -314,17 +310,17 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra
}
// TestVolumeUnmapsFromDeletedPod tests that a volume unmaps if the client pod was deleted while the kubelet was down.
func TestVolumeUnmapsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, devicePath string) {
TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, false, devicePath)
func TestVolumeUnmapsFromDeletedPod(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, devicePath string) {
TestVolumeUnmapsFromDeletedPodWithForceOption(ctx, c, f, clientPod, false, devicePath)
}
// TestVolumeUnmapsFromForceDeletedPod tests that a volume unmaps if the client pod was forcefully deleted while the kubelet was down.
func TestVolumeUnmapsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, devicePath string) {
TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, true, devicePath)
func TestVolumeUnmapsFromForceDeletedPod(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, devicePath string) {
TestVolumeUnmapsFromDeletedPodWithForceOption(ctx, c, f, clientPod, true, devicePath)
}
// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
func RunInPodWithVolume(c clientset.Interface, t *framework.TimeoutContext, ns, claimName, command string) {
func RunInPodWithVolume(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns, claimName, command string) {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@ -362,16 +358,14 @@ func RunInPodWithVolume(c clientset.Interface, t *framework.TimeoutContext, ns,
},
},
}
pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
pod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create pod: %v", err)
defer func() {
e2epod.DeletePodOrFail(c, ns, pod.Name)
}()
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, pod.Namespace, t.PodStartSlow))
ginkgo.DeferCleanup(e2epod.DeletePodOrFail, c, ns, pod.Name)
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, pod.Name, pod.Namespace, t.PodStartSlow))
}
// StartExternalProvisioner creates an external provisioner pod
func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginName string) *v1.Pod {
func StartExternalProvisioner(ctx context.Context, c clientset.Interface, ns string, externalPluginName string) *v1.Pod {
podClient := c.CoreV1().Pods(ns)
provisionerPod := &v1.Pod{
@ -432,21 +426,21 @@ func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginNa
},
},
}
provisionerPod, err := podClient.Create(context.TODO(), provisionerPod, metav1.CreateOptions{})
provisionerPod, err := podClient.Create(ctx, provisionerPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err)
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, provisionerPod))
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, c, provisionerPod))
ginkgo.By("locating the provisioner pod")
pod, err := podClient.Get(context.TODO(), provisionerPod.Name, metav1.GetOptions{})
pod, err := podClient.Get(ctx, provisionerPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)
return pod
}
func isSudoPresent(nodeIP string, provider string) bool {
func isSudoPresent(ctx context.Context, nodeIP string, provider string) bool {
framework.Logf("Checking if sudo command is present")
sshResult, err := e2essh.SSH("sudo --version", nodeIP, provider)
sshResult, err := e2essh.SSH(ctx, "sudo --version", nodeIP, provider)
framework.ExpectNoError(err, "SSH to %q errored.", nodeIP)
if !strings.Contains(sshResult.Stderr, "command not found") {
return true
@ -562,8 +556,8 @@ func GetSectorSize(f *framework.Framework, pod *v1.Pod, device string) int {
}
// findMountPoints returns all mount points on given node under specified directory.
func findMountPoints(hostExec HostExec, node *v1.Node, dir string) []string {
result, err := hostExec.IssueCommandWithResult(fmt.Sprintf(`find %s -type d -exec mountpoint {} \; | grep 'is a mountpoint$' || true`, dir), node)
func findMountPoints(ctx context.Context, hostExec HostExec, node *v1.Node, dir string) []string {
result, err := hostExec.IssueCommandWithResult(ctx, fmt.Sprintf(`find %s -type d -exec mountpoint {} \; | grep 'is a mountpoint$' || true`, dir), node)
framework.ExpectNoError(err, "Encountered HostExec error.")
var mountPoints []string
if err != nil {
@ -578,16 +572,16 @@ func findMountPoints(hostExec HostExec, node *v1.Node, dir string) []string {
}
// FindVolumeGlobalMountPoints returns all volume global mount points on the node of given pod.
func FindVolumeGlobalMountPoints(hostExec HostExec, node *v1.Node) sets.String {
return sets.NewString(findMountPoints(hostExec, node, "/var/lib/kubelet/plugins")...)
func FindVolumeGlobalMountPoints(ctx context.Context, hostExec HostExec, node *v1.Node) sets.String {
return sets.NewString(findMountPoints(ctx, hostExec, node, "/var/lib/kubelet/plugins")...)
}
// CreateDriverNamespace creates a namespace for CSI driver installation.
// The namespace is still tracked and is guaranteed to be deleted when the test terminates.
func CreateDriverNamespace(f *framework.Framework) *v1.Namespace {
func CreateDriverNamespace(ctx context.Context, f *framework.Framework) *v1.Namespace {
ginkgo.By(fmt.Sprintf("Building a driver namespace object, basename %s", f.Namespace.Name))
// The driver namespace will be bound to the test namespace in the prefix
namespace, err := f.CreateNamespace(f.Namespace.Name, map[string]string{
namespace, err := f.CreateNamespace(ctx, f.Namespace.Name, map[string]string{
"e2e-framework": f.BaseName,
"e2e-test-namespace": f.Namespace.Name,
})
@ -595,7 +589,7 @@ func CreateDriverNamespace(f *framework.Framework) *v1.Namespace {
if framework.TestContext.VerifyServiceAccount {
ginkgo.By("Waiting for a default service account to be provisioned in namespace")
err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
err = framework.WaitForDefaultServiceAccountInNamespace(ctx, f.ClientSet, namespace.Name)
framework.ExpectNoError(err)
} else {
framework.Logf("Skipping waiting for service account")
@ -604,11 +598,11 @@ func CreateDriverNamespace(f *framework.Framework) *v1.Namespace {
}
// WaitForGVRDeletion waits until a non-namespaced object has been deleted
func WaitForGVRDeletion(c dynamic.Interface, gvr schema.GroupVersionResource, objectName string, poll, timeout time.Duration) error {
func WaitForGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, objectName string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for %s %s to be deleted", timeout, gvr.Resource, objectName)
if successful := WaitUntil(poll, timeout, func() bool {
_, err := c.Resource(gvr).Get(context.TODO(), objectName, metav1.GetOptions{})
_, err := c.Resource(gvr).Get(ctx, objectName, metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) {
framework.Logf("%s %v is not found and has been deleted", gvr.Resource, objectName)
return true
@ -627,11 +621,11 @@ func WaitForGVRDeletion(c dynamic.Interface, gvr schema.GroupVersionResource, ob
}
// WaitForNamespacedGVRDeletion waits until a namespaced object has been deleted
func WaitForNamespacedGVRDeletion(c dynamic.Interface, gvr schema.GroupVersionResource, ns, objectName string, poll, timeout time.Duration) error {
func WaitForNamespacedGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, ns, objectName string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for %s %s to be deleted", timeout, gvr.Resource, objectName)
if successful := WaitUntil(poll, timeout, func() bool {
_, err := c.Resource(gvr).Namespace(ns).Get(context.TODO(), objectName, metav1.GetOptions{})
_, err := c.Resource(gvr).Namespace(ns).Get(ctx, objectName, metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) {
framework.Logf("%s %s is not found in namespace %s and has been deleted", gvr.Resource, objectName, ns)
return true
@ -651,6 +645,7 @@ func WaitForNamespacedGVRDeletion(c dynamic.Interface, gvr schema.GroupVersionRe
// WaitUntil runs checkDone until a timeout is reached
func WaitUntil(poll, timeout time.Duration, checkDone func() bool) bool {
// TODO (pohly): replace with gomega.Eventually
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
if checkDone() {
framework.Logf("WaitUntil finished successfully after %v", time.Since(start))
@ -716,8 +711,8 @@ func ChangeFilePathGidInPod(f *framework.Framework, filePath, targetGid string,
}
// DeleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found"
func DeleteStorageClass(cs clientset.Interface, className string) error {
err := cs.StorageV1().StorageClasses().Delete(context.TODO(), className, metav1.DeleteOptions{})
func DeleteStorageClass(ctx context.Context, cs clientset.Interface, className string) error {
err := cs.StorageV1().StorageClasses().Delete(ctx, className, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return err
}
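
Besides the ctx plumbing, the hunks above replace in-function defer blocks with ginkgo.DeferCleanup, so the kubelet restart runs as a Ginkgo cleanup node even when the spec is interrupted. A minimal, self-contained sketch of the pattern; kubeletOp is a stand-in for the KubeletCommand helper above and is an assumption of this example:

package utils_test // illustrative only; specBody is meant to run inside a ginkgo It block

import (
	"context"

	"github.com/onsi/ginkgo/v2"
)

// kubeletOp stands in for KubeletCommand(ctx, op, c, pod) from the file above.
func kubeletOp(ctx context.Context, op string) { /* stop/start the kubelet */ }

func specBody() {
	// Before: cleanup tied to the enclosing function via defer, e.g.
	//   defer func() { kubeletOp(context.TODO(), "start") }()
	// After: Ginkgo owns the cleanup. Because kubeletOp's first parameter is a
	// context, DeferCleanup supplies its own SpecContext when the hook runs,
	// so the restart happens even if the spec fails or is interrupted.
	ginkgo.DeferCleanup(kubeletOp, "start")

	kubeletOp(context.TODO(), "stop")
	// ... assertions while the kubelet is down ...
}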


@ -22,7 +22,7 @@ import (
e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
)
//go:embed cluster-dns flexvolume guestbook kubectl sample-device-plugin.yaml scheduling/nvidia-driver-installer.yaml statefulset storage-csi
//go:embed cluster-dns flexvolume guestbook kubectl sample-device-plugin scheduling/nvidia-driver-installer.yaml statefulset storage-csi
var e2eTestingManifestsFS embed.FS
func GetE2ETestingManifestsFS() e2etestfiles.EmbeddedFileSource {


@ -0,0 +1,52 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: sample-device-plugin-beta
namespace: kube-system
labels:
k8s-app: sample-device-plugin
spec:
selector:
matchLabels:
k8s-app: sample-device-plugin
template:
metadata:
labels:
k8s-app: sample-device-plugin
annotations:
spec:
priorityClassName: system-node-critical
tolerations:
- operator: "Exists"
effect: "NoExecute"
- operator: "Exists"
effect: "NoSchedule"
volumes:
- name: device-plugin
hostPath:
path: /var/lib/kubelet/device-plugins
- name: plugins-registry-probe-mode
hostPath:
path: /var/lib/kubelet/plugins_registry
- name: dev
hostPath:
path: /dev
containers:
- image: registry.k8s.io/e2e-test-images/sample-device-plugin:1.5
name: sample-device-plugin
env:
- name: PLUGIN_SOCK_DIR
value: "/var/lib/kubelet/device-plugins"
- name: REGISTER_CONTROL_FILE
value: "/var/lib/kubelet/device-plugins/sample/registration"
securityContext:
privileged: true
volumeMounts:
- name: device-plugin
mountPath: /var/lib/kubelet/device-plugins
- name: plugins-registry-probe-mode
mountPath: /var/lib/kubelet/plugins_registry
- name: dev
mountPath: /dev
updateStrategy:
type: RollingUpdate


@ -39,7 +39,7 @@ spec:
- name: socket-dir
mountPath: /csi
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
image: registry.k8s.io/sig-storage/csi-provisioner:v3.4.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"


@ -218,7 +218,7 @@ spec:
serviceAccountName: csi-hostpathplugin-sa
containers:
- name: hostpath
image: registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
image: registry.k8s.io/sig-storage/hostpathplugin:v1.11.0
args:
- "--drivername=hostpath.csi.k8s.io"
- "--v=5"
@ -323,7 +323,7 @@ spec:
name: socket-dir
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
image: registry.k8s.io/sig-storage/csi-provisioner:v3.4.0
args:
- -v=5
- --csi-address=/csi/csi.sock


@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
image: registry.k8s.io/sig-storage/csi-provisioner:v3.4.0
args:
- "--csi-address=$(ADDRESS)"
# Topology support is needed for the pod rescheduling test


@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
image: registry.k8s.io/sig-storage/csi-provisioner:v3.4.0
args:
- "--csi-address=$(ADDRESS)"
# Topology support is needed for the pod rescheduling test


@ -53,8 +53,13 @@ type AuditEvent struct {
// not reference these maps after calling the Check functions.
AdmissionWebhookMutationAnnotations map[string]string
AdmissionWebhookPatchAnnotations map[string]string
// Only populated when a filter is provided to testEventFromInternalFiltered
CustomAuditAnnotations map[string]string
}
type AuditAnnotationsFilter func(key, val string) bool
// MissingEventsReport provides an analysis of whether any expected events are missing
type MissingEventsReport struct {
FirstEventChecked *auditinternal.Event
@ -78,6 +83,13 @@ func (m *MissingEventsReport) String() string {
// CheckAuditLines searches the audit log for the expected audit lines.
func CheckAuditLines(stream io.Reader, expected []AuditEvent, version schema.GroupVersion) (missingReport *MissingEventsReport, err error) {
return CheckAuditLinesFiltered(stream, expected, version, nil)
}
// CheckAuditLinesFiltered searches the audit log for the expected audit lines; customAnnotationsFilter
// controls which audit annotations are added to AuditEvent.CustomAuditAnnotations.
// If the customAnnotationsFilter is nil, AuditEvent.CustomAuditAnnotations will be empty.
func CheckAuditLinesFiltered(stream io.Reader, expected []AuditEvent, version schema.GroupVersion, customAnnotationsFilter AuditAnnotationsFilter) (missingReport *MissingEventsReport, err error) {
expectations := newAuditEventTracker(expected)
scanner := bufio.NewScanner(stream)
@ -100,7 +112,7 @@ func CheckAuditLines(stream io.Reader, expected []AuditEvent, version schema.Gro
}
missingReport.LastEventChecked = e
event, err := testEventFromInternal(e)
event, err := testEventFromInternalFiltered(e, customAnnotationsFilter)
if err != nil {
return missingReport, err
}
@ -162,6 +174,13 @@ func CheckForDuplicates(el auditinternal.EventList) (auditinternal.EventList, er
// testEventFromInternal takes an internal audit event and returns a test event
func testEventFromInternal(e *auditinternal.Event) (AuditEvent, error) {
return testEventFromInternalFiltered(e, nil)
}
// testEventFromInternalFiltered takes an internal audit event and returns a test event; customAnnotationsFilter
// controls which audit annotations are added to AuditEvent.CustomAuditAnnotations.
// If the customAnnotationsFilter is nil, AuditEvent.CustomAuditAnnotations will be empty.
func testEventFromInternalFiltered(e *auditinternal.Event, customAnnotationsFilter AuditAnnotationsFilter) (AuditEvent, error) {
event := AuditEvent{
Level: e.Level,
Stage: e.Stage,
@ -199,6 +218,11 @@ func testEventFromInternal(e *auditinternal.Event) (AuditEvent, error) {
event.AdmissionWebhookMutationAnnotations = map[string]string{}
}
event.AdmissionWebhookMutationAnnotations[k] = v
} else if customAnnotationsFilter != nil && customAnnotationsFilter(k, v) {
if event.CustomAuditAnnotations == nil {
event.CustomAuditAnnotations = map[string]string{}
}
event.CustomAuditAnnotations[k] = v
}
}
return event, nil
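
The new filtered variant lets a test assert on audit annotations other than the pod-security and admission-webhook ones. A minimal sketch of a caller, assuming these helpers are the ones in k8s.io/kubernetes/test/utils and that the expected events are built elsewhere; the annotation prefix is purely illustrative:

package audit_test // hypothetical caller

import (
	"os"
	"strings"

	auditv1 "k8s.io/apiserver/pkg/apis/audit/v1"
	utils "k8s.io/kubernetes/test/utils"
)

func checkCustomAnnotations(logPath string, expected []utils.AuditEvent) error {
	stream, err := os.Open(logPath)
	if err != nil {
		return err
	}
	defer stream.Close()

	// Keep only annotations set by our (hypothetical) controller; everything
	// else is ignored, so CustomAuditAnnotations stays small.
	filter := func(key, _ string) bool {
		return strings.HasPrefix(key, "example.com/")
	}
	missingReport, err := utils.CheckAuditLinesFiltered(stream, expected, auditv1.SchemeGroupVersion, filter)
	if err != nil {
		return err
	}
	_ = missingReport // inspect the MissingEventsReport as needed
	return nil
}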

vendor/k8s.io/kubernetes/test/utils/format/format.go (new vendored file, 80 lines)

@ -0,0 +1,80 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package format is an extension of Gomega's format package which
// improves printing of objects that can be serialized well as YAML,
// like the structs in the Kubernetes API.
//
// Just importing it is enough to activate this special YAML support
// in Gomega.
package format
import (
"reflect"
"strings"
"github.com/onsi/gomega/format"
"sigs.k8s.io/yaml"
)
func init() {
format.RegisterCustomFormatter(handleYAML)
}
// Object makes Gomega's [format.Object] available without having to import that
// package.
func Object(object interface{}, indentation uint) string {
return format.Object(object, indentation)
}
// handleYAML formats all values as YAML where the result
// is likely to look better as YAML:
// - pointer to struct or struct where all fields
// have `json` tags
// - slices containing such a value
// - maps where the key or value are such a value
func handleYAML(object interface{}) (string, bool) {
value := reflect.ValueOf(object)
if !useYAML(value.Type()) {
return "", false
}
y, err := yaml.Marshal(object)
if err != nil {
return "", false
}
return "\n" + strings.TrimSpace(string(y)), true
}
func useYAML(t reflect.Type) bool {
switch t.Kind() {
case reflect.Pointer, reflect.Slice, reflect.Array:
return useYAML(t.Elem())
case reflect.Map:
return useYAML(t.Key()) || useYAML(t.Elem())
case reflect.Struct:
// All fields must have a `json` tag.
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
if _, ok := field.Tag.Lookup("json"); !ok {
return false
}
}
return true
default:
return false
}
}
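
Since the whole point of this new package is its import side effect, a consumer only needs to import it alongside Gomega. A minimal sketch, assuming the vendored import path shown in the file header above; the Pod value is just a convenient struct whose fields all carry json tags:

package format_test

import (
	"testing"

	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"

	// Importing the package runs its init() and registers the YAML formatter.
	kformat "k8s.io/kubernetes/test/utils/format"
)

func TestYAMLDump(t *testing.T) {
	g := gomega.NewWithT(t)
	pod := &v1.Pod{}
	pod.Name = "demo"

	// Object (and Gomega failure messages) now render the Pod as YAML because
	// every field of v1.Pod has a json tag.
	t.Log(kformat.Object(pod, 1))
	g.Expect(pod.Name).To(gomega.Equal("demo"))
}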


@ -68,12 +68,16 @@ func (i *Config) SetVersion(version string) {
i.version = version
}
func initReg() RegistryList {
func Init(repoList string) {
registry, imageConfigs, originalImageConfigs = readRepoList(repoList)
}
func readRepoList(repoList string) (RegistryList, map[ImageID]Config, map[ImageID]Config) {
registry := initRegistry
repoList := os.Getenv("KUBE_TEST_REPO_LIST")
if repoList == "" {
return registry
imageConfigs, originalImageConfigs := initImageConfigs(registry)
return registry, imageConfigs, originalImageConfigs
}
var fileContent []byte
@ -94,9 +98,13 @@ func initReg() RegistryList {
err = yaml.Unmarshal(fileContent, &registry)
if err != nil {
panic(fmt.Errorf("Error unmarshalling '%v' YAML file: %v", repoList, err))
panic(fmt.Errorf("error unmarshalling '%v' YAML file: %v", repoList, err))
}
return registry
imageConfigs, originalImageConfigs := initImageConfigs(registry)
return registry, imageConfigs, originalImageConfigs
}
// Essentially curl url | writer
@ -135,10 +143,7 @@ var (
CloudProviderGcpRegistry: "registry.k8s.io/cloud-provider-gcp",
}
registry = initReg()
// Preconfigured image configs
imageConfigs, originalImageConfigs = initImageConfigs(registry)
registry, imageConfigs, originalImageConfigs = readRepoList(os.Getenv("KUBE_TEST_REPO_LIST"))
)
type ImageID int
@ -240,8 +245,8 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config
configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.29-4"}
configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"}
configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.2"}
configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.1.2"}
configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.6-0"}
configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.2.3"}
configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.7-0"}
configs[GlusterDynamicProvisioner] = Config{list.PromoterE2eRegistry, "glusterdynamic-provisioner", "v1.3"}
configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"}
configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"}
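
After this refactor the image tables are still populated from KUBE_TEST_REPO_LIST when the package is loaded, but the new Init function also lets a test binary re-read a repo list explicitly. A minimal sketch, assuming the conventional import path and package name (imageutils) and that GetE2EImage behaves as before:

package main

import (
	"fmt"
	"os"

	imageutils "k8s.io/kubernetes/test/utils/image"
)

func main() {
	// Re-read the repo list; an empty value keeps the built-in registries.
	imageutils.Init(os.Getenv("KUBE_TEST_REPO_LIST"))

	// Resolve one of the preconfigured images, e.g. the etcd image that this
	// bump moves to 3.5.7-0.
	fmt.Println(imageutils.GetE2EImage(imageutils.Etcd))
}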


@ -49,6 +49,7 @@ import (
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
api "k8s.io/kubernetes/pkg/apis/core"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/utils/pointer"
"k8s.io/klog/v2"
)
@ -65,17 +66,17 @@ func removePtr(replicas *int32) int32 {
return *replicas
}
func WaitUntilPodIsScheduled(c clientset.Interface, name, namespace string, timeout time.Duration) (*v1.Pod, error) {
func WaitUntilPodIsScheduled(ctx context.Context, c clientset.Interface, name, namespace string, timeout time.Duration) (*v1.Pod, error) {
// Wait until it's scheduled
p, err := c.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{ResourceVersion: "0"})
p, err := c.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{ResourceVersion: "0"})
if err == nil && p.Spec.NodeName != "" {
return p, nil
}
pollingPeriod := 200 * time.Millisecond
startTime := time.Now()
for startTime.Add(timeout).After(time.Now()) {
for startTime.Add(timeout).After(time.Now()) && ctx.Err() == nil {
time.Sleep(pollingPeriod)
p, err := c.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{ResourceVersion: "0"})
p, err := c.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{ResourceVersion: "0"})
if err == nil && p.Spec.NodeName != "" {
return p, nil
}
@ -83,13 +84,13 @@ func WaitUntilPodIsScheduled(c clientset.Interface, name, namespace string, time
return nil, fmt.Errorf("timed out after %v when waiting for pod %v/%v to start", timeout, namespace, name)
}
func RunPodAndGetNodeName(c clientset.Interface, pod *v1.Pod, timeout time.Duration) (string, error) {
func RunPodAndGetNodeName(ctx context.Context, c clientset.Interface, pod *v1.Pod, timeout time.Duration) (string, error) {
name := pod.Name
namespace := pod.Namespace
if err := CreatePodWithRetries(c, namespace, pod); err != nil {
return "", err
}
p, err := WaitUntilPodIsScheduled(c, name, namespace, timeout)
p, err := WaitUntilPodIsScheduled(ctx, c, name, namespace, timeout)
if err != nil {
return "", err
}
@ -173,8 +174,8 @@ type RCConfig struct {
LogFunc func(fmt string, args ...interface{})
// If set those functions will be used to gather data from Nodes - in integration tests where no
// kubelets are running those variables should be nil.
NodeDumpFunc func(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{}))
ContainerDumpFunc func(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{}))
NodeDumpFunc func(ctx context.Context, c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{}))
ContainerDumpFunc func(ctx context.Context, c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{}))
// Names of the secrets and configmaps to mount.
SecretNames []string
@ -288,16 +289,16 @@ func Diff(oldPods []*v1.Pod, curPods []*v1.Pod) PodDiff {
// and will wait for all pods it spawns to become "Running".
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunDeployment(config DeploymentConfig) error {
func RunDeployment(ctx context.Context, config DeploymentConfig) error {
err := config.create()
if err != nil {
return err
}
return config.start()
return config.start(ctx)
}
func (config *DeploymentConfig) Run() error {
return RunDeployment(*config)
func (config *DeploymentConfig) Run(ctx context.Context) error {
return RunDeployment(ctx, *config)
}
func (config *DeploymentConfig) GetKind() schema.GroupKind {
@ -318,7 +319,7 @@ func (config *DeploymentConfig) create() error {
Name: config.Name,
},
Spec: apps.DeploymentSpec{
Replicas: func(i int) *int32 { x := int32(i); return &x }(config.Replicas),
Replicas: pointer.Int32(int32(config.Replicas)),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"name": config.Name,
@ -374,16 +375,16 @@ func (config *DeploymentConfig) create() error {
// and waits until all the pods it launches to reach the "Running" state.
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunReplicaSet(config ReplicaSetConfig) error {
func RunReplicaSet(ctx context.Context, config ReplicaSetConfig) error {
err := config.create()
if err != nil {
return err
}
return config.start()
return config.start(ctx)
}
func (config *ReplicaSetConfig) Run() error {
return RunReplicaSet(*config)
func (config *ReplicaSetConfig) Run(ctx context.Context) error {
return RunReplicaSet(ctx, *config)
}
func (config *ReplicaSetConfig) GetKind() schema.GroupKind {
@ -404,7 +405,7 @@ func (config *ReplicaSetConfig) create() error {
Name: config.Name,
},
Spec: apps.ReplicaSetSpec{
Replicas: func(i int) *int32 { x := int32(i); return &x }(config.Replicas),
Replicas: pointer.Int32(int32(config.Replicas)),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"name": config.Name,
@ -456,16 +457,16 @@ func (config *ReplicaSetConfig) create() error {
// and will wait for all pods it spawns to become "Running".
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunJob(config JobConfig) error {
func RunJob(ctx context.Context, config JobConfig) error {
err := config.create()
if err != nil {
return err
}
return config.start()
return config.start(ctx)
}
func (config *JobConfig) Run() error {
return RunJob(*config)
func (config *JobConfig) Run(ctx context.Context) error {
return RunJob(ctx, *config)
}
func (config *JobConfig) GetKind() schema.GroupKind {
@ -486,8 +487,8 @@ func (config *JobConfig) create() error {
Name: config.Name,
},
Spec: batch.JobSpec{
Parallelism: func(i int) *int32 { x := int32(i); return &x }(config.Replicas),
Completions: func(i int) *int32 { x := int32(i); return &x }(config.Replicas),
Parallelism: pointer.Int32(int32(config.Replicas)),
Completions: pointer.Int32(int32(config.Replicas)),
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"name": config.Name},
@ -530,16 +531,16 @@ func (config *JobConfig) create() error {
// and will wait for all pods it spawns to become "Running".
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunRC(config RCConfig) error {
func RunRC(ctx context.Context, config RCConfig) error {
err := config.create()
if err != nil {
return err
}
return config.start()
return config.start(ctx)
}
func (config *RCConfig) Run() error {
return RunRC(*config)
func (config *RCConfig) Run(ctx context.Context) error {
return RunRC(ctx, *config)
}
func (config *RCConfig) GetName() string {
@ -598,7 +599,7 @@ func (config *RCConfig) create() error {
Name: config.Name,
},
Spec: v1.ReplicationControllerSpec{
Replicas: func(i int) *int32 { x := int32(i); return &x }(config.Replicas),
Replicas: pointer.Int32(int32(config.Replicas)),
Selector: map[string]string{
"name": config.Name,
},
@ -776,7 +777,7 @@ func ComputeRCStartupStatus(pods []*v1.Pod, expected int) RCStartupStatus {
return startupStatus
}
func (config *RCConfig) start() error {
func (config *RCConfig) start(ctx context.Context) error {
// Don't force tests to fail if they don't care about containers restarting.
var maxContainerFailures int
if config.MaxContainerFailures == nil {
@ -824,11 +825,11 @@ func (config *RCConfig) start() error {
if startupStatus.FailedContainers > maxContainerFailures {
if config.NodeDumpFunc != nil {
config.NodeDumpFunc(config.Client, startupStatus.ContainerRestartNodes.List(), config.RCConfigLog)
config.NodeDumpFunc(ctx, config.Client, startupStatus.ContainerRestartNodes.List(), config.RCConfigLog)
}
if config.ContainerDumpFunc != nil {
// Get the logs from the failed containers to help diagnose what caused them to fail
config.ContainerDumpFunc(config.Client, config.Namespace, config.RCConfigLog)
config.ContainerDumpFunc(ctx, config.Client, config.Namespace, config.RCConfigLog)
}
return fmt.Errorf("%d containers failed which is more than allowed %d", startupStatus.FailedContainers, maxContainerFailures)
}
@ -858,7 +859,7 @@ func (config *RCConfig) start() error {
if oldRunning != config.Replicas {
// List only pods from a given replication controller.
options := metav1.ListOptions{LabelSelector: label.String()}
if pods, err := config.Client.CoreV1().Pods(config.Namespace).List(context.TODO(), options); err == nil {
if pods, err := config.Client.CoreV1().Pods(config.Namespace).List(ctx, options); err == nil {
for _, pod := range pods.Items {
config.RCConfigLog("Pod %s\t%s\t%s\t%s", pod.Name, pod.Spec.NodeName, pod.Status.Phase, pod.DeletionTimestamp)
}
@ -946,8 +947,8 @@ type CountToStrategy struct {
}
type TestNodePreparer interface {
PrepareNodes(nextNodeIndex int) error
CleanupNodes() error
PrepareNodes(ctx context.Context, nextNodeIndex int) error
CleanupNodes(ctx context.Context) error
}
type PrepareNodeStrategy interface {
@ -955,12 +956,12 @@ type PrepareNodeStrategy interface {
PreparePatch(node *v1.Node) []byte
// Create or modify any objects that depend on the node before the test starts.
// Caller will re-try when http.StatusConflict error is returned.
PrepareDependentObjects(node *v1.Node, client clientset.Interface) error
PrepareDependentObjects(ctx context.Context, node *v1.Node, client clientset.Interface) error
// Clean up any node modifications after the test finishes.
CleanupNode(node *v1.Node) *v1.Node
CleanupNode(ctx context.Context, node *v1.Node) *v1.Node
// Clean up any objects that depend on the node after the test finishes.
// Caller will re-try when http.StatusConflict error is returned.
CleanupDependentObjects(nodeName string, client clientset.Interface) error
CleanupDependentObjects(ctx context.Context, nodeName string, client clientset.Interface) error
}
type TrivialNodePrepareStrategy struct{}
@ -971,16 +972,16 @@ func (*TrivialNodePrepareStrategy) PreparePatch(*v1.Node) []byte {
return []byte{}
}
func (*TrivialNodePrepareStrategy) CleanupNode(node *v1.Node) *v1.Node {
func (*TrivialNodePrepareStrategy) CleanupNode(ctx context.Context, node *v1.Node) *v1.Node {
nodeCopy := *node
return &nodeCopy
}
func (*TrivialNodePrepareStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error {
func (*TrivialNodePrepareStrategy) PrepareDependentObjects(ctx context.Context, node *v1.Node, client clientset.Interface) error {
return nil
}
func (*TrivialNodePrepareStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error {
func (*TrivialNodePrepareStrategy) CleanupDependentObjects(ctx context.Context, nodeName string, client clientset.Interface) error {
return nil
}
@ -1009,7 +1010,7 @@ func (s *LabelNodePrepareStrategy) PreparePatch(*v1.Node) []byte {
return []byte(patch)
}
func (s *LabelNodePrepareStrategy) CleanupNode(node *v1.Node) *v1.Node {
func (s *LabelNodePrepareStrategy) CleanupNode(ctx context.Context, node *v1.Node) *v1.Node {
nodeCopy := node.DeepCopy()
if node.Labels != nil && len(node.Labels[s.LabelKey]) != 0 {
delete(nodeCopy.Labels, s.LabelKey)
@ -1017,11 +1018,11 @@ func (s *LabelNodePrepareStrategy) CleanupNode(node *v1.Node) *v1.Node {
return nodeCopy
}
func (*LabelNodePrepareStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error {
func (*LabelNodePrepareStrategy) PrepareDependentObjects(ctx context.Context, node *v1.Node, client clientset.Interface) error {
return nil
}
func (*LabelNodePrepareStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error {
func (*LabelNodePrepareStrategy) CleanupDependentObjects(ctx context.Context, nodeName string, client clientset.Interface) error {
return nil
}
@ -1069,7 +1070,7 @@ func (s *NodeAllocatableStrategy) PreparePatch(node *v1.Node) []byte {
return patch
}
func (s *NodeAllocatableStrategy) CleanupNode(node *v1.Node) *v1.Node {
func (s *NodeAllocatableStrategy) CleanupNode(ctx context.Context, node *v1.Node) *v1.Node {
nodeCopy := node.DeepCopy()
for name := range s.NodeAllocatable {
delete(nodeCopy.Status.Allocatable, name)
@ -1077,7 +1078,7 @@ func (s *NodeAllocatableStrategy) CleanupNode(node *v1.Node) *v1.Node {
return nodeCopy
}
func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientset.Interface) error {
func (s *NodeAllocatableStrategy) createCSINode(ctx context.Context, nodeName string, client clientset.Interface) error {
csiNode := &storagev1.CSINode{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
@ -1099,7 +1100,7 @@ func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientse
csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, d)
}
_, err := client.StorageV1().CSINodes().Create(context.TODO(), csiNode, metav1.CreateOptions{})
_, err := client.StorageV1().CSINodes().Create(ctx, csiNode, metav1.CreateOptions{})
if apierrors.IsAlreadyExists(err) {
// Something created CSINode instance after we checked it did not exist.
// Make the caller to re-try PrepareDependentObjects by returning Conflict error
@ -1108,7 +1109,7 @@ func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientse
return err
}
func (s *NodeAllocatableStrategy) updateCSINode(csiNode *storagev1.CSINode, client clientset.Interface) error {
func (s *NodeAllocatableStrategy) updateCSINode(ctx context.Context, csiNode *storagev1.CSINode, client clientset.Interface) error {
for driverName, allocatable := range s.CsiNodeAllocatable {
found := false
for i, driver := range csiNode.Spec.Drivers {
@ -1129,23 +1130,23 @@ func (s *NodeAllocatableStrategy) updateCSINode(csiNode *storagev1.CSINode, clie
}
csiNode.Annotations[v1.MigratedPluginsAnnotationKey] = strings.Join(s.MigratedPlugins, ",")
_, err := client.StorageV1().CSINodes().Update(context.TODO(), csiNode, metav1.UpdateOptions{})
_, err := client.StorageV1().CSINodes().Update(ctx, csiNode, metav1.UpdateOptions{})
return err
}
func (s *NodeAllocatableStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error {
csiNode, err := client.StorageV1().CSINodes().Get(context.TODO(), node.Name, metav1.GetOptions{})
func (s *NodeAllocatableStrategy) PrepareDependentObjects(ctx context.Context, node *v1.Node, client clientset.Interface) error {
csiNode, err := client.StorageV1().CSINodes().Get(ctx, node.Name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return s.createCSINode(node.Name, client)
return s.createCSINode(ctx, node.Name, client)
}
return err
}
return s.updateCSINode(csiNode, client)
return s.updateCSINode(ctx, csiNode, client)
}
func (s *NodeAllocatableStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error {
csiNode, err := client.StorageV1().CSINodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
func (s *NodeAllocatableStrategy) CleanupDependentObjects(ctx context.Context, nodeName string, client clientset.Interface) error {
csiNode, err := client.StorageV1().CSINodes().Get(ctx, nodeName, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return nil
@ -1160,7 +1161,7 @@ func (s *NodeAllocatableStrategy) CleanupDependentObjects(nodeName string, clien
}
}
}
return s.updateCSINode(csiNode, client)
return s.updateCSINode(ctx, csiNode, client)
}
// UniqueNodeLabelStrategy sets a unique label for each node.
@ -1182,7 +1183,7 @@ func (s *UniqueNodeLabelStrategy) PreparePatch(*v1.Node) []byte {
return []byte(patch)
}
func (s *UniqueNodeLabelStrategy) CleanupNode(node *v1.Node) *v1.Node {
func (s *UniqueNodeLabelStrategy) CleanupNode(ctx context.Context, node *v1.Node) *v1.Node {
nodeCopy := node.DeepCopy()
if node.Labels != nil && len(node.Labels[s.LabelKey]) != 0 {
delete(nodeCopy.Labels, s.LabelKey)
@ -1190,22 +1191,22 @@ func (s *UniqueNodeLabelStrategy) CleanupNode(node *v1.Node) *v1.Node {
return nodeCopy
}
func (*UniqueNodeLabelStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error {
func (*UniqueNodeLabelStrategy) PrepareDependentObjects(ctx context.Context, node *v1.Node, client clientset.Interface) error {
return nil
}
func (*UniqueNodeLabelStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error {
func (*UniqueNodeLabelStrategy) CleanupDependentObjects(ctx context.Context, nodeName string, client clientset.Interface) error {
return nil
}
func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNodeStrategy) error {
func DoPrepareNode(ctx context.Context, client clientset.Interface, node *v1.Node, strategy PrepareNodeStrategy) error {
var err error
patch := strategy.PreparePatch(node)
if len(patch) == 0 {
return nil
}
for attempt := 0; attempt < retries; attempt++ {
if _, err = client.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.MergePatchType, []byte(patch), metav1.PatchOptions{}); err == nil {
if _, err = client.CoreV1().Nodes().Patch(ctx, node.Name, types.MergePatchType, []byte(patch), metav1.PatchOptions{}); err == nil {
break
}
if !apierrors.IsConflict(err) {
@ -1218,7 +1219,7 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo
}
for attempt := 0; attempt < retries; attempt++ {
if err = strategy.PrepareDependentObjects(node, client); err == nil {
if err = strategy.PrepareDependentObjects(ctx, node, client); err == nil {
break
}
if !apierrors.IsConflict(err) {
@ -1232,19 +1233,19 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo
return nil
}
func DoCleanupNode(client clientset.Interface, nodeName string, strategy PrepareNodeStrategy) error {
func DoCleanupNode(ctx context.Context, client clientset.Interface, nodeName string, strategy PrepareNodeStrategy) error {
var err error
for attempt := 0; attempt < retries; attempt++ {
var node *v1.Node
node, err = client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
node, err = client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("skipping cleanup of Node: failed to get Node %v: %v", nodeName, err)
}
updatedNode := strategy.CleanupNode(node)
updatedNode := strategy.CleanupNode(ctx, node)
if apiequality.Semantic.DeepEqual(node, updatedNode) {
return nil
}
if _, err = client.CoreV1().Nodes().Update(context.TODO(), updatedNode, metav1.UpdateOptions{}); err == nil {
if _, err = client.CoreV1().Nodes().Update(ctx, updatedNode, metav1.UpdateOptions{}); err == nil {
break
}
if !apierrors.IsConflict(err) {
@ -1257,7 +1258,7 @@ func DoCleanupNode(client clientset.Interface, nodeName string, strategy Prepare
}
for attempt := 0; attempt < retries; attempt++ {
err = strategy.CleanupDependentObjects(nodeName, client)
err = strategy.CleanupDependentObjects(ctx, nodeName, client)
if err == nil {
break
}
@ -1272,7 +1273,7 @@ func DoCleanupNode(client clientset.Interface, nodeName string, strategy Prepare
return nil
}
type TestPodCreateStrategy func(client clientset.Interface, namespace string, podCount int) error
type TestPodCreateStrategy func(ctx context.Context, client clientset.Interface, namespace string, podCount int) error
type CountToPodStrategy struct {
Count int
@ -1304,10 +1305,10 @@ func NewTestPodCreator(client clientset.Interface, config *TestPodCreatorConfig)
}
}
func (c *TestPodCreator) CreatePods() error {
func (c *TestPodCreator) CreatePods(ctx context.Context) error {
for ns, v := range *(c.Config) {
for _, countToStrategy := range v {
if err := countToStrategy.Strategy(c.Client, ns, countToStrategy.Count); err != nil {
if err := countToStrategy.Strategy(ctx, c.Client, ns, countToStrategy.Count); err != nil {
return err
}
}
@ -1342,11 +1343,14 @@ func makeCreatePod(client clientset.Interface, namespace string, podTemplate *v1
return nil
}
func CreatePod(client clientset.Interface, namespace string, podCount int, podTemplate *v1.Pod) error {
func CreatePod(ctx context.Context, client clientset.Interface, namespace string, podCount int, podTemplate *v1.Pod) error {
var createError error
lock := sync.Mutex{}
createPodFunc := func(i int) {
if err := makeCreatePod(client, namespace, podTemplate); err != nil {
// client-go writes into the object that is passed to Create,
// causing a data race unless we create a new copy for each
// parallel call.
if err := makeCreatePod(client, namespace, podTemplate.DeepCopy()); err != nil {
lock.Lock()
defer lock.Unlock()
createError = err
@ -1354,14 +1358,14 @@ func CreatePod(client clientset.Interface, namespace string, podCount int, podTe
}
if podCount < 30 {
workqueue.ParallelizeUntil(context.TODO(), podCount, podCount, createPodFunc)
workqueue.ParallelizeUntil(ctx, podCount, podCount, createPodFunc)
} else {
workqueue.ParallelizeUntil(context.TODO(), 30, podCount, createPodFunc)
workqueue.ParallelizeUntil(ctx, 30, podCount, createPodFunc)
}
return createError
}
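// Illustrative sketch only, not part of the diff above: why CreatePod now
// hands podTemplate.DeepCopy() to each worker. client-go writes into the
// object passed to Create, so sharing one template across the
// workqueue.ParallelizeUntil workers is a data race. The package name and
// the simplified inline Create call (standing in for makeCreatePod) are
// assumptions for the example.
package example

import (
	"context"
	"sync"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/workqueue"
)

func createPods(ctx context.Context, client clientset.Interface, namespace string, podCount int, podTemplate *v1.Pod) error {
	var createError error
	var lock sync.Mutex
	createPodFunc := func(i int) {
		// Each worker gets its own copy, so client-go's writes into the
		// object cannot race with the other goroutines.
		pod := podTemplate.DeepCopy()
		if _, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{}); err != nil {
			lock.Lock()
			defer lock.Unlock()
			createError = err
		}
	}
	// Cap the worker count at 30 so a large podCount does not translate
	// into an unbounded number of concurrent API calls.
	workers := podCount
	if workers > 30 {
		workers = 30
	}
	workqueue.ParallelizeUntil(ctx, workers, podCount, createPodFunc)
	return createError
}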
func CreatePodWithPersistentVolume(client clientset.Interface, namespace string, claimTemplate *v1.PersistentVolumeClaim, factory volumeFactory, podTemplate *v1.Pod, count int, bindVolume bool) error {
func CreatePodWithPersistentVolume(ctx context.Context, client clientset.Interface, namespace string, claimTemplate *v1.PersistentVolumeClaim, factory volumeFactory, podTemplate *v1.Pod, count int, bindVolume bool) error {
var createError error
lock := sync.Mutex{}
createPodFunc := func(i int) {
@ -1400,7 +1404,7 @@ func CreatePodWithPersistentVolume(client clientset.Interface, namespace string,
}
// We need to update statuses separately, as creating pv/pvc resets status to the default one.
if _, err := client.CoreV1().PersistentVolumeClaims(namespace).UpdateStatus(context.TODO(), pvc, metav1.UpdateOptions{}); err != nil {
if _, err := client.CoreV1().PersistentVolumeClaims(namespace).UpdateStatus(ctx, pvc, metav1.UpdateOptions{}); err != nil {
lock.Lock()
defer lock.Unlock()
createError = fmt.Errorf("error updating PVC status: %s", err)
@ -1414,7 +1418,7 @@ func CreatePodWithPersistentVolume(client clientset.Interface, namespace string,
return
}
// We need to update statuses separately, as creating pv/pvc resets status to the default one.
if _, err := client.CoreV1().PersistentVolumes().UpdateStatus(context.TODO(), pv, metav1.UpdateOptions{}); err != nil {
if _, err := client.CoreV1().PersistentVolumes().UpdateStatus(ctx, pv, metav1.UpdateOptions{}); err != nil {
lock.Lock()
defer lock.Unlock()
createError = fmt.Errorf("error updating PV status: %s", err)
@ -1442,9 +1446,9 @@ func CreatePodWithPersistentVolume(client clientset.Interface, namespace string,
}
if count < 30 {
workqueue.ParallelizeUntil(context.TODO(), count, count, createPodFunc)
workqueue.ParallelizeUntil(ctx, count, count, createPodFunc)
} else {
workqueue.ParallelizeUntil(context.TODO(), 30, count, createPodFunc)
workqueue.ParallelizeUntil(ctx, 30, count, createPodFunc)
}
return createError
}
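// Illustrative sketch only, not part of the diff above: the "update statuses
// separately" comments in CreatePodWithPersistentVolume. Any status set on a
// PV/PVC object passed to Create is reset to the default by the API server,
// so the desired phase has to be written with a follow-up UpdateStatus call
// against the status subresource. The package name, function name, and the
// ClaimBound phase are assumptions for the example.
package example

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

func createBoundPVC(ctx context.Context, client clientset.Interface, namespace string, pvc *v1.PersistentVolumeClaim) error {
	created, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, pvc, metav1.CreateOptions{})
	if err != nil {
		return fmt.Errorf("error creating PVC: %w", err)
	}
	// Creating the claim resets .status to Pending, so the desired phase is
	// written in a second request.
	created.Status.Phase = v1.ClaimBound
	if _, err := client.CoreV1().PersistentVolumeClaims(namespace).UpdateStatus(ctx, created, metav1.UpdateOptions{}); err != nil {
		return fmt.Errorf("error updating PVC status: %w", err)
	}
	return nil
}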
@ -1455,7 +1459,7 @@ func createController(client clientset.Interface, controllerName, namespace stri
Name: controllerName,
},
Spec: v1.ReplicationControllerSpec{
Replicas: func(i int) *int32 { x := int32(i); return &x }(podCount),
Replicas: pointer.Int32(int32(podCount)),
Selector: map[string]string{"name": controllerName},
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
@ -1472,8 +1476,8 @@ func createController(client clientset.Interface, controllerName, namespace stri
}
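// Illustrative sketch only, not part of the diff above: the Replicas change
// in createController swaps an inline closure for the helper from
// k8s.io/utils/pointer, the usual way to obtain a *int32 for API fields. The
// package name and the selector value are assumptions for the example.
package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/utils/pointer"
)

func replicationControllerSpec(podCount int) v1.ReplicationControllerSpec {
	return v1.ReplicationControllerSpec{
		// Equivalent to: func(i int) *int32 { x := int32(i); return &x }(podCount)
		Replicas: pointer.Int32(int32(podCount)),
		Selector: map[string]string{"name": "example"},
	}
}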
func NewCustomCreatePodStrategy(podTemplate *v1.Pod) TestPodCreateStrategy {
return func(client clientset.Interface, namespace string, podCount int) error {
return CreatePod(client, namespace, podCount, podTemplate)
return func(ctx context.Context, client clientset.Interface, namespace string, podCount int) error {
return CreatePod(ctx, client, namespace, podCount, podTemplate)
}
}
@ -1481,8 +1485,8 @@ func NewCustomCreatePodStrategy(podTemplate *v1.Pod) TestPodCreateStrategy {
type volumeFactory func(uniqueID int) *v1.PersistentVolume
func NewCreatePodWithPersistentVolumeStrategy(claimTemplate *v1.PersistentVolumeClaim, factory volumeFactory, podTemplate *v1.Pod) TestPodCreateStrategy {
return func(client clientset.Interface, namespace string, podCount int) error {
return CreatePodWithPersistentVolume(client, namespace, claimTemplate, factory, podTemplate, podCount, true /* bindVolume */)
return func(ctx context.Context, client clientset.Interface, namespace string, podCount int) error {
return CreatePodWithPersistentVolume(ctx, client, namespace, claimTemplate, factory, podTemplate, podCount, true /* bindVolume */)
}
}
@ -1501,7 +1505,7 @@ func makeUnboundPersistentVolumeClaim(storageClass string) *v1.PersistentVolumeC
}
func NewCreatePodWithPersistentVolumeWithFirstConsumerStrategy(factory volumeFactory, podTemplate *v1.Pod) TestPodCreateStrategy {
return func(client clientset.Interface, namespace string, podCount int) error {
return func(ctx context.Context, client clientset.Interface, namespace string, podCount int) error {
volumeBindingMode := storagev1.VolumeBindingWaitForFirstConsumer
storageClass := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
@ -1522,7 +1526,7 @@ func NewCreatePodWithPersistentVolumeWithFirstConsumerStrategy(factory volumeFac
return pv
}
return CreatePodWithPersistentVolume(client, namespace, claimTemplate, factoryWithStorageClass, podTemplate, podCount, false /* bindVolume */)
return CreatePodWithPersistentVolume(ctx, client, namespace, claimTemplate, factoryWithStorageClass, podTemplate, podCount, false /* bindVolume */)
}
}
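// Illustrative sketch only, not part of the diff above: the kind of
// StorageClass the WaitForFirstConsumer strategy builds. With this binding
// mode, volume binding is deferred until a pod that uses the claim is
// scheduled, which is why the strategy passes bindVolume=false. The package
// name, class name, and provisioner are assumptions for the example.
package example

import (
	"context"
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

func createDeferredBindingClass(ctx context.Context, client clientset.Interface) error {
	mode := storagev1.VolumeBindingWaitForFirstConsumer
	sc := &storagev1.StorageClass{
		ObjectMeta:        metav1.ObjectMeta{Name: "deferred-binding"},
		Provisioner:       "kubernetes.io/no-provisioner", // assumed; pairs with pre-provisioned PVs
		VolumeBindingMode: &mode,
	}
	if _, err := client.StorageV1().StorageClasses().Create(ctx, sc, metav1.CreateOptions{}); err != nil {
		return fmt.Errorf("error creating storage class: %w", err)
	}
	return nil
}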
@ -1537,7 +1541,7 @@ func NewSimpleCreatePodStrategy() TestPodCreateStrategy {
}
func NewSimpleWithControllerCreatePodStrategy(controllerName string) TestPodCreateStrategy {
return func(client clientset.Interface, namespace string, podCount int) error {
return func(ctx context.Context, client clientset.Interface, namespace string, podCount int) error {
basePod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: controllerName + "-pod-",
@ -1548,7 +1552,7 @@ func NewSimpleWithControllerCreatePodStrategy(controllerName string) TestPodCrea
if err := createController(client, controllerName, namespace, podCount, basePod); err != nil {
return err
}
return CreatePod(client, namespace, podCount, basePod)
return CreatePod(ctx, client, namespace, podCount, basePod)
}
}
@ -1739,7 +1743,7 @@ type DaemonConfig struct {
Timeout time.Duration
}
func (config *DaemonConfig) Run() error {
func (config *DaemonConfig) Run(ctx context.Context) error {
if config.Image == "" {
config.Image = "registry.k8s.io/pause:3.9"
}
@ -1775,7 +1779,7 @@ func (config *DaemonConfig) Run() error {
var err error
for i := 0; i < retries; i++ {
// Wait for all daemons to be running
nodes, err = config.Client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"})
nodes, err = config.Client.CoreV1().Nodes().List(ctx, metav1.ListOptions{ResourceVersion: "0"})
if err == nil {
break
} else if i+1 == retries {