Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 10:33:35 +00:00
rebase: update kubernetes to 1.26.1

Update kubernetes and its dependencies to v1.26.1.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Committed by: mergify[bot]
Parent: e9e33fb851
Commit: 9c8de9471e
vendor/k8s.io/kubernetes/test/e2e/framework/.import-restrictions (generated, vendored): 2 changes
@@ -8,6 +8,7 @@ rules:
- k8s.io/kubernetes/pkg/api/v1/service
- k8s.io/kubernetes/pkg/api/pod
- k8s.io/kubernetes/pkg/api/node
- k8s.io/kubernetes/pkg/api/persistentvolumeclaim
- k8s.io/kubernetes/pkg/apis/apps
- k8s.io/kubernetes/pkg/apis/apps/validation
- k8s.io/kubernetes/pkg/apis/autoscaling
@@ -168,7 +169,6 @@ rules:
- k8s.io/kubernetes/pkg/proxy/ipvs
- k8s.io/kubernetes/pkg/proxy/metaproxier
- k8s.io/kubernetes/pkg/proxy/metrics
- k8s.io/kubernetes/pkg/proxy/userspace
- k8s.io/kubernetes/pkg/proxy/util
- k8s.io/kubernetes/pkg/registry/core/service/allocator
- k8s.io/kubernetes/pkg/registry/core/service/portallocator
vendor/k8s.io/kubernetes/test/e2e/framework/README.md (generated, vendored, new file): 88 lines
@@ -0,0 +1,88 @@
# Overview

The Kubernetes E2E framework simplifies writing Ginkgo test suites. Its main
use is for the test suites in the Kubernetes repository itself:
- test/e2e: runs as a client for a Kubernetes cluster. The e2e.test binary is
  used for conformance testing.
- test/e2e_node: runs on the same node as a kubelet instance. Used for testing
  kubelet.
- test/e2e_kubeadm: test suite for kubeadm.

Usage of the framework outside of Kubernetes is possible, but not encouraged.
Downstream users have to be prepared to deal with API changes.
# Code Organization

The core framework is the `k8s.io/kubernetes/test/e2e/framework` package. It
contains functionality that all E2E suites are expected to need:
- connecting to the apiserver
- managing per-test namespaces
- logging (`Logf`)
- failure handling (`Fail`, `Failf`)
- writing concise JUnit test results

It also contains a `TestContext` with settings that can be controlled via
command line flags. For historic reasons, this also contains settings for
individual tests or packages that are not part of the core framework.

Optional functionality is placed in sub packages like
`test/e2e/framework/pod`. The core framework does not depend on those. Sub
packages may depend on the core framework.

The advantages of splitting the code like this are:
- leaner go doc packages by grouping related functions together
- not forcing all E2E suites to import all functionality
- avoiding import cycles
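As a rough illustration of this layout (a sketch, not code taken from the repository; the package name and spec text are made up), a test typically imports the core framework plus only the sub packages it needs, for example the pod helpers:

```
package example // hypothetical test package

import (
    "github.com/onsi/ginkgo/v2"

    "k8s.io/kubernetes/test/e2e/framework"            // core framework
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod" // optional sub package
)

var _ = ginkgo.Describe("example suite", func() {
    f := framework.NewDefaultFramework("example")

    ginkgo.It("uses a sub package helper", func() {
        // The core framework supplies the client and the per-test namespace;
        // the pod sub package adds pod-specific helpers on top of it.
        e2epod.DumpAllPodInfoForNamespace(f.ClientSet, f.Namespace.Name, framework.TestContext.ReportDir)
    })
})
```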
# Execution Flow

When a test suite gets invoked, the top-level `Describe` calls register the
callbacks that define individual tests, but do not invoke them yet. After
that init phase, command line flags are parsed and the `Describe` callbacks are
invoked. Those then define the actual tests for the test suite. Command line
flags can be used to influence the test definitions.

Now `Context/BeforeEach/AfterEach/It` define code that will be called later
when executing a specific test. During this setup phase, `f :=
framework.NewDefaultFramework("some tests")` creates a `Framework` instance for
one or more tests. `NewDefaultFramework` initializes that instance anew for
each test with a `BeforeEach` callback. Starting with Kubernetes 1.26, that
instance gets cleaned up after all other code for a test has been invoked, so
the following code is correct:

```
f := framework.NewDefaultFramework("some tests")

ginkgo.AfterEach(func() {
    // Do something with f.ClientSet.
})

ginkgo.It("test something", func() {
    // The actual test.
})
```
Optional functionality can be injected into each test by adding a callback to
`NewFrameworkExtensions` in an init function. `NewDefaultFramework` will invoke
those callbacks as if the corresponding code had been added to each test like this:

```
f := framework.NewDefaultFramework("some tests")

optional.SomeCallback(f)
```

`SomeCallback` can then register additional `BeforeEach` or `AfterEach`
callbacks that use the test's `Framework` instance.
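For instance, here is a minimal sketch of how such an extension might be wired up. The `optional` package and its log messages are invented for illustration; `framework.NewFrameworkExtensions` is the real hook, and extensions may register further callbacks with `ginkgo.BeforeEach`, `ginkgo.AfterEach`, or `ginkgo.DeferCleanup`:

```
package optional // hypothetical helper package

import (
    "github.com/onsi/ginkgo/v2"

    "k8s.io/kubernetes/test/e2e/framework"
)

func init() {
    // NewDefaultFramework invokes every registered extension after it has
    // set up its own BeforeEach for the framework instance.
    framework.NewFrameworkExtensions = append(framework.NewFrameworkExtensions, SomeCallback)
}

// SomeCallback registers per-test hooks that use the test's Framework instance.
func SomeCallback(f *framework.Framework) {
    ginkgo.BeforeEach(func() {
        framework.Logf("optional set-up for %q", f.BaseName)
    })
    ginkgo.DeferCleanup(func() {
        framework.Logf("optional tear-down for %q", f.BaseName)
    })
}
```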
When a test runs, callbacks defined for it with `BeforeEach` and `AfterEach`
are called in first-in-first-out order. Since the migration to ginkgo v2 in
Kubernetes 1.25, the `AfterEach` callback is also called when there has been a
test failure. This can be used to run cleanup code for a test
reliably. However,
[`ginkgo.DeferCleanup`](https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup)
is often a better alternative. Its callbacks are executed in first-in-last-out
order.

`test/e2e/framework/internal/unittests/cleanup/cleanup.go` shows how these
different callbacks can be used and in which order they are going to run.
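To make the ordering concrete, here is a small sketch (not part of the repository; spec names and log lines are invented) contrasting `AfterEach`, which runs first-in-first-out, with `ginkgo.DeferCleanup`, which runs first-in-last-out after the `AfterEach` callbacks:

```
var _ = ginkgo.Describe("cleanup ordering", func() {
    f := framework.NewDefaultFramework("ordering")

    ginkgo.AfterEach(func() {
        // Runs right after the It body, also on failure; the framework
        // instance (f.ClientSet etc.) is still valid at this point.
        framework.Logf("AfterEach A for %s", f.UniqueName)
    })

    ginkgo.It("orders cleanup callbacks", func() {
        ginkgo.DeferCleanup(func() { framework.Logf("DeferCleanup 1") })
        ginkgo.DeferCleanup(func() { framework.Logf("DeferCleanup 2") })
        // Expected order after the test body: AfterEach A, DeferCleanup 2,
        // DeferCleanup 1, and finally the framework's own tear down.
    })
})
```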
vendor/k8s.io/kubernetes/test/e2e/framework/cleanup.go (generated, vendored, deleted): 78 lines
@@ -1,78 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
    "reflect"
    "runtime"
    "sync"
)

// CleanupActionHandle is an integer pointer type for handling cleanup action
type CleanupActionHandle *int
type cleanupFuncHandle struct {
    actionHandle CleanupActionHandle
    actionHook   func()
}

var cleanupActionsLock sync.Mutex
var cleanupHookList = []cleanupFuncHandle{}

// AddCleanupAction installs a function that will be called in the event of the
// whole test being terminated. This allows arbitrary pieces of the overall
// test to hook into SynchronizedAfterSuite().
// The hooks are called in last-in-first-out order.
func AddCleanupAction(fn func()) CleanupActionHandle {
    p := CleanupActionHandle(new(int))
    cleanupActionsLock.Lock()
    defer cleanupActionsLock.Unlock()
    c := cleanupFuncHandle{actionHandle: p, actionHook: fn}
    cleanupHookList = append([]cleanupFuncHandle{c}, cleanupHookList...)
    return p
}

// RemoveCleanupAction removes a function that was installed by
// AddCleanupAction.
func RemoveCleanupAction(p CleanupActionHandle) {
    cleanupActionsLock.Lock()
    defer cleanupActionsLock.Unlock()
    for i, item := range cleanupHookList {
        if item.actionHandle == p {
            cleanupHookList = append(cleanupHookList[:i], cleanupHookList[i+1:]...)
            break
        }
    }
}

// RunCleanupActions runs all functions installed by AddCleanupAction. It does
// not remove them (see RemoveCleanupAction) but it does run unlocked, so they
// may remove themselves.
func RunCleanupActions() {
    list := []func(){}
    func() {
        cleanupActionsLock.Lock()
        defer cleanupActionsLock.Unlock()
        for _, p := range cleanupHookList {
            list = append(list, p.actionHook)
        }
    }()
    // Run unlocked.
    for _, fn := range list {
        Logf("Running Cleanup Action: %v", runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name())
        fn()
    }
}
vendor/k8s.io/kubernetes/test/e2e/framework/debug/dump.go (generated, vendored, new file): 187 lines
@ -0,0 +1,187 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package debug
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
)
|
||||
|
||||
// EventsLister is a func that lists events.
|
||||
type EventsLister func(opts metav1.ListOptions, ns string) (*v1.EventList, error)
|
||||
|
||||
// dumpEventsInNamespace dumps events in the given namespace.
|
||||
func dumpEventsInNamespace(eventsLister EventsLister, namespace string) {
|
||||
ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", namespace))
|
||||
events, err := eventsLister(metav1.ListOptions{}, namespace)
|
||||
framework.ExpectNoError(err, "failed to list events in namespace %q", namespace)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Found %d events.", len(events.Items)))
|
||||
// Sort events by their first timestamp
|
||||
sortedEvents := events.Items
|
||||
if len(sortedEvents) > 1 {
|
||||
sort.Sort(byFirstTimestamp(sortedEvents))
|
||||
}
|
||||
for _, e := range sortedEvents {
|
||||
framework.Logf("At %v - event for %v: %v %v: %v", e.FirstTimestamp, e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
|
||||
}
|
||||
// Note that we don't wait for any Cleanup to propagate, which means
|
||||
// that if you delete a bunch of pods right before ending your test,
|
||||
// you may or may not see the killing/deletion/Cleanup events.
|
||||
}
|
||||
|
||||
// DumpAllNamespaceInfo dumps events, pods and nodes information in the given namespace.
|
||||
func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
|
||||
dumpEventsInNamespace(func(opts metav1.ListOptions, ns string) (*v1.EventList, error) {
|
||||
return c.CoreV1().Events(ns).List(context.TODO(), opts)
|
||||
}, namespace)
|
||||
|
||||
e2epod.DumpAllPodInfoForNamespace(c, namespace, framework.TestContext.ReportDir)
|
||||
|
||||
// If cluster is large, then the following logs are basically useless, because:
|
||||
// 1. it takes tens of minutes or hours to grab all of them
|
||||
// 2. there are so many of them that working with them are mostly impossible
|
||||
// So we dump them only if the cluster is relatively small.
|
||||
maxNodesForDump := framework.TestContext.MaxNodesToGather
|
||||
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
framework.Logf("unable to fetch node list: %v", err)
|
||||
return
|
||||
}
|
||||
if len(nodes.Items) <= maxNodesForDump {
|
||||
dumpAllNodeInfo(c, nodes)
|
||||
} else {
|
||||
framework.Logf("skipping dumping cluster info - cluster too large")
|
||||
}
|
||||
}
|
||||
|
||||
// byFirstTimestamp sorts a slice of events by first timestamp, using their involvedObject's name as a tie breaker.
|
||||
type byFirstTimestamp []v1.Event
|
||||
|
||||
func (o byFirstTimestamp) Len() int { return len(o) }
|
||||
func (o byFirstTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
|
||||
|
||||
func (o byFirstTimestamp) Less(i, j int) bool {
|
||||
if o[i].FirstTimestamp.Equal(&o[j].FirstTimestamp) {
|
||||
return o[i].InvolvedObject.Name < o[j].InvolvedObject.Name
|
||||
}
|
||||
return o[i].FirstTimestamp.Before(&o[j].FirstTimestamp)
|
||||
}
|
||||
|
||||
func dumpAllNodeInfo(c clientset.Interface, nodes *v1.NodeList) {
|
||||
names := make([]string, len(nodes.Items))
|
||||
for ix := range nodes.Items {
|
||||
names[ix] = nodes.Items[ix].Name
|
||||
}
|
||||
DumpNodeDebugInfo(c, names, framework.Logf)
|
||||
}
|
||||
|
||||
// DumpNodeDebugInfo dumps debug information of the given nodes.
|
||||
func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) {
|
||||
for _, n := range nodeNames {
|
||||
logFunc("\nLogging node info for node %v", n)
|
||||
node, err := c.CoreV1().Nodes().Get(context.TODO(), n, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
logFunc("Error getting node info %v", err)
|
||||
}
|
||||
logFunc("Node Info: %v", node)
|
||||
|
||||
logFunc("\nLogging kubelet events for node %v", n)
|
||||
for _, e := range getNodeEvents(c, n) {
|
||||
logFunc("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v",
|
||||
e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject)
|
||||
}
|
||||
logFunc("\nLogging pods the kubelet thinks is on node %v", n)
|
||||
podList, err := getKubeletPods(c, n)
|
||||
if err != nil {
|
||||
logFunc("Unable to retrieve kubelet pods for node %v: %v", n, err)
|
||||
continue
|
||||
}
|
||||
for _, p := range podList.Items {
|
||||
logFunc("%v started at %v (%d+%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.InitContainerStatuses), len(p.Status.ContainerStatuses))
|
||||
for _, c := range p.Status.InitContainerStatuses {
|
||||
logFunc("\tInit container %v ready: %v, restart count %v",
|
||||
c.Name, c.Ready, c.RestartCount)
|
||||
}
|
||||
for _, c := range p.Status.ContainerStatuses {
|
||||
logFunc("\tContainer %v ready: %v, restart count %v",
|
||||
c.Name, c.Ready, c.RestartCount)
|
||||
}
|
||||
}
|
||||
e2emetrics.HighLatencyKubeletOperations(c, 10*time.Second, n, logFunc)
|
||||
// TODO: Log node resource info
|
||||
}
|
||||
}
|
||||
|
||||
// getKubeletPods retrieves the list of pods on the kubelet.
|
||||
func getKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) {
|
||||
var client restclient.Result
|
||||
finished := make(chan struct{}, 1)
|
||||
go func() {
|
||||
// call chain tends to hang in some cases when Node is not ready. Add an artificial timeout for this call. #22165
|
||||
client = c.CoreV1().RESTClient().Get().
|
||||
Resource("nodes").
|
||||
SubResource("proxy").
|
||||
Name(fmt.Sprintf("%v:%v", node, framework.KubeletPort)).
|
||||
Suffix("pods").
|
||||
Do(context.TODO())
|
||||
|
||||
finished <- struct{}{}
|
||||
}()
|
||||
select {
|
||||
case <-finished:
|
||||
result := &v1.PodList{}
|
||||
if err := client.Into(result); err != nil {
|
||||
return &v1.PodList{}, err
|
||||
}
|
||||
return result, nil
|
||||
case <-time.After(framework.PodGetTimeout):
|
||||
return &v1.PodList{}, fmt.Errorf("Waiting up to %v for getting the list of pods", framework.PodGetTimeout)
|
||||
}
|
||||
}
|
||||
|
||||
// logNodeEvents logs kubelet events from the given node. This includes kubelet
|
||||
// restart and node unhealthy events. Note that listing events like this will mess
|
||||
// with latency metrics, beware of calling it during a test.
|
||||
func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
|
||||
selector := fields.Set{
|
||||
"involvedObject.kind": "Node",
|
||||
"involvedObject.name": nodeName,
|
||||
"involvedObject.namespace": metav1.NamespaceAll,
|
||||
"source": "kubelet",
|
||||
}.AsSelector().String()
|
||||
options := metav1.ListOptions{FieldSelector: selector}
|
||||
events, err := c.CoreV1().Events(metav1.NamespaceSystem).List(context.TODO(), options)
|
||||
if err != nil {
|
||||
framework.Logf("Unexpected error retrieving node events %v", err)
|
||||
return []v1.Event{}
|
||||
}
|
||||
return events.Items
|
||||
}
|
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
package debug
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@ -27,7 +27,7 @@ import (
|
||||
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
|
||||
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
|
||||
)
|
||||
|
||||
@ -109,7 +109,7 @@ func (s *LogsSizeDataSummary) PrintHumanReadable() string {
|
||||
|
||||
// PrintJSON returns the summary of log size data with JSON format.
|
||||
func (s *LogsSizeDataSummary) PrintJSON() string {
|
||||
return PrettyPrintJSON(*s)
|
||||
return framework.PrettyPrintJSON(*s)
|
||||
}
|
||||
|
||||
// SummaryKind returns the summary of log size data summary.
|
||||
@ -158,8 +158,8 @@ func (d *LogsSizeData) addNewData(ip, path string, timestamp time.Time, size int
|
||||
// NewLogsVerifier creates a new LogsSizeVerifier which will stop when stopChannel is closed
|
||||
func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVerifier {
|
||||
nodeAddresses, err := e2essh.NodeSSHHosts(c)
|
||||
ExpectNoError(err)
|
||||
instanceAddress := APIAddress() + ":22"
|
||||
framework.ExpectNoError(err)
|
||||
instanceAddress := framework.APIAddress() + ":22"
|
||||
|
||||
workChannel := make(chan WorkItem, len(nodeAddresses)+1)
|
||||
workers := make([]*LogSizeGatherer, workersNo)
|
||||
@ -256,13 +256,13 @@ func (g *LogSizeGatherer) Work() bool {
|
||||
sshResult, err := e2essh.SSH(
|
||||
fmt.Sprintf("ls -l %v | awk '{print $9, $5}' | tr '\n' ' '", strings.Join(workItem.paths, " ")),
|
||||
workItem.ip,
|
||||
TestContext.Provider,
|
||||
framework.TestContext.Provider,
|
||||
)
|
||||
if err != nil {
|
||||
Logf("Error while trying to SSH to %v, skipping probe. Error: %v", workItem.ip, err)
|
||||
framework.Logf("Error while trying to SSH to %v, skipping probe. Error: %v", workItem.ip, err)
|
||||
// In case of repeated error give up.
|
||||
if workItem.backoffMultiplier >= 128 {
|
||||
Logf("Failed to ssh to a node %v multiple times in a row. Giving up.", workItem.ip)
|
||||
framework.Logf("Failed to ssh to a node %v multiple times in a row. Giving up.", workItem.ip)
|
||||
g.wg.Done()
|
||||
return false
|
||||
}
|
||||
@ -278,7 +278,7 @@ func (g *LogSizeGatherer) Work() bool {
|
||||
path := results[i]
|
||||
size, err := strconv.Atoi(results[i+1])
|
||||
if err != nil {
|
||||
Logf("Error during conversion to int: %v, skipping data. Error: %v", results[i+1], err)
|
||||
framework.Logf("Error during conversion to int: %v, skipping data. Error: %v", results[i+1], err)
|
||||
continue
|
||||
}
|
||||
g.data.addNewData(workItem.ip, path, now, size)
|
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
package debug
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
@ -38,7 +38,7 @@ import (
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
kubeletstatsv1alpha1 "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
|
||||
|
||||
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
|
||||
)
|
||||
|
||||
@ -91,7 +91,7 @@ func (s *ResourceUsageSummary) PrintHumanReadable() string {
|
||||
|
||||
// PrintJSON prints resource usage summary in JSON.
|
||||
func (s *ResourceUsageSummary) PrintJSON() string {
|
||||
return PrettyPrintJSON(*s)
|
||||
return framework.PrettyPrintJSON(*s)
|
||||
}
|
||||
|
||||
// SummaryKind returns string of ResourceUsageSummary
|
||||
@ -198,13 +198,13 @@ func (w *resourceGatherWorker) singleProbe() {
|
||||
} else {
|
||||
nodeUsage, err := getOneTimeResourceUsageOnNode(w.c, w.nodeName, w.probeDuration, func() []string { return w.containerIDs })
|
||||
if err != nil {
|
||||
Logf("Error while reading data from %v: %v", w.nodeName, err)
|
||||
framework.Logf("Error while reading data from %v: %v", w.nodeName, err)
|
||||
return
|
||||
}
|
||||
for k, v := range nodeUsage {
|
||||
data[k] = v
|
||||
if w.printVerboseLogs {
|
||||
Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes)
|
||||
framework.Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -290,13 +290,13 @@ func getOneTimeResourceUsageOnNode(
|
||||
|
||||
// getStatsSummary contacts kubelet for the container information.
|
||||
func getStatsSummary(c clientset.Interface, nodeName string) (*kubeletstatsv1alpha1.Summary, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
|
||||
defer cancel()
|
||||
|
||||
data, err := c.CoreV1().RESTClient().Get().
|
||||
Resource("nodes").
|
||||
SubResource("proxy").
|
||||
Name(fmt.Sprintf("%v:%v", nodeName, KubeletPort)).
|
||||
Name(fmt.Sprintf("%v:%v", nodeName, framework.KubeletPort)).
|
||||
Suffix("stats/summary").
|
||||
Do(ctx).Raw()
|
||||
|
||||
@ -322,7 +322,7 @@ func removeUint64Ptr(ptr *uint64) uint64 {
|
||||
func (w *resourceGatherWorker) gather(initialSleep time.Duration) {
|
||||
defer utilruntime.HandleCrash()
|
||||
defer w.wg.Done()
|
||||
defer Logf("Closing worker for %v", w.nodeName)
|
||||
defer framework.Logf("Closing worker for %v", w.nodeName)
|
||||
defer func() { w.finished = true }()
|
||||
select {
|
||||
case <-time.After(initialSleep):
|
||||
@ -384,7 +384,7 @@ func nodeHasControlPlanePods(c clientset.Interface, nodeName string) (bool, erro
|
||||
return false, err
|
||||
}
|
||||
if len(podList.Items) < 1 {
|
||||
Logf("Can't find any pods in namespace %s to grab metrics from", metav1.NamespaceSystem)
|
||||
framework.Logf("Can't find any pods in namespace %s to grab metrics from", metav1.NamespaceSystem)
|
||||
}
|
||||
for _, pod := range podList.Items {
|
||||
if regKubeScheduler.MatchString(pod.Name) || regKubeControllerManager.MatchString(pod.Name) {
|
||||
@ -422,7 +422,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
|
||||
if pods == nil {
|
||||
pods, err = c.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
Logf("Error while listing Pods: %v", err)
|
||||
framework.Logf("Error while listing Pods: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
@ -458,7 +458,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
|
||||
}
|
||||
nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
Logf("Error while listing Nodes: %v", err)
|
||||
framework.Logf("Error while listing Nodes: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -510,7 +510,7 @@ func (g *ContainerResourceGatherer) StartGatheringData() {
|
||||
// specified resource constraints.
|
||||
func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constraints map[string]ResourceConstraint) (*ResourceUsageSummary, error) {
|
||||
close(g.stopCh)
|
||||
Logf("Closed stop channel. Waiting for %v workers", len(g.workers))
|
||||
framework.Logf("Closed stop channel. Waiting for %v workers", len(g.workers))
|
||||
finished := make(chan struct{}, 1)
|
||||
go func() {
|
||||
g.workerWg.Wait()
|
||||
@ -518,7 +518,7 @@ func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constrai
|
||||
}()
|
||||
select {
|
||||
case <-finished:
|
||||
Logf("Waitgroup finished.")
|
||||
framework.Logf("Waitgroup finished.")
|
||||
case <-time.After(2 * time.Minute):
|
||||
unfinished := make([]string, 0)
|
||||
for i := range g.workers {
|
||||
@ -526,11 +526,11 @@ func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constrai
|
||||
unfinished = append(unfinished, g.workers[i].nodeName)
|
||||
}
|
||||
}
|
||||
Logf("Timed out while waiting for waitgroup, some workers failed to finish: %v", unfinished)
|
||||
framework.Logf("Timed out while waiting for waitgroup, some workers failed to finish: %v", unfinished)
|
||||
}
|
||||
|
||||
if len(percentiles) == 0 {
|
||||
Logf("Warning! Empty percentile list for stopAndPrintData.")
|
||||
framework.Logf("Warning! Empty percentile list for stopAndPrintData.")
|
||||
return &ResourceUsageSummary{}, fmt.Errorf("Failed to get any resource usage data")
|
||||
}
|
||||
data := make(map[int]ResourceUsagePerContainer)
|
||||
@ -604,7 +604,7 @@ type kubemarkResourceUsage struct {
|
||||
}
|
||||
|
||||
func getMasterUsageByPrefix(prefix string) (string, error) {
|
||||
sshResult, err := e2essh.SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), APIAddress()+":22", TestContext.Provider)
|
||||
sshResult, err := e2essh.SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), framework.APIAddress()+":22", framework.TestContext.Provider)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -617,7 +617,7 @@ func getKubemarkMasterComponentsResourceUsage() map[string]*kubemarkResourceUsag
|
||||
// Get kubernetes component resource usage
|
||||
sshResult, err := getMasterUsageByPrefix("kube")
|
||||
if err != nil {
|
||||
Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
|
||||
framework.Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
|
||||
return nil
|
||||
}
|
||||
scanner := bufio.NewScanner(strings.NewReader(sshResult))
|
||||
@ -635,7 +635,7 @@ func getKubemarkMasterComponentsResourceUsage() map[string]*kubemarkResourceUsag
|
||||
// Get etcd resource usage
|
||||
sshResult, err = getMasterUsageByPrefix("bin/etcd")
|
||||
if err != nil {
|
||||
Logf("Error when trying to SSH to master machine. Skipping probe")
|
||||
framework.Logf("Error when trying to SSH to master machine. Skipping probe")
|
||||
return nil
|
||||
}
|
||||
scanner = bufio.NewScanner(strings.NewReader(sshResult))
|
vendor/k8s.io/kubernetes/test/e2e/framework/framework.go (generated, vendored): 340 changes
@ -28,7 +28,6 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@ -50,10 +49,6 @@ import (
|
||||
admissionapi "k8s.io/pod-security-admission/api"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
"github.com/onsi/gomega"
|
||||
|
||||
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
|
||||
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -61,6 +56,34 @@ const (
|
||||
DefaultNamespaceDeletionTimeout = 5 * time.Minute
|
||||
)
|
||||
|
||||
var (
|
||||
// NewFrameworkExtensions lists functions that get called by
|
||||
// NewFramework after constructing a new framework and after
|
||||
// calling ginkgo.BeforeEach for the framework.
|
||||
//
|
||||
// This can be used by extensions of the core framework to modify
|
||||
// settings in the framework instance or to add additional callbacks
|
||||
// with gingko.BeforeEach/AfterEach/DeferCleanup.
|
||||
//
|
||||
// When a test runs, functions will be invoked in this order:
|
||||
// - BeforeEaches defined by tests before f.NewDefaultFramework
|
||||
// in the order in which they were defined (first-in-first-out)
|
||||
// - f.BeforeEach
|
||||
// - BeforeEaches defined by tests after f.NewDefaultFramework
|
||||
// - It callback
|
||||
// - all AfterEaches in the order in which they were defined
|
||||
// - all DeferCleanups with the order reversed (first-in-last-out)
|
||||
// - f.AfterEach
|
||||
//
|
||||
// Because a test might skip test execution in a BeforeEach that runs
|
||||
// before f.BeforeEach, AfterEach callbacks that depend on the
|
||||
// framework instance must check whether it was initialized. They can
|
||||
// do that by checking f.ClientSet for nil. DeferCleanup callbacks
|
||||
// don't need to do this because they get defined when the test
|
||||
// runs.
|
||||
NewFrameworkExtensions []func(f *Framework)
|
||||
)
|
||||
|
||||
// Framework supports common operations used by e2e tests; it will keep a client & a namespace for you.
|
||||
// Eventual goal is to merge this with integration test framework.
|
||||
type Framework struct {
|
||||
@ -85,32 +108,9 @@ type Framework struct {
|
||||
NamespaceDeletionTimeout time.Duration
|
||||
NamespacePodSecurityEnforceLevel admissionapi.Level // The pod security enforcement level for namespaces to be applied.
|
||||
|
||||
gatherer *ContainerResourceGatherer
|
||||
// Constraints that passed to a check which is executed after data is gathered to
|
||||
// see if 99% of results are within acceptable bounds. It has to be injected in the test,
|
||||
// as expectations vary greatly. Constraints are grouped by the container names.
|
||||
AddonResourceConstraints map[string]ResourceConstraint
|
||||
|
||||
logsSizeWaitGroup sync.WaitGroup
|
||||
logsSizeCloseChannel chan bool
|
||||
logsSizeVerifier *LogsSizeVerifier
|
||||
|
||||
// Flaky operation failures in an e2e test can be captured through this.
|
||||
flakeReport *FlakeReport
|
||||
|
||||
// To make sure that this framework cleans up after itself, no matter what,
|
||||
// we install a Cleanup action before each test and clear it after. If we
|
||||
// should abort, the AfterSuite hook should run all Cleanup actions.
|
||||
cleanupHandle CleanupActionHandle
|
||||
|
||||
// afterEaches is a map of name to function to be called after each test. These are not
|
||||
// cleared. The call order is randomized so that no dependencies can grow between
|
||||
// the various afterEaches
|
||||
afterEaches map[string]AfterEachActionFunc
|
||||
|
||||
// beforeEachStarted indicates that BeforeEach has started
|
||||
beforeEachStarted bool
|
||||
|
||||
// configuration for framework's client
|
||||
Options Options
|
||||
|
||||
@ -118,15 +118,17 @@ type Framework struct {
|
||||
// or stdout if ReportDir is not set once test ends.
|
||||
TestSummaries []TestDataSummary
|
||||
|
||||
// Place to keep ClusterAutoscaler metrics from before test in order to compute delta.
|
||||
clusterAutoscalerMetricsBeforeTest e2emetrics.Collection
|
||||
|
||||
// Timeouts contains the custom timeouts used during the test execution.
|
||||
Timeouts *TimeoutContext
|
||||
|
||||
// DumpAllNamespaceInfo is invoked by the framework to record
|
||||
// information about a namespace after a test failure.
|
||||
DumpAllNamespaceInfo DumpAllNamespaceInfoAction
|
||||
}
|
||||
|
||||
// AfterEachActionFunc is a function that can be called after each test
|
||||
type AfterEachActionFunc func(f *Framework, failed bool)
|
||||
// DumpAllNamespaceInfoAction is called after each failed test for namespaces
|
||||
// created for the test.
|
||||
type DumpAllNamespaceInfoAction func(f *Framework, namespace string)
|
||||
|
||||
// TestDataSummary is an interface for managing test data.
|
||||
type TestDataSummary interface {
|
||||
@ -149,8 +151,10 @@ func NewFrameworkWithCustomTimeouts(baseName string, timeouts *TimeoutContext) *
|
||||
return f
|
||||
}
|
||||
|
||||
// NewDefaultFramework makes a new framework and sets up a BeforeEach/AfterEach for
|
||||
// you (you can write additional before/after each functions).
|
||||
// NewDefaultFramework makes a new framework and sets up a BeforeEach which
|
||||
// initializes the framework instance. It cleans up with a DeferCleanup,
|
||||
// which runs last, so a AfterEach in the test still has a valid framework
|
||||
// instance.
|
||||
func NewDefaultFramework(baseName string) *Framework {
|
||||
options := Options{
|
||||
ClientQPS: 20,
|
||||
@ -162,79 +166,72 @@ func NewDefaultFramework(baseName string) *Framework {
|
||||
// NewFramework creates a test framework.
|
||||
func NewFramework(baseName string, options Options, client clientset.Interface) *Framework {
|
||||
f := &Framework{
|
||||
BaseName: baseName,
|
||||
AddonResourceConstraints: make(map[string]ResourceConstraint),
|
||||
Options: options,
|
||||
ClientSet: client,
|
||||
Timeouts: NewTimeoutContextWithDefaults(),
|
||||
BaseName: baseName,
|
||||
Options: options,
|
||||
ClientSet: client,
|
||||
Timeouts: NewTimeoutContextWithDefaults(),
|
||||
}
|
||||
|
||||
f.AddAfterEach("dumpNamespaceInfo", func(f *Framework, failed bool) {
|
||||
if !failed {
|
||||
return
|
||||
}
|
||||
if !TestContext.DumpLogsOnFailure {
|
||||
return
|
||||
}
|
||||
if !f.SkipNamespaceCreation {
|
||||
for _, ns := range f.namespacesToDelete {
|
||||
DumpAllNamespaceInfo(f.ClientSet, ns.Name)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
ginkgo.BeforeEach(f.BeforeEach)
|
||||
ginkgo.AfterEach(f.AfterEach)
|
||||
// The order is important here: if the extension calls ginkgo.BeforeEach
|
||||
// itself, then it can be sure that f.BeforeEach already ran when its
|
||||
// own callback gets invoked.
|
||||
ginkgo.BeforeEach(f.BeforeEach, AnnotatedLocation("set up framework"))
|
||||
for _, extension := range NewFrameworkExtensions {
|
||||
extension(f)
|
||||
}
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
// BeforeEach gets a client and makes a namespace.
|
||||
func (f *Framework) BeforeEach() {
|
||||
f.beforeEachStarted = true
|
||||
// DeferCleanup, in contrast to AfterEach, triggers execution in
|
||||
// first-in-last-out order. This ensures that the framework instance
|
||||
// remains valid as long as possible.
|
||||
//
|
||||
// In addition, AfterEach will not be called if a test never gets here.
|
||||
ginkgo.DeferCleanup(f.AfterEach, AnnotatedLocation("tear down framework"))
|
||||
|
||||
// The fact that we need this feels like a bug in ginkgo.
|
||||
// https://github.com/onsi/ginkgo/v2/issues/222
|
||||
f.cleanupHandle = AddCleanupAction(f.AfterEach)
|
||||
if f.ClientSet == nil {
|
||||
ginkgo.By("Creating a kubernetes client")
|
||||
config, err := LoadConfig()
|
||||
ExpectNoError(err)
|
||||
// Registered later and thus runs before deleting namespaces.
|
||||
ginkgo.DeferCleanup(f.dumpNamespaceInfo, AnnotatedLocation("dump namespaces"))
|
||||
|
||||
config.QPS = f.Options.ClientQPS
|
||||
config.Burst = f.Options.ClientBurst
|
||||
if f.Options.GroupVersion != nil {
|
||||
config.GroupVersion = f.Options.GroupVersion
|
||||
}
|
||||
if TestContext.KubeAPIContentType != "" {
|
||||
config.ContentType = TestContext.KubeAPIContentType
|
||||
}
|
||||
f.clientConfig = rest.CopyConfig(config)
|
||||
f.ClientSet, err = clientset.NewForConfig(config)
|
||||
ExpectNoError(err)
|
||||
f.DynamicClient, err = dynamic.NewForConfig(config)
|
||||
ExpectNoError(err)
|
||||
ginkgo.By("Creating a kubernetes client")
|
||||
config, err := LoadConfig()
|
||||
ExpectNoError(err)
|
||||
|
||||
// create scales getter, set GroupVersion and NegotiatedSerializer to default values
|
||||
// as they are required when creating a REST client.
|
||||
if config.GroupVersion == nil {
|
||||
config.GroupVersion = &schema.GroupVersion{}
|
||||
}
|
||||
if config.NegotiatedSerializer == nil {
|
||||
config.NegotiatedSerializer = scheme.Codecs
|
||||
}
|
||||
restClient, err := rest.RESTClientFor(config)
|
||||
ExpectNoError(err)
|
||||
discoClient, err := discovery.NewDiscoveryClientForConfig(config)
|
||||
ExpectNoError(err)
|
||||
cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient)
|
||||
restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoClient)
|
||||
restMapper.Reset()
|
||||
resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient)
|
||||
f.ScalesGetter = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver)
|
||||
|
||||
TestContext.CloudConfig.Provider.FrameworkBeforeEach(f)
|
||||
config.QPS = f.Options.ClientQPS
|
||||
config.Burst = f.Options.ClientBurst
|
||||
if f.Options.GroupVersion != nil {
|
||||
config.GroupVersion = f.Options.GroupVersion
|
||||
}
|
||||
if TestContext.KubeAPIContentType != "" {
|
||||
config.ContentType = TestContext.KubeAPIContentType
|
||||
}
|
||||
f.clientConfig = rest.CopyConfig(config)
|
||||
f.ClientSet, err = clientset.NewForConfig(config)
|
||||
ExpectNoError(err)
|
||||
f.DynamicClient, err = dynamic.NewForConfig(config)
|
||||
ExpectNoError(err)
|
||||
|
||||
// create scales getter, set GroupVersion and NegotiatedSerializer to default values
|
||||
// as they are required when creating a REST client.
|
||||
if config.GroupVersion == nil {
|
||||
config.GroupVersion = &schema.GroupVersion{}
|
||||
}
|
||||
if config.NegotiatedSerializer == nil {
|
||||
config.NegotiatedSerializer = scheme.Codecs
|
||||
}
|
||||
restClient, err := rest.RESTClientFor(config)
|
||||
ExpectNoError(err)
|
||||
discoClient, err := discovery.NewDiscoveryClientForConfig(config)
|
||||
ExpectNoError(err)
|
||||
cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient)
|
||||
restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoClient)
|
||||
restMapper.Reset()
|
||||
resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient)
|
||||
f.ScalesGetter = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver)
|
||||
|
||||
TestContext.CloudConfig.Provider.FrameworkBeforeEach(f)
|
||||
|
||||
if !f.SkipNamespaceCreation {
|
||||
ginkgo.By(fmt.Sprintf("Building a namespace api object, basename %s", f.BaseName))
|
||||
@ -261,60 +258,23 @@ func (f *Framework) BeforeEach() {
|
||||
f.UniqueName = fmt.Sprintf("%s-%08x", f.BaseName, rand.Int31())
|
||||
}
|
||||
|
||||
if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" {
|
||||
var err error
|
||||
var nodeMode NodesSet
|
||||
switch TestContext.GatherKubeSystemResourceUsageData {
|
||||
case "master":
|
||||
nodeMode = MasterNodes
|
||||
case "masteranddns":
|
||||
nodeMode = MasterAndDNSNodes
|
||||
default:
|
||||
nodeMode = AllNodes
|
||||
}
|
||||
f.flakeReport = NewFlakeReport()
|
||||
}
|
||||
|
||||
f.gatherer, err = NewResourceUsageGatherer(f.ClientSet, ResourceGathererOptions{
|
||||
InKubemark: ProviderIs("kubemark"),
|
||||
Nodes: nodeMode,
|
||||
ResourceDataGatheringPeriod: 60 * time.Second,
|
||||
ProbeDuration: 15 * time.Second,
|
||||
PrintVerboseLogs: false,
|
||||
}, nil)
|
||||
if err != nil {
|
||||
Logf("Error while creating NewResourceUsageGatherer: %v", err)
|
||||
} else {
|
||||
go f.gatherer.StartGatheringData()
|
||||
}
|
||||
func (f *Framework) dumpNamespaceInfo() {
|
||||
if !ginkgo.CurrentSpecReport().Failed() {
|
||||
return
|
||||
}
|
||||
|
||||
if TestContext.GatherLogsSizes {
|
||||
f.logsSizeWaitGroup = sync.WaitGroup{}
|
||||
f.logsSizeWaitGroup.Add(1)
|
||||
f.logsSizeCloseChannel = make(chan bool)
|
||||
f.logsSizeVerifier = NewLogsVerifier(f.ClientSet, f.logsSizeCloseChannel)
|
||||
go func() {
|
||||
f.logsSizeVerifier.Run()
|
||||
f.logsSizeWaitGroup.Done()
|
||||
}()
|
||||
if !TestContext.DumpLogsOnFailure {
|
||||
return
|
||||
}
|
||||
|
||||
gatherMetricsAfterTest := TestContext.GatherMetricsAfterTest == "true" || TestContext.GatherMetricsAfterTest == "master"
|
||||
if gatherMetricsAfterTest && TestContext.IncludeClusterAutoscalerMetrics {
|
||||
grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), !ProviderIs("kubemark"), false, false, false, TestContext.IncludeClusterAutoscalerMetrics, false)
|
||||
if err != nil {
|
||||
Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
|
||||
} else {
|
||||
f.clusterAutoscalerMetricsBeforeTest, err = grabber.Grab()
|
||||
if err != nil {
|
||||
Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
|
||||
} else {
|
||||
Logf("Gathered ClusterAutoscaler metrics before test")
|
||||
ginkgo.By("dump namespace information after failure", func() {
|
||||
if !f.SkipNamespaceCreation {
|
||||
for _, ns := range f.namespacesToDelete {
|
||||
f.DumpAllNamespaceInfo(f, ns.Name)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
f.flakeReport = NewFlakeReport()
|
||||
})
|
||||
}
|
||||
|
||||
// printSummaries prints summaries of tests.
|
||||
@ -354,29 +314,8 @@ func printSummaries(summaries []TestDataSummary, testBaseName string) {
|
||||
}
|
||||
}
|
||||
|
||||
// AddAfterEach is a way to add a function to be called after every test. The execution order is intentionally random
|
||||
// to avoid growing dependencies. If you register the same name twice, it is a coding error and will panic.
|
||||
func (f *Framework) AddAfterEach(name string, fn AfterEachActionFunc) {
|
||||
if _, ok := f.afterEaches[name]; ok {
|
||||
panic(fmt.Sprintf("%q is already registered", name))
|
||||
}
|
||||
|
||||
if f.afterEaches == nil {
|
||||
f.afterEaches = map[string]AfterEachActionFunc{}
|
||||
}
|
||||
f.afterEaches[name] = fn
|
||||
}
|
||||
|
||||
// AfterEach deletes the namespace, after reading its events.
|
||||
func (f *Framework) AfterEach() {
|
||||
// If BeforeEach never started AfterEach should be skipped.
|
||||
// Currently some tests under e2e/storage have this condition.
|
||||
if !f.beforeEachStarted {
|
||||
return
|
||||
}
|
||||
|
||||
RemoveCleanupAction(f.cleanupHandle)
|
||||
|
||||
// This should not happen. Given ClientSet is a public field a test must have updated it!
|
||||
// Error out early before any API calls during cleanup.
|
||||
if f.ClientSet == nil {
|
||||
@ -398,8 +337,8 @@ func (f *Framework) AfterEach() {
|
||||
nsDeletionErrors[ns.Name] = err
|
||||
|
||||
// Dump namespace if we are unable to delete the namespace and the dump was not already performed.
|
||||
if !ginkgo.CurrentSpecReport().Failed() && TestContext.DumpLogsOnFailure {
|
||||
DumpAllNamespaceInfo(f.ClientSet, ns.Name)
|
||||
if !ginkgo.CurrentSpecReport().Failed() && TestContext.DumpLogsOnFailure && f.DumpAllNamespaceInfo != nil {
|
||||
f.DumpAllNamespaceInfo(f, ns.Name)
|
||||
}
|
||||
} else {
|
||||
Logf("Namespace %v was already deleted", ns.Name)
|
||||
@ -414,7 +353,9 @@ func (f *Framework) AfterEach() {
|
||||
}
|
||||
}
|
||||
|
||||
// Paranoia-- prevent reuse!
|
||||
// Unsetting this is relevant for a following test that uses
|
||||
// the same instance because it might not reach f.BeforeEach
|
||||
// when some other BeforeEach skips the test first.
|
||||
f.Namespace = nil
|
||||
f.clientConfig = nil
|
||||
f.ClientSet = nil
|
||||
@ -430,42 +371,6 @@ func (f *Framework) AfterEach() {
|
||||
}
|
||||
}()
|
||||
|
||||
// run all aftereach functions in random order to ensure no dependencies grow
|
||||
for _, afterEachFn := range f.afterEaches {
|
||||
afterEachFn(f, ginkgo.CurrentSpecReport().Failed())
|
||||
}
|
||||
|
||||
if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" && f.gatherer != nil {
|
||||
ginkgo.By("Collecting resource usage data")
|
||||
summary, resourceViolationError := f.gatherer.StopAndSummarize([]int{90, 99, 100}, f.AddonResourceConstraints)
|
||||
defer ExpectNoError(resourceViolationError)
|
||||
f.TestSummaries = append(f.TestSummaries, summary)
|
||||
}
|
||||
|
||||
if TestContext.GatherLogsSizes {
|
||||
ginkgo.By("Gathering log sizes data")
|
||||
close(f.logsSizeCloseChannel)
|
||||
f.logsSizeWaitGroup.Wait()
|
||||
f.TestSummaries = append(f.TestSummaries, f.logsSizeVerifier.GetSummary())
|
||||
}
|
||||
|
||||
if TestContext.GatherMetricsAfterTest != "false" {
|
||||
ginkgo.By("Gathering metrics")
|
||||
// Grab apiserver, scheduler, controller-manager metrics and (optionally) nodes' kubelet metrics.
|
||||
grabMetricsFromKubelets := TestContext.GatherMetricsAfterTest != "master" && !ProviderIs("kubemark")
|
||||
grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), grabMetricsFromKubelets, true, true, true, TestContext.IncludeClusterAutoscalerMetrics, false)
|
||||
if err != nil {
|
||||
Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
|
||||
} else {
|
||||
received, err := grabber.Grab()
|
||||
if err != nil {
|
||||
Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
|
||||
}
|
||||
(*e2emetrics.ComponentCollection)(&received).ComputeClusterAutoscalerMetricsDelta(f.clusterAutoscalerMetricsBeforeTest)
|
||||
f.TestSummaries = append(f.TestSummaries, (*e2emetrics.ComponentCollection)(&received))
|
||||
}
|
||||
}
|
||||
|
||||
TestContext.CloudConfig.Provider.FrameworkAfterEach(f)
|
||||
|
||||
// Report any flakes that were observed in the e2e test and reset.
|
||||
@ -475,13 +380,6 @@ func (f *Framework) AfterEach() {
|
||||
}
|
||||
|
||||
printSummaries(f.TestSummaries, f.BaseName)
|
||||
|
||||
// Check whether all nodes are ready after the test.
|
||||
// This is explicitly done at the very end of the test, to avoid
|
||||
// e.g. not removing namespace in case of this failure.
|
||||
if err := AllNodesReady(f.ClientSet, 3*time.Minute); err != nil {
|
||||
Failf("All nodes should be ready after test, %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteNamespace can be used to delete a namespace. Additionally it can be used to
|
||||
@ -510,8 +408,8 @@ func (f *Framework) DeleteNamespace(name string) {
|
||||
}
|
||||
}()
|
||||
// if current test failed then we should dump namespace information
|
||||
if !f.SkipNamespaceCreation && ginkgo.CurrentSpecReport().Failed() && TestContext.DumpLogsOnFailure {
|
||||
DumpAllNamespaceInfo(f.ClientSet, name)
|
||||
if !f.SkipNamespaceCreation && ginkgo.CurrentSpecReport().Failed() && TestContext.DumpLogsOnFailure && f.DumpAllNamespaceInfo != nil {
|
||||
f.DumpAllNamespaceInfo(f, name)
|
||||
}
|
||||
|
||||
}
|
||||
@ -574,20 +472,6 @@ func (f *Framework) ClientConfig() *rest.Config {
|
||||
return ret
|
||||
}
|
||||
|
||||
// TestContainerOutput runs the given pod in the given namespace and waits
|
||||
// for all of the containers in the podSpec to move into the 'Success' status, and tests
|
||||
// the specified container log against the given expected output using a substring matcher.
|
||||
func (f *Framework) TestContainerOutput(scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
|
||||
f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, gomega.ContainSubstring)
|
||||
}
|
||||
|
||||
// TestContainerOutputRegexp runs the given pod in the given namespace and waits
|
||||
// for all of the containers in the podSpec to move into the 'Success' status, and tests
|
||||
// the specified container log against the given expected output using a regexp matcher.
|
||||
func (f *Framework) TestContainerOutputRegexp(scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
|
||||
f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, gomega.MatchRegexp)
|
||||
}
|
||||
|
||||
// KubeUser is a struct for managing kubernetes user info.
|
||||
type KubeUser struct {
|
||||
Name string `yaml:"name"`
|
||||
|
vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper.go (generated, vendored, new file): 33 lines
@@ -0,0 +1,33 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
    "path"

    "github.com/onsi/ginkgo/v2/types"
)

// AnnotatedLocation can be used to provide more informative source code
// locations by passing the result as additional parameter to a
// BeforeEach/AfterEach/DeferCleanup/It/etc.
func AnnotatedLocation(annotation string) types.CodeLocation {
    codeLocation := types.NewCodeLocation(1)
    codeLocation.FileName = path.Base(codeLocation.FileName)
    codeLocation = types.NewCustomCodeLocation(annotation + " | " + codeLocation.String())
    return codeLocation
}
vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper/wrapper.go (generated, vendored, deleted): 106 lines
@ -1,106 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package ginkgowrapper wraps Ginkgo Fail and Skip functions to panic
|
||||
// with structured data instead of a constant string.
|
||||
package ginkgowrapper
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
)
|
||||
|
||||
// FailurePanic is the value that will be panicked from Fail.
|
||||
type FailurePanic struct {
|
||||
Message string // The failure message passed to Fail
|
||||
Filename string // The filename that is the source of the failure
|
||||
Line int // The line number of the filename that is the source of the failure
|
||||
FullStackTrace string // A full stack trace starting at the source of the failure
|
||||
}
|
||||
|
||||
const ginkgoFailurePanic = `
|
||||
Your test failed.
|
||||
Ginkgo panics to prevent subsequent assertions from running.
|
||||
Normally Ginkgo rescues this panic so you shouldn't see it.
|
||||
But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
|
||||
To circumvent this, you should call
|
||||
defer GinkgoRecover()
|
||||
at the top of the goroutine that caused this panic.
|
||||
`
|
||||
|
||||
// String makes FailurePanic look like the old Ginkgo panic when printed.
|
||||
func (FailurePanic) String() string { return ginkgoFailurePanic }
|
||||
|
||||
// Fail wraps ginkgo.Fail so that it panics with more useful
|
||||
// information about the failure. This function will panic with a
|
||||
// FailurePanic.
|
||||
func Fail(message string, callerSkip ...int) {
|
||||
skip := 1
|
||||
if len(callerSkip) > 0 {
|
||||
skip += callerSkip[0]
|
||||
}
|
||||
|
||||
_, file, line, _ := runtime.Caller(skip)
|
||||
fp := FailurePanic{
|
||||
Message: message,
|
||||
Filename: file,
|
||||
Line: line,
|
||||
FullStackTrace: pruneStack(skip),
|
||||
}
|
||||
|
||||
defer func() {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
panic(fp)
|
||||
}
|
||||
}()
|
||||
|
||||
ginkgo.Fail(message, skip)
|
||||
}
|
||||
|
||||
// ginkgo adds a lot of test running infrastructure to the stack, so
|
||||
// we filter those out
|
||||
var stackSkipPattern = regexp.MustCompile(`onsi/ginkgo/v2`)
|
||||
|
||||
func pruneStack(skip int) string {
|
||||
skip += 2 // one for pruneStack and one for debug.Stack
|
||||
stack := debug.Stack()
|
||||
scanner := bufio.NewScanner(bytes.NewBuffer(stack))
|
||||
var prunedStack []string
|
||||
|
||||
// skip the top of the stack
|
||||
for i := 0; i < 2*skip+1; i++ {
|
||||
scanner.Scan()
|
||||
}
|
||||
|
||||
for scanner.Scan() {
|
||||
if stackSkipPattern.Match(scanner.Bytes()) {
|
||||
scanner.Scan() // these come in pairs
|
||||
} else {
|
||||
prunedStack = append(prunedStack, scanner.Text())
|
||||
scanner.Scan() // these come in pairs
|
||||
prunedStack = append(prunedStack, scanner.Text())
|
||||
}
|
||||
}
|
||||
|
||||
return strings.Join(prunedStack, "\n")
|
||||
}
|
vendor/k8s.io/kubernetes/test/e2e/framework/kubectl/builder.go (generated, vendored, new file): 191 lines
@ -0,0 +1,191 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package kubectl
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/url"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
uexec "k8s.io/utils/exec"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
// KubectlBuilder is used to build, customize and execute a kubectl Command.
|
||||
// Add more functions to customize the builder as needed.
|
||||
type KubectlBuilder struct {
|
||||
cmd *exec.Cmd
|
||||
timeout <-chan time.Time
|
||||
}
|
||||
|
||||
// NewKubectlCommand returns a KubectlBuilder for running kubectl.
|
||||
func NewKubectlCommand(namespace string, args ...string) *KubectlBuilder {
|
||||
b := new(KubectlBuilder)
|
||||
tk := NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, namespace)
|
||||
b.cmd = tk.KubectlCmd(args...)
|
||||
return b
|
||||
}
|
||||
|
||||
// WithEnv sets the given environment and returns itself.
|
||||
func (b *KubectlBuilder) WithEnv(env []string) *KubectlBuilder {
|
||||
b.cmd.Env = env
|
||||
return b
|
||||
}
|
||||
|
||||
// WithTimeout sets the given timeout and returns itself.
|
||||
func (b *KubectlBuilder) WithTimeout(t <-chan time.Time) *KubectlBuilder {
|
||||
b.timeout = t
|
||||
return b
|
||||
}
|
||||
|
||||
// WithStdinData sets the given data to stdin and returns itself.
|
||||
func (b KubectlBuilder) WithStdinData(data string) *KubectlBuilder {
|
||||
b.cmd.Stdin = strings.NewReader(data)
|
||||
return &b
|
||||
}
|
||||
|
||||
// WithStdinReader sets the given reader and returns itself.
|
||||
func (b KubectlBuilder) WithStdinReader(reader io.Reader) *KubectlBuilder {
|
||||
b.cmd.Stdin = reader
|
||||
return &b
|
||||
}
|
||||
|
||||
// ExecOrDie runs the kubectl executable or dies if error occurs.
|
||||
func (b KubectlBuilder) ExecOrDie(namespace string) string {
|
||||
str, err := b.Exec()
|
||||
// In case of i/o timeout error, try talking to the apiserver again after 2s before dying.
|
||||
// Note that we're still dying after retrying so that we can get visibility to triage it further.
|
||||
if isTimeout(err) {
|
||||
framework.Logf("Hit i/o timeout error, talking to the server 2s later to see if it's temporary.")
|
||||
time.Sleep(2 * time.Second)
|
||||
retryStr, retryErr := RunKubectl(namespace, "version")
|
||||
framework.Logf("stdout: %q", retryStr)
|
||||
framework.Logf("err: %v", retryErr)
|
||||
}
|
||||
framework.ExpectNoError(err)
|
||||
return str
|
||||
}
|
||||
|
||||
func isTimeout(err error) bool {
|
||||
switch err := err.(type) {
|
||||
case *url.Error:
|
||||
if err, ok := err.Err.(net.Error); ok && err.Timeout() {
|
||||
return true
|
||||
}
|
||||
case net.Error:
|
||||
if err.Timeout() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Exec runs the kubectl executable.
|
||||
func (b KubectlBuilder) Exec() (string, error) {
|
||||
stdout, _, err := b.ExecWithFullOutput()
|
||||
return stdout, err
|
||||
}
|
||||
|
||||
// ExecWithFullOutput runs the kubectl executable, and returns the stdout and stderr.
|
||||
func (b KubectlBuilder) ExecWithFullOutput() (string, string, error) {
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd := b.cmd
|
||||
cmd.Stdout, cmd.Stderr = &stdout, &stderr
|
||||
|
||||
framework.Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately
|
||||
if err := cmd.Start(); err != nil {
|
||||
return "", "", fmt.Errorf("error starting %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v", cmd, cmd.Stdout, cmd.Stderr, err)
|
||||
}
|
||||
errCh := make(chan error, 1)
|
||||
go func() {
|
||||
errCh <- cmd.Wait()
|
||||
}()
|
||||
select {
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
var rc = 127
|
||||
if ee, ok := err.(*exec.ExitError); ok {
|
||||
rc = int(ee.Sys().(syscall.WaitStatus).ExitStatus())
|
||||
framework.Logf("rc: %d", rc)
|
||||
}
|
||||
return stdout.String(), stderr.String(), uexec.CodeExitError{
|
||||
Err: fmt.Errorf("error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v", cmd, cmd.Stdout, cmd.Stderr, err),
|
||||
Code: rc,
|
||||
}
|
||||
}
|
||||
case <-b.timeout:
|
||||
b.cmd.Process.Kill()
|
||||
return "", "", fmt.Errorf("timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v", cmd, cmd.Stdout, cmd.Stderr)
|
||||
}
|
||||
framework.Logf("stderr: %q", stderr.String())
|
||||
framework.Logf("stdout: %q", stdout.String())
|
||||
return stdout.String(), stderr.String(), nil
|
||||
}
|
||||
|
||||
// RunKubectlOrDie is a convenience wrapper over kubectlBuilder
|
||||
func RunKubectlOrDie(namespace string, args ...string) string {
|
||||
return NewKubectlCommand(namespace, args...).ExecOrDie(namespace)
|
||||
}
|
||||
|
||||
// RunKubectl is a convenience wrapper over kubectlBuilder
|
||||
func RunKubectl(namespace string, args ...string) (string, error) {
|
||||
return NewKubectlCommand(namespace, args...).Exec()
|
||||
}
|
||||
|
||||
// RunKubectlWithFullOutput is a convenience wrapper over kubectlBuilder
|
||||
// It will also return the command's stderr.
|
||||
func RunKubectlWithFullOutput(namespace string, args ...string) (string, string, error) {
|
||||
return NewKubectlCommand(namespace, args...).ExecWithFullOutput()
|
||||
}
|
||||
|
||||
// RunKubectlOrDieInput is a convenience wrapper over kubectlBuilder that takes input to stdin
|
||||
func RunKubectlOrDieInput(namespace string, data string, args ...string) string {
|
||||
return NewKubectlCommand(namespace, args...).WithStdinData(data).ExecOrDie(namespace)
|
||||
}
|
||||
|
||||
// RunKubectlInput is a convenience wrapper over kubectlBuilder that takes input to stdin
|
||||
func RunKubectlInput(namespace string, data string, args ...string) (string, error) {
|
||||
return NewKubectlCommand(namespace, args...).WithStdinData(data).Exec()
|
||||
}
|
||||
|
||||
// RunKubemciWithKubeconfig is a convenience wrapper over RunKubemciCmd
|
||||
func RunKubemciWithKubeconfig(args ...string) (string, error) {
|
||||
if framework.TestContext.KubeConfig != "" {
|
||||
args = append(args, "--"+clientcmd.RecommendedConfigPathFlag+"="+framework.TestContext.KubeConfig)
|
||||
}
|
||||
return RunKubemciCmd(args...)
|
||||
}
|
||||
|
||||
// RunKubemciCmd is a convenience wrapper over kubectlBuilder to run kubemci.
|
||||
// It assumes that kubemci exists in PATH.
|
||||
func RunKubemciCmd(args ...string) (string, error) {
|
||||
// kubemci is assumed to be in PATH.
|
||||
kubemci := "kubemci"
|
||||
b := new(KubectlBuilder)
|
||||
args = append(args, "--gcp-project="+framework.TestContext.CloudConfig.ProjectID)
|
||||
|
||||
b.cmd = exec.Command(kubemci, args...)
|
||||
return b.Exec()
|
||||
}
|
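For orientation, here is a minimal sketch of how a test might drive this builder. The namespace, arguments, and manifest are illustrative, and ExecOrDie assumes an initialized framework/Ginkgo context:

```go
package example

import (
	"time"

	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
)

// listPods is a hypothetical helper showing the two common entry points:
// the one-shot wrappers and the builder with extra customization.
func listPods(ns string) (string, error) {
	// One-shot wrapper: build, run, and return stdout.
	out, err := e2ekubectl.RunKubectl(ns, "get", "pods", "-o", "name")
	if err != nil {
		return "", err
	}

	// Builder form: customize stdin and timeout before executing.
	_ = e2ekubectl.NewKubectlCommand(ns, "apply", "-f", "-").
		WithStdinData("apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: demo\n").
		WithTimeout(time.After(30 * time.Second)).
		ExecOrDie(ns)

	return out, nil
}
```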
16 vendor/k8s.io/kubernetes/test/e2e/framework/kubectl/kubectl_utils.go generated vendored
@ -29,7 +29,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
@ -109,7 +109,7 @@ func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm stri
|
||||
logFunc("Running kubectl logs on non-ready containers in %v", ns)
|
||||
for _, pod := range podList.Items {
|
||||
if res, err := testutils.PodRunningReady(&pod); !res || err != nil {
|
||||
kubectlLogPod(c, pod, "", e2elog.Logf)
|
||||
kubectlLogPod(c, pod, "", framework.Logf)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -144,7 +144,7 @@ func (tk *TestKubeconfig) WriteFileViaContainer(podName, containerName string, p
|
||||
command := fmt.Sprintf("echo '%s' > '%s'; sync", contents, path)
|
||||
stdout, stderr, err := tk.kubectlExecWithRetry(tk.Namespace, podName, containerName, "--", "/bin/sh", "-c", command)
|
||||
if err != nil {
|
||||
e2elog.Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
|
||||
framework.Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
|
||||
}
|
||||
return err
|
||||
}
|
||||
@ -155,7 +155,7 @@ func (tk *TestKubeconfig) ReadFileViaContainer(podName, containerName string, pa
|
||||
|
||||
stdout, stderr, err := tk.kubectlExecWithRetry(tk.Namespace, podName, containerName, "--", "cat", path)
|
||||
if err != nil {
|
||||
e2elog.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
|
||||
framework.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
|
||||
}
|
||||
return string(stdout), err
|
||||
}
|
||||
@ -163,19 +163,19 @@ func (tk *TestKubeconfig) ReadFileViaContainer(podName, containerName string, pa
|
||||
func (tk *TestKubeconfig) kubectlExecWithRetry(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
|
||||
for numRetries := 0; numRetries < maxKubectlExecRetries; numRetries++ {
|
||||
if numRetries > 0 {
|
||||
e2elog.Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
|
||||
framework.Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
|
||||
}
|
||||
|
||||
stdOutBytes, stdErrBytes, err := tk.kubectlExec(namespace, podName, containerName, args...)
|
||||
if err != nil {
|
||||
if strings.Contains(strings.ToLower(string(stdErrBytes)), "i/o timeout") {
|
||||
// Retry on "i/o timeout" errors
|
||||
e2elog.Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
|
||||
framework.Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
|
||||
continue
|
||||
}
|
||||
if strings.Contains(strings.ToLower(string(stdErrBytes)), "container not found") {
|
||||
// Retry on "container not found" errors
|
||||
e2elog.Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
|
||||
framework.Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
|
||||
time.Sleep(2 * time.Second)
|
||||
continue
|
||||
}
|
||||
@ -200,7 +200,7 @@ func (tk *TestKubeconfig) kubectlExec(namespace string, podName, containerName s
|
||||
cmd := tk.KubectlCmd(cmdArgs...)
|
||||
cmd.Stdout, cmd.Stderr = &stdout, &stderr
|
||||
|
||||
e2elog.Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
|
||||
framework.Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
|
||||
err := cmd.Run()
|
||||
return stdout.Bytes(), stderr.Bytes(), err
|
||||
}
|
||||
|
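A hedged usage sketch for the helpers above (the pod, container, and path are illustrative). WriteFileViaContainer and ReadFileViaContainer go through kubectlExecWithRetry, so transient "i/o timeout" and "container not found" errors are retried:

```go
package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
)

// roundTripFile is a hypothetical helper: it writes a file into a running
// container and reads it back through kubectl exec.
func roundTripFile(ns, pod, container string) (string, error) {
	tk := e2ekubectl.NewTestKubeconfig(
		framework.TestContext.CertDir,
		framework.TestContext.Host,
		framework.TestContext.KubeConfig,
		framework.TestContext.KubeContext,
		framework.TestContext.KubectlPath,
		ns,
	)
	if err := tk.WriteFileViaContainer(pod, container, "/tmp/e2e-check", "hello"); err != nil {
		return "", err
	}
	return tk.ReadFileViaContainer(pod, container, "/tmp/e2e-check")
}
```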
14 vendor/k8s.io/kubernetes/test/e2e/framework/log.go generated vendored
@ -24,9 +24,7 @@ import (
	"time"

	"github.com/onsi/ginkgo/v2"

-	// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
-	e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
)

func nowStamp() string {
@ -42,13 +40,13 @@ func Logf(format string, args ...interface{}) {
	log("INFO", format, args...)
}

-// Failf logs the fail info, including a stack trace starts at 2 levels above its caller
-// (for example, for call chain f -> g -> Failf("foo", ...) error would be logged for "f").
+// Failf logs the fail info, including a stack trace starts with its direct caller
+// (for example, for call chain f -> g -> Failf("foo", ...) error would be logged for "g").
func Failf(format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
-	skip := 2
+	skip := 1
	log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
-	e2eginkgowrapper.Fail(nowStamp()+": "+msg, skip)
+	ginkgo.Fail(nowStamp()+": "+msg, skip)
	panic("unreachable")
}

@ -60,7 +58,7 @@ func Fail(msg string, callerSkip ...int) {
		skip += callerSkip[0]
	}
	log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
-	e2eginkgowrapper.Fail(nowStamp()+": "+msg, skip)
+	ginkgo.Fail(nowStamp()+": "+msg, skip)
}

var codeFilterRE = regexp.MustCompile(`/github.com/onsi/ginkgo/v2/`)
@ -79,7 +77,7 @@ var codeFilterRE = regexp.MustCompile(`/github.com/onsi/ginkgo/v2/`)
func PrunedStack(skip int) []byte {
	fullStackTrace := debug.Stack()
	stack := bytes.Split(fullStackTrace, []byte("\n"))
-	// Ensure that the even entries are the method names and the
+	// Ensure that the even entries are the method names and
	// the odd entries the source code information.
	if len(stack) > 0 && bytes.HasPrefix(stack[0], []byte("goroutine ")) {
		// Ignore "goroutine 29 [running]:" line.
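A small illustrative helper to make the skip change concrete: with skip := 1, Ginkgo now reports the failure at Failf's direct caller (requireNonEmpty below), where previously it pointed one frame higher, at requireNonEmpty's caller.

```go
package example

import "k8s.io/kubernetes/test/e2e/framework"

// requireNonEmpty is a hypothetical helper. A failure here is attributed to
// requireNonEmpty's own frame (Failf's direct caller), and the pruned stack
// trace is logged alongside the message.
func requireNonEmpty(name, value string) {
	if value == "" {
		framework.Failf("expected %q to be set", name)
	}
}
```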
54 vendor/k8s.io/kubernetes/test/e2e/framework/log/logger.go generated vendored
@ -1,54 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package log will be removed after switching to use core framework log.
// Do not make further changes here!
package log

import (
	"fmt"
	"time"

	"github.com/onsi/ginkgo/v2"

	e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
)

func nowStamp() string {
	return time.Now().Format(time.StampMilli)
}

func log(level string, format string, args ...interface{}) {
	fmt.Fprintf(ginkgo.GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...)
}

// Logf logs the info.
func Logf(format string, args ...interface{}) {
	log("INFO", format, args...)
}

// Failf logs the fail info.
func Failf(format string, args ...interface{}) {
	FailfWithOffset(1, format, args...)
}

// FailfWithOffset calls "Fail" and logs the error at "offset" levels above its caller
// (for example, for call chain f -> g -> FailfWithOffset(1, ...) error would be logged for "f").
func FailfWithOffset(offset int, format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	log("FAIL", msg)
	e2eginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset)
}
6 vendor/k8s.io/kubernetes/test/e2e/framework/metrics/e2e_metrics.go generated vendored
@ -22,7 +22,7 @@ import (
	"fmt"

	"k8s.io/component-base/metrics/testutil"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	"k8s.io/kubernetes/test/e2e/framework"
)

const (
@ -94,12 +94,12 @@ func (m *ComponentCollection) PrintHumanReadable() string {
func PrettyPrintJSON(metrics interface{}) string {
	output := &bytes.Buffer{}
	if err := json.NewEncoder(output).Encode(metrics); err != nil {
-		e2elog.Logf("Error building encoder: %v", err)
+		framework.Logf("Error building encoder: %v", err)
		return ""
	}
	formatted := &bytes.Buffer{}
	if err := json.Indent(formatted, output.Bytes(), "", "  "); err != nil {
-		e2elog.Logf("Error indenting: %v", err)
+		framework.Logf("Error indenting: %v", err)
		return ""
	}
	return string(formatted.Bytes())
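An illustrative wrapper around PrettyPrintJSON, logging through the core framework logger the file now uses:

```go
package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
)

// logAsJSON is a hypothetical helper: it pretty-prints any metrics-like value
// and logs it; PrettyPrintJSON returns "" (and logs the reason) on failure.
func logAsJSON(v interface{}) {
	if out := e2emetrics.PrettyPrintJSON(v); out != "" {
		framework.Logf("metrics:\n%s", out)
	}
}
```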
73 vendor/k8s.io/kubernetes/test/e2e/framework/metrics/grab.go generated vendored Normal file
@ -0,0 +1,73 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
	"github.com/onsi/ginkgo/v2"

	"k8s.io/kubernetes/test/e2e/framework"
)

func GrabBeforeEach(f *framework.Framework) (result *Collection) {
	gatherMetricsAfterTest := framework.TestContext.GatherMetricsAfterTest == "true" || framework.TestContext.GatherMetricsAfterTest == "master"
	if !gatherMetricsAfterTest || !framework.TestContext.IncludeClusterAutoscalerMetrics {
		return nil
	}

	ginkgo.By("Gathering metrics before test", func() {
		grabber, err := NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), !framework.ProviderIs("kubemark"), false, false, false, framework.TestContext.IncludeClusterAutoscalerMetrics, false)
		if err != nil {
			framework.Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
			return
		}
		metrics, err := grabber.Grab()
		if err != nil {
			framework.Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
			return
		}
		framework.Logf("Gathered ClusterAutoscaler metrics before test")
		result = &metrics
	})

	return
}

func GrabAfterEach(f *framework.Framework, before *Collection) {
	if framework.TestContext.GatherMetricsAfterTest == "false" {
		return
	}

	ginkgo.By("Gathering metrics after test", func() {
		// Grab apiserver, scheduler, controller-manager metrics and (optionally) nodes' kubelet metrics.
		grabMetricsFromKubelets := framework.TestContext.GatherMetricsAfterTest != "master" && !framework.ProviderIs("kubemark")
		grabber, err := NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), grabMetricsFromKubelets, true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics, false)
		if err != nil {
			framework.Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
			return
		}
		received, err := grabber.Grab()
		if err != nil {
			framework.Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
			return
		}
		if before == nil {
			before = &Collection{}
		}
		(*ComponentCollection)(&received).ComputeClusterAutoscalerMetricsDelta(*before)
		f.TestSummaries = append(f.TestSummaries, (*ComponentCollection)(&received))
	})
}
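GrabBeforeEach and GrabAfterEach are meant to bracket each test. A sketch of the wiring in a Ginkgo suite (the Describe text and framework name are illustrative):

```go
package example

import (
	"github.com/onsi/ginkgo/v2"

	"k8s.io/kubernetes/test/e2e/framework"
	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
)

var _ = ginkgo.Describe("[sig-example] metrics delta", func() {
	f := framework.NewDefaultFramework("metrics-delta")
	var before *e2emetrics.Collection

	ginkgo.BeforeEach(func() {
		// Returns nil unless metrics gathering (and CA metrics) is enabled.
		before = e2emetrics.GrabBeforeEach(f)
	})

	ginkgo.AfterEach(func() {
		// Computes the ClusterAutoscaler delta against "before" and appends
		// the result to f.TestSummaries for the suite's report.
		e2emetrics.GrabAfterEach(f, before)
	})

	ginkgo.It("runs the test body between the two grabs", func() {})
})
```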
1 vendor/k8s.io/kubernetes/test/e2e/framework/metrics/interesting_metrics.go generated vendored
@ -46,6 +46,7 @@ var interestingKubeletMetrics = []string{
	"kubelet_docker_operations_errors_total",
	"kubelet_docker_operations_duration_seconds",
	"kubelet_pod_start_duration_seconds",
+	"kubelet_pod_start_sli_duration_seconds",
	"kubelet_pod_worker_duration_seconds",
	"kubelet_pod_worker_start_duration_seconds",
}
7 vendor/k8s.io/kubernetes/test/e2e/framework/metrics/kubelet_metrics.go generated vendored
@ -29,7 +29,7 @@ import (
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/component-base/metrics/testutil"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	"k8s.io/kubernetes/test/e2e/framework"
)

const (
@ -44,6 +44,8 @@ const (
	// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
	podStartDurationKey = "pod_start_duration_seconds"
+	// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
+	PodStartSLIDurationKey = "pod_start_sli_duration_seconds"
	// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
	cgroupManagerOperationsKey = "cgroup_manager_duration_seconds"
	// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
	podWorkerStartDurationKey = "pod_worker_start_duration_seconds"
@ -175,6 +177,7 @@ func GetDefaultKubeletLatencyMetrics(ms KubeletMetrics) KubeletLatencyMetrics {
		podWorkerDurationKey,
		podWorkerStartDurationKey,
		podStartDurationKey,
+		PodStartSLIDurationKey,
		cgroupManagerOperationsKey,
		dockerOperationsLatencyKey,
		podWorkerStartDurationKey,
@ -226,7 +229,7 @@ func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration
	for _, m := range latencyMetrics {
		if m.Latency > threshold {
			badMetrics = append(badMetrics, m)
-			e2elog.Logf("%+v", m)
+			framework.Logf("%+v", m)
		}
	}
	return badMetrics, nil
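A hedged sketch of consuming the newly exported SLI key (it assumes KubeletLatencyMetric exposes Operation and Latency fields, as used elsewhere in this package; the threshold is arbitrary):

```go
package example

import (
	"time"

	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
)

// slowPodStartSLIs is a hypothetical filter: given latency metrics already
// collected from a kubelet, it keeps only pod_start_sli_duration_seconds
// samples above a threshold, using the exported key added above.
func slowPodStartSLIs(ms e2emetrics.KubeletLatencyMetrics, threshold time.Duration) e2emetrics.KubeletLatencyMetrics {
	var out e2emetrics.KubeletLatencyMetrics
	for _, m := range ms {
		if m.Operation == e2emetrics.PodStartSLIDurationKey && m.Latency > threshold {
			out = append(out, m)
		}
	}
	return out
}
```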
165 vendor/k8s.io/kubernetes/test/e2e/framework/node/helper.go generated vendored Normal file
@ -0,0 +1,165 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package node
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
const (
|
||||
// Minimal number of nodes for the cluster to be considered large.
|
||||
largeClusterThreshold = 100
|
||||
)
|
||||
|
||||
// WaitForAllNodesSchedulable waits up to timeout for all
|
||||
// (but TestContext.AllowedNotReadyNodes) to become schedulable.
|
||||
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
|
||||
if framework.TestContext.AllowedNotReadyNodes == -1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
framework.Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, framework.TestContext.AllowedNotReadyNodes)
|
||||
return wait.PollImmediate(
|
||||
30*time.Second,
|
||||
timeout,
|
||||
CheckReadyForTests(c, framework.TestContext.NonblockingTaints, framework.TestContext.AllowedNotReadyNodes, largeClusterThreshold),
|
||||
)
|
||||
}
|
||||
|
||||
// AddOrUpdateLabelOnNode adds the given label key and value to the given node or updates value.
|
||||
func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, labelValue string) {
|
||||
framework.ExpectNoError(testutils.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
|
||||
}
|
||||
|
||||
// ExpectNodeHasLabel expects that the given node has the given label pair.
|
||||
func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) {
|
||||
ginkgo.By("verifying the node has the label " + labelKey + " " + labelValue)
|
||||
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectEqual(node.Labels[labelKey], labelValue)
|
||||
}
|
||||
|
||||
// RemoveLabelOffNode is for cleaning up labels temporarily added to node,
|
||||
// won't fail if target label doesn't exist or has been removed.
|
||||
func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) {
|
||||
ginkgo.By("removing the label " + labelKey + " off the node " + nodeName)
|
||||
framework.ExpectNoError(testutils.RemoveLabelOffNode(c, nodeName, []string{labelKey}))
|
||||
|
||||
ginkgo.By("verifying the node doesn't have the label " + labelKey)
|
||||
framework.ExpectNoError(testutils.VerifyLabelsRemoved(c, nodeName, []string{labelKey}))
|
||||
}
|
||||
|
||||
// ExpectNodeHasTaint expects that the node has the given taint.
|
||||
func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) {
|
||||
ginkgo.By("verifying the node has the taint " + taint.ToString())
|
||||
if has, err := NodeHasTaint(c, nodeName, taint); !has {
|
||||
framework.ExpectNoError(err)
|
||||
framework.Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName)
|
||||
}
|
||||
}
|
||||
|
||||
// NodeHasTaint returns true if the node has the given taint, else returns false.
|
||||
func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) {
|
||||
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
nodeTaints := node.Spec.Taints
|
||||
|
||||
if len(nodeTaints) == 0 || !taintExists(nodeTaints, taint) {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// AllNodesReady checks whether all registered nodes are ready. Setting -1 on
|
||||
// framework.TestContext.AllowedNotReadyNodes will bypass the post test node readiness check.
|
||||
// TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy,
|
||||
// and figure out how to do it in a configurable way, as we can't expect all setups to run
|
||||
// default test add-ons.
|
||||
func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
|
||||
if err := allNodesReady(c, timeout); err != nil {
|
||||
return fmt.Errorf("checking for ready nodes: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func allNodesReady(c clientset.Interface, timeout time.Duration) error {
|
||||
if framework.TestContext.AllowedNotReadyNodes == -1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
framework.Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, framework.TestContext.AllowedNotReadyNodes)
|
||||
|
||||
var notReady []*v1.Node
|
||||
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
|
||||
notReady = nil
|
||||
// It should be OK to list unschedulable Nodes here.
|
||||
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for i := range nodes.Items {
|
||||
node := &nodes.Items[i]
|
||||
if !IsConditionSetAsExpected(node, v1.NodeReady, true) {
|
||||
notReady = append(notReady, node)
|
||||
}
|
||||
}
|
||||
// Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
|
||||
// to make it possible e.g. for incorrect deployment of some small percentage
|
||||
// of nodes (which we allow in cluster validation). Some nodes that are not
|
||||
// provisioned correctly at startup will never become ready (e.g. when something
|
||||
// won't install correctly), so we can't expect them to be ready at any point.
|
||||
return len(notReady) <= framework.TestContext.AllowedNotReadyNodes, nil
|
||||
})
|
||||
|
||||
if err != nil && err != wait.ErrWaitTimeout {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(notReady) > framework.TestContext.AllowedNotReadyNodes {
|
||||
msg := ""
|
||||
for _, node := range notReady {
|
||||
msg = fmt.Sprintf("%s, %s", msg, node.Name)
|
||||
}
|
||||
return fmt.Errorf("Not ready nodes: %#v", msg)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// taintExists checks if the given taint exists in list of taints. Returns true if exists false otherwise.
|
||||
func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool {
|
||||
for _, taint := range taints {
|
||||
if taint.MatchTaint(taintToFind) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
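An illustrative flow that combines the node helpers above (the label key/value, taint, and timeout are arbitrary):

```go
package example

import (
	"time"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// labelAndVerify is a hypothetical flow: wait for schedulable nodes, label
// one, verify the label, and check for a test taint, cleaning up afterwards.
func labelAndVerify(c clientset.Interface, nodeName string) {
	framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, 10*time.Minute))

	e2enode.AddOrUpdateLabelOnNode(c, nodeName, "example.com/role", "e2e")
	defer e2enode.RemoveLabelOffNode(c, nodeName, "example.com/role")
	e2enode.ExpectNodeHasLabel(c, nodeName, "example.com/role", "e2e")

	taint := &v1.Taint{Key: "example.com/dedicated", Value: "e2e", Effect: v1.TaintEffectNoSchedule}
	if has, err := e2enode.NodeHasTaint(c, nodeName, taint); err == nil && has {
		framework.Logf("node %s already carries the test taint", nodeName)
	}
}
```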
93 vendor/k8s.io/kubernetes/test/e2e/framework/node/node_killer.go generated vendored Normal file
@ -0,0 +1,93 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package node
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
|
||||
)
|
||||
|
||||
// NodeKiller is a utility to simulate node failures.
|
||||
type NodeKiller struct {
|
||||
config framework.NodeKillerConfig
|
||||
client clientset.Interface
|
||||
provider string
|
||||
}
|
||||
|
||||
// NewNodeKiller creates new NodeKiller.
|
||||
func NewNodeKiller(config framework.NodeKillerConfig, client clientset.Interface, provider string) *NodeKiller {
|
||||
config.NodeKillerStopCh = make(chan struct{})
|
||||
return &NodeKiller{config, client, provider}
|
||||
}
|
||||
|
||||
// Run starts NodeKiller until stopCh is closed.
|
||||
func (k *NodeKiller) Run(stopCh <-chan struct{}) {
|
||||
// wait.JitterUntil starts work immediately, so wait first.
|
||||
time.Sleep(wait.Jitter(k.config.Interval, k.config.JitterFactor))
|
||||
wait.JitterUntil(func() {
|
||||
nodes := k.pickNodes()
|
||||
k.kill(nodes)
|
||||
}, k.config.Interval, k.config.JitterFactor, true, stopCh)
|
||||
}
|
||||
|
||||
func (k *NodeKiller) pickNodes() []v1.Node {
|
||||
nodes, err := GetReadySchedulableNodes(k.client)
|
||||
framework.ExpectNoError(err)
|
||||
numNodes := int(k.config.FailureRatio * float64(len(nodes.Items)))
|
||||
|
||||
nodes, err = GetBoundedReadySchedulableNodes(k.client, numNodes)
|
||||
framework.ExpectNoError(err)
|
||||
return nodes.Items
|
||||
}
|
||||
|
||||
func (k *NodeKiller) kill(nodes []v1.Node) {
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(len(nodes))
|
||||
for _, node := range nodes {
|
||||
node := node
|
||||
go func() {
|
||||
defer ginkgo.GinkgoRecover()
|
||||
defer wg.Done()
|
||||
|
||||
framework.Logf("Stopping docker and kubelet on %q to simulate failure", node.Name)
|
||||
err := e2essh.IssueSSHCommand("sudo systemctl stop docker kubelet", k.provider, &node)
|
||||
if err != nil {
|
||||
framework.Logf("ERROR while stopping node %q: %v", node.Name, err)
|
||||
return
|
||||
}
|
||||
|
||||
time.Sleep(k.config.SimulatedDowntime)
|
||||
|
||||
framework.Logf("Rebooting %q to repair the node", node.Name)
|
||||
err = e2essh.IssueSSHCommand("sudo reboot", k.provider, &node)
|
||||
if err != nil {
|
||||
framework.Logf("ERROR while rebooting node %q: %v", node.Name, err)
|
||||
return
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
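A sketch of wiring the NodeKiller shown above; it assumes the suite populates framework.TestContext.NodeKiller and that SSH access to the nodes is configured for the provider:

```go
package example

import (
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// runNodeKiller periodically stops docker/kubelet on a fraction of ready
// nodes until the returned stop function is called.
func runNodeKiller(c clientset.Interface, provider string) (stop func()) {
	// Assumes the NodeKillerConfig (Interval, FailureRatio, JitterFactor,
	// SimulatedDowntime, ...) was filled in from command line flags.
	cfg := framework.TestContext.NodeKiller
	killer := e2enode.NewNodeKiller(cfg, c, provider)

	stopCh := make(chan struct{})
	go killer.Run(stopCh)

	return func() { close(stopCh) }
}
```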
30 vendor/k8s.io/kubernetes/test/e2e/framework/node/resource.go generated vendored
@ -40,7 +40,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
clientretry "k8s.io/client-go/util/retry"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
netutil "k8s.io/utils/net"
|
||||
)
|
||||
|
||||
@ -128,7 +128,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
|
||||
conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
|
||||
}
|
||||
if !silent {
|
||||
e2elog.Logf(msg)
|
||||
framework.Logf(msg)
|
||||
}
|
||||
return false
|
||||
}
|
||||
@ -137,7 +137,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
|
||||
return true
|
||||
}
|
||||
if !silent {
|
||||
e2elog.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
|
||||
framework.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
|
||||
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
|
||||
}
|
||||
return false
|
||||
@ -146,7 +146,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
|
||||
return true
|
||||
}
|
||||
if !silent {
|
||||
e2elog.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
|
||||
framework.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
|
||||
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
|
||||
}
|
||||
return false
|
||||
@ -154,7 +154,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
|
||||
|
||||
}
|
||||
if !silent {
|
||||
e2elog.Logf("Couldn't find condition %v on node %v", conditionType, node.Name)
|
||||
framework.Logf("Couldn't find condition %v on node %v", conditionType, node.Name)
|
||||
}
|
||||
return false
|
||||
}
|
||||
@ -196,7 +196,7 @@ func Filter(nodeList *v1.NodeList, fn func(node v1.Node) bool) {
|
||||
func TotalRegistered(c clientset.Interface) (int, error) {
|
||||
nodes, err := waitListSchedulableNodes(c)
|
||||
if err != nil {
|
||||
e2elog.Logf("Failed to list nodes: %v", err)
|
||||
framework.Logf("Failed to list nodes: %v", err)
|
||||
return 0, err
|
||||
}
|
||||
return len(nodes.Items), nil
|
||||
@ -206,7 +206,7 @@ func TotalRegistered(c clientset.Interface) (int, error) {
|
||||
func TotalReady(c clientset.Interface) (int, error) {
|
||||
nodes, err := waitListSchedulableNodes(c)
|
||||
if err != nil {
|
||||
e2elog.Logf("Failed to list nodes: %v", err)
|
||||
framework.Logf("Failed to list nodes: %v", err)
|
||||
return 0, err
|
||||
}
|
||||
|
||||
@ -220,7 +220,7 @@ func TotalReady(c clientset.Interface) (int, error) {
|
||||
// GetExternalIP returns node external IP concatenated with port 22 for ssh
|
||||
// e.g. 1.2.3.4:22
|
||||
func GetExternalIP(node *v1.Node) (string, error) {
|
||||
e2elog.Logf("Getting external IP address for %s", node.Name)
|
||||
framework.Logf("Getting external IP address for %s", node.Name)
|
||||
host := ""
|
||||
for _, a := range node.Status.Addresses {
|
||||
if a.Type == v1.NodeExternalIP && a.Address != "" {
|
||||
@ -628,7 +628,7 @@ func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName str
|
||||
"app": appName + "-pod",
|
||||
}
|
||||
for i, node := range nodes.Items {
|
||||
e2elog.Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
|
||||
framework.Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
|
||||
_, err := c.CoreV1().Pods(namespace).Create(context.TODO(), &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf(appName+"-pod-%v", i),
|
||||
@ -884,16 +884,6 @@ func verifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Tai
|
||||
// TODO use wrapper methods in expect.go after removing core e2e dependency on node
|
||||
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
|
||||
if taintExists(nodeUpdated.Spec.Taints, taint) {
|
||||
e2elog.Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
|
||||
framework.Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
|
||||
}
|
||||
}
|
||||
|
||||
// taintExists checks if the given taint exists in list of taints. Returns true if exists false otherwise.
|
||||
func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool {
|
||||
for _, taint := range taints {
|
||||
if taint.MatchTaint(taintToFind) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
42 vendor/k8s.io/kubernetes/test/e2e/framework/node/ssh.go generated vendored Normal file
@ -0,0 +1,42 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package node

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
)

// WaitForSSHTunnels waits for establishing SSH tunnel to busybox pod.
func WaitForSSHTunnels(namespace string) {
	framework.Logf("Waiting for SSH tunnels to establish")
	e2ekubectl.RunKubectl(namespace, "run", "ssh-tunnel-test",
		"--image=busybox",
		"--restart=Never",
		"--command", "--",
		"echo", "Hello")
	defer e2ekubectl.RunKubectl(namespace, "delete", "pod", "ssh-tunnel-test")

	// allow up to a minute for new ssh tunnels to establish
	wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
		_, err := e2ekubectl.RunKubectl(namespace, "logs", "ssh-tunnel-test")
		return err == nil, nil
	})
}
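An illustrative call site: after a control-plane disruption, give the apiserver-to-node SSH tunnels time to come back before the next kubectl logs/exec call:

```go
package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// afterMasterUpgrade is a hypothetical post-upgrade step: wait up to a minute
// for tunnels to re-establish in the test's namespace.
func afterMasterUpgrade(f *framework.Framework) {
	e2enode.WaitForSSHTunnels(f.Namespace.Name)
}
```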
24 vendor/k8s.io/kubernetes/test/e2e/framework/node/wait.go generated vendored
@ -27,7 +27,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
const sleepTime = 20 * time.Second
|
||||
@ -47,7 +47,7 @@ func WaitForReadyNodes(c clientset.Interface, size int, timeout time.Duration) e
|
||||
|
||||
// WaitForTotalHealthy checks whether all registered nodes are ready and all required Pods are running on them.
|
||||
func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
|
||||
e2elog.Logf("Waiting up to %v for all nodes to be ready", timeout)
|
||||
framework.Logf("Waiting up to %v for all nodes to be ready", timeout)
|
||||
|
||||
var notReady []v1.Node
|
||||
var missingPodsPerNode map[string][]string
|
||||
@ -115,11 +115,11 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
|
||||
// is ConditionTrue; if it's false, it ensures the node condition is in any state
|
||||
// other than ConditionTrue (e.g. not true or unknown).
|
||||
func WaitConditionToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
|
||||
e2elog.Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
|
||||
framework.Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
|
||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
|
||||
node, err := c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
e2elog.Logf("Couldn't get node %s", name)
|
||||
framework.Logf("Couldn't get node %s", name)
|
||||
continue
|
||||
}
|
||||
|
||||
@ -127,7 +127,7 @@ func WaitConditionToBe(c clientset.Interface, name string, conditionType v1.Node
|
||||
return true
|
||||
}
|
||||
}
|
||||
e2elog.Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
|
||||
framework.Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
|
||||
return false
|
||||
}
|
||||
|
||||
@ -149,7 +149,7 @@ func CheckReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.No
|
||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(sleepTime) {
|
||||
nodes, err := waitListSchedulableNodes(c)
|
||||
if err != nil {
|
||||
e2elog.Logf("Failed to list nodes: %v", err)
|
||||
framework.Logf("Failed to list nodes: %v", err)
|
||||
continue
|
||||
}
|
||||
numNodes := len(nodes.Items)
|
||||
@ -163,10 +163,10 @@ func CheckReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.No
|
||||
numReady := len(nodes.Items)
|
||||
|
||||
if numNodes == size && numReady == size {
|
||||
e2elog.Logf("Cluster has reached the desired number of ready nodes %d", size)
|
||||
framework.Logf("Cluster has reached the desired number of ready nodes %d", size)
|
||||
return nodes.Items, nil
|
||||
}
|
||||
e2elog.Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numReady, numNodes-numReady)
|
||||
framework.Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numReady, numNodes-numReady)
|
||||
}
|
||||
return nil, fmt.Errorf("timeout waiting %v for number of ready nodes to be %d", timeout, size)
|
||||
}
|
||||
@ -215,7 +215,7 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
|
||||
allNodes, err := c.CoreV1().Nodes().List(context.TODO(), opts)
|
||||
if err != nil {
|
||||
var terminalListNodesErr error
|
||||
e2elog.Logf("Unexpected error listing nodes: %v", err)
|
||||
framework.Logf("Unexpected error listing nodes: %v", err)
|
||||
if attempt >= 3 {
|
||||
terminalListNodesErr = err
|
||||
}
|
||||
@ -236,9 +236,9 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
|
||||
if len(nodesNotReadyYet) > 0 {
|
||||
// In large clusters, log them only every 10th pass.
|
||||
if len(nodesNotReadyYet) < largeClusterThreshold || attempt%10 == 0 {
|
||||
e2elog.Logf("Unschedulable nodes= %v, maximum value for starting tests= %v", len(nodesNotReadyYet), allowedNotReadyNodes)
|
||||
framework.Logf("Unschedulable nodes= %v, maximum value for starting tests= %v", len(nodesNotReadyYet), allowedNotReadyNodes)
|
||||
for _, node := range nodesNotReadyYet {
|
||||
e2elog.Logf(" -> Node %s [[[ Ready=%t, Network(available)=%t, Taints=%v, NonblockingTaints=%v ]]]",
|
||||
framework.Logf(" -> Node %s [[[ Ready=%t, Network(available)=%t, Taints=%v, NonblockingTaints=%v ]]]",
|
||||
node.Name,
|
||||
IsConditionSetAsExpectedSilent(&node, v1.NodeReady, true),
|
||||
IsConditionSetAsExpectedSilent(&node, v1.NodeNetworkUnavailable, false),
|
||||
@ -250,7 +250,7 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
|
||||
if len(nodesNotReadyYet) > allowedNotReadyNodes {
|
||||
ready := len(allNodes.Items) - len(nodesNotReadyYet)
|
||||
remaining := len(nodesNotReadyYet) - allowedNotReadyNodes
|
||||
e2elog.Logf("==== node wait: %v out of %v nodes are ready, max notReady allowed %v. Need %v more before starting.", ready, len(allNodes.Items), allowedNotReadyNodes, remaining)
|
||||
framework.Logf("==== node wait: %v out of %v nodes are ready, max notReady allowed %v. Need %v more before starting.", ready, len(allNodes.Items), allowedNotReadyNodes, remaining)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
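A hedged recovery check built from the waiters above (the node name, expected count, and timeouts are illustrative):

```go
package example

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"

	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// waitClusterBack blocks until the expected number of nodes is Ready again,
// then confirms a specific node reports Ready=True.
func waitClusterBack(c clientset.Interface, nodeName string, expectedNodes int) error {
	if err := e2enode.WaitForReadyNodes(c, expectedNodes, 15*time.Minute); err != nil {
		return err
	}
	if !e2enode.WaitConditionToBe(c, nodeName, v1.NodeReady, true, 5*time.Minute) {
		return fmt.Errorf("node %s did not become Ready", nodeName)
	}
	return nil
}
```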
161 vendor/k8s.io/kubernetes/test/e2e/framework/nodes_util.go generated vendored
@ -16,56 +16,6 @@ limitations under the License.
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
|
||||
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
|
||||
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
|
||||
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
|
||||
)
|
||||
|
||||
const etcdImage = "3.5.5-0"
|
||||
|
||||
// EtcdUpgrade upgrades etcd on GCE.
|
||||
func EtcdUpgrade(targetStorage, targetVersion string) error {
|
||||
switch TestContext.Provider {
|
||||
case "gce":
|
||||
return etcdUpgradeGCE(targetStorage, targetVersion)
|
||||
default:
|
||||
return fmt.Errorf("EtcdUpgrade() is not implemented for provider %s", TestContext.Provider)
|
||||
}
|
||||
}
|
||||
|
||||
func etcdUpgradeGCE(targetStorage, targetVersion string) error {
|
||||
env := append(
|
||||
os.Environ(),
|
||||
"TEST_ETCD_VERSION="+targetVersion,
|
||||
"STORAGE_BACKEND="+targetStorage,
|
||||
"TEST_ETCD_IMAGE="+etcdImage)
|
||||
|
||||
_, _, err := RunCmdEnv(env, GCEUpgradeScript(), "-l", "-M")
|
||||
return err
|
||||
}
|
||||
|
||||
// LocationParamGKE returns parameter related to location for gcloud command.
|
||||
func LocationParamGKE() string {
|
||||
if TestContext.CloudConfig.MultiMaster {
|
||||
// GKE Regional Clusters are being tested.
|
||||
return fmt.Sprintf("--region=%s", TestContext.CloudConfig.Region)
|
||||
}
|
||||
return fmt.Sprintf("--zone=%s", TestContext.CloudConfig.Zone)
|
||||
}
|
||||
|
||||
// AppendContainerCommandGroupIfNeeded returns container command group parameter if necessary.
|
||||
func AppendContainerCommandGroupIfNeeded(args []string) []string {
|
||||
if TestContext.CloudConfig.Region != "" {
|
||||
@ -74,114 +24,3 @@ func AppendContainerCommandGroupIfNeeded(args []string) []string {
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
// MasterUpgradeGKE upgrades master node to the specified version on GKE.
|
||||
func MasterUpgradeGKE(namespace string, v string) error {
|
||||
Logf("Upgrading master to %q", v)
|
||||
args := []string{
|
||||
"container",
|
||||
"clusters",
|
||||
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
|
||||
LocationParamGKE(),
|
||||
"upgrade",
|
||||
TestContext.CloudConfig.Cluster,
|
||||
"--master",
|
||||
fmt.Sprintf("--cluster-version=%s", v),
|
||||
"--quiet",
|
||||
}
|
||||
_, _, err := RunCmd("gcloud", AppendContainerCommandGroupIfNeeded(args)...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
WaitForSSHTunnels(namespace)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GCEUpgradeScript returns path of script for upgrading on GCE.
|
||||
func GCEUpgradeScript() string {
|
||||
if len(TestContext.GCEUpgradeScript) == 0 {
|
||||
return path.Join(TestContext.RepoRoot, "cluster/gce/upgrade.sh")
|
||||
}
|
||||
return TestContext.GCEUpgradeScript
|
||||
}
|
||||
|
||||
// WaitForSSHTunnels waits for establishing SSH tunnel to busybox pod.
|
||||
func WaitForSSHTunnels(namespace string) {
|
||||
Logf("Waiting for SSH tunnels to establish")
|
||||
RunKubectl(namespace, "run", "ssh-tunnel-test",
|
||||
"--image=busybox",
|
||||
"--restart=Never",
|
||||
"--command", "--",
|
||||
"echo", "Hello")
|
||||
defer RunKubectl(namespace, "delete", "pod", "ssh-tunnel-test")
|
||||
|
||||
// allow up to a minute for new ssh tunnels to establish
|
||||
wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
|
||||
_, err := RunKubectl(namespace, "logs", "ssh-tunnel-test")
|
||||
return err == nil, nil
|
||||
})
|
||||
}
|
||||
|
||||
// NodeKiller is a utility to simulate node failures.
|
||||
type NodeKiller struct {
|
||||
config NodeKillerConfig
|
||||
client clientset.Interface
|
||||
provider string
|
||||
}
|
||||
|
||||
// NewNodeKiller creates new NodeKiller.
|
||||
func NewNodeKiller(config NodeKillerConfig, client clientset.Interface, provider string) *NodeKiller {
|
||||
config.NodeKillerStopCh = make(chan struct{})
|
||||
return &NodeKiller{config, client, provider}
|
||||
}
|
||||
|
||||
// Run starts NodeKiller until stopCh is closed.
|
||||
func (k *NodeKiller) Run(stopCh <-chan struct{}) {
|
||||
// wait.JitterUntil starts work immediately, so wait first.
|
||||
time.Sleep(wait.Jitter(k.config.Interval, k.config.JitterFactor))
|
||||
wait.JitterUntil(func() {
|
||||
nodes := k.pickNodes()
|
||||
k.kill(nodes)
|
||||
}, k.config.Interval, k.config.JitterFactor, true, stopCh)
|
||||
}
|
||||
|
||||
func (k *NodeKiller) pickNodes() []v1.Node {
|
||||
nodes, err := e2enode.GetReadySchedulableNodes(k.client)
|
||||
ExpectNoError(err)
|
||||
numNodes := int(k.config.FailureRatio * float64(len(nodes.Items)))
|
||||
|
||||
nodes, err = e2enode.GetBoundedReadySchedulableNodes(k.client, numNodes)
|
||||
ExpectNoError(err)
|
||||
return nodes.Items
|
||||
}
|
||||
|
||||
func (k *NodeKiller) kill(nodes []v1.Node) {
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(len(nodes))
|
||||
for _, node := range nodes {
|
||||
node := node
|
||||
go func() {
|
||||
defer ginkgo.GinkgoRecover()
|
||||
defer wg.Done()
|
||||
|
||||
Logf("Stopping docker and kubelet on %q to simulate failure", node.Name)
|
||||
err := e2essh.IssueSSHCommand("sudo systemctl stop docker kubelet", k.provider, &node)
|
||||
if err != nil {
|
||||
Logf("ERROR while stopping node %q: %v", node.Name, err)
|
||||
return
|
||||
}
|
||||
|
||||
time.Sleep(k.config.SimulatedDowntime)
|
||||
|
||||
Logf("Rebooting %q to repair the node", node.Name)
|
||||
err = e2essh.IssueSSHCommand("sudo reboot", k.provider, &node)
|
||||
if err != nil {
|
||||
Logf("ERROR while rebooting node %q: %v", node.Name, err)
|
||||
return
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
13 vendor/k8s.io/kubernetes/test/e2e/framework/pod/create.go generated vendored
@ -28,6 +28,11 @@ import (
	imageutils "k8s.io/kubernetes/test/utils/image"
)

+const (
+	VolumeMountPathTemplate = "/mnt/volume%d"
+	VolumeMountPath1        = "/mnt/volume1"
+)
+
// Config is a struct containing all arguments for creating a pod.
// SELinux testing requires to pass HostIPC and HostPID as boolean arguments.
type Config struct {
@ -222,10 +227,11 @@ func setVolumes(podSpec *v1.PodSpec, pvcs []*v1.PersistentVolumeClaim, inlineVol
	volumeIndex := 0
	for _, pvclaim := range pvcs {
		volumename := fmt.Sprintf("volume%v", volumeIndex+1)
+		volumeMountPath := fmt.Sprintf(VolumeMountPathTemplate, volumeIndex+1)
		if pvclaim.Spec.VolumeMode != nil && *pvclaim.Spec.VolumeMode == v1.PersistentVolumeBlock {
-			volumeDevices = append(volumeDevices, v1.VolumeDevice{Name: volumename, DevicePath: "/mnt/" + volumename})
+			volumeDevices = append(volumeDevices, v1.VolumeDevice{Name: volumename, DevicePath: volumeMountPath})
		} else {
-			volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
+			volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: volumeMountPath})
		}
		volumes[volumeIndex] = v1.Volume{
			Name: volumename,
@ -240,8 +246,9 @@ func setVolumes(podSpec *v1.PodSpec, pvcs []*v1.PersistentVolumeClaim, inlineVol
	}
	for _, src := range inlineVolumeSources {
		volumename := fmt.Sprintf("volume%v", volumeIndex+1)
+		volumeMountPath := fmt.Sprintf(VolumeMountPathTemplate, volumeIndex+1)
		// In-line volumes can be only filesystem, not block.
-		volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
+		volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: volumeMountPath})
		volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: *src}
		volumeIndex++
	}
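The new constants centralize the mount-path convention. A tiny illustrative helper mirroring how setVolumes derives paths above:

```go
package example

import (
	"fmt"

	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// mountPathForIndex mirrors the convention used in setVolumes: volume N is
// mounted (or exposed as a block device) at /mnt/volumeN.
func mountPathForIndex(i int) string {
	return fmt.Sprintf(e2epod.VolumeMountPathTemplate, i+1)
}

// Example: mountPathForIndex(0) == e2epod.VolumeMountPath1 == "/mnt/volume1".
```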
8 vendor/k8s.io/kubernetes/test/e2e/framework/pod/delete.go generated vendored
@ -27,7 +27,7 @@ import (
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -59,7 +59,7 @@ func DeletePodWithWait(c clientset.Interface, pod *v1.Pod) error {
|
||||
// DeletePodWithWaitByName deletes the named and namespaced pod and waits for the pod to be terminated. Resilient to the pod
|
||||
// not existing.
|
||||
func DeletePodWithWaitByName(c clientset.Interface, podName, podNamespace string) error {
|
||||
e2elog.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
|
||||
framework.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
|
||||
err := c.CoreV1().Pods(podNamespace).Delete(context.TODO(), podName, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
@ -67,7 +67,7 @@ func DeletePodWithWaitByName(c clientset.Interface, podName, podNamespace string
|
||||
}
|
||||
return fmt.Errorf("pod Delete API error: %v", err)
|
||||
}
|
||||
e2elog.Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, podName)
|
||||
framework.Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, podName)
|
||||
err = WaitForPodNotFoundInNamespace(c, podName, podNamespace, PodDeleteTimeout)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pod %q was not deleted: %v", podName, err)
|
||||
@ -92,7 +92,7 @@ func DeletePodsWithGracePeriod(c clientset.Interface, pods []v1.Pod, grace int64
|
||||
|
||||
// DeletePodWithGracePeriodByName deletes a pod by name and namespace. Resilient to the pod not existing.
|
||||
func DeletePodWithGracePeriodByName(c clientset.Interface, podName, podNamespace string, grace int64) error {
|
||||
e2elog.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
|
||||
framework.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
|
||||
err := c.CoreV1().Pods(podNamespace).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(grace))
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
|
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package framework
package pod

import (
    "bytes"
@ -28,6 +28,7 @@ import (
    "k8s.io/client-go/kubernetes/scheme"
    restclient "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/remotecommand"
    "k8s.io/kubernetes/test/e2e/framework"

    "github.com/onsi/gomega"
)
@ -49,16 +50,16 @@ type ExecOptions struct {
// ExecWithOptions executes a command in the specified container,
// returning stdout, stderr and error. `options` allowed for
// additional parameters to be passed.
func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error) {
func ExecWithOptions(f *framework.Framework, options ExecOptions) (string, string, error) {
    if !options.Quiet {
        Logf("ExecWithOptions %+v", options)
        framework.Logf("ExecWithOptions %+v", options)
    }
    config, err := LoadConfig()
    ExpectNoError(err, "failed to load restclient config")
    config, err := framework.LoadConfig()
    framework.ExpectNoError(err, "failed to load restclient config")

    const tty = false

    Logf("ExecWithOptions: Clientset creation")
    framework.Logf("ExecWithOptions: Clientset creation")
    req := f.ClientSet.CoreV1().RESTClient().Post().
        Resource("pods").
        Name(options.PodName).
@ -75,7 +76,7 @@ func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error)
    }, scheme.ParameterCodec)

    var stdout, stderr bytes.Buffer
    Logf("ExecWithOptions: execute(POST %s)", req.URL())
    framework.Logf("ExecWithOptions: execute(POST %s)", req.URL())
    err = execute("POST", req.URL(), config, options.Stdin, &stdout, &stderr, tty)
    if options.PreserveWhitespace {
        return stdout.String(), stderr.String(), err
@ -85,8 +86,8 @@ func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error)

// ExecCommandInContainerWithFullOutput executes a command in the
// specified container and return stdout, stderr and error
func (f *Framework) ExecCommandInContainerWithFullOutput(podName, containerName string, cmd ...string) (string, string, error) {
    return f.ExecWithOptions(ExecOptions{
func ExecCommandInContainerWithFullOutput(f *framework.Framework, podName, containerName string, cmd ...string) (string, string, error) {
    return ExecWithOptions(f, ExecOptions{
        Command:   cmd,
        Namespace: f.Namespace.Name,
        PodName:   podName,
@ -99,42 +100,42 @@ func (f *Framework) ExecCommandInContainerWithFullOutput(podName, containerName
}

// ExecCommandInContainer executes a command in the specified container.
func (f *Framework) ExecCommandInContainer(podName, containerName string, cmd ...string) string {
    stdout, stderr, err := f.ExecCommandInContainerWithFullOutput(podName, containerName, cmd...)
    Logf("Exec stderr: %q", stderr)
    ExpectNoError(err,
func ExecCommandInContainer(f *framework.Framework, podName, containerName string, cmd ...string) string {
    stdout, stderr, err := ExecCommandInContainerWithFullOutput(f, podName, containerName, cmd...)
    framework.Logf("Exec stderr: %q", stderr)
    framework.ExpectNoError(err,
        "failed to execute command in pod %v, container %v: %v",
        podName, containerName, err)
    return stdout
}

// ExecShellInContainer executes the specified command on the pod's container.
func (f *Framework) ExecShellInContainer(podName, containerName string, cmd string) string {
    return f.ExecCommandInContainer(podName, containerName, "/bin/sh", "-c", cmd)
func ExecShellInContainer(f *framework.Framework, podName, containerName string, cmd string) string {
    return ExecCommandInContainer(f, podName, containerName, "/bin/sh", "-c", cmd)
}

func (f *Framework) execCommandInPod(podName string, cmd ...string) string {
    pod, err := f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{})
    ExpectNoError(err, "failed to get pod %v", podName)
func execCommandInPod(f *framework.Framework, podName string, cmd ...string) string {
    pod, err := NewPodClient(f).Get(context.TODO(), podName, metav1.GetOptions{})
    framework.ExpectNoError(err, "failed to get pod %v", podName)
    gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
    return f.ExecCommandInContainer(podName, pod.Spec.Containers[0].Name, cmd...)
    return ExecCommandInContainer(f, podName, pod.Spec.Containers[0].Name, cmd...)
}

func (f *Framework) execCommandInPodWithFullOutput(podName string, cmd ...string) (string, string, error) {
    pod, err := f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{})
    ExpectNoError(err, "failed to get pod %v", podName)
func execCommandInPodWithFullOutput(f *framework.Framework, podName string, cmd ...string) (string, string, error) {
    pod, err := NewPodClient(f).Get(context.TODO(), podName, metav1.GetOptions{})
    framework.ExpectNoError(err, "failed to get pod %v", podName)
    gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
    return f.ExecCommandInContainerWithFullOutput(podName, pod.Spec.Containers[0].Name, cmd...)
    return ExecCommandInContainerWithFullOutput(f, podName, pod.Spec.Containers[0].Name, cmd...)
}

// ExecShellInPod executes the specified command on the pod.
func (f *Framework) ExecShellInPod(podName string, cmd string) string {
    return f.execCommandInPod(podName, "/bin/sh", "-c", cmd)
func ExecShellInPod(f *framework.Framework, podName string, cmd string) string {
    return execCommandInPod(f, podName, "/bin/sh", "-c", cmd)
}

// ExecShellInPodWithFullOutput executes the specified command on the Pod and returns stdout, stderr and error.
func (f *Framework) ExecShellInPodWithFullOutput(podName string, cmd string) (string, string, error) {
    return f.execCommandInPodWithFullOutput(podName, "/bin/sh", "-c", cmd)
func ExecShellInPodWithFullOutput(f *framework.Framework, podName string, cmd string) (string, string, error) {
    return execCommandInPodWithFullOutput(f, podName, "/bin/sh", "-c", cmd)
}

func execute(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error {
@ -142,7 +143,7 @@ func execute(method string, url *url.URL, config *restclient.Config, stdin io.Re
    if err != nil {
        return err
    }
    return exec.Stream(remotecommand.StreamOptions{
    return exec.StreamWithContext(context.Background(), remotecommand.StreamOptions{
        Stdin:  stdin,
        Stdout: stdout,
        Stderr: stderr,
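Below is a brief, hypothetical usage sketch (not part of the vendored diff) showing how a caller adapts to the refactor above: the exec helpers moved from methods on *Framework to package-level functions in the pod package that take the framework as their first argument. The e2epod alias and the checkMount helper are assumptions for illustration only.

import (
    "k8s.io/kubernetes/test/e2e/framework"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// checkMount runs a shell command in the pod's first container and fails the test on error.
func checkMount(f *framework.Framework, podName string) string {
    // old call style: f.ExecShellInPod(podName, "mount")
    return e2epod.ExecShellInPod(f, podName, "mount")
}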
238
vendor/k8s.io/kubernetes/test/e2e/framework/pod/output/output.go
generated
vendored
Normal file
@ -0,0 +1,238 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package output

import (
    "context"
    "fmt"
    "strings"
    "time"

    "github.com/onsi/ginkgo/v2"
    "github.com/onsi/gomega"
    gomegatypes "github.com/onsi/gomega/types"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    clientset "k8s.io/client-go/kubernetes"
    apiv1pod "k8s.io/kubernetes/pkg/api/v1/pod"
    "k8s.io/kubernetes/test/e2e/framework"
    e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// DEPRECATED constants. Use the timeouts in framework.Framework instead.
const (
    // Poll is how often to Poll pods, nodes and claims.
    Poll = 2 * time.Second
)

// LookForStringInPodExec looks for the given string in the output of a command
// executed in the first container of specified pod.
func LookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
    return LookForStringInPodExecToContainer(ns, podName, "", command, expectedString, timeout)
}

// LookForStringInPodExecToContainer looks for the given string in the output of a
// command executed in specified pod container, or first container if not specified.
func LookForStringInPodExecToContainer(ns, podName, containerName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
    return lookForString(expectedString, timeout, func() string {
        args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns)}
        if len(containerName) > 0 {
            args = append(args, fmt.Sprintf("--container=%s", containerName))
        }
        args = append(args, "--")
        args = append(args, command...)
        return e2ekubectl.RunKubectlOrDie(ns, args...)
    })
}

// lookForString looks for the given string in the output of fn, repeatedly calling fn until
// the timeout is reached or the string is found. Returns last log and possibly
// error if the string was not found.
func lookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) {
    for t := time.Now(); time.Since(t) < timeout; time.Sleep(Poll) {
        result = fn()
        if strings.Contains(result, expectedString) {
            return
        }
    }
    err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedString, result)
    return
}

// RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell.
func RunHostCmd(ns, name, cmd string) (string, error) {
    return e2ekubectl.RunKubectl(ns, "exec", name, "--", "/bin/sh", "-x", "-c", cmd)
}

// RunHostCmdWithFullOutput runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell. It will also return the command's stderr.
func RunHostCmdWithFullOutput(ns, name, cmd string) (string, string, error) {
    return e2ekubectl.RunKubectlWithFullOutput(ns, "exec", name, "--", "/bin/sh", "-x", "-c", cmd)
}

// RunHostCmdOrDie calls RunHostCmd and dies on error.
func RunHostCmdOrDie(ns, name, cmd string) string {
    stdout, err := RunHostCmd(ns, name, cmd)
    framework.Logf("stdout: %v", stdout)
    framework.ExpectNoError(err)
    return stdout
}

// RunHostCmdWithRetries calls RunHostCmd and retries all errors
// until it succeeds or the specified timeout expires.
// This can be used with idempotent commands to deflake transient Node issues.
func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration) (string, error) {
    start := time.Now()
    for {
        out, err := RunHostCmd(ns, name, cmd)
        if err == nil {
            return out, nil
        }
        if elapsed := time.Since(start); elapsed > timeout {
            return out, fmt.Errorf("RunHostCmd still failed after %v: %v", elapsed, err)
        }
        framework.Logf("Waiting %v to retry failed RunHostCmd: %v", interval, err)
        time.Sleep(interval)
    }
}

// LookForStringInLog looks for the given string in the log of a specific pod container
func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
    return lookForString(expectedString, timeout, func() string {
        return e2ekubectl.RunKubectlOrDie(ns, "logs", podName, container)
    })
}

// CreateEmptyFileOnPod creates empty file at given path on the pod.
func CreateEmptyFileOnPod(namespace string, podName string, filePath string) error {
    _, err := e2ekubectl.RunKubectl(namespace, "exec", podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath))
    return err
}

// DumpDebugInfo dumps debug info of tests.
func DumpDebugInfo(c clientset.Interface, ns string) {
    sl, _ := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
    for _, s := range sl.Items {
        desc, _ := e2ekubectl.RunKubectl(ns, "describe", "po", s.Name)
        framework.Logf("\nOutput of kubectl describe %v:\n%v", s.Name, desc)

        l, _ := e2ekubectl.RunKubectl(ns, "logs", s.Name, "--tail=100")
        framework.Logf("\nLast 100 log lines of %v:\n%v", s.Name, l)
    }
}

// MatchContainerOutput creates a pod and waits for all its containers to exit with success.
// It then tests that the matcher with each expectedOutput matches the output of the specified container.
func MatchContainerOutput(
    f *framework.Framework,
    pod *v1.Pod,
    containerName string,
    expectedOutput []string,
    matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) error {
    ns := pod.ObjectMeta.Namespace
    if ns == "" {
        ns = f.Namespace.Name
    }
    podClient := e2epod.PodClientNS(f, ns)

    createdPod := podClient.Create(pod)
    defer func() {
        ginkgo.By("delete the pod")
        podClient.DeleteSync(createdPod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
    }()

    // Wait for client pod to complete.
    podErr := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, createdPod.Name, ns, f.Timeouts.PodStart)

    // Grab its logs. Get host first.
    podStatus, err := podClient.Get(context.TODO(), createdPod.Name, metav1.GetOptions{})
    if err != nil {
        return fmt.Errorf("failed to get pod status: %v", err)
    }

    if podErr != nil {
        // Pod failed. Dump all logs from all containers to see what's wrong
        _ = apiv1pod.VisitContainers(&podStatus.Spec, apiv1pod.AllFeatureEnabledContainers(), func(c *v1.Container, containerType apiv1pod.ContainerType) bool {
            logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podStatus.Name, c.Name)
            if err != nil {
                framework.Logf("Failed to get logs from node %q pod %q container %q: %v",
                    podStatus.Spec.NodeName, podStatus.Name, c.Name, err)
            } else {
                framework.Logf("Output of node %q pod %q container %q: %s", podStatus.Spec.NodeName, podStatus.Name, c.Name, logs)
            }
            return true
        })
        return fmt.Errorf("expected pod %q success: %v", createdPod.Name, podErr)
    }

    framework.Logf("Trying to get logs from node %s pod %s container %s: %v",
        podStatus.Spec.NodeName, podStatus.Name, containerName, err)

    // Sometimes the actual containers take a second to get started, try to get logs for 60s
    logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podStatus.Name, containerName)
    if err != nil {
        framework.Logf("Failed to get logs from node %q pod %q container %q. %v",
            podStatus.Spec.NodeName, podStatus.Name, containerName, err)
        return fmt.Errorf("failed to get logs from %s for %s: %v", podStatus.Name, containerName, err)
    }

    for _, expected := range expectedOutput {
        m := matcher(expected)
        matches, err := m.Match(logs)
        if err != nil {
            return fmt.Errorf("expected %q in container output: %v", expected, err)
        } else if !matches {
            return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
        }
    }

    return nil
}

// TestContainerOutput runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a substring matcher.
func TestContainerOutput(f *framework.Framework, scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
    TestContainerOutputMatcher(f, scenarioName, pod, containerIndex, expectedOutput, gomega.ContainSubstring)
}

// TestContainerOutputRegexp runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a regexp matcher.
func TestContainerOutputRegexp(f *framework.Framework, scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
    TestContainerOutputMatcher(f, scenarioName, pod, containerIndex, expectedOutput, gomega.MatchRegexp)
}

// TestContainerOutputMatcher runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using the given matcher.
func TestContainerOutputMatcher(f *framework.Framework,
    scenarioName string,
    pod *v1.Pod,
    containerIndex int,
    expectedOutput []string,
    matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) {
    ginkgo.By(fmt.Sprintf("Creating a pod to test %v", scenarioName))
    if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) {
        framework.Failf("Invalid container index: %d", containerIndex)
    }
    framework.ExpectNoError(MatchContainerOutput(f, pod, pod.Spec.Containers[containerIndex].Name, expectedOutput, matcher))
}
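A short, hypothetical sketch of how a test might use the new output package added above; the e2eoutput alias and the pod, container, and namespace names are assumptions, only LookForStringInLog itself is taken from the diff.

import (
    "time"

    e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)

// waitForServerReady waits up to two minutes for a marker line to show up
// in the "server" container's log of pod "demo-pod".
func waitForServerReady(ns string) error {
    _, err := e2eoutput.LookForStringInLog(ns, "demo-pod", "server", "ready to serve", 2*time.Minute)
    return err
}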
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package framework
package pod

import (
    "context"
@ -39,9 +39,9 @@ import (
    "github.com/onsi/ginkgo/v2"
    "github.com/onsi/gomega"

    // TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
    "k8s.io/kubernetes/pkg/kubelet/util/format"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    "k8s.io/kubernetes/pkg/util/slice"
    "k8s.io/kubernetes/test/e2e/framework"
)

const (
@ -66,10 +66,10 @@ const (
// node e2e test.
var ImagePrePullList sets.String

// PodClient is a convenience method for getting a pod client interface in the framework's namespace,
// NewPodClient is a convenience method for getting a pod client interface in the framework's namespace,
// possibly applying test-suite specific transformations to the pod spec, e.g. for
// node e2e pod scheduling.
func (f *Framework) PodClient() *PodClient {
func NewPodClient(f *framework.Framework) *PodClient {
    return &PodClient{
        f:            f,
        PodInterface: f.ClientSet.CoreV1().Pods(f.Namespace.Name),
@ -79,7 +79,7 @@ func (f *Framework) PodClient() *PodClient {
// PodClientNS is a convenience method for getting a pod client interface in an alternative namespace,
// possibly applying test-suite specific transformations to the pod spec, e.g. for
// node e2e pod scheduling.
func (f *Framework) PodClientNS(namespace string) *PodClient {
func PodClientNS(f *framework.Framework, namespace string) *PodClient {
    return &PodClient{
        f:            f,
        PodInterface: f.ClientSet.CoreV1().Pods(namespace),
@ -88,7 +88,7 @@ func (f *Framework) PodClientNS(namespace string) *PodClient {

// PodClient is a struct for pod client.
type PodClient struct {
    f *Framework
    f *framework.Framework
    v1core.PodInterface
}
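A hypothetical before/after sketch for callers of the relocated pod client; NewPodClient and CreateSync come from this diff, while the surrounding test code and log message are assumptions.

// old: podClient := f.PodClient()
// new: the pod sub package constructs the client from the framework instance.
podClient := e2epod.NewPodClient(f)

// CreateSync creates the pod and waits until it is running and ready.
created := podClient.CreateSync(pod)
framework.Logf("pod %s is running on %s", created.Name, created.Spec.NodeName)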
@ -96,7 +96,7 @@ type PodClient struct {
|
||||
func (c *PodClient) Create(pod *v1.Pod) *v1.Pod {
|
||||
c.mungeSpec(pod)
|
||||
p, err := c.PodInterface.Create(context.TODO(), pod, metav1.CreateOptions{})
|
||||
ExpectNoError(err, "Error creating Pod")
|
||||
framework.ExpectNoError(err, "Error creating Pod")
|
||||
return p
|
||||
}
|
||||
|
||||
@ -104,10 +104,10 @@ func (c *PodClient) Create(pod *v1.Pod) *v1.Pod {
|
||||
func (c *PodClient) CreateSync(pod *v1.Pod) *v1.Pod {
|
||||
namespace := c.f.Namespace.Name
|
||||
p := c.Create(pod)
|
||||
ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(c.f.ClientSet, p.Name, namespace, PodStartTimeout))
|
||||
framework.ExpectNoError(WaitTimeoutForPodReadyInNamespace(c.f.ClientSet, p.Name, namespace, framework.PodStartTimeout))
|
||||
// Get the newest pod after it becomes running and ready, some status may change after pod created, such as pod ip.
|
||||
p, err := c.Get(context.TODO(), p.Name, metav1.GetOptions{})
|
||||
ExpectNoError(err)
|
||||
framework.ExpectNoError(err)
|
||||
return p
|
||||
}
|
||||
|
||||
@ -131,7 +131,7 @@ func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod {
|
||||
// there is any other apierrors. name is the pod name, updateFn is the function updating the
|
||||
// pod object.
|
||||
func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
|
||||
ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
|
||||
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
|
||||
pod, err := c.PodInterface.Get(context.TODO(), name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to get pod %q: %v", name, err)
|
||||
@ -139,11 +139,11 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
|
||||
updateFn(pod)
|
||||
_, err = c.PodInterface.Update(context.TODO(), pod, metav1.UpdateOptions{})
|
||||
if err == nil {
|
||||
Logf("Successfully updated pod %q", name)
|
||||
framework.Logf("Successfully updated pod %q", name)
|
||||
return true, nil
|
||||
}
|
||||
if apierrors.IsConflict(err) {
|
||||
Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err)
|
||||
framework.Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err)
|
||||
return false, nil
|
||||
}
|
||||
return false, fmt.Errorf("failed to update pod %q: %v", name, err)
|
||||
@ -155,22 +155,22 @@ func (c *PodClient) AddEphemeralContainerSync(pod *v1.Pod, ec *v1.EphemeralConta
|
||||
namespace := c.f.Namespace.Name
|
||||
|
||||
podJS, err := json.Marshal(pod)
|
||||
ExpectNoError(err, "error creating JSON for pod %q", format.Pod(pod))
|
||||
framework.ExpectNoError(err, "error creating JSON for pod %q", format.Pod(pod))
|
||||
|
||||
ecPod := pod.DeepCopy()
|
||||
ecPod.Spec.EphemeralContainers = append(ecPod.Spec.EphemeralContainers, *ec)
|
||||
ecJS, err := json.Marshal(ecPod)
|
||||
ExpectNoError(err, "error creating JSON for pod with ephemeral container %q", format.Pod(pod))
|
||||
framework.ExpectNoError(err, "error creating JSON for pod with ephemeral container %q", format.Pod(pod))
|
||||
|
||||
patch, err := strategicpatch.CreateTwoWayMergePatch(podJS, ecJS, pod)
|
||||
ExpectNoError(err, "error creating patch to add ephemeral container %q", format.Pod(pod))
|
||||
framework.ExpectNoError(err, "error creating patch to add ephemeral container %q", format.Pod(pod))
|
||||
|
||||
// Clients may optimistically attempt to add an ephemeral container to determine whether the EphemeralContainers feature is enabled.
|
||||
if _, err := c.Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "ephemeralcontainers"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ExpectNoError(e2epod.WaitForContainerRunning(c.f.ClientSet, namespace, pod.Name, ec.Name, timeout))
|
||||
framework.ExpectNoError(WaitForContainerRunning(c.f.ClientSet, namespace, pod.Name, ec.Name, timeout))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -180,27 +180,27 @@ func (c *PodClient) DeleteSync(name string, options metav1.DeleteOptions, timeou
|
||||
namespace := c.f.Namespace.Name
|
||||
err := c.Delete(context.TODO(), name, options)
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
Failf("Failed to delete pod %q: %v", name, err)
|
||||
framework.Failf("Failed to delete pod %q: %v", name, err)
|
||||
}
|
||||
gomega.Expect(e2epod.WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
|
||||
gomega.Expect(WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
|
||||
2*time.Second, timeout)).To(gomega.Succeed(), "wait for pod %q to disappear", name)
|
||||
}
|
||||
|
||||
// mungeSpec apply test-suite specific transformations to the pod spec.
|
||||
func (c *PodClient) mungeSpec(pod *v1.Pod) {
|
||||
if !TestContext.NodeE2E {
|
||||
if !framework.TestContext.NodeE2E {
|
||||
return
|
||||
}
|
||||
|
||||
gomega.Expect(pod.Spec.NodeName).To(gomega.Or(gomega.BeZero(), gomega.Equal(TestContext.NodeName)), "Test misconfigured")
|
||||
pod.Spec.NodeName = TestContext.NodeName
|
||||
gomega.Expect(pod.Spec.NodeName).To(gomega.Or(gomega.BeZero(), gomega.Equal(framework.TestContext.NodeName)), "Test misconfigured")
|
||||
pod.Spec.NodeName = framework.TestContext.NodeName
|
||||
// Node e2e does not support the default DNSClusterFirst policy. Set
|
||||
// the policy to DNSDefault, which is configured per node.
|
||||
pod.Spec.DNSPolicy = v1.DNSDefault
|
||||
|
||||
// PrepullImages only works for node e2e now. For cluster e2e, image prepull is not enforced,
|
||||
// we should not munge ImagePullPolicy for cluster e2e pods.
|
||||
if !TestContext.PrepullImages {
|
||||
if !framework.TestContext.PrepullImages {
|
||||
return
|
||||
}
|
||||
// If prepull is enabled, munge the container spec to make sure the images are not pulled
|
||||
@ -226,7 +226,7 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
|
||||
// TODO(random-liu): Move pod wait function into this file
|
||||
func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
|
||||
f := c.f
|
||||
gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
|
||||
gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
|
||||
func(pod *v1.Pod) (bool, error) {
|
||||
switch pod.Status.Phase {
|
||||
case v1.PodFailed:
|
||||
@ -243,7 +243,7 @@ func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
|
||||
// WaitForFinish waits for pod to finish running, regardless of success or failure.
|
||||
func (c *PodClient) WaitForFinish(name string, timeout time.Duration) {
|
||||
f := c.f
|
||||
gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
|
||||
gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
|
||||
func(pod *v1.Pod) (bool, error) {
|
||||
switch pod.Status.Phase {
|
||||
case v1.PodFailed:
|
||||
@ -260,7 +260,7 @@ func (c *PodClient) WaitForFinish(name string, timeout time.Duration) {
|
||||
// WaitForErrorEventOrSuccess waits for pod to succeed or an error event for that pod.
|
||||
func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) {
|
||||
var ev *v1.Event
|
||||
err := wait.Poll(Poll, PodStartTimeout, func() (bool, error) {
|
||||
err := wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
|
||||
evnts, err := c.f.ClientSet.CoreV1().Events(pod.Namespace).Search(scheme.Scheme, pod)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error in listing events: %s", err)
|
||||
@ -284,7 +284,7 @@ func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) {
|
||||
// MatchContainerOutput gets output of a container and match expected regexp in the output.
|
||||
func (c *PodClient) MatchContainerOutput(name string, containerName string, expectedRegexp string) error {
|
||||
f := c.f
|
||||
output, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, name, containerName)
|
||||
output, err := GetPodLogs(f.ClientSet, f.Namespace.Name, name, containerName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get output for container %q of pod %q", containerName, name)
|
||||
}
|
||||
@ -301,6 +301,14 @@ func (c *PodClient) MatchContainerOutput(name string, containerName string, expe
|
||||
// PodIsReady returns true if the specified pod is ready. Otherwise false.
|
||||
func (c *PodClient) PodIsReady(name string) bool {
|
||||
pod, err := c.Get(context.TODO(), name, metav1.GetOptions{})
|
||||
ExpectNoError(err)
|
||||
framework.ExpectNoError(err)
|
||||
return podutils.IsPodReady(pod)
|
||||
}
|
||||
|
||||
// RemovePodFinalizer removes the pod's finalizer
|
||||
func (c *PodClient) RemoveFinalizer(podName string, finalizerName string) {
|
||||
framework.Logf("Removing pod's %q finalizer: %q", podName, finalizerName)
|
||||
c.Update(podName, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.Finalizers = slice.RemoveString(pod.ObjectMeta.Finalizers, finalizerName, nil)
|
||||
})
|
||||
}
|
77
vendor/k8s.io/kubernetes/test/e2e/framework/pod/resource.go
generated
vendored
@ -18,6 +18,7 @@ package pod

import (
    "context"
    "errors"
    "fmt"
    "os"
    "path/filepath"
@ -34,14 +35,18 @@ import (
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/klog/v2"

    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
    "k8s.io/kubernetes/test/e2e/framework"
    testutils "k8s.io/kubernetes/test/utils"
    imageutils "k8s.io/kubernetes/test/utils/image"
)

// errPodCompleted is returned by PodRunning or PodContainerRunning to indicate that
// the pod has already reached completed state.
var errPodCompleted = fmt.Errorf("pod ran to completion")
var errPodCompleted = FinalError(errors.New("pod ran to completion successfully"))

// errPodFailed is returned by PodRunning or PodContainerRunning to indicate that
// the pod has already reached a permanent failure state.
var errPodFailed = FinalError(errors.New("pod failed permanently"))
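For context, a sketch of how the two final errors are meant to be used by the wait helpers inside this package; it mirrors the WaitTimeoutForPodRunningInNamespace change later in this diff rather than introducing new behaviour.

// Inside a poll condition: a terminal phase returns a FinalError-wrapped value,
// which stops the poll loop immediately instead of waiting for the timeout.
switch pod.Status.Phase {
case v1.PodRunning:
    return true, nil
case v1.PodFailed:
    return false, errPodFailed // permanent failure
case v1.PodSucceeded:
    return false, errPodCompleted // finished, but not running
}
return false, nil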
// LabelLogOnPodFailure can be used to mark which Pods will have their logs logged in the case of
|
||||
// a test failure. By default, if there are no Pods with this label, only the first 5 Pods will
|
||||
@ -59,7 +64,7 @@ func expectNoError(err error, explain ...interface{}) {
|
||||
// (for example, for call chain f -> g -> expectNoErrorWithOffset(1, ...) error would be logged for "f").
|
||||
func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
|
||||
if err != nil {
|
||||
e2elog.Logf("Unexpected error occurred: %v", err)
|
||||
framework.Logf("Unexpected error occurred: %v", err)
|
||||
}
|
||||
gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
|
||||
}
|
||||
@ -117,10 +122,10 @@ func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) {
|
||||
if ctx.Err() != nil {
|
||||
// We may encounter errors here because of a race between the pod readiness and apiserver
|
||||
// proxy. So, we log the error and retry if this occurs.
|
||||
e2elog.Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
|
||||
framework.Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
|
||||
return false, nil
|
||||
}
|
||||
e2elog.Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
|
||||
framework.Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
|
||||
continue
|
||||
}
|
||||
// The response checker expects the pod's name unless !respondName, in
|
||||
@ -131,20 +136,20 @@ func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) {
|
||||
what = "expected"
|
||||
want := pod.Name
|
||||
if got != want {
|
||||
e2elog.Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
|
||||
framework.Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
|
||||
r.controllerName, i+1, pod.Name, want, got)
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
what = "non-empty"
|
||||
if len(got) == 0 {
|
||||
e2elog.Logf("Controller %s: Replica %d [%s] expected non-empty response",
|
||||
framework.Logf("Controller %s: Replica %d [%s] expected non-empty response",
|
||||
r.controllerName, i+1, pod.Name)
|
||||
continue
|
||||
}
|
||||
}
|
||||
successes++
|
||||
e2elog.Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
|
||||
framework.Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
|
||||
r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items))
|
||||
}
|
||||
if successes < len(r.pods.Items) {
|
||||
@ -178,7 +183,7 @@ func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32,
|
||||
}
|
||||
created = append(created, pod)
|
||||
}
|
||||
e2elog.Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)
|
||||
framework.Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)
|
||||
|
||||
if int32(len(created)) == replicas {
|
||||
pods.Items = created
|
||||
@ -262,17 +267,17 @@ func LogPodStates(pods []v1.Pod) {
|
||||
maxGraceW++
|
||||
|
||||
// Log pod info. * does space padding, - makes them left-aligned.
|
||||
e2elog.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
|
||||
framework.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
|
||||
maxPodW, "POD", maxNodeW, "NODE", maxPhaseW, "PHASE", maxGraceW, "GRACE", "CONDITIONS")
|
||||
for _, pod := range pods {
|
||||
grace := ""
|
||||
if pod.DeletionGracePeriodSeconds != nil {
|
||||
grace = fmt.Sprintf("%ds", *pod.DeletionGracePeriodSeconds)
|
||||
}
|
||||
e2elog.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
|
||||
framework.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
|
||||
maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.NodeName, maxPhaseW, pod.Status.Phase, maxGraceW, grace, pod.Status.Conditions)
|
||||
}
|
||||
e2elog.Logf("") // Final empty line helps for readability.
|
||||
framework.Logf("") // Final empty line helps for readability.
|
||||
}
|
||||
|
||||
// logPodTerminationMessages logs termination messages for failing pods. It's a short snippet (much smaller than full logs), but it often shows
|
||||
@ -281,12 +286,12 @@ func logPodTerminationMessages(pods []v1.Pod) {
|
||||
for _, pod := range pods {
|
||||
for _, status := range pod.Status.InitContainerStatuses {
|
||||
if status.LastTerminationState.Terminated != nil && len(status.LastTerminationState.Terminated.Message) > 0 {
|
||||
e2elog.Logf("%s[%s].initContainer[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message)
|
||||
framework.Logf("%s[%s].initContainer[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message)
|
||||
}
|
||||
}
|
||||
for _, status := range pod.Status.ContainerStatuses {
|
||||
if status.LastTerminationState.Terminated != nil && len(status.LastTerminationState.Terminated.Message) > 0 {
|
||||
e2elog.Logf("%s[%s].container[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message)
|
||||
framework.Logf("%s[%s].container[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -325,21 +330,21 @@ func logPodLogs(c clientset.Interface, namespace string, pods []v1.Pod, reportDi
|
||||
for _, container := range pod.Spec.Containers {
|
||||
logs, err := getPodLogsInternal(c, namespace, pod.Name, container.Name, false, nil, &tailLen)
|
||||
if err != nil {
|
||||
e2elog.Logf("Unable to fetch %s/%s/%s logs: %v", pod.Namespace, pod.Name, container.Name, err)
|
||||
framework.Logf("Unable to fetch %s/%s/%s logs: %v", pod.Namespace, pod.Name, container.Name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
logDir := filepath.Join(reportDir, namespace, pod.Name, container.Name)
|
||||
err = os.MkdirAll(logDir, 0755)
|
||||
if err != nil {
|
||||
e2elog.Logf("Unable to create path '%s'. Err: %v", logDir, err)
|
||||
framework.Logf("Unable to create path '%s'. Err: %v", logDir, err)
|
||||
continue
|
||||
}
|
||||
|
||||
logPath := filepath.Join(logDir, "logs.txt")
|
||||
err = os.WriteFile(logPath, []byte(logs), 0644)
|
||||
if err != nil {
|
||||
e2elog.Logf("Could not write the container logs in: %s. Err: %v", logPath, err)
|
||||
framework.Logf("Could not write the container logs in: %s. Err: %v", logPath, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -349,7 +354,7 @@ func logPodLogs(c clientset.Interface, namespace string, pods []v1.Pod, reportDi
|
||||
func DumpAllPodInfoForNamespace(c clientset.Interface, namespace, reportDir string) {
|
||||
pods, err := c.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
e2elog.Logf("unable to fetch pod debug info: %v", err)
|
||||
framework.Logf("unable to fetch pod debug info: %v", err)
|
||||
}
|
||||
LogPodStates(pods.Items)
|
||||
logPodTerminationMessages(pods.Items)
|
||||
@ -403,6 +408,23 @@ func NewAgnhostPod(ns, podName string, volumes []v1.Volume, mounts []v1.VolumeMo
|
||||
return pod
|
||||
}
|
||||
|
||||
func NewAgnhostPodFromContainers(ns, podName string, volumes []v1.Volume, containers ...v1.Container) *v1.Pod {
|
||||
immediate := int64(0)
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Namespace: ns,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: containers[:],
|
||||
Volumes: volumes,
|
||||
SecurityContext: &v1.PodSecurityContext{},
|
||||
TerminationGracePeriodSeconds: &immediate,
|
||||
},
|
||||
}
|
||||
return pod
|
||||
}
|
||||
|
||||
// NewAgnhostContainer returns the container Spec of an agnhost container.
|
||||
func NewAgnhostContainer(containerName string, mounts []v1.VolumeMount, ports []v1.ContainerPort, args ...string) v1.Container {
|
||||
if len(args) == 0 {
|
||||
@ -438,7 +460,7 @@ func newExecPodSpec(ns, generateName string) *v1.Pod {
|
||||
// CreateExecPodOrFail creates a agnhost pause pod used as a vessel for kubectl exec commands.
|
||||
// Pod name is uniquely generated.
|
||||
func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tweak func(*v1.Pod)) *v1.Pod {
|
||||
e2elog.Logf("Creating new exec pod")
|
||||
framework.Logf("Creating new exec pod")
|
||||
pod := newExecPodSpec(ns, generateName)
|
||||
if tweak != nil {
|
||||
tweak(pod)
|
||||
@ -490,7 +512,7 @@ func CheckPodsRunningReadyOrSucceeded(c clientset.Interface, ns string, podNames
|
||||
// in namespace ns are in the condition, using c and waiting at most timeout.
|
||||
func checkPodsCondition(c clientset.Interface, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool {
|
||||
np := len(podNames)
|
||||
e2elog.Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
|
||||
framework.Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
|
||||
type waitPodResult struct {
|
||||
success bool
|
||||
podName string
|
||||
@ -508,11 +530,11 @@ func checkPodsCondition(c clientset.Interface, ns string, podNames []string, tim
|
||||
for range podNames {
|
||||
res := <-result
|
||||
if !res.success {
|
||||
e2elog.Logf("Pod %[1]s failed to be %[2]s.", res.podName, desc)
|
||||
framework.Logf("Pod %[1]s failed to be %[2]s.", res.podName, desc)
|
||||
success = false
|
||||
}
|
||||
}
|
||||
e2elog.Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames)
|
||||
framework.Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames)
|
||||
return success
|
||||
}
|
||||
|
||||
@ -595,12 +617,21 @@ func GetPodSecretUpdateTimeout(c clientset.Interface) time.Duration {
|
||||
// secret(configmap) that's based on cluster size + additional time as a fudge factor.
|
||||
secretTTL, err := getNodeTTLAnnotationValue(c)
|
||||
if err != nil {
|
||||
e2elog.Logf("Couldn't get node TTL annotation (using default value of 0): %v", err)
|
||||
framework.Logf("Couldn't get node TTL annotation (using default value of 0): %v", err)
|
||||
}
|
||||
podLogTimeout := 240*time.Second + secretTTL
|
||||
return podLogTimeout
|
||||
}
|
||||
|
||||
// VerifyPodHasConditionWithType verifies the pod has the expected condition by type
|
||||
func VerifyPodHasConditionWithType(f *framework.Framework, pod *v1.Pod, cType v1.PodConditionType) {
|
||||
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, "Failed to get the recent pod object for name: %q", pod.Name)
|
||||
if condition := FindPodConditionByType(&pod.Status, cType); condition == nil {
|
||||
framework.Failf("pod %q should have the condition: %q, pod status: %v", pod.Name, cType, pod.Status)
|
||||
}
|
||||
}
|
||||
|
||||
func getNodeTTLAnnotationValue(c clientset.Interface) (time.Duration, error) {
|
||||
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil || len(nodes.Items) == 0 {
|
||||
|
10
vendor/k8s.io/kubernetes/test/e2e/framework/pod/utils.go
generated
vendored
@ -231,3 +231,13 @@ func mixinRestrictedContainerSecurityContext(container *v1.Container) {
        }
    }
}

// FindPodConditionByType loops through all pod conditions in pod status and returns the specified condition.
func FindPodConditionByType(podStatus *v1.PodStatus, conditionType v1.PodConditionType) *v1.PodCondition {
    for _, cond := range podStatus.Conditions {
        if cond.Type == conditionType {
            return &cond
        }
    }
    return nil
}
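A small, hypothetical example of the new helper above; the surrounding pod variable and the log message are assumptions.

// Check whether the pod has been scheduled by inspecting its conditions.
if cond := e2epod.FindPodConditionByType(&pod.Status, v1.PodScheduled); cond != nil && cond.Status == v1.ConditionTrue {
    framework.Logf("pod %q was scheduled to node %q", pod.Name, pod.Spec.NodeName)
}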
189
vendor/k8s.io/kubernetes/test/e2e/framework/pod/wait.go
generated
vendored
@ -21,6 +21,7 @@ import (
    "context"
    "errors"
    "fmt"
    "reflect"
    "text/tabwriter"
    "time"

@ -33,7 +34,7 @@ import (
    "k8s.io/apimachinery/pkg/util/wait"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubectl/pkg/util/podutils"
    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
    "k8s.io/kubernetes/test/e2e/framework"
    testutils "k8s.io/kubernetes/test/utils"
)

@ -81,6 +82,39 @@ func TimeoutError(msg string, observedObjects ...interface{}) *timeoutError {
    }
}

// FinalError constructs an error that indicates to a poll function that
// polling can be stopped immediately because some permanent error has been
// encountered that is not going to go away.
//
// TODO (@pohly): move this into framework once the refactoring from
// https://github.com/kubernetes/kubernetes/pull/112043 allows it. Right now it
// leads to circular dependencies.
func FinalError(err error) error {
    return &FinalErr{Err: err}
}

type FinalErr struct {
    Err error
}

func (err *FinalErr) Error() string {
    if err.Err != nil {
        return fmt.Sprintf("final error: %s", err.Err.Error())
    }
    return "final error, exact problem unknown"
}

func (err *FinalErr) Unwrap() error {
    return err.Err
}

// IsFinal checks whether the error was marked as final by wrapping some error
// with FinalError.
func IsFinal(err error) bool {
    var finalErr *FinalErr
    return errors.As(err, &finalErr)
}

// maybeTimeoutError returns a TimeoutError if err is a timeout. Otherwise, wrap err.
// taskFormat and taskArgs should be the task being performed when the error occurred,
// e.g. "waiting for pod to be running".
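A hedged sketch of how FinalError interacts with WaitForPodCondition further below: a condition callback can wrap a permanent problem so that polling aborts early instead of running out the timeout. The condition body and the wantImage variable are assumptions.

err := WaitForPodCondition(c, ns, podName, "runs the expected image", timeout,
    func(pod *v1.Pod) (bool, error) {
        if pod.Status.Phase == v1.PodFailed {
            // Permanent: wrap with FinalError so the poll loop stops immediately.
            return false, FinalError(fmt.Errorf("pod %q failed", podName))
        }
        return pod.Spec.Containers[0].Image == wantImage, nil
    })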
@ -153,7 +187,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
|
||||
|
||||
ignoreSelector := labels.SelectorFromSet(map[string]string{})
|
||||
start := time.Now()
|
||||
e2elog.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
|
||||
framework.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
|
||||
timeout, minPods, ns)
|
||||
var ignoreNotReady bool
|
||||
badPods := []v1.Pod{}
|
||||
@ -208,25 +242,25 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
|
||||
case res && err == nil:
|
||||
nOk++
|
||||
case pod.Status.Phase == v1.PodSucceeded:
|
||||
e2elog.Logf("The status of Pod %s is Succeeded, skipping waiting", pod.ObjectMeta.Name)
|
||||
framework.Logf("The status of Pod %s is Succeeded, skipping waiting", pod.ObjectMeta.Name)
|
||||
// it doesn't make sense to wait for this pod
|
||||
continue
|
||||
case pod.Status.Phase != v1.PodFailed:
|
||||
e2elog.Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
|
||||
framework.Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
|
||||
notReady++
|
||||
badPods = append(badPods, pod)
|
||||
default:
|
||||
if metav1.GetControllerOf(&pod) == nil {
|
||||
e2elog.Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name)
|
||||
framework.Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name)
|
||||
badPods = append(badPods, pod)
|
||||
}
|
||||
//ignore failed pods that are controlled by some controller
|
||||
// ignore failed pods that are controlled by some controller
|
||||
}
|
||||
}
|
||||
|
||||
e2elog.Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
|
||||
framework.Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
|
||||
nOk, len(podList.Items), ns, int(time.Since(start).Seconds()))
|
||||
e2elog.Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)
|
||||
framework.Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)
|
||||
|
||||
if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 {
|
||||
return true, nil
|
||||
@ -238,14 +272,16 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
|
||||
if !ignoreNotReady {
|
||||
return errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout, lastAPIError)
|
||||
}
|
||||
e2elog.Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods)
|
||||
framework.Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WaitForPodCondition waits a pods to be matched to the given condition.
|
||||
// If the condition callback returns an error that matches FinalErr (checked with IsFinal),
|
||||
// then polling aborts early.
|
||||
func WaitForPodCondition(c clientset.Interface, ns, podName, conditionDesc string, timeout time.Duration, condition podCondition) error {
|
||||
e2elog.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, conditionDesc)
|
||||
framework.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, conditionDesc)
|
||||
var (
|
||||
lastPodError error
|
||||
lastPod *v1.Pod
|
||||
@ -260,16 +296,18 @@ func WaitForPodCondition(c clientset.Interface, ns, podName, conditionDesc strin
|
||||
lastPod = pod // Don't overwrite if an error occurs after successfully retrieving.
|
||||
|
||||
// log now so that current pod info is reported before calling `condition()`
|
||||
e2elog.Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v",
|
||||
framework.Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v",
|
||||
podName, pod.Status.Phase, pod.Status.Reason, podutils.IsPodReady(pod), time.Since(start))
|
||||
if done, err := condition(pod); done {
|
||||
if err == nil {
|
||||
e2elog.Logf("Pod %q satisfied condition %q", podName, conditionDesc)
|
||||
framework.Logf("Pod %q satisfied condition %q", podName, conditionDesc)
|
||||
}
|
||||
return true, err
|
||||
} else if err != nil {
|
||||
// TODO(#109732): stop polling and return the error in this case.
|
||||
e2elog.Logf("Error evaluating pod condition %s: %v", conditionDesc, err)
|
||||
framework.Logf("Error evaluating pod condition %s: %v", conditionDesc, err)
|
||||
if IsFinal(err) {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
@ -289,10 +327,10 @@ func WaitForPodCondition(c clientset.Interface, ns, podName, conditionDesc strin
|
||||
return maybeTimeoutError(err, "waiting for pod %s to be %s", podIdentifier(ns, podName), conditionDesc)
|
||||
}
|
||||
|
||||
// WaitForPodsCondition waits for the listed pods to match the given condition.
|
||||
// WaitForAllPodsCondition waits for the listed pods to match the given condition.
|
||||
// To succeed, at least minPods must be listed, and all listed pods must match the condition.
|
||||
func WaitForAllPodsCondition(c clientset.Interface, ns string, opts metav1.ListOptions, minPods int, conditionDesc string, timeout time.Duration, condition podCondition) (*v1.PodList, error) {
|
||||
e2elog.Logf("Waiting up to %v for at least %d pods in namespace %s to be %s", timeout, minPods, ns, conditionDesc)
|
||||
framework.Logf("Waiting up to %v for at least %d pods in namespace %s to be %s", timeout, minPods, ns, conditionDesc)
|
||||
var pods *v1.PodList
|
||||
matched := 0
|
||||
err := wait.PollImmediate(poll, timeout, func() (done bool, err error) {
|
||||
@ -301,7 +339,7 @@ func WaitForAllPodsCondition(c clientset.Interface, ns string, opts metav1.ListO
|
||||
return handleWaitingAPIError(err, true, "listing pods")
|
||||
}
|
||||
if len(pods.Items) < minPods {
|
||||
e2elog.Logf("found %d pods, waiting for at least %d", len(pods.Items), minPods)
|
||||
framework.Logf("found %d pods, waiting for at least %d", len(pods.Items), minPods)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
@ -319,12 +357,84 @@ func WaitForAllPodsCondition(c clientset.Interface, ns string, opts metav1.ListO
|
||||
if len(nonMatchingPods) <= 0 {
|
||||
return true, nil // All pods match.
|
||||
}
|
||||
e2elog.Logf("%d pods are not %s: %v", len(nonMatchingPods), conditionDesc, nonMatchingPods)
|
||||
framework.Logf("%d pods are not %s: %v", len(nonMatchingPods), conditionDesc, nonMatchingPods)
|
||||
return false, nil
|
||||
})
|
||||
return pods, maybeTimeoutError(err, "waiting for at least %d pods to be %s (matched %d)", minPods, conditionDesc, matched)
|
||||
}
|
||||
|
||||
// WaitForPodsRunning waits for a given `timeout` to evaluate if a certain amount of pods in given `ns` are running.
|
||||
func WaitForPodsRunning(c clientset.Interface, ns string, num int, timeout time.Duration) error {
|
||||
matched := 0
|
||||
err := wait.PollImmediate(poll, timeout, func() (done bool, err error) {
|
||||
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return handleWaitingAPIError(err, true, "listing pods")
|
||||
}
|
||||
matched = 0
|
||||
for _, pod := range pods.Items {
|
||||
if ready, _ := testutils.PodRunningReady(&pod); ready {
|
||||
matched++
|
||||
}
|
||||
}
|
||||
if matched == num {
|
||||
return true, nil
|
||||
}
|
||||
framework.Logf("expect %d pods are running, but got %v", num, matched)
|
||||
return false, nil
|
||||
})
|
||||
return maybeTimeoutError(err, "waiting for pods to be running (want %v, matched %d)", num, matched)
|
||||
}
|
||||
|
||||
// WaitForPodsSchedulingGated waits for a given `timeout` to evaluate if a certain amount of pods in given `ns` stay in scheduling gated state.
|
||||
func WaitForPodsSchedulingGated(c clientset.Interface, ns string, num int, timeout time.Duration) error {
|
||||
matched := 0
|
||||
err := wait.PollImmediate(poll, timeout, func() (done bool, err error) {
|
||||
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return handleWaitingAPIError(err, true, "listing pods")
|
||||
}
|
||||
matched = 0
|
||||
for _, pod := range pods.Items {
|
||||
for _, condition := range pod.Status.Conditions {
|
||||
if condition.Type == v1.PodScheduled && condition.Reason == v1.PodReasonSchedulingGated {
|
||||
matched++
|
||||
}
|
||||
}
|
||||
}
|
||||
if matched == num {
|
||||
return true, nil
|
||||
}
|
||||
framework.Logf("expect %d pods in scheduling gated state, but got %v", num, matched)
|
||||
return false, nil
|
||||
})
|
||||
return maybeTimeoutError(err, "waiting for pods to be scheduling gated (want %d, matched %d)", num, matched)
|
||||
}
|
||||
|
||||
// WaitForPodsWithSchedulingGates waits for a given `timeout` to evaluate if a certain amount of pods in given `ns`
|
||||
// match the given `schedulingGates`stay in scheduling gated state.
|
||||
func WaitForPodsWithSchedulingGates(c clientset.Interface, ns string, num int, timeout time.Duration, schedulingGates []v1.PodSchedulingGate) error {
|
||||
matched := 0
|
||||
err := wait.PollImmediate(poll, timeout, func() (done bool, err error) {
|
||||
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return handleWaitingAPIError(err, true, "listing pods")
|
||||
}
|
||||
matched = 0
|
||||
for _, pod := range pods.Items {
|
||||
if reflect.DeepEqual(pod.Spec.SchedulingGates, schedulingGates) {
|
||||
matched++
|
||||
}
|
||||
}
|
||||
if matched == num {
|
||||
return true, nil
|
||||
}
|
||||
framework.Logf("expect %d pods carry the expected scheduling gates, but got %v", num, matched)
|
||||
return false, nil
|
||||
})
|
||||
return maybeTimeoutError(err, "waiting for pods to carry the expected scheduling gates (want %d, matched %d)", num, matched)
|
||||
}
|
||||
|
||||
// WaitForPodTerminatedInNamespace returns an error if it takes too long for the pod to terminate,
|
||||
// if the pod Get api returns an error (IsNotFound or other), or if the pod failed (and thus did not
|
||||
// terminate) with an unexpected reason. Typically called to test that the passed-in pod is fully
|
||||
@ -344,6 +454,16 @@ func WaitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, nam
|
||||
})
|
||||
}
|
||||
|
||||
// WaitForPodTerminatingInNamespaceTimeout returns if the pod is terminating, or an error if it is not after the timeout.
|
||||
func WaitForPodTerminatingInNamespaceTimeout(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
|
||||
return WaitForPodCondition(c, namespace, podName, "is terminating", timeout, func(pod *v1.Pod) (bool, error) {
|
||||
if pod.DeletionTimestamp != nil {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
}
|
||||
|
||||
// WaitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
|
||||
func WaitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
|
||||
return WaitForPodCondition(c, namespace, podName, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout, func(pod *v1.Pod) (bool, error) {
|
||||
@ -403,7 +523,9 @@ func WaitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespa
|
||||
switch pod.Status.Phase {
|
||||
case v1.PodRunning:
|
||||
return true, nil
|
||||
case v1.PodFailed, v1.PodSucceeded:
|
||||
case v1.PodFailed:
|
||||
return false, errPodFailed
|
||||
case v1.PodSucceeded:
|
||||
return false, errPodCompleted
|
||||
}
|
||||
return false, nil
|
||||
@ -441,14 +563,17 @@ func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namesp
|
||||
func WaitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
|
||||
return WaitForPodCondition(c, namespace, podName, "running and ready", timeout, func(pod *v1.Pod) (bool, error) {
|
||||
switch pod.Status.Phase {
|
||||
case v1.PodFailed, v1.PodSucceeded:
|
||||
e2elog.Logf("The phase of Pod %s is %s which is unexpected, pod status: %#v", pod.Name, pod.Status.Phase, pod.Status)
|
||||
case v1.PodFailed:
|
||||
framework.Logf("The phase of Pod %s is %s which is unexpected, pod status: %#v", pod.Name, pod.Status.Phase, pod.Status)
|
||||
return false, errPodFailed
|
||||
case v1.PodSucceeded:
|
||||
framework.Logf("The phase of Pod %s is %s which is unexpected, pod status: %#v", pod.Name, pod.Status.Phase, pod.Status)
|
||||
return false, errPodCompleted
|
||||
case v1.PodRunning:
|
||||
e2elog.Logf("The phase of Pod %s is %s (Ready = %v)", pod.Name, pod.Status.Phase, podutils.IsPodReady(pod))
|
||||
framework.Logf("The phase of Pod %s is %s (Ready = %v)", pod.Name, pod.Status.Phase, podutils.IsPodReady(pod))
|
||||
return podutils.IsPodReady(pod), nil
|
||||
}
|
||||
e2elog.Logf("The phase of Pod %s is %s, waiting for it to be Running (with Ready = true)", pod.Name, pod.Status.Phase)
|
||||
framework.Logf("The phase of Pod %s is %s, waiting for it to be Running (with Ready = true)", pod.Name, pod.Status.Phase)
|
||||
return false, nil
|
||||
})
|
||||
}
|
||||
@ -509,7 +634,7 @@ func WaitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, ti
|
||||
func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
|
||||
var lastPod *v1.Pod
|
||||
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||
e2elog.Logf("Waiting for pod %s to disappear", podName)
|
||||
framework.Logf("Waiting for pod %s to disappear", podName)
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
|
||||
if err != nil {
|
||||
@ -518,14 +643,14 @@ func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labe
|
||||
found := false
|
||||
for i, pod := range pods.Items {
|
||||
if pod.Name == podName {
|
||||
e2elog.Logf("Pod %s still exists", podName)
|
||||
framework.Logf("Pod %s still exists", podName)
|
||||
found = true
|
||||
lastPod = &(pods.Items[i])
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
e2elog.Logf("Pod %s no longer exists", podName)
|
||||
framework.Logf("Pod %s no longer exists", podName)
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
@ -589,7 +714,7 @@ func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selecto
|
||||
// Return the list of matching pods.
|
||||
func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label labels.Selector, num int, timeout time.Duration) (pods *v1.PodList, err error) {
|
||||
opts := metav1.ListOptions{LabelSelector: label.String()}
|
||||
return WaitForAllPodsCondition(c, ns, opts, 1, "running and ready", podListTimeout, testutils.PodRunningReady)
|
||||
return WaitForAllPodsCondition(c, ns, opts, 1, "running and ready", timeout, testutils.PodRunningReady)
|
||||
}
|
||||
|
||||
// WaitForNRestartablePods tries to list restarting pods using ps until it finds expect of them,
|
||||
@ -602,7 +727,7 @@ func WaitForNRestartablePods(ps *testutils.PodStore, expect int, timeout time.Du
|
||||
pods = FilterNonRestartablePods(allPods)
|
||||
if len(pods) != expect {
|
||||
errLast = fmt.Errorf("expected to find %d pods but found only %d", expect, len(pods))
|
||||
e2elog.Logf("Error getting pods: %v", errLast)
|
||||
framework.Logf("Error getting pods: %v", errLast)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
@ -688,21 +813,21 @@ func WaitForContainerRunning(c clientset.Interface, namespace, podName, containe
|
||||
|
||||
// handleWaitingAPIErrror handles an error from an API request in the context of a Wait function.
|
||||
// If the error is retryable, sleep the recommended delay and ignore the error.
|
||||
// If the erorr is terminal, return it.
|
||||
// If the error is terminal, return it.
|
||||
func handleWaitingAPIError(err error, retryNotFound bool, taskFormat string, taskArgs ...interface{}) (bool, error) {
|
||||
taskDescription := fmt.Sprintf(taskFormat, taskArgs...)
|
||||
if retryNotFound && apierrors.IsNotFound(err) {
|
||||
e2elog.Logf("Ignoring NotFound error while " + taskDescription)
|
||||
framework.Logf("Ignoring NotFound error while " + taskDescription)
|
||||
return false, nil
|
||||
}
|
||||
if retry, delay := shouldRetry(err); retry {
|
||||
e2elog.Logf("Retryable error while %s, retrying after %v: %v", taskDescription, delay, err)
|
||||
framework.Logf("Retryable error while %s, retrying after %v: %v", taskDescription, delay, err)
|
||||
if delay > 0 {
|
||||
time.Sleep(delay)
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
e2elog.Logf("Encountered non-retryable error while %s: %v", taskDescription, err)
|
||||
framework.Logf("Encountered non-retryable error while %s: %v", taskDescription, err)
|
||||
return false, err
|
||||
}
|
||||
|
||||
|
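The wait helpers above tolerate transient API errors while polling. The following is a minimal sketch of the same caller pattern using only public client-go and apimachinery APIs (the helper itself is unexported); the pod name, namespace and timeouts are placeholders:

package example

import (
	"context"
	"fmt"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForPodGone polls until the named pod disappears, treating API errors as
// retryable the way the framework's wait helpers do.
func waitForPodGone(c clientset.Interface, ns, name string) error {
	return wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
		_, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil // the pod is gone
		}
		if err != nil {
			fmt.Printf("retryable error while getting pod %s/%s: %v\n", ns, name, err)
			return false, nil // keep polling on other errors
		}
		return false, nil // pod still exists
	})
}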
87
vendor/k8s.io/kubernetes/test/e2e/framework/skipper/skipper.go
generated
vendored
@@ -17,14 +17,8 @@ limitations under the License.
package skipper

import (
"bufio"
"bytes"
"context"
"fmt"
"regexp"
"runtime"
"runtime/debug"
"strings"

"github.com/onsi/ginkgo/v2"

@@ -44,87 +38,14 @@ import (

func skipInternalf(caller int, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
// Long term this should get replaced with https://github.com/onsi/ginkgo/issues/1069.
framework.Logf(msg)
skip(msg, caller+1)
}

// SkipPanic is the value that will be panicked from Skip.
type SkipPanic struct {
Message string // The failure message passed to Fail
Filename string // The filename that is the source of the failure
Line int // The line number of the filename that is the source of the failure
FullStackTrace string // A full stack trace starting at the source of the failure
}

const ginkgoSkipPanic = `
Your test was skipped.
Ginkgo panics to prevent subsequent assertions from running.
Normally Ginkgo rescues this panic so you shouldn't see it.
But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
To circumvent this, you should call
defer GinkgoRecover()
at the top of the goroutine that caused this panic.
`

// String makes SkipPanic look like the old Ginkgo panic when printed.
func (SkipPanic) String() string { return ginkgoSkipPanic }

// Skip wraps ginkgo.Skip so that it panics with more useful
// information about why the test is being skipped. This function will
// panic with a SkipPanic.
func skip(message string, callerSkip ...int) {
skip := 1
if len(callerSkip) > 0 {
skip += callerSkip[0]
}

_, file, line, _ := runtime.Caller(skip)
sp := SkipPanic{
Message: message,
Filename: file,
Line: line,
FullStackTrace: pruneStack(skip),
}

defer func() {
e := recover()
if e != nil {
panic(sp)
}
}()

ginkgo.Skip(message, skip)
}

// ginkgo adds a lot of test running infrastructure to the stack, so
// we filter those out
var stackSkipPattern = regexp.MustCompile(`onsi/ginkgo/v2`)

func pruneStack(skip int) string {
skip += 2 // one for pruneStack and one for debug.Stack
stack := debug.Stack()
scanner := bufio.NewScanner(bytes.NewBuffer(stack))
var prunedStack []string

// skip the top of the stack
for i := 0; i < 2*skip+1; i++ {
scanner.Scan()
}

for scanner.Scan() {
if stackSkipPattern.Match(scanner.Bytes()) {
scanner.Scan() // these come in pairs
} else {
prunedStack = append(prunedStack, scanner.Text())
scanner.Scan() // these come in pairs
prunedStack = append(prunedStack, scanner.Text())
}
}

return strings.Join(prunedStack, "\n")
ginkgo.Skip(msg, caller+1)
panic("unreachable")
}

// Skipf skips with information about why the test is being skipped.
// The direct caller is recorded in the callstack.
func Skipf(format string, args ...interface{}) {
skipInternalf(1, format, args...)
panic("unreachable")
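Skipf above now logs and then delegates straight to ginkgo.Skip. A short, hypothetical usage sketch inside a Ginkgo spec (the suite setup and the capability probe are assumed):

package example

import (
	"github.com/onsi/ginkgo/v2"

	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
)

var _ = ginkgo.Describe("feature gated test", func() {
	ginkgo.It("runs only when the optional feature is enabled", func() {
		featureEnabled := false // assumption: replace with a real capability check
		if !featureEnabled {
			// Logs the reason via framework.Logf, then skips through ginkgo.Skip.
			e2eskipper.Skipf("skipping: feature %q is not enabled in this cluster", "MyFeature")
		}
		// ... test body ...
	})
})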
24
vendor/k8s.io/kubernetes/test/e2e/framework/ssh/ssh.go
generated
vendored
@@ -35,7 +35,7 @@ import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework"
)

const (
@@ -125,7 +125,7 @@ func NodeSSHHosts(c clientset.Interface) ([]string, error) {
hosts := nodeAddresses(nodelist, v1.NodeExternalIP)
// If ExternalIPs aren't available for all nodes, try falling back to the InternalIPs.
if len(hosts) < len(nodelist.Items) {
e2elog.Logf("No external IP address on nodes, falling back to internal IPs")
framework.Logf("No external IP address on nodes, falling back to internal IPs")
hosts = nodeAddresses(nodelist, v1.NodeInternalIP)
}

@@ -146,12 +146,12 @@ func NodeSSHHosts(c clientset.Interface) ([]string, error) {
go func(host string) {
defer wg.Done()
if canConnect(host) {
e2elog.Logf("Assuming SSH on host %s", host)
framework.Logf("Assuming SSH on host %s", host)
sshHostsLock.Lock()
sshHosts = append(sshHosts, net.JoinHostPort(host, SSHPort))
sshHostsLock.Unlock()
} else {
e2elog.Logf("Skipping host %s because it does not run anything on port %s", host, SSHPort)
framework.Logf("Skipping host %s because it does not run anything on port %s", host, SSHPort)
}
}(host)
}
@@ -168,7 +168,7 @@ func canConnect(host string) bool {
hostPort := net.JoinHostPort(host, SSHPort)
conn, err := net.DialTimeout("tcp", hostPort, 3*time.Second)
if err != nil {
e2elog.Logf("cannot dial %s: %v", hostPort, err)
framework.Logf("cannot dial %s: %v", hostPort, err)
return false
}
conn.Close()
@@ -352,15 +352,15 @@ func runSSHCommandViaBastion(cmd, user, bastion, host string, signer ssh.Signer)
// LogResult records result log
func LogResult(result Result) {
remote := fmt.Sprintf("%s@%s", result.User, result.Host)
e2elog.Logf("ssh %s: command: %s", remote, result.Cmd)
e2elog.Logf("ssh %s: stdout: %q", remote, result.Stdout)
e2elog.Logf("ssh %s: stderr: %q", remote, result.Stderr)
e2elog.Logf("ssh %s: exit code: %d", remote, result.Code)
framework.Logf("ssh %s: command: %s", remote, result.Cmd)
framework.Logf("ssh %s: stdout: %q", remote, result.Stdout)
framework.Logf("ssh %s: stderr: %q", remote, result.Stderr)
framework.Logf("ssh %s: exit code: %d", remote, result.Code)
}

// IssueSSHCommandWithResult tries to execute a SSH command and returns the execution result
func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, error) {
e2elog.Logf("Getting external IP address for %s", node.Name)
framework.Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeExternalIP && a.Address != "" {
@@ -383,7 +383,7 @@ func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, er
return nil, fmt.Errorf("couldn't find any IP address for node %s", node.Name)
}

e2elog.Logf("SSH %q on %s(%s)", cmd, node.Name, host)
framework.Logf("SSH %q on %s(%s)", cmd, node.Name, host)
result, err := SSH(cmd, host, provider)
LogResult(result)

@@ -454,7 +454,7 @@ func expectNoError(err error, explain ...interface{}) {
// (for example, for call chain f -> g -> ExpectNoErrorWithOffset(1, ...) error would be logged for "f").
func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
if err != nil {
e2elog.Logf("Unexpected error occurred: %v", err)
framework.Logf("Unexpected error occurred: %v", err)
}
gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}
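A brief usage sketch for the SSH helpers above, assuming a provider whose nodes expose reachable IPs; the command shown is illustrative, not part of this diff:

package example

import (
	v1 "k8s.io/api/core/v1"

	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)

// restartKubeletOverSSH runs a command on the node and logs the full result.
func restartKubeletOverSSH(node *v1.Node, provider string) error {
	result, err := e2essh.IssueSSHCommandWithResult("sudo systemctl restart kubelet", provider, node)
	if result != nil {
		e2essh.LogResult(*result) // logs command, stdout, stderr and exit code
	}
	return err
}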
72
vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go
generated
vendored
@@ -32,14 +32,15 @@ import (
"github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/ginkgo/v2/types"
gomegaformat "github.com/onsi/gomega/format"

restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/klog/v2"

kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/test/utils/kubeconfig"
)

const (
@@ -197,6 +198,9 @@ type TestContextType struct {

// NodeKillerConfig describes configuration of NodeKiller -- a utility to
// simulate node failures.
//
// TODO: move this and the corresponding command line flags into
// test/e2e/framework/node.
type NodeKillerConfig struct {
// Enabled determines whether NodeKill should do anything at all.
// All other options below are ignored if Enabled = false.
@@ -256,7 +260,7 @@ type CloudConfig struct {
ClusterIPRange string
ClusterTag string
Network string
ConfigFile string // for azure and openstack
ConfigFile string // for azure
NodeTag string
MasterTag string

@@ -304,6 +308,9 @@ func (tc TestContextType) ClusterIsIPv6() bool {
// options themselves, copy flags from test/e2e/framework/config
// as shown in HandleFlags.
func RegisterCommonFlags(flags *flag.FlagSet) {
// The default is too low for objects like pods, even when using YAML. We double the default.
flags.IntVar(&gomegaformat.MaxLength, "gomega-max-length", 8000, "Sets the maximum size for the gomega formatter (= gomega.MaxLength). Use 0 to disable truncation.")

flags.StringVar(&TestContext.GatherKubeSystemResourceUsageData, "gather-resource-usage", "false", "If set to 'true' or 'all' framework will be monitoring resource usage of system all add-ons in (some) e2e tests, if set to 'master' framework will be monitoring master node only, if set to 'none' of 'false' monitoring will be turned off.")
flags.BoolVar(&TestContext.GatherLogsSizes, "gather-logs-sizes", false, "If set to true framework will be monitoring logs sizes on all machines running e2e tests.")
flags.IntVar(&TestContext.MaxNodesToGather, "max-nodes-to-gather-from", 20, "The maximum number of nodes to gather extended info from on test failure.")
@@ -424,44 +431,6 @@ func RegisterClusterFlags(flags *flag.FlagSet) {
flags.DurationVar(&nodeKiller.SimulatedDowntime, "node-killer-simulated-downtime", 10*time.Minute, "A delay between node death and recreation")
}

func createKubeConfig(clientCfg *restclient.Config) *clientcmdapi.Config {
clusterNick := "cluster"
userNick := "user"
contextNick := "context"

configCmd := clientcmdapi.NewConfig()

credentials := clientcmdapi.NewAuthInfo()
credentials.Token = clientCfg.BearerToken
credentials.TokenFile = clientCfg.BearerTokenFile
credentials.ClientCertificate = clientCfg.TLSClientConfig.CertFile
if len(credentials.ClientCertificate) == 0 {
credentials.ClientCertificateData = clientCfg.TLSClientConfig.CertData
}
credentials.ClientKey = clientCfg.TLSClientConfig.KeyFile
if len(credentials.ClientKey) == 0 {
credentials.ClientKeyData = clientCfg.TLSClientConfig.KeyData
}
configCmd.AuthInfos[userNick] = credentials

cluster := clientcmdapi.NewCluster()
cluster.Server = clientCfg.Host
cluster.CertificateAuthority = clientCfg.CAFile
if len(cluster.CertificateAuthority) == 0 {
cluster.CertificateAuthorityData = clientCfg.CAData
}
cluster.InsecureSkipTLSVerify = clientCfg.Insecure
configCmd.Clusters[clusterNick] = cluster

context := clientcmdapi.NewContext()
context.Cluster = clusterNick
context.AuthInfo = userNick
configCmd.Contexts[contextNick] = context
configCmd.CurrentContext = contextNick

return configCmd
}

// GenerateSecureToken returns a string of length tokenLen, consisting
// of random bytes encoded as base64 for use as a Bearer Token during
// communication with an APIServer
@@ -491,6 +460,7 @@ func AfterReadingAllFlags(t *TestContextType) {
fs.Set("logtostderr", "false")
fs.Set("alsologtostderr", "false")
fs.Set("one_output", "true")
fs.Set("stderrthreshold", "10" /* higher than any of the severities -> none pass the threshold */)
klog.SetOutput(ginkgo.GinkgoWriter)

// Only set a default host if one won't be supplied via kubeconfig
@@ -498,7 +468,7 @@ func AfterReadingAllFlags(t *TestContextType) {
// Check if we can use the in-cluster config
if clusterConfig, err := restclient.InClusterConfig(); err == nil {
if tempFile, err := os.CreateTemp(os.TempDir(), "kubeconfig-"); err == nil {
kubeConfig := createKubeConfig(clusterConfig)
kubeConfig := kubeconfig.CreateKubeConfig(clusterConfig)
clientcmd.WriteToFile(*kubeConfig, tempFile.Name())
t.KubeConfig = tempFile.Name()
klog.V(4).Infof("Using a temporary kubeconfig file from in-cluster config : %s", tempFile.Name())
@@ -560,6 +530,14 @@ func AfterReadingAllFlags(t *TestContextType) {
}
}

const (
// This is the traditional gomega.Format default of 4000 for an object
// dump plus some extra room for the message.
maxFailureMessageSize = 5000

truncatedMsg = "\n[... see output for full dump ...]\n"
)

// writeJUnitReport generates a JUnit file in the e2e report directory that is
// shorter than the one normally written by `ginkgo --junit-report`. This is
// needed because the full report can become too large for tools like Spyglass
@@ -576,6 +554,18 @@ func writeJUnitReport(report ginkgo.Report) {
if specReport.State != types.SpecStateFailed {
specReport.CapturedGinkgoWriterOutput = ""
specReport.CapturedStdOutErr = ""
} else {
// Truncate the failure message if it is too large.
msgLen := len(specReport.Failure.Message)
if msgLen > maxFailureMessageSize {
// Insert full message at the beginning where it is easy to find.
specReport.CapturedGinkgoWriterOutput =
"Full failure message:\n" +
specReport.Failure.Message + "\n\n" +
strings.Repeat("=", 70) + "\n\n" +
specReport.CapturedGinkgoWriterOutput
specReport.Failure.Message = specReport.Failure.Message[0:maxFailureMessageSize/2] + truncatedMsg + specReport.Failure.Message[msgLen-maxFailureMessageSize/2:msgLen]
}
}

// Remove report entries generated by ginkgo.By("doing
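The JUnit change above keeps only the head and tail of an oversized failure message. A standalone sketch of that head-plus-tail truncation, with the size limit assumed to mirror maxFailureMessageSize:

package example

const (
	maxMsg    = 5000 // assumed limit, mirroring maxFailureMessageSize above
	truncated = "\n[... see output for full dump ...]\n"
)

// truncateMiddle keeps the first and last maxMsg/2 bytes of msg and marks the cut.
func truncateMiddle(msg string) string {
	if len(msg) <= maxMsg {
		return msg
	}
	return msg[:maxMsg/2] + truncated + msg[len(msg)-maxMsg/2:]
}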
692
vendor/k8s.io/kubernetes/test/e2e/framework/util.go
generated
vendored
@@ -23,27 +23,22 @@ import (
"fmt"
"io"
"math/rand"
"net"
"net/url"
"os"
"os/exec"
"path"
"sort"
"strconv"
"strings"
"sync"
"syscall"
"time"

"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
gomegatypes "github.com/onsi/gomega/types"

v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
@@ -57,28 +52,13 @@ import (
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
watchtools "k8s.io/client-go/tools/watch"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"
netutils "k8s.io/utils/net"

// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

const (
// Minimal number of nodes for the cluster to be considered large.
largeClusterThreshold = 100

// TODO(justinsb): Avoid hardcoding this.
awsMasterIP = "172.20.0.9"

// AllContainers specifies that all containers be visited
// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
AllContainers = InitContainers | Containers | EphemeralContainers
)

// DEPRECATED constants. Use the timeouts in framework.Framework instead.
@@ -328,7 +308,10 @@ func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountN
}
return false, nil
})
return err
if err != nil {
return fmt.Errorf("wait for service account %q in namespace %q: %w", serviceAccountName, ns, err)
}
return nil
}

// WaitForDefaultServiceAccountInNamespace waits for the default service account to be provisioned
@@ -540,199 +523,6 @@ func RandomSuffix() string {
return strconv.Itoa(rand.Intn(10000))
}

// LookForStringInPodExec looks for the given string in the output of a command
// executed in the first container of specified pod.
// TODO(alejandrox1): move to pod/ subpkg once kubectl methods are refactored.
func LookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
return LookForStringInPodExecToContainer(ns, podName, "", command, expectedString, timeout)
}

// LookForStringInPodExecToContainer looks for the given string in the output of a
// command executed in specified pod container, or first container if not specified.
func LookForStringInPodExecToContainer(ns, podName, containerName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
return lookForString(expectedString, timeout, func() string {
args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns)}
if len(containerName) > 0 {
args = append(args, fmt.Sprintf("--container=%s", containerName))
}
args = append(args, "--")
args = append(args, command...)
return RunKubectlOrDie(ns, args...)
})
}

// lookForString looks for the given string in the output of fn, repeatedly calling fn until
// the timeout is reached or the string is found. Returns last log and possibly
// error if the string was not found.
// TODO(alejandrox1): move to pod/ subpkg once kubectl methods are refactored.
func lookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) {
for t := time.Now(); time.Since(t) < timeout; time.Sleep(Poll) {
result = fn()
if strings.Contains(result, expectedString) {
return
}
}
err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedString, result)
return
}

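lookForString above simply re-runs a command until the expected substring appears or the timeout expires. A hedged usage sketch via the exported wrappers, assuming namespace, pod and container names that exist in the test and an illustrative ready message:

package example

import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForServerReady polls the container log until the ready message shows up.
func waitForServerReady(ns, podName, container string) error {
	// Repeatedly runs `kubectl logs` until "server is ready" appears or 2 minutes pass.
	_, err := framework.LookForStringInLog(ns, podName, container, "server is ready", 2*time.Minute)
	return err
}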
// KubectlBuilder is used to build, customize and execute a kubectl Command.
// Add more functions to customize the builder as needed.
type KubectlBuilder struct {
cmd *exec.Cmd
timeout <-chan time.Time
}

// NewKubectlCommand returns a KubectlBuilder for running kubectl.
func NewKubectlCommand(namespace string, args ...string) *KubectlBuilder {
b := new(KubectlBuilder)
tk := e2ekubectl.NewTestKubeconfig(TestContext.CertDir, TestContext.Host, TestContext.KubeConfig, TestContext.KubeContext, TestContext.KubectlPath, namespace)
b.cmd = tk.KubectlCmd(args...)
return b
}

// WithEnv sets the given environment and returns itself.
func (b *KubectlBuilder) WithEnv(env []string) *KubectlBuilder {
b.cmd.Env = env
return b
}

// WithTimeout sets the given timeout and returns itself.
func (b *KubectlBuilder) WithTimeout(t <-chan time.Time) *KubectlBuilder {
b.timeout = t
return b
}

// WithStdinData sets the given data to stdin and returns itself.
func (b KubectlBuilder) WithStdinData(data string) *KubectlBuilder {
b.cmd.Stdin = strings.NewReader(data)
return &b
}

// WithStdinReader sets the given reader and returns itself.
func (b KubectlBuilder) WithStdinReader(reader io.Reader) *KubectlBuilder {
b.cmd.Stdin = reader
return &b
}

// ExecOrDie runs the kubectl executable or dies if error occurs.
func (b KubectlBuilder) ExecOrDie(namespace string) string {
str, err := b.Exec()
// In case of i/o timeout error, try talking to the apiserver again after 2s before dying.
// Note that we're still dying after retrying so that we can get visibility to triage it further.
if isTimeout(err) {
Logf("Hit i/o timeout error, talking to the server 2s later to see if it's temporary.")
time.Sleep(2 * time.Second)
retryStr, retryErr := RunKubectl(namespace, "version")
Logf("stdout: %q", retryStr)
Logf("err: %v", retryErr)
}
ExpectNoError(err)
return str
}

func isTimeout(err error) bool {
switch err := err.(type) {
case *url.Error:
if err, ok := err.Err.(net.Error); ok && err.Timeout() {
return true
}
case net.Error:
if err.Timeout() {
return true
}
}
return false
}

// Exec runs the kubectl executable.
func (b KubectlBuilder) Exec() (string, error) {
stdout, _, err := b.ExecWithFullOutput()
return stdout, err
}

// ExecWithFullOutput runs the kubectl executable, and returns the stdout and stderr.
func (b KubectlBuilder) ExecWithFullOutput() (string, string, error) {
var stdout, stderr bytes.Buffer
cmd := b.cmd
cmd.Stdout, cmd.Stderr = &stdout, &stderr

Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately
if err := cmd.Start(); err != nil {
return "", "", fmt.Errorf("error starting %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v", cmd, cmd.Stdout, cmd.Stderr, err)
}
errCh := make(chan error, 1)
go func() {
errCh <- cmd.Wait()
}()
select {
case err := <-errCh:
if err != nil {
var rc = 127
if ee, ok := err.(*exec.ExitError); ok {
rc = int(ee.Sys().(syscall.WaitStatus).ExitStatus())
Logf("rc: %d", rc)
}
return stdout.String(), stderr.String(), uexec.CodeExitError{
Err: fmt.Errorf("error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v", cmd, cmd.Stdout, cmd.Stderr, err),
Code: rc,
}
}
case <-b.timeout:
b.cmd.Process.Kill()
return "", "", fmt.Errorf("timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v", cmd, cmd.Stdout, cmd.Stderr)
}
Logf("stderr: %q", stderr.String())
Logf("stdout: %q", stdout.String())
return stdout.String(), stderr.String(), nil
}

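A short sketch of the KubectlBuilder API shown above, combining stdin data with a timeout; the manifest content and the 30-second limit are placeholders:

package example

import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
)

// applyManifest pipes a manifest into `kubectl apply -f -` with a 30s timeout.
func applyManifest(ns, manifestYAML string) (string, error) {
	return framework.NewKubectlCommand(ns, "apply", "-f", "-").
		WithStdinData(manifestYAML).
		WithTimeout(time.After(30 * time.Second)).
		Exec()
}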
// RunKubectlOrDie is a convenience wrapper over kubectlBuilder
func RunKubectlOrDie(namespace string, args ...string) string {
return NewKubectlCommand(namespace, args...).ExecOrDie(namespace)
}

// RunKubectl is a convenience wrapper over kubectlBuilder
func RunKubectl(namespace string, args ...string) (string, error) {
return NewKubectlCommand(namespace, args...).Exec()
}

// RunKubectlWithFullOutput is a convenience wrapper over kubectlBuilder
// It will also return the command's stderr.
func RunKubectlWithFullOutput(namespace string, args ...string) (string, string, error) {
return NewKubectlCommand(namespace, args...).ExecWithFullOutput()
}

// RunKubectlOrDieInput is a convenience wrapper over kubectlBuilder that takes input to stdin
func RunKubectlOrDieInput(namespace string, data string, args ...string) string {
return NewKubectlCommand(namespace, args...).WithStdinData(data).ExecOrDie(namespace)
}

// RunKubectlInput is a convenience wrapper over kubectlBuilder that takes input to stdin
func RunKubectlInput(namespace string, data string, args ...string) (string, error) {
return NewKubectlCommand(namespace, args...).WithStdinData(data).Exec()
}

// RunKubemciWithKubeconfig is a convenience wrapper over RunKubemciCmd
func RunKubemciWithKubeconfig(args ...string) (string, error) {
if TestContext.KubeConfig != "" {
args = append(args, "--"+clientcmd.RecommendedConfigPathFlag+"="+TestContext.KubeConfig)
}
return RunKubemciCmd(args...)
}

// RunKubemciCmd is a convenience wrapper over kubectlBuilder to run kubemci.
// It assumes that kubemci exists in PATH.
func RunKubemciCmd(args ...string) (string, error) {
// kubemci is assumed to be in PATH.
kubemci := "kubemci"
b := new(KubectlBuilder)
args = append(args, "--gcp-project="+TestContext.CloudConfig.ProjectID)

b.cmd = exec.Command(kubemci, args...)
return b.Exec()
}

// StartCmdAndStreamOutput returns stdout and stderr after starting the given cmd.
func StartCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) {
stdout, err = cmd.StdoutPipe()
@@ -755,449 +545,6 @@ func TryKill(cmd *exec.Cmd) {
}
}

// testContainerOutputMatcher runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using the given matcher.
func (f *Framework) testContainerOutputMatcher(scenarioName string,
pod *v1.Pod,
containerIndex int,
expectedOutput []string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) {
ginkgo.By(fmt.Sprintf("Creating a pod to test %v", scenarioName))
if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) {
Failf("Invalid container index: %d", containerIndex)
}
ExpectNoError(f.MatchContainerOutput(pod, pod.Spec.Containers[containerIndex].Name, expectedOutput, matcher))
}

// ContainerType signifies container type
type ContainerType int

const (
// Containers is for normal containers
Containers ContainerType = 1 << iota
// InitContainers is for init containers
InitContainers
// EphemeralContainers is for ephemeral containers
EphemeralContainers
)

// allFeatureEnabledContainers returns a ContainerType mask which includes all container
// types except for the ones guarded by feature gate.
// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
func allFeatureEnabledContainers() ContainerType {
return AllContainers
}

// ContainerVisitor is called with each container spec, and returns true
// if visiting should continue.
// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
type ContainerVisitor func(container *v1.Container, containerType ContainerType) (shouldContinue bool)

// visitContainers invokes the visitor function with a pointer to every container
// spec in the given pod spec with type set in mask. If visitor returns false,
// visiting is short-circuited. visitContainers returns true if visiting completes,
// false if visiting was short-circuited.
// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
func visitContainers(podSpec *v1.PodSpec, mask ContainerType, visitor ContainerVisitor) bool {
if mask&InitContainers != 0 {
for i := range podSpec.InitContainers {
if !visitor(&podSpec.InitContainers[i], InitContainers) {
return false
}
}
}
if mask&Containers != 0 {
for i := range podSpec.Containers {
if !visitor(&podSpec.Containers[i], Containers) {
return false
}
}
}
if mask&EphemeralContainers != 0 {
for i := range podSpec.EphemeralContainers {
if !visitor((*v1.Container)(&podSpec.EphemeralContainers[i].EphemeralContainerCommon), EphemeralContainers) {
return false
}
}
}
return true
}

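visitContainers above is unexported, so outside callers cannot reuse it directly. A self-contained sketch of the same walk over a PodSpec, used here only to collect container names (early stopping is omitted for brevity):

package example

import (
	v1 "k8s.io/api/core/v1"
)

// allContainerNames walks init, regular and ephemeral containers in the same
// order as the framework's visitor does.
func allContainerNames(spec *v1.PodSpec) []string {
	var names []string
	for i := range spec.InitContainers {
		names = append(names, spec.InitContainers[i].Name)
	}
	for i := range spec.Containers {
		names = append(names, spec.Containers[i].Name)
	}
	for i := range spec.EphemeralContainers {
		names = append(names, spec.EphemeralContainers[i].Name) // Name is promoted from EphemeralContainerCommon
	}
	return names
}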
// MatchContainerOutput creates a pod and waits for all it's containers to exit with success.
// It then tests that the matcher with each expectedOutput matches the output of the specified container.
func (f *Framework) MatchContainerOutput(
pod *v1.Pod,
containerName string,
expectedOutput []string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) error {
ns := pod.ObjectMeta.Namespace
if ns == "" {
ns = f.Namespace.Name
}
podClient := f.PodClientNS(ns)

createdPod := podClient.Create(pod)
defer func() {
ginkgo.By("delete the pod")
podClient.DeleteSync(createdPod.Name, metav1.DeleteOptions{}, DefaultPodDeletionTimeout)
}()

// Wait for client pod to complete.
podErr := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, createdPod.Name, ns, f.Timeouts.PodStart)

// Grab its logs. Get host first.
podStatus, err := podClient.Get(context.TODO(), createdPod.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get pod status: %v", err)
}

if podErr != nil {
// Pod failed. Dump all logs from all containers to see what's wrong
_ = visitContainers(&podStatus.Spec, allFeatureEnabledContainers(), func(c *v1.Container, containerType ContainerType) bool {
logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podStatus.Name, c.Name)
if err != nil {
Logf("Failed to get logs from node %q pod %q container %q: %v",
podStatus.Spec.NodeName, podStatus.Name, c.Name, err)
} else {
Logf("Output of node %q pod %q container %q: %s", podStatus.Spec.NodeName, podStatus.Name, c.Name, logs)
}
return true
})
return fmt.Errorf("expected pod %q success: %v", createdPod.Name, podErr)
}

Logf("Trying to get logs from node %s pod %s container %s: %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err)

// Sometimes the actual containers take a second to get started, try to get logs for 60s
logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podStatus.Name, containerName)
if err != nil {
Logf("Failed to get logs from node %q pod %q container %q. %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
return fmt.Errorf("failed to get logs from %s for %s: %v", podStatus.Name, containerName, err)
}

for _, expected := range expectedOutput {
m := matcher(expected)
matches, err := m.Match(logs)
if err != nil {
return fmt.Errorf("expected %q in container output: %v", expected, err)
} else if !matches {
return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
}
}

return nil
}

// EventsLister is a func that lists events.
type EventsLister func(opts metav1.ListOptions, ns string) (*v1.EventList, error)

// dumpEventsInNamespace dumps events in the given namespace.
func dumpEventsInNamespace(eventsLister EventsLister, namespace string) {
ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", namespace))
events, err := eventsLister(metav1.ListOptions{}, namespace)
ExpectNoError(err, "failed to list events in namespace %q", namespace)

ginkgo.By(fmt.Sprintf("Found %d events.", len(events.Items)))
// Sort events by their first timestamp
sortedEvents := events.Items
if len(sortedEvents) > 1 {
sort.Sort(byFirstTimestamp(sortedEvents))
}
for _, e := range sortedEvents {
Logf("At %v - event for %v: %v %v: %v", e.FirstTimestamp, e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
}
// Note that we don't wait for any Cleanup to propagate, which means
// that if you delete a bunch of pods right before ending your test,
// you may or may not see the killing/deletion/Cleanup events.
}

// DumpAllNamespaceInfo dumps events, pods and nodes information in the given namespace.
func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
dumpEventsInNamespace(func(opts metav1.ListOptions, ns string) (*v1.EventList, error) {
return c.CoreV1().Events(ns).List(context.TODO(), opts)
}, namespace)

e2epod.DumpAllPodInfoForNamespace(c, namespace, TestContext.ReportDir)

// If cluster is large, then the following logs are basically useless, because:
// 1. it takes tens of minutes or hours to grab all of them
// 2. there are so many of them that working with them are mostly impossible
// So we dump them only if the cluster is relatively small.
maxNodesForDump := TestContext.MaxNodesToGather
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
Logf("unable to fetch node list: %v", err)
return
}
if len(nodes.Items) <= maxNodesForDump {
dumpAllNodeInfo(c, nodes)
} else {
Logf("skipping dumping cluster info - cluster too large")
}
}

// byFirstTimestamp sorts a slice of events by first timestamp, using their involvedObject's name as a tie breaker.
type byFirstTimestamp []v1.Event

func (o byFirstTimestamp) Len() int { return len(o) }
func (o byFirstTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

func (o byFirstTimestamp) Less(i, j int) bool {
if o[i].FirstTimestamp.Equal(&o[j].FirstTimestamp) {
return o[i].InvolvedObject.Name < o[j].InvolvedObject.Name
}
return o[i].FirstTimestamp.Before(&o[j].FirstTimestamp)
}

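byFirstTimestamp above is a plain sort.Interface over events and is unexported. A minimal sketch of sorting events the same way from outside the framework package:

package example

import (
	"sort"

	v1 "k8s.io/api/core/v1"
)

type byFirstTimestamp []v1.Event

func (o byFirstTimestamp) Len() int      { return len(o) }
func (o byFirstTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o byFirstTimestamp) Less(i, j int) bool {
	if o[i].FirstTimestamp.Equal(&o[j].FirstTimestamp) {
		return o[i].InvolvedObject.Name < o[j].InvolvedObject.Name
	}
	return o[i].FirstTimestamp.Before(&o[j].FirstTimestamp)
}

// sortEvents orders events oldest-first with a stable name tie-breaker.
func sortEvents(events []v1.Event) {
	sort.Sort(byFirstTimestamp(events))
}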
func dumpAllNodeInfo(c clientset.Interface, nodes *v1.NodeList) {
names := make([]string, len(nodes.Items))
for ix := range nodes.Items {
names[ix] = nodes.Items[ix].Name
}
DumpNodeDebugInfo(c, names, Logf)
}

// DumpNodeDebugInfo dumps debug information of the given nodes.
func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) {
for _, n := range nodeNames {
logFunc("\nLogging node info for node %v", n)
node, err := c.CoreV1().Nodes().Get(context.TODO(), n, metav1.GetOptions{})
if err != nil {
logFunc("Error getting node info %v", err)
}
logFunc("Node Info: %v", node)

logFunc("\nLogging kubelet events for node %v", n)
for _, e := range getNodeEvents(c, n) {
logFunc("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v",
e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject)
}
logFunc("\nLogging pods the kubelet thinks is on node %v", n)
podList, err := getKubeletPods(c, n)
if err != nil {
logFunc("Unable to retrieve kubelet pods for node %v: %v", n, err)
continue
}
for _, p := range podList.Items {
logFunc("%v started at %v (%d+%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.InitContainerStatuses), len(p.Status.ContainerStatuses))
for _, c := range p.Status.InitContainerStatuses {
logFunc("\tInit container %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount)
}
for _, c := range p.Status.ContainerStatuses {
logFunc("\tContainer %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount)
}
}
e2emetrics.HighLatencyKubeletOperations(c, 10*time.Second, n, logFunc)
// TODO: Log node resource info
}
}

// getKubeletPods retrieves the list of pods on the kubelet.
func getKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) {
var client restclient.Result
finished := make(chan struct{}, 1)
go func() {
// call chain tends to hang in some cases when Node is not ready. Add an artificial timeout for this call. #22165
client = c.CoreV1().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", node, KubeletPort)).
Suffix("pods").
Do(context.TODO())

finished <- struct{}{}
}()
select {
case <-finished:
result := &v1.PodList{}
if err := client.Into(result); err != nil {
return &v1.PodList{}, err
}
return result, nil
case <-time.After(PodGetTimeout):
return &v1.PodList{}, fmt.Errorf("Waiting up to %v for getting the list of pods", PodGetTimeout)
}
}

// logNodeEvents logs kubelet events from the given node. This includes kubelet
// restart and node unhealthy events. Note that listing events like this will mess
// with latency metrics, beware of calling it during a test.
func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
selector := fields.Set{
"involvedObject.kind": "Node",
"involvedObject.name": nodeName,
"involvedObject.namespace": metav1.NamespaceAll,
"source": "kubelet",
}.AsSelector().String()
options := metav1.ListOptions{FieldSelector: selector}
events, err := c.CoreV1().Events(metav1.NamespaceSystem).List(context.TODO(), options)
if err != nil {
Logf("Unexpected error retrieving node events %v", err)
return []v1.Event{}
}
return events.Items
}

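getNodeEvents above narrows the event list with a field selector instead of filtering client-side. A hedged sketch of the same listing pattern that returns the error to the caller:

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	clientset "k8s.io/client-go/kubernetes"
)

// kubeletEventsForNode lists events emitted by the kubelet for the given node.
func kubeletEventsForNode(c clientset.Interface, nodeName string) ([]v1.Event, error) {
	selector := fields.Set{
		"involvedObject.kind":      "Node",
		"involvedObject.name":      nodeName,
		"involvedObject.namespace": metav1.NamespaceAll,
		"source":                   "kubelet",
	}.AsSelector().String()
	list, err := c.CoreV1().Events(metav1.NamespaceSystem).List(context.TODO(),
		metav1.ListOptions{FieldSelector: selector})
	if err != nil {
		return nil, err
	}
	return list.Items, nil
}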
// WaitForAllNodesSchedulable waits up to timeout for all
// (but TestContext.AllowedNotReadyNodes) to become schedulable.
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
if TestContext.AllowedNotReadyNodes == -1 {
return nil
}

Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)
return wait.PollImmediate(
30*time.Second,
timeout,
e2enode.CheckReadyForTests(c, TestContext.NonblockingTaints, TestContext.AllowedNotReadyNodes, largeClusterThreshold),
)
}

// AddOrUpdateLabelOnNode adds the given label key and value to the given node or updates value.
func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, labelValue string) {
ExpectNoError(testutils.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
}

// ExpectNodeHasLabel expects that the given node has the given label pair.
func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) {
ginkgo.By("verifying the node has the label " + labelKey + " " + labelValue)
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
ExpectNoError(err)
ExpectEqual(node.Labels[labelKey], labelValue)
}

// RemoveLabelOffNode is for cleaning up labels temporarily added to node,
// won't fail if target label doesn't exist or has been removed.
func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) {
ginkgo.By("removing the label " + labelKey + " off the node " + nodeName)
ExpectNoError(testutils.RemoveLabelOffNode(c, nodeName, []string{labelKey}))

ginkgo.By("verifying the node doesn't have the label " + labelKey)
ExpectNoError(testutils.VerifyLabelsRemoved(c, nodeName, []string{labelKey}))
}

// ExpectNodeHasTaint expects that the node has the given taint.
func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) {
ginkgo.By("verifying the node has the taint " + taint.ToString())
if has, err := NodeHasTaint(c, nodeName, taint); !has {
ExpectNoError(err)
Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName)
}
}

// NodeHasTaint returns true if the node has the given taint, else returns false.
func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) {
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
if err != nil {
return false, err
}

nodeTaints := node.Spec.Taints

if len(nodeTaints) == 0 || !taintExists(nodeTaints, taint) {
return false, nil
}
return true, nil
}

// RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell.
func RunHostCmd(ns, name, cmd string) (string, error) {
return RunKubectl(ns, "exec", name, "--", "/bin/sh", "-x", "-c", cmd)
}

// RunHostCmdWithFullOutput runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell. It will also return the command's stderr.
func RunHostCmdWithFullOutput(ns, name, cmd string) (string, string, error) {
return RunKubectlWithFullOutput(ns, "exec", name, "--", "/bin/sh", "-x", "-c", cmd)
}

// RunHostCmdOrDie calls RunHostCmd and dies on error.
func RunHostCmdOrDie(ns, name, cmd string) string {
stdout, err := RunHostCmd(ns, name, cmd)
Logf("stdout: %v", stdout)
ExpectNoError(err)
return stdout
}

// RunHostCmdWithRetries calls RunHostCmd and retries all errors
// until it succeeds or the specified timeout expires.
// This can be used with idempotent commands to deflake transient Node issues.
func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration) (string, error) {
start := time.Now()
for {
out, err := RunHostCmd(ns, name, cmd)
if err == nil {
return out, nil
}
if elapsed := time.Since(start); elapsed > timeout {
return out, fmt.Errorf("RunHostCmd still failed after %v: %v", elapsed, err)
}
Logf("Waiting %v to retry failed RunHostCmd: %v", interval, err)
time.Sleep(interval)
}
}

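RunHostCmdWithRetries above is intended for idempotent commands that may fail transiently. A short usage sketch; the pod name, path and timing values are assumptions:

package example

import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForMountVisible re-runs an idempotent check inside the pod every five
// seconds until it succeeds or two minutes pass.
func waitForMountVisible(ns, podName string) (string, error) {
	return framework.RunHostCmdWithRetries(ns, podName, "ls /mnt/volume1", 5*time.Second, 2*time.Minute)
}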
// AllNodesReady checks whether all registered nodes are ready. Setting -1 on
// TestContext.AllowedNotReadyNodes will bypass the post test node readiness check.
// TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy,
// and figure out how to do it in a configurable way, as we can't expect all setups to run
// default test add-ons.
func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
if TestContext.AllowedNotReadyNodes == -1 {
return nil
}

Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, TestContext.AllowedNotReadyNodes)

var notReady []*v1.Node
err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, err
}
for i := range nodes.Items {
node := &nodes.Items[i]
if !e2enode.IsConditionSetAsExpected(node, v1.NodeReady, true) {
notReady = append(notReady, node)
}
}
// Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
// to make it possible e.g. for incorrect deployment of some small percentage
// of nodes (which we allow in cluster validation). Some nodes that are not
// provisioned correctly at startup will never become ready (e.g. when something
// won't install correctly), so we can't expect them to be ready at any point.
return len(notReady) <= TestContext.AllowedNotReadyNodes, nil
})

if err != nil && err != wait.ErrWaitTimeout {
return err
}

if len(notReady) > TestContext.AllowedNotReadyNodes {
msg := ""
for _, node := range notReady {
msg = fmt.Sprintf("%s, %s", msg, node.Name)
}
return fmt.Errorf("Not ready nodes: %#v", msg)
}
return nil
}

// LookForStringInLog looks for the given string in the log of a specific pod container
func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
return lookForString(expectedString, timeout, func() string {
return RunKubectlOrDie(ns, "logs", podName, container)
})
}

// EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created
// are actually cleaned up. Currently only implemented for GCE/GKE.
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
@@ -1320,25 +667,6 @@ func GetControlPlaneAddresses(c clientset.Interface) []string {
return ips.List()
}

// CreateEmptyFileOnPod creates empty file at given path on the pod.
// TODO(alejandrox1): move to subpkg pod once kubectl methods have been refactored.
func CreateEmptyFileOnPod(namespace string, podName string, filePath string) error {
_, err := RunKubectl(namespace, "exec", podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath))
return err
}

// DumpDebugInfo dumps debug info of tests.
func DumpDebugInfo(c clientset.Interface, ns string) {
sl, _ := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
for _, s := range sl.Items {
desc, _ := RunKubectl(ns, "describe", "po", s.Name)
Logf("\nOutput of kubectl describe %v:\n%v", s.Name, desc)

l, _ := RunKubectl(ns, "logs", s.Name, "--tail=100")
Logf("\nLast 100 log lines of %v:\n%v", s.Name, l)
}
}

// PrettyPrintJSON converts metrics to JSON format.
func PrettyPrintJSON(metrics interface{}) string {
output := &bytes.Buffer{}
@@ -1354,20 +682,10 @@ func PrettyPrintJSON(metrics interface{}) string {
return formatted.String()
}

// taintExists checks if the given taint exists in list of taints. Returns true if exists false otherwise.
func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool {
for _, taint := range taints {
if taint.MatchTaint(taintToFind) {
return true
}
}
return false
}

// WatchEventSequenceVerifier ...
// manages a watch for a given resource, ensures that events take place in a given order, retries the test on failure
//
// testContext cancelation signal across API boundries, e.g: context.TODO()
// testContext cancellation signal across API boundaries, e.g: context.TODO()
// dc sets up a client to the API
// resourceType specify the type of resource
// namespace select a namespace
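NodeHasTaint and taintExists above back the framework's taint assertions. A brief usage sketch with a hypothetical taint key chosen only for illustration:

package example

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
)

// nodeIsCordonedForTest reports whether the node carries the test taint.
func nodeIsCordonedForTest(c clientset.Interface, nodeName string) (bool, error) {
	taint := &v1.Taint{
		Key:    "example.com/under-test", // hypothetical key used by the test
		Effect: v1.TaintEffectNoSchedule,
	}
	return framework.NodeHasTaint(c, nodeName, taint)
}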
84
vendor/k8s.io/kubernetes/test/e2e/framework/volume/fixtures.go
generated
vendored
@@ -31,8 +31,8 @@ limitations under the License.
* Note that the server containers are for testing purposes only and should not
* be used in production.
*
* 2) With server outside of Kubernetes (Cinder, ...)
* Appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
* 2) With server outside of Kubernetes
* Appropriate server must exist somewhere outside
* the tested Kubernetes cluster. The test itself creates a new volume,
* and checks, that Kubernetes can use it as a volume.
*/
@@ -56,7 +56,9 @@ import (
clientset "k8s.io/client-go/kubernetes"
clientexec "k8s.io/client-go/util/exec"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"

@@ -166,65 +168,6 @@ func NewNFSServer(cs clientset.Interface, namespace string, args []string) (conf
return config, pod, host
}

// NewGlusterfsServer is a GlusterFS-specific wrapper for CreateStorageServer. Also creates the gluster endpoints object.
func NewGlusterfsServer(cs clientset.Interface, namespace string) (config TestConfig, pod *v1.Pod, ip string) {
config = TestConfig{
Namespace: namespace,
Prefix: "gluster",
ServerImage: imageutils.GetE2EImage(imageutils.VolumeGlusterServer),
ServerPorts: []int{24007, 24008, 49152},
}
pod, ip = CreateStorageServer(cs, config)

service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-server",
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{
{
Protocol: v1.ProtocolTCP,
Port: 24007,
},
},
},
}

_, err := cs.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create service for Gluster server")

ginkgo.By("creating Gluster endpoints")
endpoints := &v1.Endpoints{
TypeMeta: metav1.TypeMeta{
Kind: "Endpoints",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-server",
},
Subsets: []v1.EndpointSubset{
{
Addresses: []v1.EndpointAddress{
{
IP: ip,
},
},
Ports: []v1.EndpointPort{
{
Name: "gluster",
Port: 24007,
Protocol: v1.ProtocolTCP,
},
},
},
},
}
_, err = cs.CoreV1().Endpoints(namespace).Create(context.TODO(), endpoints, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create endpoints for Gluster server")

return config, pod, ip
}

// CreateStorageServer is a wrapper for startVolumeServer(). A storage server config is passed in, and a pod pointer
// and ip address string are returned.
// Note: Expect() is called so no error is returned.
@@ -413,7 +356,7 @@ func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
}
}
if config.ServerReadyMessage != "" {
_, err := framework.LookForStringInLog(pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout)
_, err := e2epodoutput.LookForStringInLog(pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout)
framework.ExpectNoError(err, "Failed to find %q in pod logs: %s", config.ServerReadyMessage, err)
}
return pod
@@ -534,7 +477,7 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string
// Block: check content
deviceName := fmt.Sprintf("/opt/%d", i)
commands := GenerateReadBlockCmd(deviceName, len(test.ExpectedContent))
_, err := framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
_, err := e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)

// Check that it's a real block device
@@ -543,7 +486,7 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string
// Filesystem: check content
fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
commands := GenerateReadFileCmd(fileName)
_, err := framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
_, err := e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)

// Check that a directory has been mounted
@@ -554,14 +497,14 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string
// Filesystem: check fsgroup
if fsGroup != nil {
ginkgo.By("Checking fsGroup is correct.")
_, err = framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
_, err = e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
framework.ExpectNoError(err, "failed: getting the right privileges in the file %v", int(*fsGroup))
}

// Filesystem: check fsType
if fsType != "" {
ginkgo.By("Checking fsType is correct.")
_, err = framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
_, err = e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
framework.ExpectNoError(err, "failed: getting the right fsType %s", fsType)
}
}
@@ -608,7 +551,7 @@ func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64,
}
ec.Resources = v1.ResourceRequirements{}
ec.Name = "volume-ephemeral-container"
err = f.PodClient().AddEphemeralContainerSync(clientPod, ec, timeouts.PodStart)
err = e2epod.NewPodClient(f).AddEphemeralContainerSync(clientPod, ec, timeouts.PodStart)
// The API server will return NotFound for the subresource when the feature is disabled
framework.ExpectNoError(err, "failed to add ephemeral container for re-test")
testVolumeContent(f, clientPod, ec.Name, fsGroup, fsType, tests)
@@ -646,7 +589,7 @@ func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fs
fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
commands = append(commands, generateWriteFileCmd(test.ExpectedContent, fileName)...)
}
out, err := framework.RunKubectl(injectorPod.Namespace, commands...)
out, err := e2ekubectl.RunKubectl(injectorPod.Namespace, commands...)
framework.ExpectNoError(err, "failed: writing the contents: %s", out)
}

@@ -658,7 +601,7 @@ func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fs
// generateWriteCmd is used by generateWriteBlockCmd and generateWriteFileCmd
func generateWriteCmd(content, path string) []string {
var commands []string
commands = []string{"/bin/sh", "-c", "echo '" + content + "' > " + path}
commands = []string{"/bin/sh", "-c", "echo '" + content + "' > " + path + "; sync"}
return commands
}

@@ -707,7 +650,7 @@ func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.Persi
// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
// is resolved. Otherwise there will be dependency issue.
func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string, error) {
return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
return e2epod.ExecCommandInContainerWithFullOutput(f, pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
|
||||
}
|
||||
|
||||
// VerifyExecInPodSucceed verifies shell cmd in target pod succeed
|
||||
@ -716,7 +659,6 @@ func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string
|
||||
func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, shExec string) {
|
||||
stdout, stderr, err := PodExec(f, pod, shExec)
|
||||
if err != nil {
|
||||
|
||||
if exiterr, ok := err.(uexec.CodeExitError); ok {
|
||||
exitCode := exiterr.ExitStatus()
|
||||
framework.ExpectNoError(err,
|
||||
|
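The hunks above move log and exec based assertions from the core framework into helper sub-packages (e2epodoutput, e2epod, e2ekubectl). A minimal sketch of how a downstream suite would call the relocated log helper, assuming e2epodoutput resolves to k8s.io/kubernetes/test/e2e/framework/pod/output and the argument order shown in the diff; the surrounding function is illustrative only:

```go
package example

import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" // assumed import path for e2epodoutput
)

// waitForServerReady blocks until readyMsg shows up in the named container's log,
// mirroring the call sites in the hunk above.
func waitForServerReady(ns, podName, containerName, readyMsg string, timeout time.Duration) {
	_, err := e2epodoutput.LookForStringInLog(ns, podName, containerName, readyMsg, timeout)
	framework.ExpectNoError(err, "failed to find %q in pod logs", readyMsg)
}
```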
32 vendor/k8s.io/kubernetes/test/e2e/storage/utils/create.go generated vendored
@ -275,6 +275,7 @@ var factories = map[What]ItemFactory{
{"ClusterRoleBinding"}: &clusterRoleBindingFactory{},
{"CSIDriver"}: &csiDriverFactory{},
{"DaemonSet"}: &daemonSetFactory{},
{"ReplicaSet"}: &replicaSetFactory{},
{"Role"}: &roleFactory{},
{"RoleBinding"}: &roleBindingFactory{},
{"Secret"}: &secretFactory{},
@ -315,7 +316,7 @@ func patchItemRecursively(f *framework.Framework, driverNamespace *v1.Namespace,
case *rbacv1.RoleRef:
// TODO: avoid hard-coding this special name. Perhaps add a Framework.PredefinedRoles
// which contains all role names that are defined cluster-wide before the test starts?
// All those names are excempt from renaming. That list could be populated by querying
// All those names are exempt from renaming. That list could be populated by querying
// and get extended by tests.
if item.Name != "e2e-test-privileged-psp" {
PatchName(f, &item.Name)
@ -382,6 +383,14 @@ func patchItemRecursively(f *framework.Framework, driverNamespace *v1.Namespace,
if err := patchContainerImages(item.Spec.Template.Spec.InitContainers); err != nil {
return err
}
case *appsv1.ReplicaSet:
PatchNamespace(f, driverNamespace, &item.ObjectMeta.Namespace)
if err := patchContainerImages(item.Spec.Template.Spec.Containers); err != nil {
return err
}
if err := patchContainerImages(item.Spec.Template.Spec.InitContainers); err != nil {
return err
}
case *apiextensionsv1.CustomResourceDefinition:
// Do nothing. Patching name to all CRDs won't always be the expected behavior.
default:
@ -584,6 +593,27 @@ func (*daemonSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i inte
}, nil
}

type replicaSetFactory struct{}

func (f *replicaSetFactory) New() runtime.Object {
return &appsv1.ReplicaSet{}
}

func (*replicaSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*appsv1.ReplicaSet)
if !ok {
return nil, errorItemNotSupported
}

client := f.ClientSet.AppsV1().ReplicaSets(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("create ReplicaSet: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}

type storageClassFactory struct{}

func (f *storageClassFactory) New() runtime.Object {
7 vendor/k8s.io/kubernetes/test/e2e/storage/utils/deployment.go generated vendored
@ -152,6 +152,9 @@ func PatchCSIDeployment(f *e2eframework.Framework, o PatchCSIOptions, object int
if o.FSGroupPolicy != nil {
object.Spec.FSGroupPolicy = o.FSGroupPolicy
}
if o.SELinuxMount != nil {
object.Spec.SELinuxMount = o.SELinuxMount
}
}

return nil
@ -211,4 +214,8 @@ type PatchCSIOptions struct {
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
FSGroupPolicy *storagev1.FSGroupPolicy
// If not nil, the value to use for the CSIDriver.Spec.SELinuxMount
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
SELinuxMount *bool
}
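PatchCSIDeployment now also copies an optional SELinuxMount setting onto a deployed CSIDriver object. A hedged sketch of how a caller could request that, reusing only the PatchCSIOptions fields shown in the hunk above; the package import path is taken from the file header and the rest of the wiring is assumed:

```go
package example

import (
	storagev1 "k8s.io/api/storage/v1"
	"k8s.io/kubernetes/test/e2e/storage/utils" // package patched above (assumed import path)
)

// csiPatchOptions opts the deployed CSIDriver into SELinux mount support.
// Both fields are only honored if the driver actually deploys a CSIDriver
// object, as the comments in the diff state.
func csiPatchOptions() utils.PatchCSIOptions {
	fsGroupPolicy := storagev1.FileFSGroupPolicy
	seLinuxMount := true
	return utils.PatchCSIOptions{
		FSGroupPolicy: &fsGroupPolicy,
		SELinuxMount:  &seLinuxMount, // new in this update
	}
}
```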
2 vendor/k8s.io/kubernetes/test/e2e/storage/utils/host_exec.go generated vendored
@ -149,7 +149,7 @@ func (h *hostExecutor) exec(cmd string, node *v1.Node) (Result, error) {
}
containerName := pod.Spec.Containers[0].Name
var err error
result.Stdout, result.Stderr, err = h.Framework.ExecWithOptions(framework.ExecOptions{
result.Stdout, result.Stderr, err = e2epod.ExecWithOptions(h.Framework, e2epod.ExecOptions{
Command: args,
Namespace: pod.Namespace,
PodName: pod.Name,
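The exec path now routes through the e2epod helper instead of a Framework method. A minimal sketch of the relocated call, assuming e2epod is k8s.io/kubernetes/test/e2e/framework/pod and that ExecOptions keeps the capture fields alongside those used in the hunk above:

```go
package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod" // assumed import path for e2epod
)

// runInPod executes cmd in the named pod/container and returns stdout and stderr.
// Command/Namespace/PodName mirror the hunk above; CaptureStdout/CaptureStderr
// are assumptions about the rest of the ExecOptions struct.
func runInPod(f *framework.Framework, ns, podName, container string, cmd []string) (string, string, error) {
	return e2epod.ExecWithOptions(f, e2epod.ExecOptions{
		Command:       cmd,
		Namespace:     ns,
		PodName:       podName,
		ContainerName: container,
		CaptureStdout: true,
		CaptureStderr: true,
	})
}
```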
2 vendor/k8s.io/kubernetes/test/e2e/storage/utils/local.go generated vendored
@ -26,7 +26,7 @@ import (
"strings"

"github.com/onsi/ginkgo/v2"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
)
5 vendor/k8s.io/kubernetes/test/e2e/storage/utils/pod.go generated vendored
@ -152,7 +152,10 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
break
}
}
framework.ExpectEqual(isPidChanged, true, "Kubelet PID remained unchanged after restarting Kubelet")
if !isPidChanged {
framework.Fail("Kubelet PID remained unchanged after restarting Kubelet")
}

framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
time.Sleep(30 * time.Second)
}
4 vendor/k8s.io/kubernetes/test/e2e/storage/utils/snapshot.go generated vendored
@ -80,9 +80,9 @@ func WaitForSnapshotReady(c dynamic.Interface, ns string, snapshotName string, p

// GetSnapshotContentFromSnapshot returns the VolumeSnapshotContent object Bound to a
// given VolumeSnapshot
func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured.Unstructured) *unstructured.Unstructured {
func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured.Unstructured, timeout time.Duration) *unstructured.Unstructured {
defer ginkgo.GinkgoRecover()
err := WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
err := WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, timeout)
framework.ExpectNoError(err)

vs, err := dc.Resource(SnapshotGVR).Namespace(snapshot.GetNamespace()).Get(context.TODO(), snapshot.GetName(), metav1.GetOptions{})
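GetSnapshotContentFromSnapshot now takes the readiness timeout as a parameter instead of always using the global snapshot-create constant. A hedged usage sketch; the utils import path comes from the file header above, while the f.Timeouts.SnapshotCreate field is an assumption about where a suite keeps its per-run timeout:

```go
package example

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/client-go/dynamic"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils" // package patched above (assumed import path)
)

// snapshotContent resolves the bound VolumeSnapshotContent, waiting up to the
// suite's configured snapshot-create timeout rather than a hard-coded constant.
func snapshotContent(f *framework.Framework, dc dynamic.Interface, snapshot *unstructured.Unstructured) *unstructured.Unstructured {
	return utils.GetSnapshotContentFromSnapshot(dc, snapshot, f.Timeouts.SnapshotCreate)
}
```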
74 vendor/k8s.io/kubernetes/test/e2e/storage/utils/utils.go generated vendored
@ -95,45 +95,44 @@ func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) s
}

// TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts
func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
path := "/mnt/volume1"
func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
byteLen := 64
seed := time.Now().UTC().UnixNano()

ginkgo.By("Writing to the volume.")
CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)
CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)

ginkgo.By("Restarting kubelet")
KubeletCommand(KRestart, c, clientPod)

ginkgo.By("Testing that written file is accessible.")
CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)
CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)

framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, path)
framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, volumePath)
}

// TestKubeletRestartsAndRestoresMap tests that a volume mapped to a pod remains mapped after a kubelet restarts
func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
path := "/mnt/volume1"
func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
byteLen := 64
seed := time.Now().UTC().UnixNano()

ginkgo.By("Writing to the volume.")
CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, false, path, byteLen, seed)
CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)

ginkgo.By("Restarting kubelet")
KubeletCommand(KRestart, c, clientPod)

ginkgo.By("Testing that written pv is accessible.")
CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, false, path, byteLen, seed)
CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)

framework.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, path)
framework.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, volumePath)
}

// TestVolumeUnmountsFromDeletedPodWithForceOption tests that a volume unmounts if the client pod was deleted while the kubelet was down.
// forceDelete is true indicating whether the pod is forcefully deleted.
// checkSubpath is true indicating whether the subpath should be checked.
func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool) {
// If secondPod is set, it is started when kubelet is down to check that the volume is usable while the old pod is being deleted and the new pod is starting.
func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool, secondPod *v1.Pod, volumePath string) {
nodeIP, err := getHostAddress(c, clientPod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"
@ -152,6 +151,11 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
}

ginkgo.By("Writing to the volume.")
byteLen := 64
seed := time.Now().UTC().UnixNano()
CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)

// This command is to make sure kubelet is started after test finishes no matter it fails or not.
defer func() {
KubeletCommand(KStart, c, clientPod)
@ -159,6 +163,12 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
ginkgo.By("Stopping the kubelet.")
KubeletCommand(KStop, c, clientPod)

if secondPod != nil {
ginkgo.By("Starting the second pod")
_, err = c.CoreV1().Pods(clientPod.Namespace).Create(context.TODO(), secondPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "when starting the second pod")
}

ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
if forceDelete {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, *metav1.NewDeleteOptions(0))
@ -180,6 +190,29 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
time.Sleep(30 * time.Second)
}

if secondPod != nil {
ginkgo.By("Waiting for the second pod.")
err = e2epod.WaitForPodRunningInNamespace(c, secondPod)
framework.ExpectNoError(err, "while waiting for the second pod Running")

ginkgo.By("Getting the second pod uuid.")
secondPod, err := c.CoreV1().Pods(secondPod.Namespace).Get(context.TODO(), secondPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "getting the second UID")

ginkgo.By("Expecting the volume mount to be found in the second pod.")
result, err := e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", secondPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error when checking the second pod.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))

ginkgo.By("Testing that written file is accessible in the second pod.")
CheckReadFromPath(f, secondPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
err = c.CoreV1().Pods(secondPod.Namespace).Delete(context.TODO(), secondPod.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "when deleting the second pod")
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, secondPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
framework.ExpectNoError(err, "when waiting for the second pod to disappear")
}

ginkgo.By("Expecting the volume mount not to be found.")
result, err = e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
@ -195,21 +228,22 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
framework.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName)
}

}

// TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false)
func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false, nil, volumePath)
}

// TestVolumeUnmountsFromForceDeletedPod tests that a volume unmounts if the client pod was forcefully deleted while the kubelet was down.
func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false)
func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false, nil, volumePath)
}

// TestVolumeUnmapsFromDeletedPodWithForceOption tests that a volume unmaps if the client pod was deleted while the kubelet was down.
// forceDelete is true indicating whether the pod is forcefully deleted.
func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool) {
func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, devicePath string) {
nodeIP, err := getHostAddress(c, clientPod)
framework.ExpectNoError(err, "Failed to get nodeIP.")
nodeIP = nodeIP + ":22"
@ -280,13 +314,13 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra
}

// TestVolumeUnmapsFromDeletedPod tests that a volume unmaps if the client pod was deleted while the kubelet was down.
func TestVolumeUnmapsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, false)
func TestVolumeUnmapsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, devicePath string) {
TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, false, devicePath)
}

// TestVolumeUnmapsFromForceDeletedPod tests that a volume unmaps if the client pod was forcefully deleted while the kubelet was down.
func TestVolumeUnmapsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, true)
func TestVolumeUnmapsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, devicePath string) {
TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, true, devicePath)
}

// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
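The kubelet restart and unmount helpers above now take the volume or device path explicitly instead of hard-coding it. A short sketch of an adjusted call site, with the path value taken from the removed default in the hunk above and the utils import path from the file header:

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils" // package patched above
)

// checkMountSurvivesKubeletRestart passes the mount path that used to be the
// implicit default ("/mnt/volume1"). The unmount/unmap variants and the
// block-mode helpers now take a path or device path argument in the same way.
func checkMountSurvivesKubeletRestart(c clientset.Interface, f *framework.Framework, pod *v1.Pod) {
	utils.TestKubeletRestartsAndRestoresMount(c, f, pod, "/mnt/volume1")
}
```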
@ -52,8 +52,8 @@ spec:
# Refer to details about the installer in https://cos.googlesource.com/cos/tools/+/refs/heads/master/src/cmd/cos_gpu_installer/
# and the COS release notes (https://cloud.google.com/container-optimized-os/docs/release-notes) to determine version COS GPU installer for a given version of COS.

# Maps to gcr.io/cos-cloud/cos-gpu-installer:v2.0.3 - suitable for COS M85 as per https://cloud.google.com/container-optimized-os/docs/release-notes#cos-85-13310-1209-3
- image: gcr.io/cos-cloud/cos-gpu-installer:v2.0.5
# Maps to gcr.io/cos-cloud/cos-gpu-installer:v2.0.27 - suitable for COS M97 as per https://cloud.google.com/container-optimized-os/docs/release-notes
- image: gcr.io/cos-cloud/cos-gpu-installer:v2.0.27
name: nvidia-driver-installer
resources:
requests:
@ -81,6 +81,6 @@ spec:
- name: root-mount
mountPath: /root
containers:
- image: "registry.k8s.io/pause:3.8"
- image: "registry.k8s.io/pause:3.9"
name: pause

@ -39,7 +39,7 @@ spec:
- name: socket-dir
mountPath: /csi
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.1.0
image: registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
@ -73,7 +73,7 @@ spec:
- name: socket-dir
mountPath: /csi
- name: csi-attacher
image: registry.k8s.io/sig-storage/csi-attacher:v3.4.0
image: registry.k8s.io/sig-storage/csi-attacher:v4.0.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
@ -102,7 +102,7 @@ spec:
- name: socket-dir
mountPath: /csi
- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.4.0
image: registry.k8s.io/sig-storage/csi-resizer:v1.6.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"

@ -184,7 +184,7 @@ roleRef:
apiGroup: rbac.authorization.k8s.io
---

# priviledged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding()
# privileged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding()
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
2 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml generated vendored
@ -13,7 +13,7 @@ spec:
spec:
containers:
- name: csi-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.1.0
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
2 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/README.md generated vendored
@ -1,4 +1,4 @@
The files in this directory are exact copys of "kubernetes-latest" in
The files in this directory are exact copies of "kubernetes-latest" in
https://github.com/kubernetes-csi/csi-driver-host-path/tree/v1.8.0/deploy/

Do not edit manually. Run ./update-hostpath.sh to refresh the content.
@ -218,7 +218,7 @@ spec:
serviceAccountName: csi-hostpathplugin-sa
containers:
- name: hostpath
image: registry.k8s.io/sig-storage/hostpathplugin:v1.7.3
image: registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
args:
- "--drivername=hostpath.csi.k8s.io"
- "--v=5"
@ -261,7 +261,7 @@ spec:
name: dev-dir

- name: csi-external-health-monitor-controller
image: registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.4.0
image: registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
@ -275,7 +275,7 @@ spec:
mountPath: /csi

- name: node-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.0
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -303,13 +303,13 @@ spec:
volumeMounts:
- mountPath: /csi
name: socket-dir
image: registry.k8s.io/sig-storage/livenessprobe:v2.6.0
image: registry.k8s.io/sig-storage/livenessprobe:v2.7.0
args:
- --csi-address=/csi/csi.sock
- --health-port=9898

- name: csi-attacher
image: registry.k8s.io/sig-storage/csi-attacher:v3.4.0
image: registry.k8s.io/sig-storage/csi-attacher:v4.0.0
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -323,7 +323,7 @@ spec:
name: socket-dir

- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.1.0
image: registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
args:
- -v=5
- --csi-address=/csi/csi.sock
@ -338,7 +338,7 @@ spec:
name: socket-dir

- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.4.0
image: registry.k8s.io/sig-storage/csi-resizer:v1.6.0
args:
- -v=5
- -csi-address=/csi/csi.sock
@ -352,7 +352,7 @@ spec:
name: socket-dir

- name: csi-snapshotter
image: registry.k8s.io/sig-storage/csi-snapshotter:v5.0.1
image: registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
args:
- -v=5
- --csi-address=/csi/csi.sock

@ -1,4 +1,4 @@
# priviledged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding()
# privileged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding()
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-attacher
image: registry.k8s.io/sig-storage/csi-attacher:v3.3.0
image: registry.k8s.io/sig-storage/csi-attacher:v4.0.0
args:
- --v=5
- --csi-address=$(ADDRESS)

@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.3.0
image: registry.k8s.io/sig-storage/csi-resizer:v1.6.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"

@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-snapshotter
image: registry.k8s.io/sig-storage/csi-snapshotter:v4.2.1
image: registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"

@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.0.0
image: registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
args:
- "--csi-address=$(ADDRESS)"
# Topology support is needed for the pod rescheduling test
@ -34,7 +34,7 @@ spec:
- mountPath: /csi
name: socket-dir
- name: driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.3.0
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -53,7 +53,7 @@ spec:
- mountPath: /registration
name: registration-dir
- name: mock
image: registry.k8s.io/sig-storage/hostpathplugin:v1.7.3
image: registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
args:
- "--drivername=mock.storage.k8s.io"
- "--nodeid=$(KUBE_NODE_NAME)"

@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.0.0
image: registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
args:
- "--csi-address=$(ADDRESS)"
# Topology support is needed for the pod rescheduling test
@ -35,7 +35,7 @@ spec:
- mountPath: /csi
name: socket-dir
- name: driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.3.0
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -53,7 +53,7 @@ spec:
- mountPath: /registration
name: registration-dir
- name: mock
image: registry.k8s.io/sig-storage/hostpathplugin:v1.7.3
image: registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
args:
- -v=5
- -nodeid=$(KUBE_NODE_NAME)

@ -46,7 +46,7 @@ roleRef:
apiGroup: rbac.authorization.k8s.io

---
# priviledged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding()
# privileged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding()
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
4 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/update-hostpath.sh generated vendored
@ -47,14 +47,14 @@ trap "rm -rf csi-driver-host-path" EXIT
# Main YAML files.
mkdir hostpath
cat >hostpath/README.md <<EOF
The files in this directory are exact copys of "kubernetes-latest" in
The files in this directory are exact copies of "kubernetes-latest" in
https://github.com/kubernetes-csi/csi-driver-host-path/tree/$hostpath_version/deploy/

Do not edit manually. Run $script to refresh the content.
EOF
cp -r csi-driver-host-path/deploy/kubernetes-latest/hostpath hostpath/
cat >hostpath/hostpath/e2e-test-rbac.yaml <<EOF
# priviledged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding()
# privileged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding()
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
6 vendor/k8s.io/kubernetes/test/utils/conditions.go generated vendored
@ -19,7 +19,7 @@ package utils
import (
"fmt"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

@ -52,6 +52,10 @@ func PodRunningReadyOrSucceeded(p *v1.Pod) (bool, error) {
return PodRunningReady(p)
}

func PodSucceeded(p *v1.Pod) (bool, error) {
return p.Status.Phase == v1.PodSucceeded, nil
}

// FailedContainers inspects all containers in a pod and returns failure
// information for containers that have failed or been restarted.
// A map is returned where the key is the containerID and the value is a
2 vendor/k8s.io/kubernetes/test/utils/create_resources.go generated vendored
@ -65,7 +65,7 @@ func CreatePodWithRetries(c clientset.Interface, namespace string, obj *v1.Pod)
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v ", err)
return false, fmt.Errorf("failed to create object with non-retriable error: %v ", err)
}
return RetryWithExponentialBackOff(createFunc)
}
4 vendor/k8s.io/kubernetes/test/utils/delete_resources.go generated vendored
@ -32,7 +32,7 @@ import (
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
)

func deleteResource(c clientset.Interface, kind schema.GroupKind, namespace, name string, options metav1.DeleteOptions) error {
func DeleteResource(c clientset.Interface, kind schema.GroupKind, namespace, name string, options metav1.DeleteOptions) error {
switch kind {
case api.Kind("Pod"):
return c.CoreV1().Pods(namespace).Delete(context.TODO(), name, options)
@ -59,7 +59,7 @@ func deleteResource(c clientset.Interface, kind schema.GroupKind, namespace, nam

func DeleteResourceWithRetries(c clientset.Interface, kind schema.GroupKind, namespace, name string, options metav1.DeleteOptions) error {
deleteFunc := func() (bool, error) {
err := deleteResource(c, kind, namespace, name, options)
err := DeleteResource(c, kind, namespace, name, options)
if err == nil || apierrors.IsNotFound(err) {
return true, nil
}
34 vendor/k8s.io/kubernetes/test/utils/image/manifest.go generated vendored
@ -231,41 +231,41 @@ const (

func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config) {
configs := map[ImageID]Config{}
configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.40"}
configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.43"}
configs[AgnhostPrivate] = Config{list.PrivateRegistry, "agnhost", "2.6"}
configs[AuthenticatedAlpine] = Config{list.GcAuthenticatedRegistry, "alpine", "3.7"}
configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"}
configs[APIServer] = Config{list.PromoterE2eRegistry, "sample-apiserver", "1.17.5"}
configs[APIServer] = Config{list.PromoterE2eRegistry, "sample-apiserver", "1.17.7"}
configs[AppArmorLoader] = Config{list.PromoterE2eRegistry, "apparmor-loader", "1.4"}
configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.29-2"}
configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.29-4"}
configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"}
configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.2"}
configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.1.1"}
configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.5-0"}
configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.1.2"}
configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.6-0"}
configs[GlusterDynamicProvisioner] = Config{list.PromoterE2eRegistry, "glusterdynamic-provisioner", "v1.3"}
configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-2"}
configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-2"}
configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"}
configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"}
configs[InvalidRegistryImage] = Config{list.InvalidRegistry, "alpine", "3.1"}
configs[IpcUtils] = Config{list.PromoterE2eRegistry, "ipc-utils", "1.3"}
configs[JessieDnsutils] = Config{list.PromoterE2eRegistry, "jessie-dnsutils", "1.5"}
configs[Kitten] = Config{list.PromoterE2eRegistry, "kitten", "1.5"}
configs[Nautilus] = Config{list.PromoterE2eRegistry, "nautilus", "1.5"}
configs[JessieDnsutils] = Config{list.PromoterE2eRegistry, "jessie-dnsutils", "1.7"}
configs[Kitten] = Config{list.PromoterE2eRegistry, "kitten", "1.7"}
configs[Nautilus] = Config{list.PromoterE2eRegistry, "nautilus", "1.7"}
configs[NFSProvisioner] = Config{list.SigStorageRegistry, "nfs-provisioner", "v3.0.1"}
configs[Nginx] = Config{list.PromoterE2eRegistry, "nginx", "1.14-2"}
configs[NginxNew] = Config{list.PromoterE2eRegistry, "nginx", "1.15-2"}
configs[Nginx] = Config{list.PromoterE2eRegistry, "nginx", "1.14-4"}
configs[NginxNew] = Config{list.PromoterE2eRegistry, "nginx", "1.15-4"}
configs[NodePerfNpbEp] = Config{list.PromoterE2eRegistry, "node-perf/npb-ep", "1.2"}
configs[NodePerfNpbIs] = Config{list.PromoterE2eRegistry, "node-perf/npb-is", "1.2"}
configs[NodePerfTfWideDeep] = Config{list.PromoterE2eRegistry, "node-perf/tf-wide-deep", "1.2"}
configs[NodePerfTfWideDeep] = Config{list.PromoterE2eRegistry, "node-perf/tf-wide-deep", "1.3"}
configs[Nonewprivs] = Config{list.PromoterE2eRegistry, "nonewprivs", "1.3"}
configs[NonRoot] = Config{list.PromoterE2eRegistry, "nonroot", "1.2"}
configs[NonRoot] = Config{list.PromoterE2eRegistry, "nonroot", "1.4"}
// Pause - when these values are updated, also update cmd/kubelet/app/options/container_runtime.go
configs[Pause] = Config{list.GcRegistry, "pause", "3.8"}
configs[Pause] = Config{list.GcRegistry, "pause", "3.9"}
configs[Perl] = Config{list.PromoterE2eRegistry, "perl", "5.26"}
configs[PrometheusDummyExporter] = Config{list.GcRegistry, "prometheus-dummy-exporter", "v0.1.0"}
configs[PrometheusToSd] = Config{list.GcRegistry, "prometheus-to-sd", "v0.5.0"}
configs[Redis] = Config{list.PromoterE2eRegistry, "redis", "5.0.5-1"}
configs[Redis] = Config{list.PromoterE2eRegistry, "redis", "5.0.5-3"}
configs[RegressionIssue74839] = Config{list.PromoterE2eRegistry, "regression-issue-74839", "1.2"}
configs[ResourceConsumer] = Config{list.PromoterE2eRegistry, "resource-consumer", "1.10"}
configs[ResourceConsumer] = Config{list.PromoterE2eRegistry, "resource-consumer", "1.13"}
configs[SdDummyExporter] = Config{list.GcRegistry, "sd-dummy-exporter", "v0.2.0"}
configs[VolumeNFSServer] = Config{list.PromoterE2eRegistry, "volume/nfs", "1.3"}
configs[VolumeISCSIServer] = Config{list.PromoterE2eRegistry, "volume/iscsi", "2.3"}
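The table above only bumps image tags (e.g. pause 3.8 to 3.9, busybox 1.29-2 to 1.29-4); tests keep resolving images through this package rather than hard-coding registries. A minimal sketch, assuming the package exposes GetE2EImage for these ImageID constants:

```go
package example

import (
	imageutils "k8s.io/kubernetes/test/utils/image"
)

// pauseAndBusyBox returns fully qualified image references built from the
// table above, so version bumps like this one are picked up automatically.
func pauseAndBusyBox() (string, string) {
	return imageutils.GetE2EImage(imageutils.Pause), imageutils.GetE2EImage(imageutils.BusyBox)
}
```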
62 vendor/k8s.io/kubernetes/test/utils/kubeconfig/kubeconfig.go generated vendored Normal file
@ -0,0 +1,62 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubeconfig

import (
"k8s.io/client-go/rest"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

// CreateKubeConfig converts a [rest.Config] into a [clientcmdapi.Config]
// which then can be written to a file with [clientcmd.WriteToFile].
func CreateKubeConfig(clientCfg *rest.Config) *clientcmdapi.Config {
clusterNick := "cluster"
userNick := "user"
contextNick := "context"

config := clientcmdapi.NewConfig()

credentials := clientcmdapi.NewAuthInfo()
credentials.Token = clientCfg.BearerToken
credentials.TokenFile = clientCfg.BearerTokenFile
credentials.ClientCertificate = clientCfg.TLSClientConfig.CertFile
if len(credentials.ClientCertificate) == 0 {
credentials.ClientCertificateData = clientCfg.TLSClientConfig.CertData
}
credentials.ClientKey = clientCfg.TLSClientConfig.KeyFile
if len(credentials.ClientKey) == 0 {
credentials.ClientKeyData = clientCfg.TLSClientConfig.KeyData
}
config.AuthInfos[userNick] = credentials

cluster := clientcmdapi.NewCluster()
cluster.Server = clientCfg.Host
cluster.CertificateAuthority = clientCfg.CAFile
if len(cluster.CertificateAuthority) == 0 {
cluster.CertificateAuthorityData = clientCfg.CAData
}
cluster.InsecureSkipTLSVerify = clientCfg.Insecure
config.Clusters[clusterNick] = cluster

context := clientcmdapi.NewContext()
context.Cluster = clusterNick
context.AuthInfo = userNick
config.Contexts[contextNick] = context
config.CurrentContext = contextNick

return config
}
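The doc comment of the new helper points at clientcmd.WriteToFile. A short usage sketch that persists an in-memory rest.Config as a kubeconfig file; the rest.Config source and the output path are placeholders supplied by the caller:

```go
package example

import (
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/kubernetes/test/utils/kubeconfig"
)

// writeKubeconfig converts a rest.Config into a kubeconfig file that external
// tools (for example kubectl) can consume.
func writeKubeconfig(cfg *rest.Config, path string) error {
	kubeCfg := kubeconfig.CreateKubeConfig(cfg)
	return clientcmd.WriteToFile(*kubeCfg, path)
}
```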
4 vendor/k8s.io/kubernetes/test/utils/runners.go generated vendored
@ -1319,7 +1319,7 @@ func MakePodSpec() v1.PodSpec {
return v1.PodSpec{
Containers: []v1.Container{{
Name: "pause",
Image: "registry.k8s.io/pause:3.8",
Image: "registry.k8s.io/pause:3.9",
Ports: []v1.ContainerPort{{ContainerPort: 80}},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
@ -1741,7 +1741,7 @@ type DaemonConfig struct {

func (config *DaemonConfig) Run() error {
if config.Image == "" {
config.Image = "registry.k8s.io/pause:3.8"
config.Image = "registry.k8s.io/pause:3.9"
}
nameLabel := map[string]string{
"name": config.Name + "-daemon",