Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 18:43:34 +00:00.
rebase: update kubernetes to latest
Updating the Kubernetes release to the latest in the main go.mod.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
committed by mergify[bot]
parent 63c4c05b35
commit 5a66991bb3
2 vendor/k8s.io/kubernetes/test/e2e/framework/.import-restrictions generated vendored
@@ -36,9 +36,9 @@ rules:
allowedPrefixes: [
"gopkg.in/inf.v0",
"gopkg.in/yaml.v2",
"gopkg.in/evanphx/json-patch.v4",
"github.com/blang/semver/",
"github.com/davecgh/go-spew/spew",
"github.com/evanphx/json-patch",
"github.com/go-logr/logr",
"github.com/gogo/protobuf/proto",
"github.com/gogo/protobuf/sortkeys",
4 vendor/k8s.io/kubernetes/test/e2e/framework/debug/dump.go generated vendored
@@ -118,14 +118,14 @@ func DumpNodeDebugInfo(ctx context.Context, c clientset.Interface, nodeNames []s
logFunc("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v",
e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject)
}
logFunc("\nLogging pods the kubelet thinks is on node %v", n)
logFunc("\nLogging pods the kubelet thinks are on node %v", n)
podList, err := getKubeletPods(ctx, c, n)
if err != nil {
logFunc("Unable to retrieve kubelet pods for node %v: %v", n, err)
continue
}
for _, p := range podList.Items {
logFunc("%v started at %v (%d+%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.InitContainerStatuses), len(p.Status.ContainerStatuses))
logFunc("%s/%s started at %v (%d+%d container statuses recorded)", p.Namespace, p.Name, p.Status.StartTime, len(p.Status.InitContainerStatuses), len(p.Status.ContainerStatuses))
for _, c := range p.Status.InitContainerStatuses {
logFunc("\tInit container %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount)
9 vendor/k8s.io/kubernetes/test/e2e/framework/expect.go generated vendored
@@ -293,15 +293,6 @@ func (f *FailureError) backtrace() {
// }
var ErrFailure error = FailureError{}

// ExpectError expects an error happens, otherwise an exception raises
//
// Deprecated: use gomega.Expect().To(gomega.HaveOccurred()) or (better!) check
// specifically for the error that is expected with
// gomega.Expect().To(gomega.MatchError(gomega.ContainSubstring()))
func ExpectError(err error, explain ...interface{}) {
gomega.ExpectWithOffset(1, err).To(gomega.HaveOccurred(), explain...)
}

// ExpectNoError checks if "err" is set, and if so, fails assertion while logging the error.
func ExpectNoError(err error, explain ...interface{}) {
ExpectNoErrorWithOffset(1, err, explain...)
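This hunk removes the deprecated `ExpectError` helper; its doc comment already pointed callers at plain gomega assertions. A minimal sketch of the suggested replacement, outside the vendored framework, with a made-up error and substring just for illustration:

```go
package example

import (
	"fmt"
	"testing"

	"github.com/onsi/gomega"
)

func TestExpectSpecificError(t *testing.T) {
	g := gomega.NewWithT(t)

	// Placeholder error for this sketch.
	err := fmt.Errorf("volume %q not found", "pvc-123")

	// Plain replacement for the removed framework.ExpectError(err):
	g.Expect(err).To(gomega.HaveOccurred())

	// Preferred, per the deprecation note: assert the specific error text.
	g.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("not found")))
}
```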
2 vendor/k8s.io/kubernetes/test/e2e/framework/framework.go generated vendored
@@ -720,7 +720,7 @@ func (cl *ClusterVerification) WaitFor(ctx context.Context, atLeast int, timeout
pods := []v1.Pod{}
var returnedErr error

err := wait.PollWithContext(ctx, 1*time.Second, timeout, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(ctx, 1*time.Second, timeout, false, func(ctx context.Context) (bool, error) {
pods, returnedErr = cl.podState.filter(ctx, cl.client, cl.namespace)

// Failure
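The recurring change in this commit is the move from the removed `wait.PollWithContext` / `wait.PollImmediateWithContext` helpers to `wait.PollUntilContextTimeout`, which takes an extra `immediate` boolean. A small sketch of the migration pattern (names are illustrative):

```go
package example

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForCondition mirrors the replacements in this diff:
// PollWithContext(ctx, interval, timeout, fn) becomes
// PollUntilContextTimeout(ctx, interval, timeout, false, fn), while
// PollImmediateWithContext maps to the same call with immediate=true.
func waitForCondition(ctx context.Context, check func(context.Context) (bool, error)) error {
	// immediate=false: the first check runs after one interval, matching the
	// old PollWithContext behavior.
	return wait.PollUntilContextTimeout(ctx, 1*time.Second, 2*time.Minute, false, check)
}
```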
28 vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper.go generated vendored
@@ -209,8 +209,9 @@ func registerInSuite(ginkgoCall func(string, ...interface{}) bool, args []interf
case label:
fullLabel := strings.Join(arg.parts, ":")
addLabel(fullLabel)
if arg.extra != "" {
addLabel(arg.extra)
if arg.extraFeature != "" {
texts = append(texts, fmt.Sprintf("[%s]", arg.extraFeature))
ginkgoArgs = append(ginkgoArgs, ginkgo.Label("Feature:"+arg.extraFeature))
}
if fullLabel == "Serial" {
ginkgoArgs = append(ginkgoArgs, ginkgo.Serial)
@@ -309,6 +310,10 @@ func validateText(location types.CodeLocation, text string, labels []string) {
recordTextBug(location, fmt.Sprintf("[%s] in plain text is deprecated and must be added through With%s instead", tag, tag))
}
if deprecatedStability.Has(tag) {
if slices.Contains(labels, "Feature:"+tag) {
// Okay, was also set as label.
continue
}
recordTextBug(location, fmt.Sprintf("[%s] in plain text is deprecated and must be added by defining the feature gate through WithFeatureGate instead", tag))
}
if index := strings.Index(tag, ":"); index > 0 {
@@ -353,6 +358,16 @@ func withFeature(name Feature) interface{} {
// [k8s.io/apiserver/pkg/util/feature.DefaultMutableFeatureGate]. Once a
// feature gate gets removed from there, the WithFeatureGate calls using it
// also need to be removed.
//
// [Alpha] resp. [Beta] get added to the test name automatically depending
// on the current stability level of the feature. Feature:Alpha resp.
// Feature:Beta get added to the Ginkgo labels because this is a special
// requirement for how the cluster needs to be configured.
//
// If the test can run in any cluster that has alpha resp. beta features and
// API groups enabled, then annotating it with just WithFeatureGate is
// sufficient. Otherwise, WithFeature has to be used to define the additional
// requirements.
func WithFeatureGate(featureGate featuregate.Feature) interface{} {
return withFeatureGate(featureGate)
}
@@ -376,7 +391,7 @@ func withFeatureGate(featureGate featuregate.Feature) interface{} {
}

l := newLabel("FeatureGate", string(featureGate))
l.extra = level
l.extraFeature = level
return l
}

@@ -544,8 +559,9 @@ func withFlaky() interface{} {
type label struct {
// parts get concatenated with ":" to build the full label.
parts []string
// extra is an optional fully-formed extra label.
extra string
// extra is an optional feature name. It gets added as [<extraFeature>]
// to the test name and as Feature:<extraFeature> to the labels.
extraFeature string
// explanation gets set for each label to help developers
// who pass a label to a ginkgo function. They need to use
// the corresponding framework function instead.
@@ -572,7 +588,7 @@ func TagsEqual(a, b interface{}) bool {
if !ok {
return false
}
if al.extra != bl.extra {
if al.extraFeature != bl.extraFeature {
return false
}
return slices.Equal(al.parts, bl.parts)
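Per the expanded doc comment above, `WithFeatureGate` now contributes both an `[Alpha]`/`[Beta]` text tag and a `Feature:Alpha`/`Feature:Beta` Ginkgo label. A hedged sketch of how a spec might build those arguments; the gate name here is a placeholder and a real spec must use a feature that is registered in the apiserver's feature gate registry:

```go
package example

import (
	"k8s.io/component-base/featuregate"
	"k8s.io/kubernetes/test/e2e/framework"
)

// myAlphaFeature is a hypothetical gate name for this sketch only.
const myAlphaFeature featuregate.Feature = "MyAlphaFeature"

// featureGateArgs returns the extra arguments a spec would pass to the
// framework's It/Describe wrappers; WithFeatureGate yields the label value
// that registerInSuite turns into "[MyAlphaFeature]" plus the stability label.
func featureGateArgs() []interface{} {
	return []interface{}{framework.WithFeatureGate(myAlphaFeature)}
}
```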
47 vendor/k8s.io/kubernetes/test/e2e/framework/metrics/kube_proxy_metrics.go generated vendored Normal file
@@ -0,0 +1,47 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
"fmt"

"k8s.io/component-base/metrics/testutil"
)

// KubeProxyMetrics is metrics for kube-proxy
type KubeProxyMetrics testutil.Metrics

// GetCounterMetricValue returns value for metric type counter.
func (m *KubeProxyMetrics) GetCounterMetricValue(metricName string) (float64, error) {
if len(testutil.Metrics(*m)[metricName]) == 0 {
return 0, fmt.Errorf("metric '%s' not found", metricName)
}
return float64(testutil.Metrics(*m)[metricName][0].Value), nil
}

func newKubeProxyMetricsMetrics() KubeProxyMetrics {
result := testutil.NewMetrics()
return KubeProxyMetrics(result)
}

func parseKubeProxyMetrics(data string) (KubeProxyMetrics, error) {
result := newKubeProxyMetricsMetrics()
if err := testutil.ParseMetrics(data, (*testutil.Metrics)(&result)); err != nil {
return KubeProxyMetrics{}, err
}
return result, nil
}
45 vendor/k8s.io/kubernetes/test/e2e/framework/metrics/metrics_grabber.go generated vendored
@@ -22,6 +22,7 @@ import (
"fmt"
"net"
"regexp"
"strconv"
"sync"
"time"

@@ -32,8 +33,9 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"

"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)

const (
@@ -43,6 +45,8 @@ const (
kubeControllerManagerPort = 10257
// snapshotControllerPort is the port for the snapshot controller
snapshotControllerPort = 9102
// kubeProxyPort is the default port for the kube-proxy status server.
kubeProxyPort = 10249
)

// MetricsGrabbingDisabledError is an error that is wrapped by the
@@ -233,6 +237,45 @@ func (g *Grabber) getMetricsFromNode(ctx context.Context, nodeName string, kubel
}
}

// GrabFromKubeProxy returns metrics from kube-proxy
func (g *Grabber) GrabFromKubeProxy(ctx context.Context, nodeName string) (KubeProxyMetrics, error) {
nodes, err := g.client.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": nodeName}.AsSelector().String()})
if err != nil {
return KubeProxyMetrics{}, err
}

if len(nodes.Items) != 1 {
return KubeProxyMetrics{}, fmt.Errorf("error listing nodes with name %v, got %v", nodeName, nodes.Items)
}
output, err := g.grabFromKubeProxy(ctx, nodeName)
if err != nil {
return KubeProxyMetrics{}, err
}
return parseKubeProxyMetrics(output)
}

func (g *Grabber) grabFromKubeProxy(ctx context.Context, nodeName string) (string, error) {
hostCmdPodName := fmt.Sprintf("grab-kube-proxy-metrics-%s", framework.RandomSuffix())
hostCmdPod := e2epod.NewExecPodSpec(metav1.NamespaceSystem, hostCmdPodName, true)
nodeSelection := e2epod.NodeSelection{Name: nodeName}
e2epod.SetNodeSelection(&hostCmdPod.Spec, nodeSelection)
if _, err := g.client.CoreV1().Pods(metav1.NamespaceSystem).Create(ctx, hostCmdPod, metav1.CreateOptions{}); err != nil {
return "", fmt.Errorf("failed to create pod to fetch metrics: %w", err)
}
if err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, g.client, hostCmdPodName, metav1.NamespaceSystem, 5*time.Minute); err != nil {
return "", fmt.Errorf("error waiting for pod to be up: %w", err)
}

host := "127.0.0.1"
if framework.TestContext.ClusterIsIPv6() {
host = "::1"
}

stdout, err := e2epodoutput.RunHostCmd(metav1.NamespaceSystem, hostCmdPodName, fmt.Sprintf("curl --silent %s/metrics", net.JoinHostPort(host, strconv.Itoa(kubeProxyPort))))
_ = g.client.CoreV1().Pods(metav1.NamespaceSystem).Delete(ctx, hostCmdPodName, metav1.DeleteOptions{})
return stdout, err
}

// GrabFromScheduler returns metrics from scheduler
func (g *Grabber) GrabFromScheduler(ctx context.Context) (SchedulerMetrics, error) {
if !g.grabFromScheduler {
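The new `GrabFromKubeProxy` runs a host-network exec pod on the target node and scrapes `:10249/metrics`. A short usage sketch combining it with the `KubeProxyMetrics` type added above; the metric name is a placeholder, not a claim about kube-proxy's metric catalogue:

```go
package example

import (
	"context"
	"fmt"

	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
)

// proxyCounter pulls one counter from kube-proxy on the given node via the new
// grabber helper. "sync_proxy_rules_total" is a placeholder metric name.
func proxyCounter(ctx context.Context, grabber *e2emetrics.Grabber, nodeName string) (float64, error) {
	m, err := grabber.GrabFromKubeProxy(ctx, nodeName)
	if err != nil {
		return 0, fmt.Errorf("grabbing kube-proxy metrics from %s: %w", nodeName, err)
	}
	return m.GetCounterMetricValue("sync_proxy_rules_total")
}
```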
3 vendor/k8s.io/kubernetes/test/e2e/framework/node/helper.go generated vendored
@@ -46,10 +46,11 @@ func WaitForAllNodesSchedulable(ctx context.Context, c clientset.Interface, time
}

framework.Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, framework.TestContext.AllowedNotReadyNodes)
return wait.PollImmediateWithContext(
return wait.PollUntilContextTimeout(
ctx,
30*time.Second,
timeout,
true,
CheckReadyForTests(ctx, c, framework.TestContext.NonblockingTaints, framework.TestContext.AllowedNotReadyNodes, largeClusterThreshold),
)
}
4 vendor/k8s.io/kubernetes/test/e2e/framework/pod/create.go generated vendored
@@ -131,7 +131,7 @@ func CreateSecPodWithNodeSelection(ctx context.Context, client clientset.Interfa
// name. A slice of BASH commands can be supplied as args to be run by the pod
func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, securityLevel admissionapi.Level, command string) *v1.Pod {
if len(command) == 0 {
command = "trap exit TERM; while true; do sleep 1; done"
command = InfiniteSleepCommand
}
podSpec := &v1.Pod{
TypeMeta: metav1.TypeMeta{
@@ -172,7 +172,7 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
return nil, fmt.Errorf("Cannot create pod with empty namespace")
}
if len(podConfig.Command) == 0 {
podConfig.Command = "trap exit TERM; while true; do sleep 1; done"
podConfig.Command = InfiniteSleepCommand
}

podName := "pod-" + string(uuid.NewUUID())
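Both helpers now fall back to the shared `InfiniteSleepCommand` constant (defined in pod/utils.go further down) instead of repeating the shell one-liner. A small sketch of a caller relying on that default; the namespace and claims are whatever the test already has:

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	admissionapi "k8s.io/pod-security-admission/api"
)

// idlePod builds a pod that just sleeps: passing an empty command makes MakePod
// substitute e2epod.InfiniteSleepCommand.
func idlePod(ns string, pvcs []*v1.PersistentVolumeClaim) *v1.Pod {
	return e2epod.MakePod(ns, nil, pvcs, admissionapi.LevelRestricted, "")
}
```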
11 vendor/k8s.io/kubernetes/test/e2e/framework/pod/exec_util.go generated vendored
@@ -51,6 +51,10 @@ type ExecOptions struct {
// returning stdout, stderr and error. `options` allowed for
// additional parameters to be passed.
func ExecWithOptions(f *framework.Framework, options ExecOptions) (string, string, error) {
return ExecWithOptionsContext(context.Background(), f, options)
}

func ExecWithOptionsContext(ctx context.Context, f *framework.Framework, options ExecOptions) (string, string, error) {
if !options.Quiet {
framework.Logf("ExecWithOptions %+v", options)
}
@@ -77,7 +81,8 @@ func ExecWithOptions(f *framework.Framework, options ExecOptions) (string, strin

var stdout, stderr bytes.Buffer
framework.Logf("ExecWithOptions: execute(POST %s)", req.URL())
err = execute("POST", req.URL(), config, options.Stdin, &stdout, &stderr, tty)
err = execute(ctx, "POST", req.URL(), config, options.Stdin, &stdout, &stderr, tty)

if options.PreserveWhitespace {
return stdout.String(), stderr.String(), err
}
@@ -139,12 +144,12 @@ func ExecShellInPodWithFullOutput(ctx context.Context, f *framework.Framework, p
return execCommandInPodWithFullOutput(ctx, f, podName, "/bin/sh", "-c", cmd)
}

func execute(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error {
func execute(ctx context.Context, method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error {
exec, err := remotecommand.NewSPDYExecutor(config, method, url)
if err != nil {
return err
}
return exec.StreamWithContext(context.Background(), remotecommand.StreamOptions{
return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
Stdin: stdin,
Stdout: stdout,
Stderr: stderr,
63 vendor/k8s.io/kubernetes/test/e2e/framework/pod/pod_client.go generated vendored
@@ -36,6 +36,7 @@ import (
"k8s.io/kubectl/pkg/util/podutils"

"github.com/onsi/ginkgo/v2"
ginkgotypes "github.com/onsi/ginkgo/v2/types"
"github.com/onsi/gomega"

"k8s.io/kubernetes/test/e2e/framework"
@@ -56,6 +57,19 @@ const (

// it is copied from k8s.io/kubernetes/pkg/kubelet/sysctl
forbiddenReason = "SysctlForbidden"

// which test created this pod?
AnnotationTestOwner = "owner.test"
)

// global flags so we can enable features per-suite instead of per-client.
var (
// GlobalOwnerTracking controls if newly created PodClients should automatically annotate
// the pod with the owner test. The owner test is identified by "sourcecodepath:linenumber".
// Annotating the pods this way is useful to troubleshoot tests which do insufficient cleanup.
// Default is false to maximize backward compatibility.
// See also: WithOwnerTracking, AnnotationTestOwner
GlobalOwnerTracking bool
)

// ImagePrePullList is the images used in the current test suite. It should be initialized in test suite and
@@ -68,9 +82,10 @@ var ImagePrePullList sets.String
// node e2e pod scheduling.
func NewPodClient(f *framework.Framework) *PodClient {
return &PodClient{
f: f,
PodInterface: f.ClientSet.CoreV1().Pods(f.Namespace.Name),
namespace: f.Namespace.Name,
f: f,
PodInterface: f.ClientSet.CoreV1().Pods(f.Namespace.Name),
namespace: f.Namespace.Name,
ownerTracking: GlobalOwnerTracking,
}
}

@@ -79,9 +94,10 @@ func NewPodClient(f *framework.Framework) *PodClient {
// node e2e pod scheduling.
func PodClientNS(f *framework.Framework, namespace string) *PodClient {
return &PodClient{
f: f,
PodInterface: f.ClientSet.CoreV1().Pods(namespace),
namespace: namespace,
f: f,
PodInterface: f.ClientSet.CoreV1().Pods(namespace),
namespace: namespace,
ownerTracking: GlobalOwnerTracking,
}
}

@@ -89,19 +105,34 @@ func PodClientNS(f *framework.Framework, namespace string) *PodClient {
type PodClient struct {
f *framework.Framework
v1core.PodInterface
namespace string
namespace string
ownerTracking bool
}

// WithOwnerTracking controls automatic add of annotations recording the code location
// which created a pod. This is helpful when troubleshooting e2e tests (like e2e_node)
// which leak pods because insufficient cleanup.
// Note we want a shallow clone to avoid mutating the receiver.
// The default is the value of GlobalOwnerTracking *when the client was created*.
func (c PodClient) WithOwnerTracking(value bool) *PodClient {
c.ownerTracking = value
return &c
}

// Create creates a new pod according to the framework specifications (don't wait for it to start).
func (c *PodClient) Create(ctx context.Context, pod *v1.Pod) *v1.Pod {
ginkgo.GinkgoHelper()
c.mungeSpec(pod)
c.setOwnerAnnotation(pod)
p, err := c.PodInterface.Create(ctx, pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating Pod")
return p

}

// CreateSync creates a new pod according to the framework specifications, and wait for it to start and be running and ready.
func (c *PodClient) CreateSync(ctx context.Context, pod *v1.Pod) *v1.Pod {
ginkgo.GinkgoHelper()
p := c.Create(ctx, pod)
framework.ExpectNoError(WaitTimeoutForPodReadyInNamespace(ctx, c.f.ClientSet, p.Name, c.namespace, framework.PodStartTimeout))
// Get the newest pod after it becomes running and ready, some status may change after pod created, such as pod ip.
@@ -112,6 +143,7 @@ func (c *PodClient) CreateSync(ctx context.Context, pod *v1.Pod) *v1.Pod {

// CreateBatch create a batch of pods. All pods are created before waiting.
func (c *PodClient) CreateBatch(ctx context.Context, pods []*v1.Pod) []*v1.Pod {
ginkgo.GinkgoHelper()
ps := make([]*v1.Pod, len(pods))
var wg sync.WaitGroup
for i, pod := range pods {
@@ -130,7 +162,7 @@ func (c *PodClient) CreateBatch(ctx context.Context, pods []*v1.Pod) []*v1.Pod {
// there is any other apierrors. name is the pod name, updateFn is the function updating the
// pod object.
func (c *PodClient) Update(ctx context.Context, name string, updateFn func(pod *v1.Pod)) {
framework.ExpectNoError(wait.PollWithContext(ctx, time.Millisecond*500, time.Second*30, func(ctx context.Context) (bool, error) {
framework.ExpectNoError(wait.PollUntilContextTimeout(ctx, time.Millisecond*500, time.Second*30, false, func(ctx context.Context) (bool, error) {
pod, err := c.PodInterface.Get(ctx, name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get pod %q: %w", name, err)
@@ -192,6 +224,19 @@ func (c *PodClient) DeleteSync(ctx context.Context, name string, options metav1.
framework.ExpectNoError(WaitForPodNotFoundInNamespace(ctx, c.f.ClientSet, name, c.namespace, timeout), "wait for pod %q to disappear", name)
}

// addTestOrigin adds annotations to help identifying tests which incorrectly leak pods because insufficient cleanup
func (c *PodClient) setOwnerAnnotation(pod *v1.Pod) {
if !c.ownerTracking {
return
}
ginkgo.GinkgoHelper()
location := ginkgotypes.NewCodeLocation(0)
if pod.Annotations == nil {
pod.Annotations = make(map[string]string)
}
pod.Annotations[AnnotationTestOwner] = fmt.Sprintf("%s:%d", location.FileName, location.LineNumber)
}

// mungeSpec apply test-suite specific transformations to the pod spec.
func (c *PodClient) mungeSpec(pod *v1.Pod) {
if !framework.TestContext.NodeE2E {
@@ -264,7 +309,7 @@ func (c *PodClient) WaitForFinish(ctx context.Context, name string, timeout time
// WaitForErrorEventOrSuccess waits for pod to succeed or an error event for that pod.
func (c *PodClient) WaitForErrorEventOrSuccess(ctx context.Context, pod *v1.Pod) (*v1.Event, error) {
var ev *v1.Event
err := wait.PollWithContext(ctx, framework.Poll, framework.PodStartTimeout, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(ctx, framework.Poll, framework.PodStartTimeout, false, func(ctx context.Context) (bool, error) {
evnts, err := c.f.ClientSet.CoreV1().Events(pod.Namespace).Search(scheme.Scheme, pod)
if err != nil {
return false, fmt.Errorf("error in listing events: %w", err)
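The new owner-tracking support can be enabled per client or globally. A short sketch of the per-client form, using only the helpers added in this hunk:

```go
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// createTrackedPod opts a single client into owner tracking, so a leaked pod
// carries the "owner.test" annotation pointing at the creating test's
// file:line. Suites can instead set e2epod.GlobalOwnerTracking once.
func createTrackedPod(ctx context.Context, f *framework.Framework, pod *v1.Pod) *v1.Pod {
	return e2epod.NewPodClient(f).WithOwnerTracking(true).Create(ctx, pod)
}
```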
6 vendor/k8s.io/kubernetes/test/e2e/framework/pod/utils.go generated vendored
@@ -43,11 +43,11 @@ func NodeOSDistroIs(distro string) bool {
return false
}

const InfiniteSleepCommand = "trap exit TERM; while true; do sleep 1; done"

// GenerateScriptCmd generates the corresponding command lines to execute a command.
func GenerateScriptCmd(command string) []string {
var commands []string
commands = []string{"/bin/sh", "-c", command}
return commands
return []string{"/bin/sh", "-c", command}
}

// GetDefaultTestImage returns the default test image based on OS.
119 vendor/k8s.io/kubernetes/test/e2e/framework/pod/wait.go generated vendored
@@ -99,17 +99,22 @@ func BeInPhase(phase v1.PodPhase) types.GomegaMatcher {
}).WithTemplate("Expected Pod {{.To}} be in {{format .Data}}\nGot instead:\n{{.FormattedActual}}").WithTemplateData(phase)
}

// WaitForPodsRunningReady waits up to timeout to ensure that all pods in
// namespace ns are either running and ready, or failed but controlled by a
// controller. Also, it ensures that at least minPods are running and
// ready. It has separate behavior from other 'wait for' pods functions in
// that it requests the list of pods on every iteration. This is useful, for
// example, in cluster startup, because the number of pods increases while
// waiting. All pods that are in SUCCESS state are not counted.
// WaitForAlmostAllReady waits up to timeout for the following conditions:
// 1. At least minPods Pods in Namespace ns are Running and Ready
// 2. All Pods in Namespace ns are either Ready or Succeeded
// 3. All Pods part of a ReplicaSet or ReplicationController in Namespace ns are Ready
//
// After the timeout has elapsed, an error is returned if the number of Pods in a Pending Phase
// is greater than allowedNotReadyPods.
//
// It is generally recommended to use WaitForPodsRunningReady instead of this function
// whenever possible, because its behavior is more intuitive. Similar to WaitForPodsRunningReady,
// this function requests the list of pods on every iteration, making it useful for situations
// where the set of Pods is likely changing, such as during cluster startup.
//
// If minPods or allowedNotReadyPods are -1, this method returns immediately
// without waiting.
func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration) error {
func WaitForAlmostAllPodsReady(ctx context.Context, c clientset.Interface, ns string, minPods, allowedNotReadyPods int, timeout time.Duration) error {
if minPods == -1 || allowedNotReadyPods == -1 {
return nil
}
@@ -126,14 +131,12 @@ func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns stri
Pods []v1.Pod
}

// notReady is -1 for any failure other than a timeout.
// Otherwise it is the number of pods that we were still
// waiting for.
notReady := int32(-1)
nOk := 0
badPods := []v1.Pod{}
otherPods := []v1.Pod{}
succeededPods := []string{}

err := framework.Gomega().Eventually(ctx, framework.HandleRetry(func(ctx context.Context) (*state, error) {
// Reset notReady at the start of a poll attempt.
notReady = -1

rcList, err := c.CoreV1().ReplicationControllers(ns).List(ctx, metav1.ListOptions{})
if err != nil {
@@ -163,11 +166,10 @@ func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns stri
replicaOk += rs.Status.ReadyReplicas
}

nOk := int32(0)
notReady = int32(0)
failedPods := []v1.Pod{}
otherPods := []v1.Pod{}
succeededPods := []string{}
nOk = 0
badPods = []v1.Pod{}
otherPods = []v1.Pod{}
succeededPods = []string{}
for _, pod := range s.Pods {
res, err := testutils.PodRunningReady(&pod)
switch {
@@ -179,14 +181,13 @@ func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns stri
case pod.Status.Phase == v1.PodFailed:
// ignore failed pods that are controlled by some controller
if metav1.GetControllerOf(&pod) == nil {
failedPods = append(failedPods, pod)
badPods = append(badPods, pod)
}
default:
notReady++
otherPods = append(otherPods, pod)
}
}
done := replicaOk == replicas && nOk >= minPods && (len(failedPods)+len(otherPods)) == 0
done := replicaOk == replicas && nOk >= minPods && (len(badPods)+len(otherPods)) == 0
if done {
return nil, nil
}
@@ -200,8 +201,8 @@ func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns stri
if len(succeededPods) > 0 {
buffer.WriteString(fmt.Sprintf("Pods that completed successfully:\n%s", format.Object(succeededPods, 1)))
}
if len(failedPods) > 0 {
buffer.WriteString(fmt.Sprintf("Pods that failed and were not controlled by some controller:\n%s", format.Object(failedPods, 1)))
if len(badPods) > 0 {
buffer.WriteString(fmt.Sprintf("Pods that failed and were not controlled by some controller:\n%s", format.Object(badPods, 1)))
}
if len(otherPods) > 0 {
buffer.WriteString(fmt.Sprintf("Pods that were neither completed nor running:\n%s", format.Object(otherPods, 1)))
@@ -211,13 +212,79 @@ func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns stri
}))

// An error might not be fatal.
if err != nil && notReady >= 0 && notReady <= allowedNotReadyPods {
framework.Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods)
if len(otherPods) <= allowedNotReadyPods {
return nil
}
return err
}

// WaitForPodsRunningReady waits up to timeout for the following conditions:
// 1. At least minPods Pods in Namespace ns are Running and Ready
// 2. No Pods in Namespace ns are Failed and not owned by a controller or Pending
//
// An error is returned if either of these conditions are not met within the timeout.
//
// It has separate behavior from other 'wait for' pods functions in
// that it requests the list of pods on every iteration. This is useful, for
// example, in cluster startup, because the number of pods increases while
// waiting. All pods that are in SUCCESS state are not counted.
func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns string, minPods int, timeout time.Duration) error {

return framework.Gomega().Eventually(ctx, framework.HandleRetry(func(ctx context.Context) ([]v1.Pod, error) {

podList, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("listing pods in namespace %s: %w", ns, err)
}
return podList.Items, nil
})).WithTimeout(timeout).Should(framework.MakeMatcher(func(pods []v1.Pod) (func() string, error) {

nOk := 0
badPods := []v1.Pod{}
otherPods := []v1.Pod{}
succeededPods := []string{}

for _, pod := range pods {
res, err := testutils.PodRunningReady(&pod)
switch {
case res && err == nil:
nOk++
case pod.Status.Phase == v1.PodSucceeded:
// ignore succeeded pods
succeededPods = append(succeededPods, pod.Name)
case pod.Status.Phase == v1.PodFailed:
// ignore failed pods that are controlled by some controller
if metav1.GetControllerOf(&pod) == nil {
badPods = append(badPods, pod)
}
default:
otherPods = append(otherPods, pod)
}
}
if nOk >= minPods && len(badPods)+len(otherPods) == 0 {
return nil, nil
}

// Delayed formatting of a failure message.
return func() string {
var buffer strings.Builder
buffer.WriteString(fmt.Sprintf("Expected all pods (need at least %d) in namespace %q to be running and ready \n", minPods, ns))
buffer.WriteString(fmt.Sprintf("%d / %d pods were running and ready.\n", nOk, len(pods)))
if len(succeededPods) > 0 {
buffer.WriteString(fmt.Sprintf("Pods that completed successfully:\n%s", format.Object(succeededPods, 1)))
}
if len(badPods) > 0 {
buffer.WriteString(fmt.Sprintf("Pods that failed and were not controlled by some controller:\n%s", format.Object(badPods, 1)))
}
if len(otherPods) > 0 {
buffer.WriteString(fmt.Sprintf("Pods that were neither completed nor running:\n%s", format.Object(otherPods, 1)))
}
return buffer.String()
}, nil
}))

}

// WaitForPodCondition waits a pods to be matched to the given condition.
// The condition callback may use gomega.StopTrying to abort early.
func WaitForPodCondition(ctx context.Context, c clientset.Interface, ns, podName, conditionDesc string, timeout time.Duration, condition podCondition) error {
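Callers of `WaitForPodsRunningReady` lose the `allowedNotReadyPods` parameter, which now lives in `WaitForAlmostAllPodsReady`, and `minPods` becomes a plain `int`. A minimal sketch of the updated call shape; the namespace and timeout are illustrative:

```go
package example

import (
	"context"
	"time"

	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitForSystemPods uses the narrowed signature: namespace, minPods, timeout.
// Code that still needs an allowed-not-ready budget should call
// e2epod.WaitForAlmostAllPodsReady instead.
func waitForSystemPods(ctx context.Context, c clientset.Interface, minPods int) error {
	return e2epod.WaitForPodsRunningReady(ctx, c, "kube-system", minPods, 5*time.Minute)
}
```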
27 vendor/k8s.io/kubernetes/test/e2e/framework/provider_less.go generated vendored
@@ -1,27 +0,0 @@
//go:build providerless
// +build providerless

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

func init() {
// fake "gce"
RegisterProvider("gce", func() (ProviderInterface, error) {
return NullProvider{}, nil
})
}
28 vendor/k8s.io/kubernetes/test/e2e/framework/pv/pv.go generated vendored
@@ -93,6 +93,9 @@ type PersistentVolumeConfig struct {
// [Optional] Labels contains information used to organize and categorize
// objects
Labels labels.Set
// [Optional] Annotations contains information used to organize and categorize
// objects
Annotations map[string]string
// PVSource contains the details of the underlying volume and must be set
PVSource v1.PersistentVolumeSource
// [Optional] Prebind lets you specify a PVC to bind this PV to before
@@ -124,10 +127,11 @@ type PersistentVolumeClaimConfig struct {
// unspecified
ClaimSize string
// AccessModes defaults to RWO if unspecified
AccessModes []v1.PersistentVolumeAccessMode
Annotations map[string]string
Selector *metav1.LabelSelector
StorageClassName *string
AccessModes []v1.PersistentVolumeAccessMode
Annotations map[string]string
Selector *metav1.LabelSelector
StorageClassName *string
VolumeAttributesClassName *string
// VolumeMode defaults to nil if unspecified or specified as the empty
// string
VolumeMode *v1.PersistentVolumeMode
@@ -595,13 +599,18 @@ func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume
}
}

annotations := map[string]string{
volumeGidAnnotationKey: "777",
}
for k, v := range pvConfig.Annotations {
annotations[k] = v
}

return &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
GenerateName: pvConfig.NamePrefix,
Labels: pvConfig.Labels,
Annotations: map[string]string{
volumeGidAnnotationKey: "777",
},
Annotations: annotations,
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: pvConfig.ReclaimPolicy,
@@ -653,8 +662,9 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P
v1.ResourceStorage: resource.MustParse(cfg.ClaimSize),
},
},
StorageClassName: cfg.StorageClassName,
VolumeMode: cfg.VolumeMode,
StorageClassName: cfg.StorageClassName,
VolumeAttributesClassName: cfg.VolumeAttributesClassName,
VolumeMode: cfg.VolumeMode,
},
}
}
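The claim config gains a `VolumeAttributesClassName` that is copied straight into the PVC spec. A brief sketch of a caller; the class and storage-class names are placeholders for this sketch:

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
)

// claimWithVAC builds a PVC that requests a (hypothetical) "gold"
// VolumeAttributesClass in addition to its StorageClass.
func claimWithVAC(ns string) *v1.PersistentVolumeClaim {
	scName := "csi-hostpath-sc" // placeholder storage class
	vacName := "gold"           // placeholder volume attributes class
	cfg := e2epv.PersistentVolumeClaimConfig{
		ClaimSize:                 "1Gi",
		StorageClassName:          &scName,
		VolumeAttributesClassName: &vacName,
	}
	return e2epv.MakePersistentVolumeClaim(cfg, ns)
}
```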
13 vendor/k8s.io/kubernetes/test/e2e/framework/skipper/skipper.go generated vendored
@@ -56,8 +56,7 @@ func SkipUnlessAtLeast(value int, minValue int, message string) {
var featureGate featuregate.FeatureGate

// InitFeatureGates must be called in test suites that have a --feature-gates parameter.
// If not called, SkipUnlessFeatureGateEnabled and SkipIfFeatureGateEnabled will
// record a test failure.
// If not called, SkipUnlessFeatureGateEnabled will record a test failure.
func InitFeatureGates(defaults featuregate.FeatureGate, overrides map[string]bool) error {
clone := defaults.DeepCopy()
if err := clone.SetFromMap(overrides); err != nil {
@@ -67,6 +66,16 @@ func InitFeatureGates(defaults featuregate.FeatureGate, overrides map[string]boo
return nil
}

// IsFeatureGateEnabled can be used during e2e tests to figure out if a certain feature gate is enabled.
// This function is dependent on InitFeatureGates under the hood. Therefore, the test must be called with a
// --feature-gates parameter.
func IsFeatureGateEnabled(feature featuregate.Feature) bool {
if featureGate == nil {
framework.Failf("feature gate interface is not initialized")
}
return featureGate.Enabled(feature)
}

// SkipUnlessFeatureGateEnabled skips if the feature is disabled.
//
// Beware that this only works in test suites that have a --feature-gate
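The new `IsFeatureGateEnabled` lets a test branch on a gate instead of skipping outright. A hedged sketch; the gate name is hypothetical and the suite must have called `InitFeatureGates` (i.e. it ran with `--feature-gates`) for this to work:

```go
package example

import (
	"k8s.io/component-base/featuregate"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
)

// someGate is a placeholder; a real test passes a registered gate constant.
const someGate featuregate.Feature = "SomeGate"

// pickPath chooses behavior based on the gate rather than skipping the test
// via SkipUnlessFeatureGateEnabled.
func pickPath() string {
	if e2eskipper.IsFeatureGateEnabled(someGate) {
		return "gated behavior"
	}
	return "legacy behavior"
}
```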
4 vendor/k8s.io/kubernetes/test/e2e/framework/ssh/ssh.go generated vendored
@@ -246,7 +246,7 @@ func runSSHCommand(ctx context.Context, cmd, user, host string, signer ssh.Signe
}
client, err := ssh.Dial("tcp", host, config)
if err != nil {
err = wait.PollWithContext(ctx, 5*time.Second, 20*time.Second, func(ctx context.Context) (bool, error) {
err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
fmt.Printf("error dialing %s@%s: '%v', retrying\n", user, host, err)
if client, err = ssh.Dial("tcp", host, config); err != nil {
return false, nil // retrying, error will be logged above
@@ -300,7 +300,7 @@ func runSSHCommandViaBastion(ctx context.Context, cmd, user, bastion, host strin
}
bastionClient, err := ssh.Dial("tcp", bastion, config)
if err != nil {
err = wait.PollWithContext(ctx, 5*time.Second, 20*time.Second, func(ctx context.Context) (bool, error) {
err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
fmt.Printf("error dialing %s@%s: '%v', retrying\n", user, bastion, err)
if bastionClient, err = ssh.Dial("tcp", bastion, config); err != nil {
return false, err
2 vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go generated vendored
@@ -399,7 +399,7 @@ func CreateGinkgoConfig() (types.SuiteConfig, types.ReporterConfig) {
// Randomize specs as well as suites
suiteConfig.RandomizeAllSpecs = true
// Disable skipped tests unless they are explicitly requested.
if len(suiteConfig.FocusStrings) == 0 && len(suiteConfig.SkipStrings) == 0 {
if len(suiteConfig.FocusStrings) == 0 && len(suiteConfig.SkipStrings) == 0 && suiteConfig.LabelFilter == "" {
suiteConfig.SkipStrings = []string{`\[Flaky\]|\[Feature:.+\]`}
}
return suiteConfig, reporterConfig
17 vendor/k8s.io/kubernetes/test/e2e/framework/util.go generated vendored
@@ -53,7 +53,6 @@ import (
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
watchtools "k8s.io/client-go/tools/watch"
imageutils "k8s.io/kubernetes/test/utils/image"
netutils "k8s.io/utils/net"
)

@@ -132,14 +131,8 @@ const (
)

var (
// BusyBoxImage is the image URI of BusyBox.
BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)

// ProvidersWithSSH are those providers where each node is accessible with SSH
ProvidersWithSSH = []string{"gce", "gke", "aws", "local", "azure"}

// ServeHostnameImage is a serve hostname image name.
ServeHostnameImage = imageutils.GetE2EImage(imageutils.Agnhost)
)

// RunID is a unique identifier of the e2e run.
@@ -245,7 +238,7 @@ func WaitForNamespacesDeleted(ctx context.Context, c clientset.Interface, namesp
nsMap[ns] = true
}
//Now POLL until all namespaces have been eradicated.
return wait.PollWithContext(ctx, 2*time.Second, timeout,
return wait.PollUntilContextTimeout(ctx, 2*time.Second, timeout, false,
func(ctx context.Context) (bool, error) {
nsList, err := c.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
if err != nil {
@@ -423,7 +416,7 @@ func CheckTestingNSDeletedExcept(ctx context.Context, c clientset.Interface, ski
// WaitForServiceEndpointsNum waits until the amount of endpoints that implement service to expectNum.
// Some components use EndpointSlices other Endpoints, we must verify that both objects meet the requirements.
func WaitForServiceEndpointsNum(ctx context.Context, c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
return wait.PollWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, interval, timeout, false, func(ctx context.Context) (bool, error) {
Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum)
endpoint, err := c.CoreV1().Endpoints(namespace).Get(ctx, serviceName, metav1.GetOptions{})
if err != nil {
@@ -622,8 +615,10 @@ func CoreDump(dir string) {
Logf("Dumping logs locally to: %s", dir)
cmd = exec.Command(path.Join(TestContext.RepoRoot, "cluster", "log-dump", "log-dump.sh"), dir)
}
cmd.Env = append(os.Environ(), fmt.Sprintf("LOG_DUMP_SYSTEMD_SERVICES=%s", parseSystemdServices(TestContext.SystemdServices)))
cmd.Env = append(os.Environ(), fmt.Sprintf("LOG_DUMP_SYSTEMD_JOURNAL=%v", TestContext.DumpSystemdJournal))
env := os.Environ()
env = append(env, fmt.Sprintf("LOG_DUMP_SYSTEMD_SERVICES=%s", parseSystemdServices(TestContext.SystemdServices)))
env = append(env, fmt.Sprintf("LOG_DUMP_SYSTEMD_JOURNAL=%v", TestContext.DumpSystemdJournal))
cmd.Env = env

cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
25 vendor/k8s.io/kubernetes/test/e2e/storage/utils/create.go generated vendored
@@ -29,6 +29,7 @@ import (
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
storagev1 "k8s.io/api/storage/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -267,6 +268,7 @@ var factories = map[What]ItemFactory{
{"StatefulSet"}: &statefulSetFactory{},
{"Deployment"}: &deploymentFactory{},
{"StorageClass"}: &storageClassFactory{},
{"VolumeAttributesClass"}: &volumeAttributesClassFactory{},
{"CustomResourceDefinition"}: &customResourceDefinitionFactory{},
}

@@ -314,6 +316,8 @@ func patchItemRecursively(f *framework.Framework, driverNamespace *v1.Namespace,
PatchName(f, &item.Name)
case *storagev1.StorageClass:
PatchName(f, &item.Name)
case *storagev1beta1.VolumeAttributesClass:
PatchName(f, &item.Name)
case *storagev1.CSIDriver:
PatchName(f, &item.Name)
case *v1.ServiceAccount:
@@ -618,6 +622,27 @@ func (*storageClassFactory) Create(ctx context.Context, f *framework.Framework,
}, nil
}

type volumeAttributesClassFactory struct{}

func (f *volumeAttributesClassFactory) New() runtime.Object {
return &storagev1beta1.VolumeAttributesClass{}
}

func (*volumeAttributesClassFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
item, ok := i.(*storagev1beta1.VolumeAttributesClass)
if !ok {
return nil, errorItemNotSupported
}

client := f.ClientSet.StorageV1beta1().VolumeAttributesClasses()
if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("create VolumeAttributesClass: %w", err)
}
return func(ctx context.Context) error {
return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
}, nil
}

type csiDriverFactory struct{}

func (f *csiDriverFactory) New() runtime.Object {
34 vendor/k8s.io/kubernetes/test/e2e/storage/utils/utils.go generated vendored
@@ -619,6 +619,40 @@ func WaitForGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.Gro
return fmt.Errorf("%s %s is not deleted within %v", gvr.Resource, objectName, timeout)
}

// EnsureGVRDeletion checks that no object as defined by the group/version/kind and name is ever found during the given time period
func EnsureGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, objectName string, poll, timeout time.Duration, namespace string) error {
var resourceClient dynamic.ResourceInterface
if namespace != "" {
resourceClient = c.Resource(gvr).Namespace(namespace)
} else {
resourceClient = c.Resource(gvr)
}

err := framework.Gomega().Eventually(ctx, func(ctx context.Context) error {
_, err := resourceClient.Get(ctx, objectName, metav1.GetOptions{})
return err
}).WithTimeout(timeout).WithPolling(poll).Should(gomega.MatchError(apierrors.IsNotFound, fmt.Sprintf("failed to delete %s %s", gvr, objectName)))
return err
}

// EnsureNoGVRDeletion checks that an object as defined by the group/version/kind and name has not been deleted during the given time period
func EnsureNoGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, objectName string, poll, timeout time.Duration, namespace string) error {
var resourceClient dynamic.ResourceInterface
if namespace != "" {
resourceClient = c.Resource(gvr).Namespace(namespace)
} else {
resourceClient = c.Resource(gvr)
}
err := framework.Gomega().Consistently(ctx, func(ctx context.Context) error {
_, err := resourceClient.Get(ctx, objectName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get %s %s: %w", gvr.Resource, objectName, err)
}
return nil
}).WithTimeout(timeout).WithPolling(poll).Should(gomega.Succeed())
return err
}

// WaitForNamespacedGVRDeletion waits until a namespaced object has been deleted
func WaitForNamespacedGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, ns, objectName string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for %s %s to be deleted", timeout, gvr.Resource, objectName)
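`EnsureGVRDeletion` polls a dynamic-client Get until it returns NotFound, failing at the timeout otherwise. A short usage sketch; the GVR and timings here are illustrative, not taken from any caller in this commit:

```go
package example

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
)

// ensurePVCGone waits until the named PVC disappears, using the new helper.
func ensurePVCGone(ctx context.Context, dc dynamic.Interface, ns, name string) error {
	pvcGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumeclaims"}
	return storageutils.EnsureGVRDeletion(ctx, dc, pvcGVR, name, 2*time.Second, 2*time.Minute, ns)
}
```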
@@ -54,7 +54,7 @@ spec:
hostPath:
path: /etc/nvidia
initContainers:
- image: "ubuntu"
- image: "ubuntu@sha256:3f85b7caad41a95462cf5b787d8a04604c8262cdcdf9a472b8c52ef83375fe15"
name: bind-mount-install-dir
securityContext:
privileged: true
@@ -142,6 +142,6 @@ spec:
- name: nvidia-config
mountPath: /etc/nvidia
containers:
- image: "registry.k8s.io/pause:3.9"
- image: "registry.k8s.io/pause:3.10"
name: pause
4 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/etcd/OWNERS generated vendored Normal file
@@ -0,0 +1,4 @@
# See the OWNERS docs at https://go.k8s.io/owners

labels:
- sig/etcd
@@ -1,5 +1,5 @@
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-attacher/raw/v4.5.0/deploy/kubernetes//rbac.yaml
# for csi-driver-host-path release-1.13
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-attacher/raw/v4.6.1/deploy/kubernetes//rbac.yaml
# for csi-driver-host-path release-1.14
# by ./update-hostpath.sh
#
# This YAML file contains all RBAC objects that are necessary to run external
@@ -1,5 +1,5 @@
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-health-monitor/raw/v0.11.0/deploy/kubernetes/external-health-monitor-controller/rbac.yaml
# for csi-driver-host-path release-1.13
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-health-monitor/raw/v0.12.1/deploy/kubernetes/external-health-monitor-controller/rbac.yaml
# for csi-driver-host-path release-1.14
# by ./update-hostpath.sh
#
# This YAML file contains all RBAC objects that are necessary to run external
@@ -1,5 +1,5 @@
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-provisioner/raw/v4.0.0/deploy/kubernetes//rbac.yaml
# for csi-driver-host-path release-1.13
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-provisioner/raw/v5.0.1/deploy/kubernetes//rbac.yaml
# for csi-driver-host-path release-1.14
# by ./update-hostpath.sh
#
# This YAML file contains all RBAC objects that are necessary to run external
@@ -32,7 +32,7 @@ rules:
# verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
verbs: ["get", "list", "watch", "create", "patch", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
@@ -1,5 +1,5 @@
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-resizer/raw/v1.10.0/deploy/kubernetes//rbac.yaml
# for csi-driver-host-path release-1.13
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-resizer/raw/v1.11.1/deploy/kubernetes//rbac.yaml
# for csi-driver-host-path release-1.14
# by ./update-hostpath.sh
#
# This YAML file contains all RBAC objects that are necessary to run external
@@ -1,5 +1,5 @@
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-snapshotter/raw/v7.0.1/deploy/kubernetes/csi-snapshotter/rbac-csi-snapshotter.yaml
# for csi-driver-host-path release-1.13
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-snapshotter/raw/v8.0.1/deploy/kubernetes/csi-snapshotter/rbac-csi-snapshotter.yaml
# for csi-driver-host-path release-1.14
# by ./update-hostpath.sh
#
# Together with the RBAC file for external-provisioner, this YAML file
2 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/README.md generated vendored
@@ -1,4 +1,4 @@
The files in this directory are exact copies of "kubernetes-latest" in
https://github.com/kubernetes-csi/csi-driver-host-path/tree/release-1.13/deploy/
https://github.com/kubernetes-csi/csi-driver-host-path/tree/release-1.14/deploy/

Do not edit manually. Run ./update-hostpath.sh to refresh the content.
@@ -219,7 +219,7 @@ spec:
serviceAccountName: csi-hostpathplugin-sa
containers:
- name: hostpath
image: registry.k8s.io/sig-storage/hostpathplugin:v1.13.0
image: registry.k8s.io/sig-storage/hostpathplugin:v1.14.0
args:
- "--drivername=hostpath.csi.k8s.io"
- "--v=5"
@@ -262,7 +262,7 @@ spec:
name: dev-dir

- name: csi-external-health-monitor-controller
image: registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.11.0
image: registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.12.1
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
@@ -276,7 +276,7 @@ spec:
mountPath: /csi

- name: node-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1
args:
- --v=5
- --csi-address=/csi/csi.sock
@@ -310,7 +310,7 @@ spec:
- --health-port=9898

- name: csi-attacher
image: registry.k8s.io/sig-storage/csi-attacher:v4.5.0
image: registry.k8s.io/sig-storage/csi-attacher:v4.6.1
args:
- --v=5
- --csi-address=/csi/csi.sock
@@ -324,7 +324,7 @@ spec:
name: socket-dir

- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.0
image: registry.k8s.io/sig-storage/csi-provisioner:v5.0.1
args:
- -v=5
- --csi-address=/csi/csi.sock
@@ -340,7 +340,7 @@ spec:
name: socket-dir

- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.10.0
image: registry.k8s.io/sig-storage/csi-resizer:v1.11.1
args:
- -v=5
- -csi-address=/csi/csi.sock
@@ -354,7 +354,7 @@ spec:
name: socket-dir

- name: csi-snapshotter
image: registry.k8s.io/sig-storage/csi-snapshotter:v7.0.1
image: registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1
args:
- -v=5
- --csi-address=/csi/csi.sock
@@ -66,7 +66,7 @@ spec:
topologyKey: kubernetes.io/hostname
containers:
- name: socat
image: registry.k8s.io/sig-storage/hostpathplugin:v1.13.0
image: registry.k8s.io/sig-storage/hostpathplugin:v1.14.0
command:
- socat
args:
@@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-attacher
image: registry.k8s.io/sig-storage/csi-attacher:v4.5.0
image: registry.k8s.io/sig-storage/csi-attacher:v4.6.1
args:
- --v=5
- --csi-address=$(ADDRESS)
@@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.10.0
image: registry.k8s.io/sig-storage/csi-resizer:v1.11.1
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
@@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-snapshotter
image: registry.k8s.io/sig-storage/csi-snapshotter:v7.0.1
image: registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
@@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.0
image: registry.k8s.io/sig-storage/csi-provisioner:v5.0.1
args:
- "--csi-address=$(ADDRESS)"
# Topology support is needed for the pod rescheduling test
@@ -34,7 +34,7 @@ spec:
- mountPath: /csi
name: socket-dir
- name: driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1
args:
- --v=5
- --csi-address=/csi/csi.sock
@@ -53,7 +53,7 @@ spec:
- mountPath: /registration
name: registration-dir
- name: mock
image: registry.k8s.io/sig-storage/hostpathplugin:v1.13.0
image: registry.k8s.io/sig-storage/hostpathplugin:v1.14.0
args:
- "--drivername=mock.storage.k8s.io"
- "--nodeid=$(KUBE_NODE_NAME)"
@@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.0
image: registry.k8s.io/sig-storage/csi-provisioner:v5.0.1
args:
- "--csi-address=$(ADDRESS)"
# Topology support is needed for the pod rescheduling test
@@ -35,7 +35,7 @@ spec:
- mountPath: /csi
name: socket-dir
- name: driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1
args:
- --v=5
- --csi-address=/csi/csi.sock
@@ -53,7 +53,7 @@ spec:
- mountPath: /registration
name: registration-dir
- name: mock
image: registry.k8s.io/sig-storage/hostpathplugin:v1.13.0
image: registry.k8s.io/sig-storage/hostpathplugin:v1.14.0
args:
- -v=5
- -nodeid=$(KUBE_NODE_NAME)