rebase: update kubernetes to 1.30

Updating kubernetes to the 1.30 release.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>

Commit e727bd351e (parent 62ddcf715b), committed by mergify[bot]
Mirror of https://github.com/ceph/ceph-csi.git

File: vendor/k8s.io/kubernetes/test/e2e/framework/expect.go (generated, vendored), 11 lines changed
@@ -293,13 +293,6 @@ func (f *FailureError) backtrace() {
// }
var ErrFailure error = FailureError{}
-
-// ExpectNotEqual expects the specified two are not the same, otherwise an exception raises
-//
-// Deprecated: use gomega.Expect().ToNot(gomega.Equal())
-func ExpectNotEqual(actual interface{}, extra interface{}, explain ...interface{}) {
-	gomega.ExpectWithOffset(1, actual).NotTo(gomega.Equal(extra), explain...)
-}

// ExpectError expects an error happens, otherwise an exception raises
//
// Deprecated: use gomega.Expect().To(gomega.HaveOccurred()) or (better!) check
@@ -350,9 +343,9 @@ func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
	// because it is not included in the failure message.
	var failure FailureError
	if errors.As(err, &failure) && failure.Backtrace() != "" {
-		Logf("Failed inside E2E framework:\n %s", strings.ReplaceAll(failure.Backtrace(), "\n", "\n "))
+		log(offset+1, fmt.Sprintf("Failed inside E2E framework:\n %s", strings.ReplaceAll(failure.Backtrace(), "\n", "\n ")))
	} else if !errors.Is(err, ErrFailure) {
-		Logf("Unexpected error: %s\n%s", prefix, format.Object(err, 1))
+		log(offset+1, fmt.Sprintf("Unexpected error: %s\n%s", prefix, format.Object(err, 1)))
	}
	Fail(prefix+err.Error(), 1+offset)
}
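
Since ExpectNotEqual is gone from the vendored framework, e2e callers assert inequality with gomega directly, as the deprecation comment already suggested. A minimal sketch of the migration (hypothetical test code, not part of this commit):

package e2e_test

import (
	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

var _ = ginkgo.It("asserts inequality without the removed helper", func() {
	gotPhase := "Running"
	// Previously: framework.ExpectNotEqual(gotPhase, "Failed", "unexpected pod phase")
	// Per the deprecation note, call gomega directly instead:
	gomega.Expect(gotPhase).ToNot(gomega.Equal("Failed"), "unexpected pod phase")
})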

File: vendor/k8s.io/kubernetes/test/e2e/framework/framework.go (generated, vendored), 2 lines changed
@@ -66,7 +66,7 @@ var (
	//
	// This can be used by extensions of the core framework to modify
	// settings in the framework instance or to add additional callbacks
-	// with gingko.BeforeEach/AfterEach/DeferCleanup.
+	// with ginkgo.BeforeEach/AfterEach/DeferCleanup.
	//
	// When a test runs, functions will be invoked in this order:
	// - BeforeEaches defined by tests before f.NewDefaultFramework
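
The corrected comment describes how extensions hook into the framework instance with additional ginkgo callbacks. A minimal sketch of a callback registered after NewDefaultFramework (hypothetical spec; it assumes the framework's own BeforeEach has already populated f.Namespace by the time this callback runs, which matches the registration order described above):

package e2e_test

import (
	"github.com/onsi/ginkgo/v2"
	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("framework extension", func() {
	f := framework.NewDefaultFramework("example")

	// Registered after NewDefaultFramework, so it runs after the framework's
	// own BeforeEach has created the client and the test namespace.
	ginkgo.BeforeEach(func() {
		framework.Logf("running against namespace %q", f.Namespace.Name)
	})

	ginkgo.It("uses the extended framework", func() {})
})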

File: vendor/k8s.io/kubernetes/test/e2e/framework/ginkgologger.go (generated, vendored, new file), 100 lines added
@@ -0,0 +1,100 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package framework contains provider-independent helper code for
// building and running E2E tests with Ginkgo. The actual Ginkgo test
// suites gets assembled by combining this framework, the optional
// provider support code and specific tests via a separate .go file
// like Kubernetes' test/e2e.go.
package framework

import (
	"flag"
	"fmt"
	"os"
	"strings"
	"time"

	"github.com/onsi/ginkgo/v2"
	ginkgotypes "github.com/onsi/ginkgo/v2/types"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/textlogger"

	_ "k8s.io/component-base/logs/testinit" // Ensure command line flags are registered.
)

var (
	logConfig = textlogger.NewConfig(
		textlogger.Output(ginkgo.GinkgoWriter),
		textlogger.Backtrace(unwind),
	)
	ginkgoLogger = textlogger.NewLogger(logConfig)
	TimeNow = time.Now // Can be stubbed out for testing.
	Pid = os.Getpid() // Can be stubbed out for testing.
)

func init() {
	// ktesting and testinit already registered the -v and -vmodule
	// command line flags. To configure the textlogger instead, we
	// need to swap out the flag.Value for those.
	var fs flag.FlagSet
	logConfig.AddFlags(&fs)
	fs.VisitAll(func(loggerFlag *flag.Flag) {
		klogFlag := flag.CommandLine.Lookup(loggerFlag.Name)
		if klogFlag != nil {
			klogFlag.Value = loggerFlag.Value
		}
	})

	// Now install the textlogger as the klog default logger.
	// Calls like klog.Info then will write to ginkgo.GingoWriter
	// through the textlogger.
	//
	// However, stack unwinding is then still being done by klog and thus
	// ignores ginkgo.GingkoHelper. Tests should use framework.Logf or
	// structured, contextual logging.
	writer, _ := ginkgoLogger.GetSink().(textlogger.KlogBufferWriter)
	opts := []klog.LoggerOption{
		klog.ContextualLogger(true),
		klog.WriteKlogBuffer(writer.WriteKlogBuffer),
	}
	klog.SetLoggerWithOptions(ginkgoLogger, opts...)
}

func unwind(skip int) (string, int) {
	location := ginkgotypes.NewCodeLocation(skip + 1)
	return location.FileName, location.LineNumber
}

// log re-implements klog.Info: same header, but stack unwinding
// with support for ginkgo.GinkgoWriter and skipping stack levels.
func log(offset int, msg string) {
	now := TimeNow()
	file, line := unwind(offset + 1)
	if file == "" {
		file = "???"
		line = 1
	} else if slash := strings.LastIndex(file, "/"); slash >= 0 {
		file = file[slash+1:]
	}
	_, month, day := now.Date()
	hour, minute, second := now.Clock()
	header := fmt.Sprintf("I%02d%02d %02d:%02d:%02d.%06d %d %s:%d]",
		month, day, hour, minute, second, now.Nanosecond()/1000, Pid, file, line)

	fmt.Fprintln(ginkgo.GinkgoWriter, header, msg)
}
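
For reference, the header that log() prepends mirrors klog's Info prefix (Immdd hh:mm:ss.uuuuuu pid file:line]). A standalone sketch of the same formatting with made-up values, not part of the vendored file:

package main

import (
	"fmt"
	"os"
	"time"
)

func main() {
	// Hypothetical inputs standing in for TimeNow(), Pid, and unwind().
	now := time.Date(2024, time.March, 5, 14, 30, 45, 123456000, time.UTC)
	pid := os.Getpid()
	file, line := "expect.go", 350

	_, month, day := now.Date()
	hour, minute, second := now.Clock()
	header := fmt.Sprintf("I%02d%02d %02d:%02d:%02d.%06d %d %s:%d]",
		month, day, hour, minute, second, now.Nanosecond()/1000, pid, file, line)
	fmt.Println(header, "Failed inside E2E framework: ...")
	// Prints something like:
	// I0305 14:30:45.123456 4242 expect.go:350] Failed inside E2E framework: ...
}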

File: vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper.go (generated, vendored), 32 lines changed
@@ -148,7 +148,7 @@ func ConformanceIt(args ...interface{}) bool {

// It is a wrapper around [ginkgo.It] which supports framework With* labels as
// optional arguments in addition to those already supported by ginkgo itself,
-// like [ginkgo.Label] and [gingko.Offset].
+// like [ginkgo.Label] and [ginkgo.Offset].
//
// Text and arguments may be mixed. The final text is a concatenation
// of the text arguments and special tags from the With functions.
@@ -163,7 +163,7 @@ func (f *Framework) It(args ...interface{}) bool {

// Describe is a wrapper around [ginkgo.Describe] which supports framework
// With* labels as optional arguments in addition to those already supported by
-// ginkgo itself, like [ginkgo.Label] and [gingko.Offset].
+// ginkgo itself, like [ginkgo.Label] and [ginkgo.Offset].
//
// Text and arguments may be mixed. The final text is a concatenation
// of the text arguments and special tags from the With functions.
@@ -178,7 +178,7 @@ func (f *Framework) Describe(args ...interface{}) bool {

// Context is a wrapper around [ginkgo.Context] which supports framework With*
// labels as optional arguments in addition to those already supported by
-// ginkgo itself, like [ginkgo.Label] and [gingko.Offset].
+// ginkgo itself, like [ginkgo.Label] and [ginkgo.Offset].
//
// Text and arguments may be mixed. The final text is a concatenation
// of the text arguments and special tags from the With functions.
@@ -248,7 +248,7 @@ func registerInSuite(ginkgoCall func(string, ...interface{}) bool, args []interf

var (
	tagRe                 = regexp.MustCompile(`\[.*?\]`)
-	deprecatedTags        = sets.New("Conformance", "NodeConformance", "Disruptive", "Serial", "Slow")
+	deprecatedTags        = sets.New("Conformance", "Flaky", "NodeConformance", "Disruptive", "Serial", "Slow")
	deprecatedTagPrefixes = sets.New("Environment", "Feature", "NodeFeature", "FeatureGate")
	deprecatedStability   = sets.New("Alpha", "Beta")
)
@@ -526,15 +526,37 @@ func withLabel(label string) interface{} {
	return newLabel(label)
}

+// WithFlaky specifies that a certain test or group of tests are failing randomly.
+// These tests are usually filtered out and ran separately from other tests.
+func WithFlaky() interface{} {
+	return withFlaky()
+}
+
+// WithFlaky is a shorthand for the corresponding package function.
+func (f *Framework) WithFlaky() interface{} {
+	return withFlaky()
+}
+
+func withFlaky() interface{} {
+	return newLabel("Flaky")
+}
+
type label struct {
	// parts get concatenated with ":" to build the full label.
	parts []string
	// extra is an optional fully-formed extra label.
	extra string
+	// explanation gets set for each label to help developers
+	// who pass a label to a ginkgo function. They need to use
+	// the corresponding framework function instead.
+	explanation string
}

func newLabel(parts ...string) label {
-	return label{parts: parts}
+	return label{
+		parts:       parts,
+		explanation: "If you see this as part of an 'Unknown Decorator' error from Ginkgo, then you need to replace the ginkgo.It/Context/Describe call with the corresponding framework.It/Context/Describe or (if available) f.It/Context/Describe.",
+	}
}

// TagsEqual can be used to check whether two tags are the same.
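
With the new decorator, a spec can carry the "Flaky" label through the framework wrappers instead of a bracketed tag in its name. A hedged sketch of a call site (hypothetical ceph-csi-style spec, assuming the usual f.It body signature taking a context.Context):

package e2e_test

import (
	"context"

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = framework.Describe("csi volume tests", func() {
	f := framework.NewDefaultFramework("csi")

	// WithFlaky adds the "Flaky" Ginkgo label, so the spec can be
	// filtered out with: ginkgo run --label-filter='!Flaky'
	f.It("occasionally misbehaves", f.WithFlaky(), func(ctx context.Context) {
		// ... test body ...
	})
})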

File: vendor/k8s.io/kubernetes/test/e2e/framework/log.go (generated, vendored), 15 lines changed
@@ -18,22 +18,17 @@ package framework

import (
	"fmt"
-	"time"

	"github.com/onsi/ginkgo/v2"
)

-func nowStamp() string {
-	return time.Now().Format(time.StampMilli)
-}
-
-func log(level string, format string, args ...interface{}) {
-	fmt.Fprintf(ginkgo.GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...)
-}
-
// Logf logs the info.
+//
+// Use this instead of `klog.Infof` because stack unwinding automatically
+// skips over helper functions which marked themselves as helper by
+// calling [ginkgo.GinkgoHelper].
func Logf(format string, args ...interface{}) {
-	log("INFO", format, args...)
+	log(1, fmt.Sprintf(format, args...))
}

// Failf logs the fail info, including a stack trace starts with its direct caller
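
Because Logf now routes through the framework's klog-style log() with Ginkgo-aware stack unwinding, a test helper can mark itself with ginkgo.GinkgoHelper() so the file:line in the header points at its caller. A small hypothetical helper to illustrate:

package e2e_test

import (
	"github.com/onsi/ginkgo/v2"
	"k8s.io/kubernetes/test/e2e/framework"
)

// logStep is a hypothetical test helper. GinkgoHelper() lets the stack
// unwinding behind framework.Logf skip this frame, so the logged
// file:line is the caller's rather than logStep's.
func logStep(what string) {
	ginkgo.GinkgoHelper()
	framework.Logf("step: %s", what)
}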

File: vendor/k8s.io/kubernetes/test/e2e/framework/pod/wait.go (generated, vendored), 38 lines changed
@@ -75,7 +75,7 @@ func BeRunningNoRetries() types.GomegaMatcher {
		gcustom.MakeMatcher(func(pod *v1.Pod) (bool, error) {
			switch pod.Status.Phase {
			case v1.PodFailed, v1.PodSucceeded:
-				return false, gomega.StopTrying(fmt.Sprintf("Expected pod to reach phase %q, got final phase %q instead.", v1.PodRunning, pod.Status.Phase))
+				return false, gomega.StopTrying(fmt.Sprintf("Expected pod to reach phase %q, got final phase %q instead:\n%s", v1.PodRunning, pod.Status.Phase, format.Object(pod, 1)))
			default:
				return true, nil
			}
@@ -335,8 +335,8 @@ func RunningReady(p *v1.Pod) bool {
}

// WaitForPodsRunning waits for a given `timeout` to evaluate if a certain amount of pods in given `ns` are running.
-func WaitForPodsRunning(c clientset.Interface, ns string, num int, timeout time.Duration) error {
-	_, err := WaitForPods(context.TODO(), c, ns, metav1.ListOptions{}, Range{MinMatching: num, MaxMatching: num}, timeout,
+func WaitForPodsRunning(ctx context.Context, c clientset.Interface, ns string, num int, timeout time.Duration) error {
+	_, err := WaitForPods(ctx, c, ns, metav1.ListOptions{}, Range{MinMatching: num, MaxMatching: num}, timeout,
		"be running and ready", func(pod *v1.Pod) bool {
			ready, _ := testutils.PodRunningReady(pod)
			return ready
@@ -345,8 +345,8 @@ func WaitForPodsRunning(c clientset.Interface, ns string, num int, timeout time.
}

// WaitForPodsSchedulingGated waits for a given `timeout` to evaluate if a certain amount of pods in given `ns` stay in scheduling gated state.
-func WaitForPodsSchedulingGated(c clientset.Interface, ns string, num int, timeout time.Duration) error {
-	_, err := WaitForPods(context.TODO(), c, ns, metav1.ListOptions{}, Range{MinMatching: num, MaxMatching: num}, timeout,
+func WaitForPodsSchedulingGated(ctx context.Context, c clientset.Interface, ns string, num int, timeout time.Duration) error {
+	_, err := WaitForPods(ctx, c, ns, metav1.ListOptions{}, Range{MinMatching: num, MaxMatching: num}, timeout,
		"be in scheduling gated state", func(pod *v1.Pod) bool {
			for _, condition := range pod.Status.Conditions {
				if condition.Type == v1.PodScheduled && condition.Reason == v1.PodReasonSchedulingGated {
@@ -360,8 +360,8 @@ func WaitForPodsSchedulingGated(c clientset.Interface, ns string, num int, timeo

// WaitForPodsWithSchedulingGates waits for a given `timeout` to evaluate if a certain amount of pods in given `ns`
// match the given `schedulingGates`stay in scheduling gated state.
-func WaitForPodsWithSchedulingGates(c clientset.Interface, ns string, num int, timeout time.Duration, schedulingGates []v1.PodSchedulingGate) error {
-	_, err := WaitForPods(context.TODO(), c, ns, metav1.ListOptions{}, Range{MinMatching: num, MaxMatching: num}, timeout,
+func WaitForPodsWithSchedulingGates(ctx context.Context, c clientset.Interface, ns string, num int, timeout time.Duration, schedulingGates []v1.PodSchedulingGate) error {
+	_, err := WaitForPods(ctx, c, ns, metav1.ListOptions{}, Range{MinMatching: num, MaxMatching: num}, timeout,
		"have certain scheduling gates", func(pod *v1.Pod) bool {
			return reflect.DeepEqual(pod.Spec.SchedulingGates, schedulingGates)
		})
@@ -401,14 +401,14 @@ func WaitForPodTerminatingInNamespaceTimeout(ctx context.Context, c clientset.In
func WaitForPodSuccessInNamespaceTimeout(ctx context.Context, c clientset.Interface, podName, namespace string, timeout time.Duration) error {
	return WaitForPodCondition(ctx, c, namespace, podName, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout, func(pod *v1.Pod) (bool, error) {
		if pod.DeletionTimestamp == nil && pod.Spec.RestartPolicy == v1.RestartPolicyAlways {
-			return true, fmt.Errorf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName)
+			return true, gomega.StopTrying(fmt.Sprintf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName))
		}
		switch pod.Status.Phase {
		case v1.PodSucceeded:
			ginkgo.By("Saw pod success")
			return true, nil
		case v1.PodFailed:
-			return true, fmt.Errorf("pod %q failed with status: %+v", podName, pod.Status)
+			return true, gomega.StopTrying(fmt.Sprintf("pod %q failed with status: %+v", podName, pod.Status))
		default:
			return false, nil
		}
@@ -518,11 +518,6 @@ func WaitForPodSuccessInNamespace(ctx context.Context, c clientset.Interface, po
	return WaitForPodSuccessInNamespaceTimeout(ctx, c, podName, namespace, podStartTimeout)
}

-// WaitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or until slowPodStartupTimeout.
-func WaitForPodSuccessInNamespaceSlow(ctx context.Context, c clientset.Interface, podName string, namespace string) error {
-	return WaitForPodSuccessInNamespaceTimeout(ctx, c, podName, namespace, slowPodStartTimeout)
-}
-
// WaitForPodNotFoundInNamespace returns an error if it takes too long for the pod to fully terminate.
// Unlike `waitForPodTerminatedInNamespace`, the pod's Phase and Reason are ignored. If the pod Get
// api returns IsNotFound then the wait stops and nil is returned. If the Get api returns an error other
@@ -779,3 +774,18 @@ func WaitForContainerRunning(ctx context.Context, c clientset.Interface, namespa
		return false, nil
	})
}
+
+// WaitForContainerTerminated waits for the given Pod container to have a state of terminated
+func WaitForContainerTerminated(ctx context.Context, c clientset.Interface, namespace, podName, containerName string, timeout time.Duration) error {
+	conditionDesc := fmt.Sprintf("container %s terminated", containerName)
+	return WaitForPodCondition(ctx, c, namespace, podName, conditionDesc, timeout, func(pod *v1.Pod) (bool, error) {
+		for _, statuses := range [][]v1.ContainerStatus{pod.Status.ContainerStatuses, pod.Status.InitContainerStatuses, pod.Status.EphemeralContainerStatuses} {
+			for _, cs := range statuses {
+				if cs.Name == containerName {
+					return cs.State.Terminated != nil, nil
+				}
+			}
+		}
+		return false, nil
+	})
+}
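
The Wait* helpers above now take a context from the caller instead of creating one with context.TODO(), so existing call sites just thread their test context through. A hedged sketch of an updated caller (hypothetical wrapper, namespace and counts made up):

package e2e_test

import (
	"context"
	"time"

	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitForRunningPods threads the caller's ctx into the helper, which the
// updated signature now requires.
func waitForRunningPods(ctx context.Context, c clientset.Interface, ns string, count int) error {
	return e2epod.WaitForPodsRunning(ctx, c, ns, count, 2*time.Minute)
}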

File: vendor/k8s.io/kubernetes/test/e2e/framework/provider_less.go (generated, vendored, new file), 27 lines added
@@ -0,0 +1,27 @@
//go:build providerless
// +build providerless

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

func init() {
	// fake "gce"
	RegisterProvider("gce", func() (ProviderInterface, error) {
		return NullProvider{}, nil
	})
}

File: vendor/k8s.io/kubernetes/test/e2e/framework/skipper/skipper.go (generated, vendored), 41 lines changed
@@ -22,13 +22,10 @@ import (

	"github.com/onsi/ginkgo/v2"

-	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/runtime/schema"
	utilversion "k8s.io/apimachinery/pkg/util/version"
	"k8s.io/client-go/discovery"
-	"k8s.io/client-go/dynamic"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/component-base/featuregate"
	"k8s.io/kubernetes/test/e2e/framework"
@@ -85,34 +82,6 @@ func SkipUnlessFeatureGateEnabled(gate featuregate.Feature) {
	}
}

-// SkipIfFeatureGateEnabled skips if the feature is enabled.
-//
-// Beware that this only works in test suites that have a --feature-gate
-// parameter and call InitFeatureGates. In test/e2e, the `Feature: XYZ` tag
-// has to be used instead and invocations have to make sure that they
-// only run tests that work with the given test cluster.
-func SkipIfFeatureGateEnabled(gate featuregate.Feature) {
-	if featureGate == nil {
-		framework.Failf("Feature gate checking is not enabled, don't use SkipFeatureGateEnabled(%v). Instead use the Feature tag.", gate)
-	}
-	if featureGate.Enabled(gate) {
-		skipInternalf(1, "Only supported when %v feature is disabled", gate)
-	}
-}
-
-// SkipIfMissingResource skips if the gvr resource is missing.
-func SkipIfMissingResource(ctx context.Context, dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, namespace string) {
-	resourceClient := dynamicClient.Resource(gvr).Namespace(namespace)
-	_, err := resourceClient.List(ctx, metav1.ListOptions{})
-	if err != nil {
-		// not all resources support list, so we ignore those
-		if apierrors.IsMethodNotSupported(err) || apierrors.IsNotFound(err) || apierrors.IsForbidden(err) {
-			skipInternalf(1, "Could not find %s resource, skipping test: %#v", gvr, err)
-		}
-		framework.Failf("Unexpected error getting %v: %v", gvr, err)
-	}
-}
-
// SkipUnlessNodeCountIsAtLeast skips if the number of nodes is less than the minNodeCount.
func SkipUnlessNodeCountIsAtLeast(minNodeCount int) {
	if framework.TestContext.CloudConfig.NumNodes < minNodeCount {
@@ -230,16 +199,6 @@ func SkipIfAppArmorNotSupported() {
	SkipUnlessNodeOSDistroIs(AppArmorDistros...)
}

-// RunIfSystemSpecNameIs runs if the system spec name is included in the names.
-func RunIfSystemSpecNameIs(names ...string) {
-	for _, name := range names {
-		if name == framework.TestContext.SystemSpecName {
-			return
-		}
-	}
-	skipInternalf(1, "Skipped because system spec name %q is not in %v", framework.TestContext.SystemSpecName, names)
-}
-
// SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem run if the component run as pods and client can delete them
func SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(ctx context.Context, componentName string, c clientset.Interface, ns string, labelSet labels.Set) {
	// verify if component run as pod
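
Of the skipper helpers shown here, SkipUnlessNodeCountIsAtLeast survives this cleanup. A short hypothetical use in a spec (the e2eskipper import alias is the usual convention, not mandated by the package):

package e2e_test

import (
	"github.com/onsi/ginkgo/v2"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
)

var _ = ginkgo.It("needs a multi-node cluster", func() {
	// Skips (rather than fails) when the cluster under test is too small.
	e2eskipper.SkipUnlessNodeCountIsAtLeast(3)
	// ... test body ...
})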

File: vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go (generated, vendored), 19 lines changed
@@ -465,10 +465,10 @@ func RegisterClusterFlags(flags *flag.FlagSet) {
	flags.DurationVar(&nodeKiller.SimulatedDowntime, "node-killer-simulated-downtime", 10*time.Minute, "A delay between node death and recreation")
}

-// GenerateSecureToken returns a string of length tokenLen, consisting
+// generateSecureToken returns a string of length tokenLen, consisting
// of random bytes encoded as base64 for use as a Bearer Token during
// communication with an APIServer
-func GenerateSecureToken(tokenLen int) (string, error) {
+func generateSecureToken(tokenLen int) (string, error) {
	// Number of bytes to be tokenLen when base64 encoded.
	tokenSize := math.Ceil(float64(tokenLen) * 6 / 8)
	rawToken := make([]byte, int(tokenSize))
@@ -492,13 +492,6 @@ func AfterReadingAllFlags(t *TestContextType) {
	if t.KubeTestRepoList != "" {
		image.Init(t.KubeTestRepoList)
	}
-	var fs flag.FlagSet
-	klog.InitFlags(&fs)
-	fs.Set("logtostderr", "false")
-	fs.Set("alsologtostderr", "false")
-	fs.Set("one_output", "true")
-	fs.Set("stderrthreshold", "10" /* higher than any of the severities -> none pass the threshold */)
-	klog.SetOutput(ginkgo.GinkgoWriter)

	if t.ListImages {
		for _, v := range image.GetImageConfigs() {
@@ -548,10 +541,8 @@ func AfterReadingAllFlags(t *TestContextType) {
	}
	if len(t.BearerToken) == 0 {
		var err error
-		t.BearerToken, err = GenerateSecureToken(16)
-		if err != nil {
-			klog.Fatalf("Failed to generate bearer token: %v", err)
-		}
+		t.BearerToken, err = generateSecureToken(16)
+		ExpectNoError(err, "Failed to generate bearer token")
	}

	// Allow 1% of nodes to be unready (statistically) - relevant for large clusters.
@@ -647,7 +638,7 @@ func listTestInformation(report ginkgo.Report) {
			labels.Insert(spec.Labels()...)
		}
	}
-	fmt.Fprintf(Output, "The following labels can be used with 'gingko run --label-filter':\n%s%s\n\n", indent, strings.Join(sets.List(labels), "\n"+indent))
+	fmt.Fprintf(Output, "The following labels can be used with 'ginkgo run --label-filter':\n%s%s\n\n", indent, strings.Join(sets.List(labels), "\n"+indent))
	}
	if TestContext.listTests {
		leafs := make([][]string, 0, len(report.SpecReports))
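
The renamed generateSecureToken sizes its random buffer as ceil(tokenLen*6/8) bytes because every base64 character carries 6 bits, so that many bytes encode to at least tokenLen characters. A standalone sketch of the same arithmetic (not the vendored implementation; the exact base64 encoding variant is an assumption):

package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"math"
)

// secureToken mirrors the idea behind generateSecureToken: request
// ceil(tokenLen*6/8) random bytes, then base64-encode them.
func secureToken(tokenLen int) (string, error) {
	tokenSize := int(math.Ceil(float64(tokenLen) * 6 / 8))
	raw := make([]byte, tokenSize)
	if _, err := rand.Read(raw); err != nil {
		return "", err
	}
	// 12 random bytes -> exactly 16 URL-safe base64 characters.
	return base64.RawURLEncoding.EncodeToString(raw)[:tokenLen], nil
}

func main() {
	tok, err := secureToken(16)
	fmt.Println(tok, err)
}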