rebase: bump the golang-dependencies group with 1 update

Bumps the golang-dependencies group with 1 update: [golang.org/x/crypto](https://github.com/golang/crypto).


Updates `golang.org/x/crypto` from 0.16.0 to 0.17.0
- [Commits](https://github.com/golang/crypto/compare/v0.16.0...v0.17.0)
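For reference, the resulting go.mod change is just a version-pin bump; a minimal sketch, assuming a direct requirement (the module path is illustrative):

```
module example.com/project // hypothetical module path

go 1.21

require golang.org/x/crypto v0.17.0 // bumped from v0.16.0
```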

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Author: dependabot[bot]
Date: 2023-12-18 20:31:00 +00:00
Committed by: mergify[bot]
Parent: 1ad79314f9
Commit: e5d9b68d36

398 changed files with 33924 additions and 10753 deletions


@@ -4,21 +4,62 @@ rules:
# The following packages are okay to use:
#
# public API
- selectorRegexp: ^k8s[.]io/(api|apimachinery|client-go|component-base|klog|pod-security-admission|utils)/|^[a-z]+(/|$)|github.com/onsi/(ginkgo|gomega)|^k8s[.]io/kubernetes/test/(e2e/framework/internal/|utils)
- selectorRegexp: ^k8s[.]io/(api|apimachinery|client-go|component-base|klog|pod-security-admission|utils)
allowedPrefixes: [ "" ]
# stdlib
- selectorRegexp: ^[a-z]+(/|$)
allowedPrefixes: [ "" ]
# Ginkgo + Gomega.
- selectorRegexp: github.com/onsi/(ginkgo|gomega)|^k8s[.]io/kubernetes/test/(e2e/framework/internal/|utils)
# stdlib x and proto
- selectorRegexp: ^golang.org/x|^google.golang.org/protobuf
allowedPrefixes: [ "" ]
# Ginkgo + Gomega
- selectorRegexp: ^github.com/onsi/(ginkgo|gomega)
allowedPrefixes: [ "" ]
# kube-openapi
- selectorRegexp: ^k8s.io/kube-openapi
allowedPrefixes: [ "" ]
# Public SIG Repos
- selectorRegexp: ^sigs.k8s.io/(json|yaml|structured-merge-diff)
allowedPrefixes: [ "" ]
# some of the shared test helpers (but not E2E sub-packages!)
- selectorRegexp: ^k8s[.]io/kubernetes/test/(e2e/framework/internal/|utils)
allowedPrefixes: [ "" ]
# Third party deps
- selectorRegexp: ^github.com/|^gopkg.in
allowedPrefixes: [
"gopkg.in/inf.v0",
"gopkg.in/yaml.v2",
"github.com/blang/semver/",
"github.com/davecgh/go-spew/spew",
"github.com/evanphx/json-patch",
"github.com/go-logr/logr",
"github.com/gogo/protobuf/proto",
"github.com/gogo/protobuf/sortkeys",
"github.com/golang/protobuf/proto",
"github.com/google/gnostic-models/openapiv2",
"github.com/google/gnostic-models/openapiv3",
"github.com/google/go-cmp/cmp",
"github.com/google/go-cmp/cmp/cmpopts",
"github.com/google/gofuzz",
"github.com/google/uuid",
"github.com/imdario/mergo",
"github.com/prometheus/client_golang/",
"github.com/prometheus/client_model/",
"github.com/prometheus/common/",
"github.com/prometheus/procfs",
"github.com/spf13/cobra",
"github.com/spf13/pflag",
"github.com/stretchr/testify/assert",
"github.com/stretchr/testify/require"
]
# Everything else isn't.
#
# In particular importing any test/e2e/framework/* package would be a


@@ -2,7 +2,6 @@
approvers:
- andrewsykim
- fabriziopandini
- pohly
- oomichi
- neolit123
@@ -10,7 +9,6 @@ approvers:
reviewers:
- sig-testing-reviewers
- andrewsykim
- fabriziopandini
- pohly
- oomichi
- neolit123
@@ -18,4 +16,5 @@ reviewers:
labels:
- area/e2e-test-framework
emeritus_approvers:
- fabriziopandini
- timothysc


@@ -4,7 +4,7 @@ The Kubernetes E2E framework simplifies writing Ginkgo test suites. Its main
usage is for these test suites in the Kubernetes repository itself:
- test/e2e: runs as client for a Kubernetes cluster. The e2e.test binary is
used for conformance testing.
- test/e2e_node: runs on the same node as a kublet instance. Used for testing
- test/e2e_node: runs on the same node as a kubelet instance. Used for testing
kubelet.
- test/e2e_kubeadm: test suite for kubeadm.

vendor/k8s.io/kubernetes/test/e2e/framework/bugs.go (new file, 108 lines, generated, vendored)

@@ -0,0 +1,108 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"errors"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"github.com/onsi/ginkgo/v2/types"
)
var (
bugs []Bug
bugMutex sync.Mutex
)
// RecordBug stores information about a bug in the E2E suite source code that
// cannot be reported through ginkgo.Fail because it was found outside of some
// test, for example during test registration.
//
// This can be used instead of raising a panic. Then all bugs can be reported
// together instead of failing after the first one.
func RecordBug(bug Bug) {
bugMutex.Lock()
defer bugMutex.Unlock()
bugs = append(bugs, bug)
}
type Bug struct {
FileName string
LineNumber int
Message string
}
// NewBug creates a new bug with a location that is obtained by skipping a certain number
// of stack frames. Passing zero will record the source code location of the direct caller
// of NewBug.
func NewBug(message string, skip int) Bug {
location := types.NewCodeLocation(skip + 1)
return Bug{FileName: location.FileName, LineNumber: location.LineNumber, Message: message}
}
// FormatBugs produces a report that includes all bugs recorded earlier via
// RecordBug. An error is returned with the report if there have been bugs.
func FormatBugs() error {
bugMutex.Lock()
defer bugMutex.Unlock()
if len(bugs) == 0 {
return nil
}
lines := make([]string, 0, len(bugs))
wd, err := os.Getwd()
if err != nil {
return fmt.Errorf("get current directory: %v", err)
}
// Sort by file name, line number, message. For the sake of simplicity
// this uses the full file name even though the output may use a
// relative path. Usually the result should be the same because full
// paths will all have the same prefix.
sort.Slice(bugs, func(i, j int) bool {
switch strings.Compare(bugs[i].FileName, bugs[j].FileName) {
case -1:
return true
case 1:
return false
}
if bugs[i].LineNumber < bugs[j].LineNumber {
return true
}
if bugs[i].LineNumber > bugs[j].LineNumber {
return false
}
return bugs[i].Message < bugs[j].Message
})
for _, bug := range bugs {
// Use relative paths, if possible.
path := bug.FileName
if wd != "" {
if relpath, err := filepath.Rel(wd, bug.FileName); err == nil {
path = relpath
}
}
lines = append(lines, fmt.Sprintf("ERROR: %s:%d: %s\n", path, bug.LineNumber, strings.TrimSpace(bug.Message)))
}
return errors.New(strings.Join(lines, ""))
}
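A minimal usage sketch of this API; the checkName helper and the main wiring are hypothetical, not part of the framework:

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"k8s.io/kubernetes/test/e2e/framework"
)

// checkName is a hypothetical registration-time validation helper.
func checkName(name string) {
	if strings.Contains(name, "  ") {
		// Record the problem instead of panicking so that all bugs
		// can be reported together after registration.
		framework.RecordBug(framework.NewBug(fmt.Sprintf("double space in test name: %q", name), 1))
	}
}

func main() {
	checkName("should  work") // contains a double space
	if err := framework.FormatBugs(); err != nil {
		fmt.Fprint(os.Stderr, err.Error()) // one "ERROR: file:line: message" per bug
		os.Exit(1)
	}
}
```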


@@ -212,8 +212,9 @@ func newAsyncAssertion(ctx context.Context, args []interface{}, consistently boo
args: args,
// PodStart is used as default because waiting for a pod is the
// most common operation.
timeout: TestContext.timeouts.PodStart,
interval: TestContext.timeouts.Poll,
timeout: TestContext.timeouts.PodStart,
interval: TestContext.timeouts.Poll,
consistently: consistently,
}
}
@@ -292,13 +293,6 @@ func (f *FailureError) backtrace() {
// }
var ErrFailure error = FailureError{}
// ExpectEqual expects the specified two are the same, otherwise an exception raises
//
// Deprecated: use gomega.Expect().To(gomega.Equal())
func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) {
gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...)
}
// ExpectNotEqual expects the specified two values to not be equal; otherwise it raises an exception
//
// Deprecated: use gomega.Expect().ToNot(gomega.Equal())
@@ -362,24 +356,3 @@ func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
}
Fail(prefix+err.Error(), 1+offset)
}
// ExpectConsistOf expects actual contains precisely the extra elements. The ordering of the elements does not matter.
//
// Deprecated: use gomega.Expect().To(gomega.ConsistOf()) instead
func ExpectConsistOf(actual interface{}, extra interface{}, explain ...interface{}) {
gomega.ExpectWithOffset(1, actual).To(gomega.ConsistOf(extra), explain...)
}
// ExpectHaveKey expects the actual map has the key in the keyset
//
// Deprecated: use gomega.Expect().To(gomega.HaveKey()) instead
func ExpectHaveKey(actual interface{}, key interface{}, explain ...interface{}) {
gomega.ExpectWithOffset(1, actual).To(gomega.HaveKey(key), explain...)
}
// ExpectEmpty expects actual is empty
//
// Deprecated: use gomega.Expect().To(gomega.BeEmpty()) instead
func ExpectEmpty(actual interface{}, explain ...interface{}) {
gomega.ExpectWithOffset(1, actual).To(gomega.BeEmpty(), explain...)
}
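A short migration sketch for the helpers removed above, assuming typical call sites (the surrounding function and values are illustrative):

```go
package e2e

import (
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
)

// migrated shows the plain gomega equivalents of the deleted helpers.
func migrated(node *v1.Node, pods []v1.Pod, got, want []string) {
	gomega.Expect(node.Labels).To(gomega.HaveKey("kubernetes.io/hostname")) // was framework.ExpectHaveKey
	gomega.Expect(pods).To(gomega.BeEmpty())                                // was framework.ExpectEmpty
	gomega.Expect(got).To(gomega.ConsistOf(want))                           // was framework.ExpectConsistOf
	gomega.Expect(got).To(gomega.Equal(want))                               // was framework.ExpectEqual
}
```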


@@ -17,13 +17,73 @@ limitations under the License.
package framework
import (
"fmt"
"path"
"reflect"
"regexp"
"slices"
"strings"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2/types"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/featuregate"
)
// Feature is the name of a certain feature that the cluster under test must have.
// Such features are different from feature gates.
type Feature string
// Environment is the name for the environment in which a test can run, like
// "Linux" or "Windows".
type Environment string
// NodeFeature is the name of a feature that a node must support. To be
// removed, see
// https://github.com/kubernetes/enhancements/tree/master/keps/sig-testing/3041-node-conformance-and-features#nodefeature.
type NodeFeature string
type Valid[T comparable] struct {
items sets.Set[T]
frozen bool
}
// Add registers a new valid item name. The expected usage is
//
// var SomeFeature = framework.ValidFeatures.Add("Some")
//
// during the init phase of an E2E suite. Individual tests should not register
// their own, to avoid uncontrolled proliferation of new items. E2E suites can,
// but don't have to, enforce that by freezing the set of valid names.
func (v *Valid[T]) Add(item T) T {
if v.frozen {
RecordBug(NewBug(fmt.Sprintf(`registry %T is already frozen, "%v" must not be added anymore`, *v, item), 1))
}
if v.items == nil {
v.items = sets.New[T]()
}
if v.items.Has(item) {
RecordBug(NewBug(fmt.Sprintf(`registry %T already contains "%v", it must not be added again`, *v, item), 1))
}
v.items.Insert(item)
return item
}
func (v *Valid[T]) Freeze() {
v.frozen = true
}
// These variables contain the parameters that [WithFeature], [WithEnvironment]
// and [WithNodeFeatures] accept. The framework itself has no pre-defined
// constants. Test suites and tests may define their own and then add them here
// before calling these With functions.
var (
ValidFeatures Valid[Feature]
ValidEnvironments Valid[Environment]
ValidNodeFeatures Valid[NodeFeature]
)
var errInterface = reflect.TypeOf((*error)(nil)).Elem()
@@ -65,8 +125,433 @@ func AnnotatedLocationWithOffset(annotation string, offset int) types.CodeLocati
return codeLocation
}
// ConformanceIt is wrapper function for ginkgo It. Adds "[Conformance]" tag and makes static analysis easier.
func ConformanceIt(text string, args ...interface{}) bool {
args = append(args, ginkgo.Offset(1))
return ginkgo.It(text+" [Conformance]", args...)
// SIGDescribe returns a wrapper function for ginkgo.Describe which injects
// the SIG name as annotation. The parameter should be lowercase with
// no spaces and no sig- or SIG- prefix.
func SIGDescribe(sig string) func(...interface{}) bool {
if !sigRE.MatchString(sig) || strings.HasPrefix(sig, "sig-") {
RecordBug(NewBug(fmt.Sprintf("SIG label must be lowercase, no spaces and no sig- prefix, got instead: %q", sig), 1))
}
return func(args ...interface{}) bool {
args = append([]interface{}{WithLabel("sig-" + sig)}, args...)
return registerInSuite(ginkgo.Describe, args)
}
}
var sigRE = regexp.MustCompile(`^[a-z]+(-[a-z]+)*$`)
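A usage sketch, mirroring how the storage test utils adopt this later in the commit (the package name is illustrative):

```go
package storage

import "k8s.io/kubernetes/test/e2e/framework"

// Every spec registered through SIGDescribe gets the "sig-storage"
// ginkgo label and the "[sig-storage]" prefix in its full name.
var SIGDescribe = framework.SIGDescribe("storage")

var _ = SIGDescribe("Projected configMap", func() {
	// specs go here
})
```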
// ConformanceIt is a wrapper function for ginkgo.It. It adds the "[Conformance]" tag and makes static analysis easier.
func ConformanceIt(args ...interface{}) bool {
args = append(args, ginkgo.Offset(1), WithConformance())
return It(args...)
}
// It is a wrapper around [ginkgo.It] which supports framework With* labels as
// optional arguments in addition to those already supported by ginkgo itself,
// like [ginkgo.Label] and [ginkgo.Offset].
//
// Text and arguments may be mixed. The final text is a concatenation
// of the text arguments and special tags from the With functions.
func It(args ...interface{}) bool {
return registerInSuite(ginkgo.It, args)
}
// It is a shorthand for the corresponding package function.
func (f *Framework) It(args ...interface{}) bool {
return registerInSuite(ginkgo.It, args)
}
// Describe is a wrapper around [ginkgo.Describe] which supports framework
// With* labels as optional arguments in addition to those already supported by
// ginkgo itself, like [ginkgo.Label] and [ginkgo.Offset].
//
// Text and arguments may be mixed. The final text is a concatenation
// of the text arguments and special tags from the With functions.
func Describe(args ...interface{}) bool {
return registerInSuite(ginkgo.Describe, args)
}
// Describe is a shorthand for the corresponding package function.
func (f *Framework) Describe(args ...interface{}) bool {
return registerInSuite(ginkgo.Describe, args)
}
// Context is a wrapper around [ginkgo.Context] which supports framework With*
// labels as optional arguments in addition to those already supported by
// ginkgo itself, like [ginkgo.Label] and [ginkgo.Offset].
//
// Text and arguments may be mixed. The final text is a concatenation
// of the text arguments and special tags from the With functions.
func Context(args ...interface{}) bool {
return registerInSuite(ginkgo.Context, args)
}
// Context is a shorthand for the corresponding package function.
func (f *Framework) Context(args ...interface{}) bool {
return registerInSuite(ginkgo.Context, args)
}
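A hedged sketch of how these wrappers compose; the test names and body are illustrative:

```go
package e2e

import "k8s.io/kubernetes/test/e2e/framework"

// The full spec text becomes
// "device plugin [Serial] [Disruptive] recovers after a kubelet restart [Slow]",
// and each tag is also a ginkgo label usable with --label-filter.
var _ = framework.Describe("device plugin", framework.WithSerial(), framework.WithDisruptive(), func() {
	framework.It("recovers after a kubelet restart", framework.WithSlow(), func() {
		// test body
	})
})
```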
// registerInSuite is the common implementation of all wrapper functions. It
// expects to be called through one intermediate wrapper.
func registerInSuite(ginkgoCall func(string, ...interface{}) bool, args []interface{}) bool {
var ginkgoArgs []interface{}
var offset ginkgo.Offset
var texts []string
addLabel := func(label string) {
texts = append(texts, fmt.Sprintf("[%s]", label))
ginkgoArgs = append(ginkgoArgs, ginkgo.Label(label))
}
haveEmptyStrings := false
for _, arg := range args {
switch arg := arg.(type) {
case label:
fullLabel := strings.Join(arg.parts, ":")
addLabel(fullLabel)
if arg.extra != "" {
addLabel(arg.extra)
}
if fullLabel == "Serial" {
ginkgoArgs = append(ginkgoArgs, ginkgo.Serial)
}
case ginkgo.Offset:
offset = arg
case string:
if arg == "" {
haveEmptyStrings = true
}
texts = append(texts, arg)
default:
ginkgoArgs = append(ginkgoArgs, arg)
}
}
offset += 2 // This function and its direct caller.
// Now that we have the final offset, we can record bugs.
if haveEmptyStrings {
RecordBug(NewBug("empty strings as separators are unnecessary and need to be removed", int(offset)))
}
// Enforce that text snippets do not start or end with spaces because
// those lead to double spaces when concatenating below.
for _, text := range texts {
if strings.HasPrefix(text, " ") || strings.HasSuffix(text, " ") {
RecordBug(NewBug(fmt.Sprintf("trailing or leading spaces are unnecessary and need to be removed: %q", text), int(offset)))
}
}
ginkgoArgs = append(ginkgoArgs, offset)
text := strings.Join(texts, " ")
return ginkgoCall(text, ginkgoArgs...)
}
var (
tagRe = regexp.MustCompile(`\[.*?\]`)
deprecatedTags = sets.New("Conformance", "NodeConformance", "Disruptive", "Serial", "Slow")
deprecatedTagPrefixes = sets.New("Environment", "Feature", "NodeFeature", "FeatureGate")
deprecatedStability = sets.New("Alpha", "Beta")
)
// validateSpecs checks that the test specs were registered as intended.
func validateSpecs(specs types.SpecReports) {
checked := sets.New[call]()
for _, spec := range specs {
for i, text := range spec.ContainerHierarchyTexts {
c := call{
text: text,
location: spec.ContainerHierarchyLocations[i],
}
if checked.Has(c) {
// No need to check the same container more than once.
continue
}
checked.Insert(c)
validateText(c.location, text, spec.ContainerHierarchyLabels[i])
}
c := call{
text: spec.LeafNodeText,
location: spec.LeafNodeLocation,
}
if !checked.Has(c) {
validateText(spec.LeafNodeLocation, spec.LeafNodeText, spec.LeafNodeLabels)
checked.Insert(c)
}
}
}
// call acts as a (mostly) unique identifier for a container node call like
// Describe or Context. It's not perfect because theoretically a line might
// have multiple calls with the same text, but that isn't a problem in
// practice.
type call struct {
text string
location types.CodeLocation
}
// validateText checks for some known tags that should not be added through the
// plain text strings anymore. Eventually, all such tags should get replaced
// with the new APIs.
func validateText(location types.CodeLocation, text string, labels []string) {
for _, tag := range tagRe.FindAllString(text, -1) {
if tag == "[]" {
recordTextBug(location, "[] in plain text is invalid")
continue
}
// Strip square brackets.
tag = tag[1 : len(tag)-1]
if slices.Contains(labels, tag) {
// Okay, was also set as label.
continue
}
if deprecatedTags.Has(tag) {
recordTextBug(location, fmt.Sprintf("[%s] in plain text is deprecated and must be added through With%s instead", tag, tag))
}
if deprecatedStability.Has(tag) {
recordTextBug(location, fmt.Sprintf("[%s] in plain text is deprecated and must be added by defining the feature gate through WithFeatureGate instead", tag))
}
if index := strings.Index(tag, ":"); index > 0 {
prefix := tag[:index]
if deprecatedTagPrefixes.Has(prefix) {
recordTextBug(location, fmt.Sprintf("[%s] in plain text is deprecated and must be added through With%s(%s) instead", tag, prefix, tag[index+1:]))
}
}
}
}
func recordTextBug(location types.CodeLocation, message string) {
RecordBug(Bug{FileName: location.FileName, LineNumber: location.LineNumber, Message: message})
}
// WithFeature specifies that a certain test or group of tests only works
// when the feature is available. The return value must be passed as additional
// argument to [framework.It], [framework.Describe], [framework.Context].
//
// The feature must be listed in ValidFeatures.
func WithFeature(name Feature) interface{} {
return withFeature(name)
}
// WithFeature is a shorthand for the corresponding package function.
func (f *Framework) WithFeature(name Feature) interface{} {
return withFeature(name)
}
func withFeature(name Feature) interface{} {
if !ValidFeatures.items.Has(name) {
RecordBug(NewBug(fmt.Sprintf("WithFeature: unknown feature %q", name), 2))
}
return newLabel("Feature", string(name))
}
// WithFeatureGate specifies that a certain test or group of tests depends on a
// feature gate being enabled. The return value must be passed as additional
// argument to [framework.It], [framework.Describe], [framework.Context].
//
// The feature gate must be listed in
// [k8s.io/apiserver/pkg/util/feature.DefaultMutableFeatureGate]. Once a
// feature gate gets removed from there, the WithFeatureGate calls using it
// also need to be removed.
func WithFeatureGate(featureGate featuregate.Feature) interface{} {
return withFeatureGate(featureGate)
}
// WithFeatureGate is a shorthand for the corresponding package function.
func (f *Framework) WithFeatureGate(featureGate featuregate.Feature) interface{} {
return withFeatureGate(featureGate)
}
func withFeatureGate(featureGate featuregate.Feature) interface{} {
spec, ok := utilfeature.DefaultMutableFeatureGate.GetAll()[featureGate]
if !ok {
RecordBug(NewBug(fmt.Sprintf("WithFeatureGate: the feature gate %q is unknown", featureGate), 2))
}
// We use mixed case (i.e. Beta instead of BETA). GA feature gates have no level string.
var level string
if spec.PreRelease != "" {
level = string(spec.PreRelease)
level = strings.ToUpper(level[0:1]) + strings.ToLower(level[1:])
}
l := newLabel("FeatureGate", string(featureGate))
l.extra = level
return l
}
// WithEnvironment specifies that a certain test or group of tests only works
// in a certain environment. The return value must be passed as additional
// argument to [framework.It], [framework.Describe], [framework.Context].
//
// The environment must be listed in ValidEnvironments.
func WithEnvironment(name Environment) interface{} {
return withEnvironment(name)
}
// WithEnvironment is a shorthand for the corresponding package function.
func (f *Framework) WithEnvironment(name Environment) interface{} {
return withEnvironment(name)
}
func withEnvironment(name Environment) interface{} {
if !ValidEnvironments.items.Has(name) {
RecordBug(NewBug(fmt.Sprintf("WithEnvironment: unknown environment %q", name), 2))
}
return newLabel("Environment", string(name))
}
// WithNodeFeature specifies that a certain test or group of tests only works
// if the node supports a certain feature. The return value must be passed as
// additional argument to [framework.It], [framework.Describe],
// [framework.Context].
//
// The node feature must be listed in ValidNodeFeatures.
func WithNodeFeature(name NodeFeature) interface{} {
return withNodeFeature(name)
}
// WithNodeFeature is a shorthand for the corresponding package function.
func (f *Framework) WithNodeFeature(name NodeFeature) interface{} {
return withNodeFeature(name)
}
func withNodeFeature(name NodeFeature) interface{} {
if !ValidNodeFeatures.items.Has(name) {
RecordBug(NewBug(fmt.Sprintf("WithNodeFeature: unknown environment %q", name), 2))
}
return newLabel("NodeFeature", string(name))
}
// WithConformance specifies that a certain test or group of tests must pass in
// all conformant Kubernetes clusters. The return value must be passed as
// additional argument to [framework.It], [framework.Describe],
// [framework.Context].
func WithConformance() interface{} {
return withConformance()
}
// WithConformance is a shorthand for the corresponding package function.
func (f *Framework) WithConformance() interface{} {
return withConformance()
}
func withConformance() interface{} {
return newLabel("Conformance")
}
// WithNodeConformance specifies that a certain test or group of tests covers node
// functionality that does not depend on runtime or Kubernetes distro-specific
// behavior. The return value must be passed as additional argument to
// [framework.It], [framework.Describe], [framework.Context].
func WithNodeConformance() interface{} {
return withNodeConformance()
}
// WithNodeConformance is a shorthand for the corresponding package function.
func (f *Framework) WithNodeConformance() interface{} {
return withNodeConformance()
}
func withNodeConformance() interface{} {
return newLabel("NodeConformance")
}
// WithDisruptive specifies that a certain test or group of tests temporarily
// affects the functionality of the Kubernetes cluster. The return value must
// be passed as additional argument to [framework.It], [framework.Describe],
// [framework.Context].
func WithDisruptive() interface{} {
return withDisruptive()
}
// WithDisruptive is a shorthand for the corresponding package function.
func (f *Framework) WithDisruptive() interface{} {
return withDisruptive()
}
func withDisruptive() interface{} {
return newLabel("Disruptive")
}
// WithSerial specifies that a certain test or group of tests must not run in
// parallel with other tests. The return value must be passed as additional
// argument to [framework.It], [framework.Describe], [framework.Context].
//
// Starting with ginkgo v2, serial and parallel tests can be executed in the
// same invocation. Ginkgo itself will ensure that the serial tests run
// sequentially.
func WithSerial() interface{} {
return withSerial()
}
// WithSerial is a shorthand for the corresponding package function.
func (f *Framework) WithSerial() interface{} {
return withSerial()
}
func withSerial() interface{} {
return newLabel("Serial")
}
// WithSlow specifies that a certain test or group of tests takes a long time
// to run. The return value must be passed as additional
// argument to [framework.It], [framework.Describe], [framework.Context].
func WithSlow() interface{} {
return withSlow()
}
// WithSlow is a shorthand for the corresponding package function.
func (f *Framework) WithSlow() interface{} {
return withSlow() // calling the exported WithSlow here would recurse forever
}
func withSlow() interface{} {
return newLabel("Slow")
}
// WithLabel is a wrapper around [ginkgo.Label]. Besides adding an arbitrary
// label to a test, it also injects the label in square brackets into the test
// name.
func WithLabel(label string) interface{} {
return withLabel(label)
}
// WithLabel is a shorthand for the corresponding package function.
func (f *Framework) WithLabel(label string) interface{} {
return withLabel(label)
}
func withLabel(label string) interface{} {
return newLabel(label)
}
type label struct {
// parts get concatenated with ":" to build the full label.
parts []string
// extra is an optional fully-formed extra label.
extra string
}
func newLabel(parts ...string) label {
return label{parts: parts}
}
// TagsEqual can be used to check whether two tags are the same.
// It's safe to compare e.g. the result of WithSlow() against the result
// of WithSerial(); the result will be false. False is also returned
// when a parameter is some completely different value.
func TagsEqual(a, b interface{}) bool {
al, ok := a.(label)
if !ok {
return false
}
bl, ok := b.(label)
if !ok {
return false
}
if al.extra != bl.extra {
return false
}
return slices.Equal(al.parts, bl.parts)
}
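A small sketch of the comparison semantics:

```go
package e2e

import "k8s.io/kubernetes/test/e2e/framework"

func tagComparisons() (bool, bool, bool) {
	serial := framework.WithSerial()
	same := framework.TagsEqual(serial, framework.WithSerial()) // true
	diff := framework.TagsEqual(serial, framework.WithSlow())   // false
	other := framework.TagsEqual(serial, "Serial")              // false: a plain string is not a label
	return same, diff, other
}
```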


@@ -36,6 +36,10 @@ func WriteJUnitReport(report ginkgo.Report, filename string) error {
// both, then tools like kettle and spyglass would concatenate
// the two strings and thus show duplicated information.
OmitFailureMessageAttr: true,
// All labels are also part of the spec texts in inline [] tags,
// so we don't need to write them separately.
OmitSpecLabels: true,
}
return reporters.GenerateJUnitReportWithConfig(report, filename, config)


@@ -181,21 +181,34 @@ func (g *Grabber) GrabFromKubelet(ctx context.Context, nodeName string) (Kubelet
return KubeletMetrics{}, fmt.Errorf("Error listing nodes with name %v, got %v", nodeName, nodes.Items)
}
kubeletPort := nodes.Items[0].Status.DaemonEndpoints.KubeletEndpoint.Port
return g.grabFromKubeletInternal(ctx, nodeName, int(kubeletPort))
return g.grabFromKubeletInternal(ctx, nodeName, int(kubeletPort), "metrics")
}
func (g *Grabber) grabFromKubeletInternal(ctx context.Context, nodeName string, kubeletPort int) (KubeletMetrics, error) {
// GrabResourceMetricsFromKubelet returns resource metrics from the kubelet
func (g *Grabber) GrabResourceMetricsFromKubelet(ctx context.Context, nodeName string) (KubeletMetrics, error) {
nodes, err := g.client.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": nodeName}.AsSelector().String()})
if err != nil {
return KubeletMetrics{}, err
}
if len(nodes.Items) != 1 {
return KubeletMetrics{}, fmt.Errorf("Error listing nodes with name %v, got %v", nodeName, nodes.Items)
}
kubeletPort := nodes.Items[0].Status.DaemonEndpoints.KubeletEndpoint.Port
return g.grabFromKubeletInternal(ctx, nodeName, int(kubeletPort), "metrics/resource")
}
func (g *Grabber) grabFromKubeletInternal(ctx context.Context, nodeName string, kubeletPort int, pathSuffix string) (KubeletMetrics, error) {
if kubeletPort <= 0 || kubeletPort > 65535 {
return KubeletMetrics{}, fmt.Errorf("Invalid Kubelet port %v. Skipping Kubelet's metrics gathering", kubeletPort)
}
output, err := g.getMetricsFromNode(ctx, nodeName, int(kubeletPort))
output, err := g.getMetricsFromNode(ctx, nodeName, int(kubeletPort), pathSuffix)
if err != nil {
return KubeletMetrics{}, err
}
return parseKubeletMetrics(output)
}
func (g *Grabber) getMetricsFromNode(ctx context.Context, nodeName string, kubeletPort int) (string, error) {
func (g *Grabber) getMetricsFromNode(ctx context.Context, nodeName string, kubeletPort int, pathSuffix string) (string, error) {
// There's a problem with timing out during proxy. Wrapping this in a goroutine to prevent deadlock.
finished := make(chan struct{}, 1)
var err error
@@ -205,7 +218,7 @@ func (g *Grabber) getMetricsFromNode(ctx context.Context, nodeName string, kubel
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", nodeName, kubeletPort)).
Suffix("metrics").
Suffix(pathSuffix).
Do(ctx).Raw()
finished <- struct{}{}
}()
@@ -239,7 +252,7 @@ func (g *Grabber) GrabFromScheduler(ctx context.Context) (SchedulerMetrics, erro
var lastMetricsFetchErr error
var output string
if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) {
if metricsWaitErr := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
output, lastMetricsFetchErr = g.getSecureMetricsFromPod(ctx, g.kubeScheduler, metav1.NamespaceSystem, kubeSchedulerPort)
return lastMetricsFetchErr == nil, nil
}); metricsWaitErr != nil {
@@ -290,7 +303,7 @@ func (g *Grabber) GrabFromControllerManager(ctx context.Context) (ControllerMana
var output string
var lastMetricsFetchErr error
if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) {
if metricsWaitErr := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
output, lastMetricsFetchErr = g.getSecureMetricsFromPod(ctx, g.kubeControllerManager, metav1.NamespaceSystem, kubeControllerManagerPort)
return lastMetricsFetchErr == nil, nil
}); metricsWaitErr != nil {
@@ -329,7 +342,7 @@ func (g *Grabber) GrabFromSnapshotController(ctx context.Context, podName string
var output string
var lastMetricsFetchErr error
if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) {
if metricsWaitErr := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
output, lastMetricsFetchErr = g.getMetricsFromPod(ctx, g.client, podName, metav1.NamespaceSystem, port)
return lastMetricsFetchErr == nil, nil
}); metricsWaitErr != nil {
@@ -432,7 +445,7 @@ func (g *Grabber) Grab(ctx context.Context) (Collection, error) {
} else {
for _, node := range nodes.Items {
kubeletPort := node.Status.DaemonEndpoints.KubeletEndpoint.Port
metrics, err := g.grabFromKubeletInternal(ctx, node.Name, int(kubeletPort))
metrics, err := g.grabFromKubeletInternal(ctx, node.Name, int(kubeletPort), "metrics")
if err != nil {
errs = append(errs, err)
}
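A hedged usage sketch of the new endpoint parameter; the helper is illustrative and grabber construction is elided:

```go
package e2e

import (
	"context"

	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
)

// grabNodeResourceMetrics fetches the kubelet's "metrics/resource"
// endpoint for one node via the API server proxy.
func grabNodeResourceMetrics(ctx context.Context, g *e2emetrics.Grabber, node string) (e2emetrics.KubeletMetrics, error) {
	return g.GrabResourceMetricsFromKubelet(ctx, node)
}
```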


@@ -22,6 +22,7 @@ import (
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -63,7 +64,7 @@ func ExpectNodeHasLabel(ctx context.Context, c clientset.Interface, nodeName str
ginkgo.By("verifying the node has the label " + labelKey + " " + labelValue)
node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(node.Labels[labelKey], labelValue)
gomega.Expect(node.Labels).To(gomega.HaveKeyWithValue(labelKey, labelValue))
}
// RemoveLabelOffNode is for cleaning up labels temporarily added to node,
@@ -120,7 +121,7 @@ func allNodesReady(ctx context.Context, c clientset.Interface, timeout time.Dura
framework.Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, framework.TestContext.AllowedNotReadyNodes)
var notReady []*v1.Node
err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, true, func(ctx context.Context) (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
@@ -141,7 +142,7 @@ func allNodesReady(ctx context.Context, c clientset.Interface, timeout time.Dura
return len(notReady) <= framework.TestContext.AllowedNotReadyNodes, nil
})
if err != nil && err != wait.ErrWaitTimeout {
if err != nil && !wait.Interrupted(err) {
return err
}
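The polling migration recurs throughout this commit; a sketch in isolation (interval, timeout, and the condition function are illustrative):

```go
package e2e

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForCondition shows both halves of the migration: the deprecated
// wait.PollImmediateWithContext(ctx, interval, timeout, fn) becomes
// wait.PollUntilContextTimeout(ctx, interval, timeout, true /* immediate */, fn),
// and wait.Interrupted(err) replaces comparing err against wait.ErrWaitTimeout.
func waitForCondition(ctx context.Context, check func(context.Context) (bool, error)) error {
	err := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, check)
	if err != nil && !wait.Interrupted(err) {
		return err // a real error, not just "condition never became true"
	}
	return nil
}
```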


@@ -36,7 +36,7 @@ func WaitForSSHTunnels(ctx context.Context, namespace string) {
defer e2ekubectl.RunKubectl(namespace, "delete", "pod", "ssh-tunnel-test")
// allow up to a minute for new ssh tunnels to establish
wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, func(ctx context.Context) (bool, error) {
wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
_, err := e2ekubectl.RunKubectl(namespace, "logs", "ssh-tunnel-test")
return err == nil, nil
})


@@ -51,7 +51,7 @@ func WaitForTotalHealthy(ctx context.Context, c clientset.Interface, timeout tim
var notReady []v1.Node
var missingPodsPerNode map[string][]string
err := wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(ctx, poll, timeout, true, func(ctx context.Context) (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{ResourceVersion: "0"})
@@ -96,7 +96,7 @@ func WaitForTotalHealthy(ctx context.Context, c clientset.Interface, timeout tim
return len(notReady) == 0 && len(missingPodsPerNode) == 0, nil
})
if err != nil && err != wait.ErrWaitTimeout {
if err != nil && !wait.Interrupted(err) {
return err
}
@@ -192,7 +192,7 @@ func CheckReady(ctx context.Context, c clientset.Interface, size int, timeout ti
func waitListSchedulableNodes(ctx context.Context, c clientset.Interface) (*v1.NodeList, error) {
var nodes *v1.NodeList
var err error
if wait.PollImmediateWithContext(ctx, poll, singleCallTimeout, func(ctx context.Context) (bool, error) {
if wait.PollUntilContextTimeout(ctx, poll, singleCallTimeout, true, func(ctx context.Context) (bool, error) {
nodes, err = c.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})


@@ -24,8 +24,8 @@ import (
)
// Get creates a function which retrieves the pod anew each time the function
// is called. Fatal errors are detected by framework.HandleRetry and cause
// is called. Fatal errors are detected by framework.GetObject and cause
// polling to stop.
func Get(c clientset.Interface, pod framework.NamedObject) framework.GetFunc[*v1.Pod] {
return framework.HandleRetry(framework.GetObject(c.CoreV1().Pods(pod.GetNamespace()).Get, pod.GetName(), metav1.GetOptions{}))
return framework.GetObject(c.CoreV1().Pods(pod.GetNamespace()).Get, pod.GetName(), metav1.GetOptions{})
}


@@ -297,7 +297,7 @@ func DeletePVCandValidatePVGroup(ctx context.Context, c clientset.Interface, tim
func createPV(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
var resultPV *v1.PersistentVolume
var lastCreateErr error
err := wait.PollImmediateWithContext(ctx, 29*time.Second, timeouts.PVCreate, func(ctx context.Context) (done bool, err error) {
err := wait.PollUntilContextTimeout(ctx, 29*time.Second, timeouts.PVCreate, true, func(ctx context.Context) (done bool, err error) {
resultPV, lastCreateErr = c.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{})
if lastCreateErr != nil {
// If we hit a quota problem, we are not done and should retry again. This happens to be the quota failure string for GCP.
@@ -648,7 +648,7 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P
Spec: v1.PersistentVolumeClaimSpec{
Selector: cfg.Selector,
AccessModes: cfg.AccessModes,
Resources: v1.ResourceRequirements{
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse(cfg.ClaimSize),
},
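The type change sketched in isolation (claim size and access mode are illustrative):

```go
package e2e

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// pvcSpec uses the dedicated v1.VolumeResourceRequirements type that
// replaces v1.ResourceRequirements in PersistentVolumeClaimSpec.
func pvcSpec(size string) v1.PersistentVolumeClaimSpec {
	return v1.PersistentVolumeClaimSpec{
		AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
		Resources: v1.VolumeResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceStorage: resource.MustParse(size),
			},
		},
	}
}
```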


@@ -86,6 +86,11 @@ func GetSigner(provider string) (ssh.Signer, error) {
if keyfile == "" {
keyfile = "id_rsa"
}
case "azure":
keyfile = os.Getenv("AZURE_SSH_KEY")
if keyfile == "" {
keyfile = "id_rsa"
}
default:
return nil, fmt.Errorf("GetSigner(...) not implemented for %s", provider)
}
@@ -422,7 +427,7 @@ func nodeAddresses(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string
func waitListSchedulableNodes(ctx context.Context, c clientset.Interface) (*v1.NodeList, error) {
var nodes *v1.NodeList
var err error
if wait.PollImmediateWithContext(ctx, pollNodeInterval, singleCallTimeout, func(ctx context.Context) (bool, error) {
if wait.PollUntilContextTimeout(ctx, pollNodeInterval, singleCallTimeout, true, func(ctx context.Context) (bool, error) {
nodes, err = c.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})


@@ -23,9 +23,11 @@ import (
"errors"
"flag"
"fmt"
"io"
"math"
"os"
"path"
"path/filepath"
"sort"
"strings"
"time"
@@ -36,6 +38,7 @@ import (
"github.com/onsi/gomega"
gomegaformat "github.com/onsi/gomega/format"
"k8s.io/apimachinery/pkg/util/sets"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
cliflag "k8s.io/component-base/cli/flag"
@@ -53,6 +56,20 @@ const (
DefaultNumNodes = -1
)
var (
// Output is used for output when not running tests, for example in -list-tests.
// Test output should go to ginkgo.GinkgoWriter.
Output io.Writer = os.Stdout
// Exit is called when the framework detects fatal errors or when
// it is done with the execution of e.g. -list-tests.
Exit = os.Exit
// CheckForBugs determines whether the framework bails out when
// test initialization found any bugs.
CheckForBugs = true
)
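A hedged sketch of why these are variables rather than hard-coded calls: a unit test for suite setup can swap them out (the helper is illustrative):

```go
package e2e

import (
	"bytes"
	"fmt"

	"k8s.io/kubernetes/test/e2e/framework"
)

// interceptFatal redirects framework output and turns Exit into a panic so
// that a test can observe fatal initialization errors instead of dying.
func interceptFatal() *bytes.Buffer {
	var buf bytes.Buffer
	framework.Output = &buf
	framework.Exit = func(code int) {
		panic(fmt.Sprintf("framework exited with code %d", code))
	}
	return &buf
}
```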
// TestContextType contains test settings and global state. For
// historical reasons, it is a mixture of items managed by the test
// framework itself, cloud providers and individual tests.
@@ -82,18 +99,21 @@ const (
// Test suite authors can use framework/viper to make all command line
// parameters also configurable via a configuration file.
type TestContextType struct {
KubeConfig string
KubeContext string
KubeAPIContentType string
KubeletRootDir string
CertDir string
Host string
BearerToken string `datapolicy:"token"`
KubeConfig string
KubeContext string
KubeAPIContentType string
KubeletRootDir string
KubeletConfigDropinDir string
CertDir string
Host string
BearerToken string `datapolicy:"token"`
// TODO: Deprecating this over time... instead just use gobindata_util.go , see #23987.
RepoRoot string
// ListImages will list off all images that are used then quit
ListImages bool
listTests, listLabels bool
// ListConformanceTests will list off all conformance tests that are available then quit
ListConformanceTests bool
@@ -356,6 +376,8 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
flags.StringVar(&TestContext.NonblockingTaints, "non-blocking-taints", `node-role.kubernetes.io/control-plane`, "Nodes with taints in this comma-delimited list will not block the test framework from starting tests.")
flags.BoolVar(&TestContext.ListImages, "list-images", false, "If true, will show list of images used for running tests.")
flags.BoolVar(&TestContext.listLabels, "list-labels", false, "If true, will show the list of labels that can be used to select tests via -ginkgo.label-filter.")
flags.BoolVar(&TestContext.listTests, "list-tests", false, "If true, will show the full names of all tests (aka specs) that can be used to select test via -ginkgo.focus/skip.")
flags.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.")
flags.StringVar(&TestContext.ProgressReportURL, "progress-report-url", "", "The URL to POST progress updates to as the suite runs to assist in aiding integrations. If empty, no messages sent.")
@@ -482,7 +504,7 @@ func AfterReadingAllFlags(t *TestContextType) {
for _, v := range image.GetImageConfigs() {
fmt.Println(v.GetE2EImage())
}
os.Exit(0)
Exit(0)
}
// Reconfigure gomega defaults. The poll interval should be suitable
@@ -494,6 +516,20 @@ func AfterReadingAllFlags(t *TestContextType) {
gomega.SetDefaultEventuallyTimeout(t.timeouts.PodStart)
gomega.SetDefaultConsistentlyDuration(t.timeouts.PodStartShort)
// ginkgo.PreviewSpecs will expand all nodes and thus may find new bugs.
report := ginkgo.PreviewSpecs("Kubernetes e2e test statistics")
validateSpecs(report.SpecReports)
if err := FormatBugs(); CheckForBugs && err != nil {
// Refuse to do anything if the E2E suite is buggy.
fmt.Fprint(Output, "ERROR: E2E suite initialization was faulty, these errors must be fixed:")
fmt.Fprint(Output, "\n"+err.Error())
Exit(1)
}
if t.listLabels || t.listTests {
listTestInformation(report)
Exit(0)
}
// Only set a default host if one won't be supplied via kubeconfig
if len(t.Host) == 0 && len(t.KubeConfig) == 0 {
// Check if we can use the in-cluster config
@@ -553,7 +589,7 @@ func AfterReadingAllFlags(t *TestContextType) {
} else {
klog.Errorf("Failed to setup provider config for %q: %v", TestContext.Provider, err)
}
os.Exit(1)
Exit(1)
}
if TestContext.ReportDir != "" {
@@ -563,13 +599,13 @@ func AfterReadingAllFlags(t *TestContextType) {
// in parallel, so we will get "exists" error in most of them.
if err := os.MkdirAll(TestContext.ReportDir, 0777); err != nil && !os.IsExist(err) {
klog.Errorf("Create report dir: %v", err)
os.Exit(1)
Exit(1)
}
ginkgoDir := path.Join(TestContext.ReportDir, "ginkgo")
if TestContext.ReportCompleteGinkgo || TestContext.ReportCompleteJUnit {
if err := os.MkdirAll(ginkgoDir, 0777); err != nil && !os.IsExist(err) {
klog.Errorf("Create <report-dir>/ginkgo: %v", err)
os.Exit(1)
Exit(1)
}
}
@@ -600,3 +636,47 @@ func AfterReadingAllFlags(t *TestContextType) {
})
}
}
func listTestInformation(report ginkgo.Report) {
indent := strings.Repeat(" ", 4)
if TestContext.listLabels {
labels := sets.New[string]()
for _, spec := range report.SpecReports {
if spec.LeafNodeType == types.NodeTypeIt {
labels.Insert(spec.Labels()...)
}
}
fmt.Fprintf(Output, "The following labels can be used with 'gingko run --label-filter':\n%s%s\n\n", indent, strings.Join(sets.List(labels), "\n"+indent))
}
if TestContext.listTests {
leafs := make([][]string, 0, len(report.SpecReports))
wd, _ := os.Getwd()
for _, spec := range report.SpecReports {
if spec.LeafNodeType == types.NodeTypeIt {
leafs = append(leafs, []string{fmt.Sprintf("%s:%d: ", relativePath(wd, spec.LeafNodeLocation.FileName), spec.LeafNodeLocation.LineNumber), spec.FullText()})
}
}
// Sort by test name, not the source code location, because the test
// name is more stable across code refactoring.
sort.Slice(leafs, func(i, j int) bool {
return leafs[i][1] < leafs[j][1]
})
fmt.Fprint(Output, "The following spec names can be used with 'ginkgo run --focus/skip':\n")
for _, leaf := range leafs {
fmt.Fprintf(Output, "%s%s%s\n", indent, leaf[0], leaf[1])
}
fmt.Fprint(Output, "\n")
}
}
func relativePath(wd, path string) string {
if wd == "" {
return path
}
relpath, err := filepath.Rel(wd, path)
if err != nil {
return path
}
return relpath
}


@@ -39,6 +39,7 @@ var defaultTimeouts = TimeoutContext{
SystemPodsStartup: 10 * time.Minute,
NodeSchedulable: 30 * time.Minute,
SystemDaemonsetStartup: 5 * time.Minute,
NodeNotReady: 3 * time.Minute,
}
// TimeoutContext contains timeout settings for several actions.
@@ -106,6 +107,9 @@ type TimeoutContext struct {
// SystemDaemonsetStartup is how long to wait for all system daemonsets to be ready.
SystemDaemonsetStartup time.Duration
// NodeNotReady is how long to wait for a node to be not ready.
NodeNotReady time.Duration
}
// NewTimeoutContext returns a TimeoutContext with all values set either to


@@ -136,7 +136,7 @@ var (
BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
// ProvidersWithSSH are those providers where each node is accessible with SSH
ProvidersWithSSH = []string{"gce", "gke", "aws", "local"}
ProvidersWithSSH = []string{"gce", "gke", "aws", "local", "azure"}
// ServeHostnameImage is a serve hostname image name.
ServeHostnameImage = imageutils.GetE2EImage(imageutils.Agnhost)
@@ -352,7 +352,7 @@ func CreateTestingNS(ctx context.Context, baseName string, c clientset.Interface
}
// Be robust about making the namespace creation call.
var got *v1.Namespace
if err := wait.PollImmediateWithContext(ctx, Poll, 30*time.Second, func(ctx context.Context) (bool, error) {
if err := wait.PollUntilContextTimeout(ctx, Poll, 30*time.Second, true, func(ctx context.Context) (bool, error) {
var err error
got, err = c.CoreV1().Namespaces().Create(ctx, namespaceObj, metav1.CreateOptions{})
if err != nil {
@@ -808,7 +808,7 @@ retriesLoop:
if errs.Len() > 0 {
Failf("Unexpected error(s): %v", strings.Join(errs.List(), "\n - "))
}
ExpectEqual(totalValidWatchEvents, len(expectedWatchEvents), "Error: there must be an equal amount of total valid watch events (%d) and expected watch events (%d)", totalValidWatchEvents, len(expectedWatchEvents))
gomega.Expect(expectedWatchEvents).To(gomega.HaveLen(totalValidWatchEvents), "Error: there must be an equal amount of total valid watch events (%d) and expected watch events (%d)", totalValidWatchEvents, len(expectedWatchEvents))
break retriesLoop
}
}


@@ -238,7 +238,7 @@ func getVolumeHandle(ctx context.Context, cs clientset.Interface, claimName stri
// WaitForVolumeAttachmentTerminated waits for the VolumeAttachment with the passed in attachmentName to be terminated.
func WaitForVolumeAttachmentTerminated(ctx context.Context, attachmentName string, cs clientset.Interface, timeout time.Duration) error {
waitErr := wait.PollImmediateWithContext(ctx, 10*time.Second, timeout, func(ctx context.Context) (bool, error) {
waitErr := wait.PollUntilContextTimeout(ctx, 10*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
_, err := cs.StorageV1().VolumeAttachments().Get(ctx, attachmentName, metav1.GetOptions{})
if err != nil {
// if the volumeattachment object is not found, it means it has been terminated.
@@ -697,7 +697,7 @@ func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exi
if err != nil {
if exiterr, ok := err.(clientexec.ExitError); ok {
actualExitCode := exiterr.ExitStatus()
framework.ExpectEqual(actualExitCode, exitCode,
gomega.Expect(actualExitCode).To(gomega.Equal(exitCode),
"%q should fail with exit code %d, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, actualExitCode, exiterr, stdout, stderr)
} else {
@@ -706,5 +706,5 @@ func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exi
shExec, exitCode, err, stdout, stderr)
}
}
framework.ExpectError(err, "%q should fail with exit code %d, but exit without error", shExec, exitCode)
gomega.Expect(err).To(gomega.HaveOccurred(), "%q should fail with exit code %d, but exit without error", shExec, exitCode)
}


@@ -16,9 +16,7 @@ limitations under the License.
package utils
import "github.com/onsi/ginkgo/v2"
import "k8s.io/kubernetes/test/e2e/framework"
// SIGDescribe annotates the test with the SIG label.
func SIGDescribe(text string, body func()) bool {
return ginkgo.Describe("[sig-storage] "+text, body)
}
var SIGDescribe = framework.SIGDescribe("storage")


@@ -70,8 +70,7 @@ func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string
framework.ExpectNoError(err)
framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
fsGroupResult := strings.Fields(stdout)[3]
framework.ExpectEqual(expectedFSGroup, fsGroupResult,
"Expected fsGroup of %s, got %s", expectedFSGroup, fsGroupResult)
gomega.Expect(expectedFSGroup).To(gomega.Equal(fsGroupResult), "Expected fsGroup of %s, got %s", expectedFSGroup, fsGroupResult)
}
// getKubeletMainPid return the Main PID of the Kubelet Process
@@ -141,14 +140,14 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(ctx context.Context, c clie
result, err := e2essh.SSH(ctx, fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
if checkSubpath {
ginkgo.By("Expecting the volume subpath mount to be found.")
result, err := e2essh.SSH(ctx, fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
}
ginkgo.By("Writing to the volume.")
@@ -201,7 +200,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(ctx context.Context, c clie
result, err := e2essh.SSH(ctx, fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", secondPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error when checking the second pod.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
ginkgo.By("Testing that written file is accessible in the second pod.")
CheckReadFromPath(f, secondPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
@@ -262,13 +261,13 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(ctx context.Context, c client
result, err := e2essh.SSH(ctx, podDirectoryCmd, nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
ginkgo.By("Expecting the symlinks from global map path to be found.")
result, err = e2essh.SSH(ctx, globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected find exit code of 0, got %d", result.Code))
gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected find exit code of 0, got %d", result.Code))
// This command is to make sure kubelet is started after test finishes no matter it fails or not.
ginkgo.DeferCleanup(KubeletCommand, KStart, c, clientPod)
@@ -699,7 +698,7 @@ func VerifyFilePathGidInPod(f *framework.Framework, filePath, expectedGid string
framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
ll := strings.Fields(stdout)
framework.Logf("stdout split: %v, expected gid: %v", ll, expectedGid)
framework.ExpectEqual(ll[3], expectedGid)
gomega.Expect(ll[3]).To(gomega.Equal(expectedGid))
}
// ChangeFilePathGidInPod changes the GID of the target filepath.


@@ -11,6 +11,8 @@ spec:
metadata:
labels:
app: httpd
annotations:
annotations_app: annotations_httpd
spec:
containers:
- name: httpd


@@ -31,6 +31,9 @@ spec:
- name: dev
hostPath:
path: /dev
- name: cdi-dir
hostPath:
path: /var/run/cdi
containers:
- image: registry.k8s.io/e2e-test-images/sample-device-plugin:1.3
name: sample-device-plugin
@@ -46,5 +49,7 @@ spec:
mountPath: /var/lib/kubelet/plugins_registry
- name: dev
mountPath: /dev
- name: cdi-dir
mountPath: /var/run/cdi
updateStrategy:
type: RollingUpdate


@@ -52,8 +52,8 @@ spec:
# Refer to details about the installer in https://cos.googlesource.com/cos/tools/+/refs/heads/master/src/cmd/cos_gpu_installer/
# and the COS release notes (https://cloud.google.com/container-optimized-os/docs/release-notes) to determine version COS GPU installer for a given version of COS.
# Maps to gcr.io/cos-cloud/cos-gpu-installer:v2.0.27 - suitable for COS M97 as per https://cloud.google.com/container-optimized-os/docs/release-notes
- image: gcr.io/cos-cloud/cos-gpu-installer:v2.0.27
# Maps to gcr.io/cos-cloud/cos-gpu-installer:v2.1.9 - suitable for COS M109 as per https://cloud.google.com/container-optimized-os/docs/release-notes
- image: gcr.io/cos-cloud/cos-gpu-installer:v2.1.9
name: nvidia-driver-installer
resources:
requests:


@@ -238,11 +238,11 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config
configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"}
configs[APIServer] = Config{list.PromoterE2eRegistry, "sample-apiserver", "1.17.7"}
configs[AppArmorLoader] = Config{list.PromoterE2eRegistry, "apparmor-loader", "1.4"}
configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.29-4"}
configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.36.1-1"}
configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"}
configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.3"}
configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.2.7"}
configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.9-0"}
configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.4.3"}
configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.10-0"}
configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"}
configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"}
configs[InvalidRegistryImage] = Config{list.InvalidRegistry, "alpine", "3.1"}


@@ -1502,7 +1502,7 @@ func makeUnboundPersistentVolumeClaim(storageClass string) *v1.PersistentVolumeC
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany},
StorageClassName: &storageClass,
Resources: v1.ResourceRequirements{
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
},