mirror of https://github.com/ceph/ceph-csi.git
synced 2025-06-13 10:33:35 +00:00

rebase: update kubernetes to v1.25.0

update kubernetes to latest v1.25.0 release.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>

committed by mergify[bot]
parent f47839d73d
commit e3bf375035
2 vendor/k8s.io/kubernetes/test/e2e/framework/.import-restrictions generated vendored
@@ -7,6 +7,7 @@ rules:
      - k8s.io/kubernetes/pkg/api/v1/resource
      - k8s.io/kubernetes/pkg/api/v1/service
      - k8s.io/kubernetes/pkg/api/pod
      - k8s.io/kubernetes/pkg/api/node
      - k8s.io/kubernetes/pkg/apis/apps
      - k8s.io/kubernetes/pkg/apis/apps/validation
      - k8s.io/kubernetes/pkg/apis/autoscaling
@@ -23,6 +24,7 @@ rules:
      - k8s.io/kubernetes/pkg/apis/core/validation
      - k8s.io/kubernetes/pkg/apis/extensions
      - k8s.io/kubernetes/pkg/apis/networking
      - k8s.io/kubernetes/pkg/apis/node
      - k8s.io/kubernetes/pkg/apis/policy
      - k8s.io/kubernetes/pkg/apis/policy/validation
      - k8s.io/kubernetes/pkg/apis/scheduling
167 vendor/k8s.io/kubernetes/test/e2e/framework/auth/helpers.go generated vendored
@@ -1,167 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package auth
-
-import (
-    "context"
-    "fmt"
-    "sync"
-    "time"
-
-    authorizationv1 "k8s.io/api/authorization/v1"
-    rbacv1 "k8s.io/api/rbac/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/runtime/schema"
-    "k8s.io/apimachinery/pkg/util/wait"
-    v1authorization "k8s.io/client-go/kubernetes/typed/authorization/v1"
-    v1rbac "k8s.io/client-go/kubernetes/typed/rbac/v1"
-    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
-)
-
-const (
-    policyCachePollInterval = 100 * time.Millisecond
-    policyCachePollTimeout  = 5 * time.Second
-)
-
-type bindingsGetter interface {
-    v1rbac.RoleBindingsGetter
-    v1rbac.ClusterRoleBindingsGetter
-    v1rbac.ClusterRolesGetter
-}
-
-// WaitForAuthorizationUpdate checks if the given user can perform the named verb and action.
-// If policyCachePollTimeout is reached without the expected condition matching, an error is returned
-func WaitForAuthorizationUpdate(c v1authorization.SubjectAccessReviewsGetter, user, namespace, verb string, resource schema.GroupResource, allowed bool) error {
-    return WaitForNamedAuthorizationUpdate(c, user, namespace, verb, "", resource, allowed)
-}
-
-// WaitForNamedAuthorizationUpdate checks if the given user can perform the named verb and action on the named resource.
-// If policyCachePollTimeout is reached without the expected condition matching, an error is returned
-func WaitForNamedAuthorizationUpdate(c v1authorization.SubjectAccessReviewsGetter, user, namespace, verb, resourceName string, resource schema.GroupResource, allowed bool) error {
-    review := &authorizationv1.SubjectAccessReview{
-        Spec: authorizationv1.SubjectAccessReviewSpec{
-            ResourceAttributes: &authorizationv1.ResourceAttributes{
-                Group:     resource.Group,
-                Verb:      verb,
-                Resource:  resource.Resource,
-                Namespace: namespace,
-                Name:      resourceName,
-            },
-            User: user,
-        },
-    }
-
-    err := wait.Poll(policyCachePollInterval, policyCachePollTimeout, func() (bool, error) {
-        response, err := c.SubjectAccessReviews().Create(context.TODO(), review, metav1.CreateOptions{})
-        if err != nil {
-            return false, err
-        }
-        if response.Status.Allowed != allowed {
-            return false, nil
-        }
-        return true, nil
-    })
-    return err
-}
-
-// BindClusterRole binds the cluster role at the cluster scope. If RBAC is not enabled, nil
-// is returned with no action.
-func BindClusterRole(c bindingsGetter, clusterRole, ns string, subjects ...rbacv1.Subject) error {
-    if !IsRBACEnabled(c) {
-        return nil
-    }
-
-    // Since the namespace names are unique, we can leave this lying around so we don't have to race any caches
-    _, err := c.ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: ns + "--" + clusterRole,
-        },
-        RoleRef: rbacv1.RoleRef{
-            APIGroup: "rbac.authorization.k8s.io",
-            Kind:     "ClusterRole",
-            Name:     clusterRole,
-        },
-        Subjects: subjects,
-    }, metav1.CreateOptions{})
-
-    if err != nil {
-        return fmt.Errorf("binding clusterrole/%s for %q for %v: %w", clusterRole, ns, subjects, err)
-    }
-
-    return nil
-}
-
-// BindClusterRoleInNamespace binds the cluster role at the namespace scope. If RBAC is not enabled, nil
-// is returned with no action.
-func BindClusterRoleInNamespace(c bindingsGetter, clusterRole, ns string, subjects ...rbacv1.Subject) error {
-    return bindInNamespace(c, "ClusterRole", clusterRole, ns, subjects...)
-}
-
-// BindRoleInNamespace binds the role at the namespace scope. If RBAC is not enabled, nil
-// is returned with no action.
-func BindRoleInNamespace(c bindingsGetter, role, ns string, subjects ...rbacv1.Subject) error {
-    return bindInNamespace(c, "Role", role, ns, subjects...)
-}
-
-func bindInNamespace(c bindingsGetter, roleType, role, ns string, subjects ...rbacv1.Subject) error {
-    if !IsRBACEnabled(c) {
-        return nil
-    }
-
-    // Since the namespace names are unique, we can leave this lying around so we don't have to race any caches
-    _, err := c.RoleBindings(ns).Create(context.TODO(), &rbacv1.RoleBinding{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: ns + "--" + role,
-        },
-        RoleRef: rbacv1.RoleRef{
-            APIGroup: "rbac.authorization.k8s.io",
-            Kind:     roleType,
-            Name:     role,
-        },
-        Subjects: subjects,
-    }, metav1.CreateOptions{})
-
-    if err != nil {
-        return fmt.Errorf("binding %s/%s into %q for %v: %w", roleType, role, ns, subjects, err)
-    }
-
-    return nil
-}
-
-var (
-    isRBACEnabledOnce sync.Once
-    isRBACEnabled     bool
-)
-
-// IsRBACEnabled returns true if RBAC is enabled. Otherwise false.
-func IsRBACEnabled(crGetter v1rbac.ClusterRolesGetter) bool {
-    isRBACEnabledOnce.Do(func() {
-        crs, err := crGetter.ClusterRoles().List(context.TODO(), metav1.ListOptions{})
-        if err != nil {
-            e2elog.Logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err)
-            isRBACEnabled = false
-        } else if crs == nil || len(crs.Items) == 0 {
-            e2elog.Logf("No ClusterRoles found; assuming RBAC is disabled.")
-            isRBACEnabled = false
-        } else {
-            e2elog.Logf("Found ClusterRoles; assuming RBAC is enabled.")
-            isRBACEnabled = true
-        }
-    })
-
-    return isRBACEnabled
-}
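Aside: suites that relied on these deleted helpers can reproduce the core polling pattern directly against the API. A minimal sketch under that assumption — waitForAllowed is a hypothetical name, not part of this commit:

// waitForAllowed polls a SubjectAccessReview until it reports the expected
// decision, mirroring the deleted WaitForNamedAuthorizationUpdate helper.
// Sketch only; assumes a kubernetes.Interface client.
package example

import (
    "context"
    "time"

    authorizationv1 "k8s.io/api/authorization/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/kubernetes"
)

func waitForAllowed(c kubernetes.Interface, review *authorizationv1.SubjectAccessReview, allowed bool) error {
    // Same cadence the deleted file used: 100ms poll, 5s timeout.
    return wait.Poll(100*time.Millisecond, 5*time.Second, func() (bool, error) {
        resp, err := c.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), review, metav1.CreateOptions{})
        if err != nil {
            return false, err
        }
        return resp.Status.Allowed == allowed, nil
    })
}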
38 vendor/k8s.io/kubernetes/test/e2e/framework/config/config.go generated vendored
@@ -22,13 +22,13 @@ limitations under the License.
 // The command line flags all get stored in a private flag set. The
 // developer of the E2E test suite decides how they are exposed. Options
 // include:
-// - exposing as normal flags in the actual command line:
-//   CopyFlags(Flags, flag.CommandLine)
-// - populate via test/e2e/framework/viperconfig:
-//   viperconfig.ViperizeFlags("my-config.yaml", "", Flags)
-// - a combination of both:
-//   CopyFlags(Flags, flag.CommandLine)
-//   viperconfig.ViperizeFlags("my-config.yaml", "", flag.CommandLine)
+//   - exposing as normal flags in the actual command line:
+//     CopyFlags(Flags, flag.CommandLine)
+//   - populate via test/e2e/framework/viperconfig:
+//     viperconfig.ViperizeFlags("my-config.yaml", "", Flags)
+//   - a combination of both:
+//     CopyFlags(Flags, flag.CommandLine)
+//     viperconfig.ViperizeFlags("my-config.yaml", "", flag.CommandLine)
 //
 // Instead of defining flags one-by-one, test developers annotate a
 // structure with tags and then call a single function. This is the
@@ -38,16 +38,16 @@ limitations under the License.
 //
 // For example, a file storage/csi.go might define:
 //
-// var scaling struct {
-//     NumNodes int `default:"1" description:"number of nodes to run on"`
-//     Master string
-// }
-// _ = config.AddOptions(&scaling, "storage.csi.scaling")
+//	var scaling struct {
+//	    NumNodes int `default:"1" description:"number of nodes to run on"`
+//	    Master string
+//	}
+//	_ = config.AddOptions(&scaling, "storage.csi.scaling")
 //
 // This defines the following command line flags:
 //
-// -storage.csi.scaling.numNodes=<int> - number of nodes to run on (default: 1)
-// -storage.csi.scaling.master=<string>
+//	-storage.csi.scaling.numNodes=<int> - number of nodes to run on (default: 1)
+//	-storage.csi.scaling.master=<string>
 //
 // All fields in the structure must be exported and have one of the following
 // types (same as in the `flag` package):
@@ -63,10 +63,10 @@ limitations under the License.
 //
 // Each basic entry may have a tag with these optional keys:
 //
-// usage:   additional explanation of the option
-// default: the default value, in the same format as it would
-//          be given on the command line and true/false for
-//          a boolean
+//	usage:   additional explanation of the option
+//	default: the default value, in the same format as it would
+//	         be given on the command line and true/false for
+//	         a boolean
 //
 // The names of the final configuration options are a combination of an
 // optional common prefix for all options in the structure and the
@@ -135,7 +135,7 @@ func AddOptionsToSet(flags *flag.FlagSet, options interface{}, prefix string) bo
     if optionsType == nil {
         panic("options parameter without a type - nil?!")
     }
-    if optionsType.Kind() != reflect.Ptr || optionsType.Elem().Kind() != reflect.Struct {
+    if optionsType.Kind() != reflect.Pointer || optionsType.Elem().Kind() != reflect.Struct {
         panic(fmt.Sprintf("need a pointer to a struct, got instead: %T", options))
     }
     addStructFields(flags, optionsType.Elem(), reflect.Indirect(reflect.ValueOf(options)), prefix)
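The only code change here, reflect.Ptr to reflect.Pointer, is a rename: the two Kind constants are equal in the standard library. A small standalone sketch, not part of the commit:

package main

import (
    "fmt"
    "reflect"
)

func main() {
    // reflect.Pointer (Go 1.18+) and reflect.Ptr are the same Kind value,
    // so this vendored change is purely cosmetic.
    fmt.Println(reflect.Ptr == reflect.Pointer) // true

    v := &struct{ N int }{N: 1}
    fmt.Println(reflect.TypeOf(v).Kind() == reflect.Pointer) // true
}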
32 vendor/k8s.io/kubernetes/test/e2e/framework/expect.go generated vendored
@@ -17,7 +17,10 @@ limitations under the License.
 package framework

 import (
+    "fmt"
+
     "github.com/onsi/gomega"
+    "github.com/onsi/gomega/format"
 )

 // ExpectEqual expects the specified two are the same, otherwise an exception raises
@@ -43,7 +46,34 @@ func ExpectNoError(err error, explain ...interface{}) {
 // ExpectNoErrorWithOffset checks if "err" is set, and if so, fails assertion while logging the error at "offset" levels above its caller
 // (for example, for call chain f -> g -> ExpectNoErrorWithOffset(1, ...) error would be logged for "f").
 func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
-    gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
+    if err == nil {
+        return
+    }
+
+    // Errors usually contain unexported fields. We have to use
+    // a formatter here which can print those.
+    prefix := ""
+    if len(explain) > 0 {
+        if str, ok := explain[0].(string); ok {
+            prefix = fmt.Sprintf(str, explain[1:]...) + ": "
+        } else {
+            prefix = fmt.Sprintf("unexpected explain arguments, need format string: %v", explain)
+        }
+    }
+
+    // This intentionally doesn't use gomega.Expect. Instead we take
+    // full control over what information is presented where:
+    // - The complete error object is logged because it may contain
+    //   additional information that isn't included in its error
+    //   string.
+    // - It is not included in the failure message because
+    //   it might make the failure message very large and/or
+    //   cause error aggregation to work less well: two
+    //   failures at the same code line might not be matched in
+    //   https://go.k8s.io/triage because the error details are too
+    //   different.
+    Logf("Unexpected error: %s\n%s", prefix, format.Object(err, 1))
+    Fail(prefix+err.Error(), 1+offset)
 }

 // ExpectConsistOf expects actual contains precisely the extra elements. The ordering of the elements does not matter.
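With this rewrite the explain arguments are interpreted as a format string plus values. A hedged usage sketch — doCreate and the PVC names are hypothetical:

package example

import "k8s.io/kubernetes/test/e2e/framework"

// createAndCheck shows the explain-arguments convention the new
// ExpectNoErrorWithOffset expects: first element a format string.
// On failure this logs the full error via format.Object and fails
// with a message like "creating PVC mypvc in namespace default: <err>".
func createAndCheck() {
    err := doCreate() // hypothetical helper
    framework.ExpectNoError(err, "creating PVC %s in namespace %s", "mypvc", "default")
}

func doCreate() error { return nil }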
2 vendor/k8s.io/kubernetes/test/e2e/framework/flake_reporting_util.go generated vendored
@@ -53,7 +53,7 @@ func (f *FlakeReport) RecordFlakeIfError(err error, optionalDescription ...inter
         return
     }
     msg := fmt.Sprintf("Unexpected error occurred: %v", err)
-    desc := buildDescription(optionalDescription)
+    desc := buildDescription(optionalDescription...)
     if desc != "" {
         msg = fmt.Sprintf("%v (Description: %v)", msg, desc)
     }
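The one-character fix above is the usual Go variadic-forwarding pitfall: passing a slice without "..." nests it as a single argument. A standalone sketch with a hypothetical describe helper:

package main

import "fmt"

func describe(parts ...interface{}) string { return fmt.Sprint(parts...) }

func main() {
    args := []interface{}{"disk ", 42}
    // Forwarding the slice without "..." makes it one argument:
    // the whole []interface{} is printed as a single value.
    fmt.Println(describe(args))
    // The fix forwards the elements themselves: "disk 42".
    fmt.Println(describe(args...))
}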
23 vendor/k8s.io/kubernetes/test/e2e/framework/framework.go generated vendored
@@ -49,7 +49,7 @@ import (
     scaleclient "k8s.io/client-go/scale"
     admissionapi "k8s.io/pod-security-admission/api"

-    "github.com/onsi/ginkgo"
+    "github.com/onsi/ginkgo/v2"
     "github.com/onsi/gomega"

     // TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
@@ -83,7 +83,6 @@ type Framework struct {
     Namespace                *v1.Namespace // Every test has at least one namespace unless creation is skipped
     namespacesToDelete       []*v1.Namespace // Some tests have more than one.
     NamespaceDeletionTimeout time.Duration
-    SkipPrivilegedPSPBinding bool // Whether to skip creating a binding to the privileged PSP in the test namespace
     NamespacePodSecurityEnforceLevel admissionapi.Level // The pod security enforcement level for namespaces to be applied.

     gatherer *ContainerResourceGatherer
@@ -195,7 +194,7 @@ func (f *Framework) BeforeEach() {
     f.beforeEachStarted = true

     // The fact that we need this feels like a bug in ginkgo.
-    // https://github.com/onsi/ginkgo/issues/222
+    // https://github.com/onsi/ginkgo/v2/issues/222
     f.cleanupHandle = AddCleanupAction(f.AfterEach)
     if f.ClientSet == nil {
         ginkgo.By("Creating a kubernetes client")
@@ -391,7 +390,7 @@ func (f *Framework) AfterEach() {
     // Whether to delete namespace is determined by 3 factors: delete-namespace flag, delete-namespace-on-failure flag and the test result
     // if delete-namespace set to false, namespace will always be preserved.
     // if delete-namespace is true and delete-namespace-on-failure is false, namespace will be preserved if test failed.
-    if TestContext.DeleteNamespace && (TestContext.DeleteNamespaceOnFailure || !ginkgo.CurrentGinkgoTestDescription().Failed) {
+    if TestContext.DeleteNamespace && (TestContext.DeleteNamespaceOnFailure || !ginkgo.CurrentSpecReport().Failed()) {
         for _, ns := range f.namespacesToDelete {
             ginkgo.By(fmt.Sprintf("Destroying namespace %q for this suite.", ns.Name))
             if err := f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), ns.Name, metav1.DeleteOptions{}); err != nil {
@@ -399,7 +398,7 @@ func (f *Framework) AfterEach() {
                 nsDeletionErrors[ns.Name] = err

                 // Dump namespace if we are unable to delete the namespace and the dump was not already performed.
-                if !ginkgo.CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
+                if !ginkgo.CurrentSpecReport().Failed() && TestContext.DumpLogsOnFailure {
                     DumpAllNamespaceInfo(f.ClientSet, ns.Name)
                 }
             } else {
@@ -433,7 +432,7 @@ func (f *Framework) AfterEach() {

     // run all aftereach functions in random order to ensure no dependencies grow
     for _, afterEachFn := range f.afterEaches {
-        afterEachFn(f, ginkgo.CurrentGinkgoTestDescription().Failed)
+        afterEachFn(f, ginkgo.CurrentSpecReport().Failed())
     }

     if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" && f.gatherer != nil {
@@ -511,7 +510,7 @@ func (f *Framework) DeleteNamespace(name string) {
         }
     }()
     // if current test failed then we should dump namespace information
-    if !f.SkipNamespaceCreation && ginkgo.CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
+    if !f.SkipNamespaceCreation && ginkgo.CurrentSpecReport().Failed() && TestContext.DumpLogsOnFailure {
         DumpAllNamespaceInfo(f.ClientSet, name)
     }

@@ -545,17 +544,13 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (
     // fail to create serviceAccount in it.
     f.AddNamespacesToDelete(ns)

-    if err == nil && !f.SkipPrivilegedPSPBinding {
-        CreatePrivilegedPSPBinding(f.ClientSet, ns.Name)
-    }
-
     return ns, err
 }

 // RecordFlakeIfError records flakeness info if error happens.
 // NOTE: This function is not used at any places yet, but we are in progress for https://github.com/kubernetes/kubernetes/issues/66239 which requires this. Please don't remove this.
 func (f *Framework) RecordFlakeIfError(err error, optionalDescription ...interface{}) {
-    f.flakeReport.RecordFlakeIfError(err, optionalDescription)
+    f.flakeReport.RecordFlakeIfError(err, optionalDescription...)
 }

 // AddNamespacesToDelete adds one or more namespaces to be deleted when the test
@@ -648,8 +643,8 @@ func (kc *KubeConfig) FindCluster(name string) *KubeCluster {
 }

 // ConformanceIt is wrapper function for ginkgo It. Adds "[Conformance]" tag and makes static analysis easier.
-func ConformanceIt(text string, body interface{}, timeout ...float64) bool {
-    return ginkgo.It(text+" [Conformance]", body, timeout...)
+func ConformanceIt(text string, body interface{}) bool {
+    return ginkgo.It(text+" [Conformance]", ginkgo.Offset(1), body)
 }

 // PodStateVerification represents a verification of pod state.
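All CurrentGinkgoTestDescription().Failed call sites move to the ginkgo v2 report API. A minimal sketch of the replacement, only valid while a spec is running:

package example

import "github.com/onsi/ginkgo/v2"

// specFailed shows the v2 idiom used throughout this commit:
// ginkgo.CurrentGinkgoTestDescription().Failed (v1) becomes
// ginkgo.CurrentSpecReport().Failed() (v2).
func specFailed() bool {
    return ginkgo.CurrentSpecReport().Failed()
}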
16 vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper/wrapper.go generated vendored
@@ -26,7 +26,7 @@ import (
     "runtime/debug"
     "strings"

-    "github.com/onsi/ginkgo"
+    "github.com/onsi/ginkgo/v2"
 )

 // FailurePanic is the value that will be panicked from Fail.
@@ -37,8 +37,18 @@ type FailurePanic struct {
     FullStackTrace string // A full stack trace starting at the source of the failure
 }

+const ginkgoFailurePanic = `
+Your test failed.
+Ginkgo panics to prevent subsequent assertions from running.
+Normally Ginkgo rescues this panic so you shouldn't see it.
+But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
+To circumvent this, you should call
+    defer GinkgoRecover()
+at the top of the goroutine that caused this panic.
+`
+
 // String makes FailurePanic look like the old Ginkgo panic when printed.
-func (FailurePanic) String() string { return ginkgo.GINKGO_PANIC }
+func (FailurePanic) String() string { return ginkgoFailurePanic }

 // Fail wraps ginkgo.Fail so that it panics with more useful
 // information about the failure. This function will panic with a
@@ -69,7 +79,7 @@ func Fail(message string, callerSkip ...int) {

 // ginkgo adds a lot of test running infrastructure to the stack, so
 // we filter those out
-var stackSkipPattern = regexp.MustCompile(`onsi/ginkgo`)
+var stackSkipPattern = regexp.MustCompile(`onsi/ginkgo/v2`)

 func pruneStack(skip int) string {
     skip += 2 // one for pruneStack and one for debug.Stack
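The new ginkgoFailurePanic text describes the GinkgoRecover pattern. A hedged sketch of what it asks test authors to do; the spec body is hypothetical:

package example

import (
    "github.com/onsi/ginkgo/v2"
    "github.com/onsi/gomega"
)

var _ = ginkgo.It("asserts from a goroutine", func() {
    done := make(chan struct{})
    go func() {
        // Without this, a failed assertion in the goroutine panics
        // with the message embedded in ginkgoFailurePanic above.
        defer ginkgo.GinkgoRecover()
        defer close(done)
        gomega.Expect(1 + 1).To(gomega.Equal(2))
    }()
    <-done
})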
2 vendor/k8s.io/kubernetes/test/e2e/framework/kubectl/kubectl_utils.go generated vendored
@@ -33,7 +33,7 @@ import (
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     testutils "k8s.io/kubernetes/test/utils"

-    "github.com/onsi/ginkgo"
+    "github.com/onsi/ginkgo/v2"
 )

 const (
14 vendor/k8s.io/kubernetes/test/e2e/framework/log.go generated vendored
@@ -23,7 +23,7 @@ import (
     "runtime/debug"
     "time"

-    "github.com/onsi/ginkgo"
+    "github.com/onsi/ginkgo/v2"

     // TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
     e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
@@ -63,7 +63,7 @@ func Fail(msg string, callerSkip ...int) {
     e2eginkgowrapper.Fail(nowStamp()+": "+msg, skip)
 }

-var codeFilterRE = regexp.MustCompile(`/github.com/onsi/ginkgo/`)
+var codeFilterRE = regexp.MustCompile(`/github.com/onsi/ginkgo/v2/`)

 // PrunedStack is a wrapper around debug.Stack() that removes information
 // about the current goroutine and optionally skips some of the initial stack entries.
@@ -71,11 +71,11 @@ var codeFilterRE = regexp.MustCompile(`/github.com/onsi/ginkgo/v2/`)
 // From the remaining entries it automatically filters out useless ones like
 // entries coming from Ginkgo.
 //
-// This is a modified copy of PruneStack in https://github.com/onsi/ginkgo/blob/f90f37d87fa6b1dd9625e2b1e83c23ffae3de228/internal/codelocation/code_location.go#L25:
-// - simplified API and thus renamed (calls debug.Stack() instead of taking a parameter)
-// - source code filtering updated to be specific to Kubernetes
-// - optimized to use bytes and in-place slice filtering from
-//   https://github.com/golang/go/wiki/SliceTricks#filter-in-place
+// This is a modified copy of PruneStack in https://github.com/onsi/ginkgo/v2/blob/f90f37d87fa6b1dd9625e2b1e83c23ffae3de228/internal/codelocation/code_location.go#L25:
+//   - simplified API and thus renamed (calls debug.Stack() instead of taking a parameter)
+//   - source code filtering updated to be specific to Kubernetes
+//   - optimized to use bytes and in-place slice filtering from
+//     https://github.com/golang/go/wiki/SliceTricks#filter-in-place
 func PrunedStack(skip int) []byte {
     fullStackTrace := debug.Stack()
     stack := bytes.Split(fullStackTrace, []byte("\n"))
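PrunedStack's comment cites the SliceTricks in-place filter. A self-contained sketch of that idiom with the updated v2 pattern; the stack contents are hypothetical:

package main

import (
    "bytes"
    "fmt"
    "regexp"
)

var filter = regexp.MustCompile(`/github.com/onsi/ginkgo/v2/`)

func main() {
    stack := [][]byte{
        []byte("k8s.io/kubernetes/test/e2e/framework.Logf(...)"),
        []byte("/go/pkg/mod/github.com/onsi/ginkgo/v2/internal.run(...)"),
    }
    // Filter-in-place from the SliceTricks wiki page cited above:
    // compact kept lines to the front, then reslice. No allocation.
    n := 0
    for _, line := range stack {
        if !filter.Match(line) {
            stack[n] = line
            n++
        }
    }
    stack = stack[:n]
    fmt.Println(string(bytes.Join(stack, []byte("\n"))))
}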
2 vendor/k8s.io/kubernetes/test/e2e/framework/log/logger.go generated vendored
@@ -22,7 +22,7 @@ import (
     "fmt"
     "time"

-    "github.com/onsi/ginkgo"
+    "github.com/onsi/ginkgo/v2"

     e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
 )
38 vendor/k8s.io/kubernetes/test/e2e/framework/node/resource.go generated vendored
@@ -24,7 +24,7 @@ import (
     "strings"
     "time"

-    "github.com/onsi/ginkgo"
+    "github.com/onsi/ginkgo/v2"
     "github.com/onsi/gomega"

     v1 "k8s.io/api/core/v1"
@@ -377,6 +377,42 @@ func GetRandomReadySchedulableNode(c clientset.Interface) (*v1.Node, error) {
     return &nodes.Items[rand.Intn(len(nodes.Items))], nil
 }

+// GetSubnetPrefix gets first 2 number of an IP in the node subnet. [IPv4]
+// It assumes that the subnet mask is /16.
+func GetSubnetPrefix(c clientset.Interface) ([]string, error) {
+    node, err := GetReadySchedulableWorkerNode(c)
+    if err != nil {
+        return nil, fmt.Errorf("error getting a ready schedulable worker Node, err: %v", err)
+    }
+    internalIP, err := GetInternalIP(node)
+    if err != nil {
+        return nil, fmt.Errorf("error getting Node internal IP, err: %v", err)
+    }
+    splitted := strings.Split(internalIP, ".")
+    if len(splitted) == 4 {
+        return splitted[:2], nil
+    }
+    return nil, fmt.Errorf("invalid IP address format: %s", internalIP)
+}
+
+// GetReadySchedulableWorkerNode gets a single worker node which is available for
+// running pods on. If there are no such available nodes it will return an error.
+func GetReadySchedulableWorkerNode(c clientset.Interface) (*v1.Node, error) {
+    nodes, err := GetReadySchedulableNodes(c)
+    if err != nil {
+        return nil, err
+    }
+    for i := range nodes.Items {
+        node := nodes.Items[i]
+        _, isMaster := node.Labels["node-role.kubernetes.io/master"]
+        _, isControlPlane := node.Labels["node-role.kubernetes.io/control-plane"]
+        if !isMaster && !isControlPlane {
+            return &node, nil
+        }
+    }
+    return nil, fmt.Errorf("there are currently no ready, schedulable worker nodes in the cluster")
+}
+
 // GetReadyNodesIncludingTainted returns all ready nodes, even those which are tainted.
 // There are cases when we care about tainted nodes
 // E.g. in tests related to nodes with gpu we care about nodes despite
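The new GetSubnetPrefix reduces the /16 assumption to keeping the first two dotted quads of the node's InternalIP. A sketch with a hypothetical address:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Hypothetical node InternalIP; under a /16 mask the subnet prefix
    // is simply the first two dotted quads.
    internalIP := "10.244.3.17"
    parts := strings.Split(internalIP, ".")
    if len(parts) == 4 {
        fmt.Println(parts[:2]) // [10 244]
    }
}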
4 vendor/k8s.io/kubernetes/test/e2e/framework/nodes_util.go generated vendored
@@ -23,7 +23,7 @@ import (
     "sync"
     "time"

-    "github.com/onsi/ginkgo"
+    "github.com/onsi/ginkgo/v2"

     v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/util/wait"
@@ -34,7 +34,7 @@ import (
     e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 )

-const etcdImage = "3.5.3-0"
+const etcdImage = "3.5.4-0"

 // EtcdUpgrade upgrades etcd on GCE.
 func EtcdUpgrade(targetStorage, targetVersion string) error {
7 vendor/k8s.io/kubernetes/test/e2e/framework/pod/create.go generated vendored
@@ -28,11 +28,6 @@ import (
     imageutils "k8s.io/kubernetes/test/utils/image"
 )

-var (
-    // BusyBoxImage is the image URI of BusyBox.
-    BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
-)
-
 // Config is a struct containing all arguments for creating a pod.
 // SELinux testing requires to pass HostIPC and HostPID as boolean arguments.
 type Config struct {
@@ -47,7 +42,7 @@ type Config struct {
     SeLinuxLabel           *v1.SELinuxOptions
     FsGroup                *int64
     NodeSelection          NodeSelection
-    ImageID                int
+    ImageID                imageutils.ImageID
     PodFSGroupChangePolicy *v1.PodFSGroupChangePolicy
 }
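After the ImageID type change, callers pass imageutils constants rather than bare ints. A hedged sketch; the namespace value is hypothetical:

package example

import (
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    imageutils "k8s.io/kubernetes/test/utils/image"
)

// newConfig shows the effect of the type change: ImageID is now the
// typed imageutils.ImageID instead of a bare int.
func newConfig(ns string) *e2epod.Config {
    return &e2epod.Config{
        NS:      ns,
        ImageID: imageutils.BusyBox, // an imageutils.ImageID constant
    }
}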
2 vendor/k8s.io/kubernetes/test/e2e/framework/pod/delete.go generated vendored
@@ -21,7 +21,7 @@ import (
     "fmt"
     "time"

-    "github.com/onsi/ginkgo"
+    "github.com/onsi/ginkgo/v2"

     v1 "k8s.io/api/core/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
142 vendor/k8s.io/kubernetes/test/e2e/framework/pod/resource.go generated vendored
@@ -25,16 +25,14 @@ import (
     "strings"
     "time"

-    "github.com/onsi/ginkgo"
+    "github.com/onsi/ginkgo/v2"
     "github.com/onsi/gomega"
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/klog/v2"
     "k8s.io/kubectl/pkg/util/podutils"

     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     testutils "k8s.io/kubernetes/test/utils"
@@ -155,70 +153,6 @@ func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) {
     return true, nil
 }

-func podRunning(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
-    return func() (bool, error) {
-        pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
-        if err != nil {
-            return false, err
-        }
-        switch pod.Status.Phase {
-        case v1.PodRunning:
-            return true, nil
-        case v1.PodFailed, v1.PodSucceeded:
-            return false, errPodCompleted
-        }
-        return false, nil
-    }
-}
-
-func podCompleted(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
-    return func() (bool, error) {
-        pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
-        if err != nil {
-            return false, err
-        }
-        switch pod.Status.Phase {
-        case v1.PodFailed, v1.PodSucceeded:
-            return true, nil
-        }
-        return false, nil
-    }
-}
-
-func podRunningAndReady(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
-    return func() (bool, error) {
-        pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
-        if err != nil {
-            return false, err
-        }
-        switch pod.Status.Phase {
-        case v1.PodFailed, v1.PodSucceeded:
-            e2elog.Logf("The status of Pod %s is %s which is unexpected", podName, pod.Status.Phase)
-            return false, errPodCompleted
-        case v1.PodRunning:
-            e2elog.Logf("The status of Pod %s is %s (Ready = %v)", podName, pod.Status.Phase, podutils.IsPodReady(pod))
-            return podutils.IsPodReady(pod), nil
-        }
-        e2elog.Logf("The status of Pod %s is %s, waiting for it to be Running (with Ready = true)", podName, pod.Status.Phase)
-        return false, nil
-    }
-}
-
-func podNotPending(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
-    return func() (bool, error) {
-        pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
-        if err != nil {
-            return false, err
-        }
-        switch pod.Status.Phase {
-        case v1.PodPending:
-            return false, nil
-        default:
-            return true, nil
-        }
-    }
-}
-
 // PodsCreated returns a pod list matched by the given name.
 func PodsCreated(c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) {
     label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
@@ -305,60 +239,6 @@ func podsRunning(c clientset.Interface, pods *v1.PodList) []error {
     return e
 }

-func podContainerFailed(c clientset.Interface, namespace, podName string, containerIndex int, reason string) wait.ConditionFunc {
-    return func() (bool, error) {
-        pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
-        if err != nil {
-            return false, err
-        }
-        switch pod.Status.Phase {
-        case v1.PodPending:
-            if len(pod.Status.ContainerStatuses) == 0 {
-                return false, nil
-            }
-            containerStatus := pod.Status.ContainerStatuses[containerIndex]
-            if containerStatus.State.Waiting != nil && containerStatus.State.Waiting.Reason == reason {
-                return true, nil
-            }
-            return false, nil
-        case v1.PodFailed, v1.PodRunning, v1.PodSucceeded:
-            return false, fmt.Errorf("pod was expected to be pending, but it is in the state: %s", pod.Status.Phase)
-        }
-        return false, nil
-    }
-}
-
-func podContainerStarted(c clientset.Interface, namespace, podName string, containerIndex int) wait.ConditionFunc {
-    return func() (bool, error) {
-        pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
-        if err != nil {
-            return false, err
-        }
-        if containerIndex > len(pod.Status.ContainerStatuses)-1 {
-            return false, nil
-        }
-        containerStatus := pod.Status.ContainerStatuses[containerIndex]
-        return *containerStatus.Started, nil
-    }
-}
-
-func isContainerRunning(c clientset.Interface, namespace, podName, containerName string) wait.ConditionFunc {
-    return func() (bool, error) {
-        pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
-        if err != nil {
-            return false, err
-        }
-        for _, statuses := range [][]v1.ContainerStatus{pod.Status.ContainerStatuses, pod.Status.InitContainerStatuses, pod.Status.EphemeralContainerStatuses} {
-            for _, cs := range statuses {
-                if cs.Name == containerName {
-                    return cs.State.Running != nil, nil
-                }
-            }
-        }
-        return false, nil
-    }
-}
-
 // LogPodStates logs basic info of provided pods for debugging.
 func LogPodStates(pods []v1.Pod) {
     // Find maximum widths for pod, node, and phase strings for column printing.
@@ -565,13 +445,7 @@ func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tw
     }
     execPod, err := client.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
     expectNoError(err, "failed to create new exec pod in namespace: %s", ns)
-    err = wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) {
-        retrievedPod, err := client.CoreV1().Pods(execPod.Namespace).Get(context.TODO(), execPod.Name, metav1.GetOptions{})
-        if err != nil {
-            return false, err
-        }
-        return retrievedPod.Status.Phase == v1.PodRunning, nil
-    })
+    err = WaitForPodNameRunningInNamespace(client, execPod.Name, execPod.Namespace)
     expectNoError(err, "failed to create new exec pod in namespace: %s", ns)
     return execPod
 }
@@ -769,3 +643,15 @@ func IsPodActive(p *v1.Pod) bool {
         v1.PodFailed != p.Status.Phase &&
         p.DeletionTimestamp == nil
 }
+
+func podIdentifier(namespace, name string) string {
+    return fmt.Sprintf("%s/%s", namespace, name)
+}
+
+func identifier(pod *v1.Pod) string {
+    id := podIdentifier(pod.Namespace, pod.Name)
+    if pod.UID != "" {
+        id += fmt.Sprintf("(%s)", pod.UID)
+    }
+    return id
+}
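Callers of the removed unexported helpers (podRunning, podCompleted, podRunningAndReady, podNotPending) migrate to the exported wrappers that wait.go now implements via WaitForPodCondition. A minimal sketch:

package example

import (
    clientset "k8s.io/client-go/kubernetes"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitRunning shows the replacement path: instead of building a
// wait.ConditionFunc from the deleted podRunning helper, use the
// exported Wait* wrapper kept in wait.go.
func waitRunning(c clientset.Interface, name, ns string) error {
    return e2epod.WaitForPodNameRunningInNamespace(c, name, ns)
}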
111 vendor/k8s.io/kubernetes/test/e2e/framework/pod/utils.go generated vendored
@@ -18,9 +18,14 @@ package pod

 import (
     "flag"
+    "fmt"
+
+    "github.com/onsi/gomega"

     v1 "k8s.io/api/core/v1"
     imageutils "k8s.io/kubernetes/test/utils/image"
+    psaapi "k8s.io/pod-security-admission/api"
+    psapolicy "k8s.io/pod-security-admission/policy"
     "k8s.io/utils/pointer"
 )

@@ -56,14 +61,14 @@ func GetDefaultTestImage() string {
 // If the node OS is windows, currently we return Agnhost image for Windows node
 // due to the issue of #https://github.com/kubernetes-sigs/windows-testing/pull/35.
 // If the node OS is linux, return busybox image
-func GetDefaultTestImageID() int {
+func GetDefaultTestImageID() imageutils.ImageID {
     return GetTestImageID(imageutils.BusyBox)
 }

 // GetTestImage returns the image name with the given input
 // If the Node OS is windows, currently we return Agnhost image for Windows node
 // due to the issue of #https://github.com/kubernetes-sigs/windows-testing/pull/35.
-func GetTestImage(id int) string {
+func GetTestImage(id imageutils.ImageID) string {
     if NodeOSDistroIs("windows") {
         return imageutils.GetE2EImage(imageutils.Agnhost)
     }
@@ -73,13 +78,23 @@ func GetTestImage(id imageutils.ImageID) string {
 // GetTestImageID returns the image id with the given input
 // If the Node OS is windows, currently we return Agnhost image for Windows node
 // due to the issue of #https://github.com/kubernetes-sigs/windows-testing/pull/35.
-func GetTestImageID(id int) int {
+func GetTestImageID(id imageutils.ImageID) imageutils.ImageID {
     if NodeOSDistroIs("windows") {
         return imageutils.Agnhost
     }
     return id
 }

+// GetDefaultNonRootUser returns default non root user
+// If the Node OS is windows, we return nill due to issue with invalid permissions set on projected volumes
+// https://github.com/kubernetes/kubernetes/issues/102849
+func GetDefaultNonRootUser() *int64 {
+    if NodeOSDistroIs("windows") {
+        return nil
+    }
+    return pointer.Int64(DefaultNonRootUser)
+}
+
 // GeneratePodSecurityContext generates the corresponding pod security context with the given inputs
 // If the Node OS is windows, currently we will ignore the inputs and return nil.
 // TODO: Will modify it after windows has its own security context
@@ -115,12 +130,28 @@ func GetLinuxLabel() *v1.SELinuxOptions {
         Level: "s0:c0,c1"}
 }

-// GetRestrictedPodSecurityContext returns a minimal restricted pod security context.
+// DefaultNonRootUser is the default user ID used for running restricted (non-root) containers.
+const DefaultNonRootUser = 1000
+
+// DefaultNonRootUserName is the default username in Windows used for running restricted (non-root) containers
+const DefaultNonRootUserName = "ContainerUser"
+
+// GetRestrictedPodSecurityContext returns a restricted pod security context.
+// This includes setting RunAsUser for convenience, to pass the RunAsNonRoot check.
+// Tests that require a specific user ID should override this.
 func GetRestrictedPodSecurityContext() *v1.PodSecurityContext {
-    return &v1.PodSecurityContext{
+    psc := &v1.PodSecurityContext{
         RunAsNonRoot:   pointer.BoolPtr(true),
+        RunAsUser:      GetDefaultNonRootUser(),
         SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault},
     }
+
+    if NodeOSDistroIs("windows") {
+        psc.WindowsOptions = &v1.WindowsSecurityContextOptions{}
+        psc.WindowsOptions.RunAsUserName = pointer.StringPtr(DefaultNonRootUserName)
+    }
+
+    return psc
 }

 // GetRestrictedContainerSecurityContext returns a minimal restricted container security context.
@@ -130,3 +161,73 @@ func GetRestrictedContainerSecurityContext() *v1.SecurityContext {
         Capabilities: &v1.Capabilities{Drop: []v1.Capability{"ALL"}},
     }
 }
+
+var psaEvaluator, _ = psapolicy.NewEvaluator(psapolicy.DefaultChecks())
+
+// MustMixinRestrictedPodSecurity makes the given pod compliant with the restricted pod security level.
+// If doing so would overwrite existing non-conformant configuration, a test failure is triggered.
+func MustMixinRestrictedPodSecurity(pod *v1.Pod) *v1.Pod {
+    err := MixinRestrictedPodSecurity(pod)
+    gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred())
+    return pod
+}
+
+// MixinRestrictedPodSecurity makes the given pod compliant with the restricted pod security level.
+// If doing so would overwrite existing non-conformant configuration, an error is returned.
+// Note that this sets a default RunAsUser. See GetRestrictedPodSecurityContext.
+// TODO(#105919): Handle PodOS for windows pods.
+func MixinRestrictedPodSecurity(pod *v1.Pod) error {
+    if pod.Spec.SecurityContext == nil {
+        pod.Spec.SecurityContext = GetRestrictedPodSecurityContext()
+    } else {
+        if pod.Spec.SecurityContext.RunAsNonRoot == nil {
+            pod.Spec.SecurityContext.RunAsNonRoot = pointer.BoolPtr(true)
+        }
+        if pod.Spec.SecurityContext.RunAsUser == nil {
+            pod.Spec.SecurityContext.RunAsUser = GetDefaultNonRootUser()
+        }
+        if pod.Spec.SecurityContext.SeccompProfile == nil {
+            pod.Spec.SecurityContext.SeccompProfile = &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault}
+        }
+        if NodeOSDistroIs("windows") && pod.Spec.SecurityContext.WindowsOptions == nil {
+            pod.Spec.SecurityContext.WindowsOptions = &v1.WindowsSecurityContextOptions{}
+            pod.Spec.SecurityContext.WindowsOptions.RunAsUserName = pointer.StringPtr(DefaultNonRootUserName)
+        }
+    }
+    for i := range pod.Spec.Containers {
+        mixinRestrictedContainerSecurityContext(&pod.Spec.Containers[i])
+    }
+    for i := range pod.Spec.InitContainers {
+        mixinRestrictedContainerSecurityContext(&pod.Spec.InitContainers[i])
+    }
+
+    // Validate the resulting pod against the restricted profile.
+    restricted := psaapi.LevelVersion{
+        Level:   psaapi.LevelRestricted,
+        Version: psaapi.LatestVersion(),
+    }
+    if agg := psapolicy.AggregateCheckResults(psaEvaluator.EvaluatePod(restricted, &pod.ObjectMeta, &pod.Spec)); !agg.Allowed {
+        return fmt.Errorf("failed to make pod %s restricted: %s", pod.Name, agg.ForbiddenDetail())
+    }
+
+    return nil
+}
+
+// mixinRestrictedContainerSecurityContext adds the required container security context options to
+// be compliant with the restricted pod security level. Non-conformance checking is handled by the
+// caller.
+func mixinRestrictedContainerSecurityContext(container *v1.Container) {
+    if container.SecurityContext == nil {
+        container.SecurityContext = GetRestrictedContainerSecurityContext()
+    } else {
+        if container.SecurityContext.AllowPrivilegeEscalation == nil {
+            container.SecurityContext.AllowPrivilegeEscalation = pointer.Bool(false)
+        }
+        if container.SecurityContext.Capabilities == nil {
+            container.SecurityContext.Capabilities = &v1.Capabilities{}
+        }
+        if len(container.SecurityContext.Capabilities.Drop) == 0 {
+            container.SecurityContext.Capabilities.Drop = []v1.Capability{"ALL"}
+        }
+    }
+}
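A hedged sketch of how a test would use the new helper to make a pod spec pass the restricted Pod Security level; the pod name and image are hypothetical:

package example

import (
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// restrictedPod builds a bare pod spec and lets the new helper fill in
// the restricted-profile security settings (RunAsNonRoot, seccomp,
// dropped capabilities), failing the test if existing settings conflict.
func restrictedPod(ns string) *v1.Pod {
    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: ns},
        Spec: v1.PodSpec{
            Containers: []v1.Container{{Name: "c", Image: "registry.k8s.io/e2e-test-images/busybox:1.29"}},
        },
    }
    return e2epod.MustMixinRestrictedPodSecurity(pod)
}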
458 vendor/k8s.io/kubernetes/test/e2e/framework/pod/wait.go generated vendored
@ -24,7 +24,7 @@ import (
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
@ -65,31 +65,73 @@ const (
|
||||
|
||||
type podCondition func(pod *v1.Pod) (bool, error)
|
||||
|
||||
// errorBadPodsStates create error message of basic info of bad pods for debugging.
|
||||
func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration, err error) string {
|
||||
errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout)
|
||||
if err != nil {
|
||||
errStr += fmt.Sprintf("Last error: %s\n", err)
|
||||
type timeoutError struct {
|
||||
msg string
|
||||
observedObjects []interface{}
|
||||
}
|
||||
|
||||
func (e *timeoutError) Error() string {
|
||||
return e.msg
|
||||
}
|
||||
|
||||
func TimeoutError(msg string, observedObjects ...interface{}) *timeoutError {
|
||||
return &timeoutError{
|
||||
msg: msg,
|
||||
observedObjects: observedObjects,
|
||||
}
|
||||
}
|
||||
|
||||
// maybeTimeoutError returns a TimeoutError if err is a timeout. Otherwise, wrap err.
|
||||
// taskFormat and taskArgs should be the task being performed when the error occurred,
|
||||
// e.g. "waiting for pod to be running".
|
||||
func maybeTimeoutError(err error, taskFormat string, taskArgs ...interface{}) error {
|
||||
if IsTimeout(err) {
|
||||
return TimeoutError(fmt.Sprintf("timed out while "+taskFormat, taskArgs...))
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("error while %s: %w", fmt.Sprintf(taskFormat, taskArgs...), err)
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func IsTimeout(err error) bool {
|
||||
if err == wait.ErrWaitTimeout {
|
||||
return true
|
||||
}
|
||||
if _, ok := err.(*timeoutError); ok {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// errorBadPodsStates create error message of basic info of bad pods for debugging.
|
||||
func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration, err error) error {
|
||||
errStr := fmt.Sprintf("%d / %d pods in namespace %s are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout)
|
||||
|
||||
// Print bad pods info only if there are fewer than 10 bad pods
|
||||
if len(badPods) > 10 {
|
||||
return errStr + "There are too many bad pods. Please check log for details."
|
||||
errStr += "There are too many bad pods. Please check log for details."
|
||||
} else {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
|
||||
fmt.Fprintln(w, "POD\tNODE\tPHASE\tGRACE\tCONDITIONS")
|
||||
for _, badPod := range badPods {
|
||||
grace := ""
|
||||
if badPod.DeletionGracePeriodSeconds != nil {
|
||||
grace = fmt.Sprintf("%ds", *badPod.DeletionGracePeriodSeconds)
|
||||
}
|
||||
podInfo := fmt.Sprintf("%s\t%s\t%s\t%s\t%+v",
|
||||
badPod.ObjectMeta.Name, badPod.Spec.NodeName, badPod.Status.Phase, grace, badPod.Status.Conditions)
|
||||
fmt.Fprintln(w, podInfo)
|
||||
}
|
||||
w.Flush()
|
||||
errStr += buf.String()
|
||||
}
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
|
||||
fmt.Fprintln(w, "POD\tNODE\tPHASE\tGRACE\tCONDITIONS")
|
||||
for _, badPod := range badPods {
|
||||
grace := ""
|
||||
if badPod.DeletionGracePeriodSeconds != nil {
|
||||
grace = fmt.Sprintf("%ds", *badPod.DeletionGracePeriodSeconds)
|
||||
}
|
||||
podInfo := fmt.Sprintf("%s\t%s\t%s\t%s\t%+v",
|
||||
badPod.ObjectMeta.Name, badPod.Spec.NodeName, badPod.Status.Phase, grace, badPod.Status.Conditions)
|
||||
fmt.Fprintln(w, podInfo)
|
||||
if err != nil && !IsTimeout(err) {
|
||||
return fmt.Errorf("%s\nLast error: %w", errStr, err)
|
||||
}
|
||||
w.Flush()
|
||||
return errStr + buf.String()
|
||||
return TimeoutError(errStr)
|
||||
}
|
||||
|
||||
// WaitForPodsRunningReady waits up to timeout to ensure that all pods in
|
||||
@ -129,10 +171,9 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
|
||||
lastAPIError = nil
|
||||
|
||||
rcList, err := c.CoreV1().ReplicationControllers(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
lastAPIError = err
|
||||
if err != nil {
|
||||
e2elog.Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
|
||||
lastAPIError = err
|
||||
return false, err
|
||||
return handleWaitingAPIError(err, false, "listing replication controllers in namespace %s", ns)
|
||||
}
|
||||
for _, rc := range rcList.Items {
|
||||
replicas += *rc.Spec.Replicas
|
||||
@ -140,10 +181,9 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
|
||||
}
|
||||
|
||||
rsList, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
lastAPIError = err
|
||||
if err != nil {
|
||||
lastAPIError = err
|
||||
e2elog.Logf("Error getting replication sets in namespace %q: %v", ns, err)
|
||||
return false, err
|
||||
return handleWaitingAPIError(err, false, "listing replication sets in namespace %s", ns)
|
||||
}
|
||||
for _, rs := range rsList.Items {
|
||||
replicas += *rs.Spec.Replicas
|
||||
@ -151,10 +191,9 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
|
||||
}
|
||||
|
||||
podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
lastAPIError = err
|
||||
if err != nil {
|
||||
lastAPIError = err
|
||||
e2elog.Logf("Error getting pods in namespace '%s': %v", ns, err)
|
||||
return false, err
|
||||
return handleWaitingAPIError(err, false, "listing pods in namespace %s", ns)
|
||||
}
|
||||
nOk := int32(0)
|
||||
notReady = int32(0)
|
||||
@ -197,7 +236,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
|
||||
return false, nil
|
||||
}) != nil {
|
||||
if !ignoreNotReady {
|
||||
return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout, lastAPIError))
|
||||
return errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout, lastAPIError)
|
||||
}
|
||||
e2elog.Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods)
|
||||
}
|
||||
@ -205,35 +244,85 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
|
||||
}
|
||||
|
||||
// WaitForPodCondition waits a pods to be matched to the given condition.
|
||||
func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
|
||||
e2elog.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, desc)
|
||||
var lastPodError error
|
||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
|
||||
func WaitForPodCondition(c clientset.Interface, ns, podName, conditionDesc string, timeout time.Duration, condition podCondition) error {
|
||||
e2elog.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, conditionDesc)
|
||||
var (
|
||||
lastPodError error
|
||||
lastPod *v1.Pod
|
||||
start = time.Now()
|
||||
)
|
||||
err := wait.PollImmediate(poll, timeout, func() (bool, error) {
|
||||
pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
|
||||
lastPodError = err
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
e2elog.Logf("Pod %q in namespace %q not found. Error: %v", podName, ns, err)
|
||||
} else {
|
||||
e2elog.Logf("Get pod %q in namespace %q failed, ignoring for %v. Error: %v", podName, ns, poll, err)
|
||||
}
|
||||
continue
|
||||
return handleWaitingAPIError(err, true, "getting pod %s", podIdentifier(ns, podName))
|
||||
}
|
||||
lastPod = pod // Don't overwrite if an error occurs after successfully retrieving.
|
||||
|
||||
// log now so that current pod info is reported before calling `condition()`
|
||||
e2elog.Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v",
|
||||
podName, pod.Status.Phase, pod.Status.Reason, podutils.IsPodReady(pod), time.Since(start))
|
||||
if done, err := condition(pod); done {
|
||||
if err == nil {
|
||||
e2elog.Logf("Pod %q satisfied condition %q", podName, desc)
|
||||
e2elog.Logf("Pod %q satisfied condition %q", podName, conditionDesc)
|
||||
}
|
||||
return err
|
||||
return true, err
|
||||
} else if err != nil {
|
||||
// TODO(#109732): stop polling and return the error in this case.
|
||||
e2elog.Logf("Error evaluating pod condition %s: %v", conditionDesc, err)
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
if IsTimeout(err) {
|
||||
if lastPod != nil {
|
||||
return TimeoutError(fmt.Sprintf("timed out while waiting for pod %s to be %s", podIdentifier(ns, podName), conditionDesc),
|
||||
lastPod,
|
||||
)
|
||||
} else if lastPodError != nil {
|
||||
// If the last API call was an error, propagate that instead of the timeout error.
|
||||
err = lastPodError
|
||||
}
|
||||
}
|
||||
if apierrors.IsNotFound(lastPodError) {
|
||||
// return for compatbility with other functions testing for IsNotFound
|
||||
return lastPodError
|
||||
}
|
||||
return fmt.Errorf("Gave up after waiting %v for pod %q to be %q", timeout, podName, desc)
|
||||
return maybeTimeoutError(err, "waiting for pod %s to be %s", podIdentifier(ns, podName), conditionDesc)
|
||||
}
|
||||
|
||||
// WaitForPodsCondition waits for the listed pods to match the given condition.
|
||||
// To succeed, at least minPods must be listed, and all listed pods must match the condition.
|
||||
func WaitForAllPodsCondition(c clientset.Interface, ns string, opts metav1.ListOptions, minPods int, conditionDesc string, timeout time.Duration, condition podCondition) (*v1.PodList, error) {
|
||||
e2elog.Logf("Waiting up to %v for at least %d pods in namespace %s to be %s", timeout, minPods, ns, conditionDesc)
|
||||
var pods *v1.PodList
|
||||
matched := 0
|
||||
err := wait.PollImmediate(poll, timeout, func() (done bool, err error) {
|
||||
pods, err = c.CoreV1().Pods(ns).List(context.TODO(), opts)
|
||||
if err != nil {
|
||||
return handleWaitingAPIError(err, true, "listing pods")
|
||||
}
|
||||
if len(pods.Items) < minPods {
|
||||
e2elog.Logf("found %d pods, waiting for at least %d", len(pods.Items), minPods)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
nonMatchingPods := []string{}
|
||||
for _, pod := range pods.Items {
|
||||
done, err := condition(&pod)
|
||||
if done && err != nil {
|
||||
return false, fmt.Errorf("error evaluating pod %s: %w", identifier(&pod), err)
|
||||
}
|
||||
if !done {
|
||||
nonMatchingPods = append(nonMatchingPods, identifier(&pod))
|
||||
}
|
||||
}
|
||||
matched = len(pods.Items) - len(nonMatchingPods)
|
||||
if len(nonMatchingPods) <= 0 {
|
||||
return true, nil // All pods match.
|
||||
}
|
||||
e2elog.Logf("%d pods are not %s: %v", len(nonMatchingPods), conditionDesc, nonMatchingPods)
|
||||
return false, nil
|
||||
})
|
||||
return pods, maybeTimeoutError(err, "waiting for at least %d pods to be %s (matched %d)", minPods, conditionDesc, matched)
|
||||
}

// WaitForPodTerminatedInNamespace returns an error if it takes too long for the pod to terminate,
@ -242,7 +331,7 @@ func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeou
// terminated (reason==""), but may be called to detect if a pod did *not* terminate according to
// the supplied reason.
func WaitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, namespace string) error {
    return WaitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", podStartTimeout, func(pod *v1.Pod) (bool, error) {
    return WaitForPodCondition(c, namespace, podName, fmt.Sprintf("terminated with reason %s", reason), podStartTimeout, func(pod *v1.Pod) (bool, error) {
        // Only consider Failed pods. Successful pods will be deleted and detected in
        // waitForPodCondition's Get call returning `IsNotFound`
        if pod.Status.Phase == v1.PodFailed {
@ -295,33 +384,6 @@ func WaitForPodNameUnschedulableInNamespace(c clientset.Interface, podName, name
    })
}

// WaitForMatchPodsCondition finds matching pods based on the input ListOptions,
// then waits and checks that all matching pods are in the given podCondition.
func WaitForMatchPodsCondition(c clientset.Interface, opts metav1.ListOptions, desc string, timeout time.Duration, condition podCondition) error {
    e2elog.Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc)
    for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
        pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), opts)
        if err != nil {
            return err
        }
        conditionNotMatch := []string{}
        for _, pod := range pods.Items {
            done, err := condition(&pod)
            if done && err != nil {
                return fmt.Errorf("Unexpected error: %v", err)
            }
            if !done {
                conditionNotMatch = append(conditionNotMatch, fmt.Sprintf("%s_%s(%s)", pod.Name, pod.Namespace, pod.UID))
            }
        }
        if len(conditionNotMatch) <= 0 {
            return err
        }
        e2elog.Logf("%d pods are not %s: %v", len(conditionNotMatch), desc, conditionNotMatch)
    }
    return fmt.Errorf("gave up waiting for matching pods to be '%s' after %v", desc, timeout)
}

// WaitForPodNameRunningInNamespace waits default amount of time (PodStartTimeout) for the specified pod to become running.
// Returns an error if timeout occurs first, or pod goes into a failed state.
func WaitForPodNameRunningInNamespace(c clientset.Interface, podName, namespace string) error {
@ -337,7 +399,15 @@ func WaitForPodRunningInNamespaceSlow(c clientset.Interface, podName, namespace

// WaitTimeoutForPodRunningInNamespace waits the given timeout duration for the specified pod to become running.
func WaitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
    return wait.PollImmediate(poll, timeout, podRunning(c, podName, namespace))
    return WaitForPodCondition(c, namespace, podName, "running", timeout, func(pod *v1.Pod) (bool, error) {
        switch pod.Status.Phase {
        case v1.PodRunning:
            return true, nil
        case v1.PodFailed, v1.PodSucceeded:
            return false, errPodCompleted
        }
        return false, nil
    })
}
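Because the running-wait now surfaces errPodCompleted when the pod finishes instead of running, a caller can separate that terminal state from a plain timeout; a sketch, assuming the sentinel is returned unwrapped or wrapped with %w:

    err := WaitTimeoutForPodRunningInNamespace(c, "demo-pod", "demo", 2*time.Minute)
    if errors.Is(err, errPodCompleted) {
        // The pod reached Failed or Succeeded; retrying the wait cannot help.
    }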

// WaitForPodRunningInNamespace waits default amount of time (podStartTimeout) for the specified pod to become running.
@ -351,7 +421,13 @@ func WaitForPodRunningInNamespace(c clientset.Interface, pod *v1.Pod) error {

// WaitTimeoutForPodNoLongerRunningInNamespace waits the given timeout duration for the specified pod to stop.
func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
    return wait.PollImmediate(poll, timeout, podCompleted(c, podName, namespace))
    return WaitForPodCondition(c, namespace, podName, "completed", timeout, func(pod *v1.Pod) (bool, error) {
        switch pod.Status.Phase {
        case v1.PodFailed, v1.PodSucceeded:
            return true, nil
        }
        return false, nil
    })
}

// WaitForPodNoLongerRunningInNamespace waits default amount of time (defaultPodDeletionTimeout) for the specified pod to stop running.
@ -363,14 +439,32 @@ func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namesp
// WaitTimeoutForPodReadyInNamespace waits the given timeout duration for the
// specified pod to be ready and running.
func WaitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
    return wait.PollImmediate(poll, timeout, podRunningAndReady(c, podName, namespace))
    return WaitForPodCondition(c, namespace, podName, "running and ready", timeout, func(pod *v1.Pod) (bool, error) {
        switch pod.Status.Phase {
        case v1.PodFailed, v1.PodSucceeded:
            e2elog.Logf("The phase of Pod %s is %s which is unexpected, pod status: %#v", pod.Name, pod.Status.Phase, pod.Status)
            return false, errPodCompleted
        case v1.PodRunning:
            e2elog.Logf("The phase of Pod %s is %s (Ready = %v)", pod.Name, pod.Status.Phase, podutils.IsPodReady(pod))
            return podutils.IsPodReady(pod), nil
        }
        e2elog.Logf("The phase of Pod %s is %s, waiting for it to be Running (with Ready = true)", pod.Name, pod.Status.Phase)
        return false, nil
    })
}

// WaitForPodNotPending returns an error if it took too long for the pod to go out of the pending state.
// The resourceVersion is used when watching object changes; it tells us from which
// point we care about changes to the pod.
func WaitForPodNotPending(c clientset.Interface, ns, podName string) error {
    return wait.PollImmediate(poll, podStartTimeout, podNotPending(c, podName, ns))
    return WaitForPodCondition(c, ns, podName, "not pending", podStartTimeout, func(pod *v1.Pod) (bool, error) {
        switch pod.Status.Phase {
        case v1.PodPending:
            return false, nil
        default:
            return true, nil
        }
    })
}

// WaitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or ran longer than podStartupTimeout.
@ -388,32 +482,45 @@ func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, nam
// api returns IsNotFound then the wait stops and nil is returned. If the Get api returns an error other
// than "not found" then that error is returned and the wait stops.
func WaitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, timeout time.Duration) error {
    return wait.PollImmediate(poll, timeout, func() (bool, error) {
        _, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
    var lastPod *v1.Pod
    err := wait.PollImmediate(poll, timeout, func() (bool, error) {
        pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
        if apierrors.IsNotFound(err) {
            return true, nil // done
        }
        if err != nil {
            return true, err // stop wait with error
            return handleWaitingAPIError(err, true, "getting pod %s", podIdentifier(ns, podName))
        }
        lastPod = pod
        return false, nil
    })
    if err == nil {
        return nil
    }
    if IsTimeout(err) && lastPod != nil {
        return TimeoutError(fmt.Sprintf("timed out while waiting for pod %s to be Not Found", podIdentifier(ns, podName)),
            lastPod,
        )
    }
    return maybeTimeoutError(err, "waiting for pod %s not found", podIdentifier(ns, podName))
}

// WaitForPodToDisappear waits the given timeout duration for the specified pod to disappear.
func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
    return wait.PollImmediate(interval, timeout, func() (bool, error) {
    var lastPod *v1.Pod
    err := wait.PollImmediate(interval, timeout, func() (bool, error) {
        e2elog.Logf("Waiting for pod %s to disappear", podName)
        options := metav1.ListOptions{LabelSelector: label.String()}
        pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
        if err != nil {
            return false, err
            return handleWaitingAPIError(err, true, "listing pods")
        }
        found := false
        for _, pod := range pods.Items {
        for i, pod := range pods.Items {
            if pod.Name == podName {
                e2elog.Logf("Pod %s still exists", podName)
                found = true
                lastPod = &(pods.Items[i])
                break
            }
        }
@ -423,91 +530,66 @@ func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labe
        }
        return false, nil
    })
    if err == nil {
        return nil
    }
    if IsTimeout(err) {
        return TimeoutError(fmt.Sprintf("timed out while waiting for pod %s to disappear", podIdentifier(ns, podName)),
            lastPod,
        )
    }
    return maybeTimeoutError(err, "waiting for pod %s to disappear", podIdentifier(ns, podName))
}
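Note the switch from `for _, pod := range pods.Items` to an index-based loop: `lastPod = &(pods.Items[i])` keeps a pointer to the slice element, while `&pod` would alias the loop variable that Go (before the 1.22 loop-variable change) reuses on every iteration. A small sketch of the pitfall the new code avoids:

    items := []v1.Pod{{}, {}, {}}
    var aliased, stable *v1.Pod
    for i, pod := range items {
        aliased = &pod     // address of the reused loop variable
        stable = &items[i] // address of the element itself
    }
    // aliased points at a copy overwritten each iteration; stable points into items.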

// PodsResponding waits for the pods to respond.
func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error {
    ginkgo.By("trying to dial each unique pod")
    label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
    return wait.PollImmediate(poll, podRespondingTimeout, NewProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
    err := wait.PollImmediate(poll, podRespondingTimeout, NewProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
    return maybeTimeoutError(err, "waiting for pods to be responsive")
}

// WaitForNumberOfPods waits up to timeout to ensure there are exactly
// `num` pods in namespace `ns`.
// It returns the matching Pods or a timeout error.
func WaitForNumberOfPods(c clientset.Interface, ns string, num int, timeout time.Duration) (pods *v1.PodList, err error) {
    actualNum := 0
    err = wait.PollImmediate(poll, timeout, func() (bool, error) {
        pods, err = c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
        // ignore intermittent network error
        if err != nil {
            return false, nil
            return handleWaitingAPIError(err, false, "listing pods")
        }
        return len(pods.Items) == num, nil
        actualNum = len(pods.Items)
        return actualNum == num, nil
    })
    return
    return pods, maybeTimeoutError(err, "waiting for there to be exactly %d pods in namespace (last seen %d)", num, actualNum)
}

// WaitForPodsWithLabelScheduled waits for all matching pods to become scheduled and at least one
// matching pod exists. Return the list of matching pods.
func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
    err = wait.PollImmediate(poll, podScheduledBeforeTimeout,
        func() (bool, error) {
            pods, err = WaitForPodsWithLabel(c, ns, label)
            if err != nil {
                return false, err
            }
            for _, pod := range pods.Items {
                if pod.Spec.NodeName == "" {
                    return false, nil
                }
            }
            return true, nil
        })
    return pods, err
    opts := metav1.ListOptions{LabelSelector: label.String()}
    return WaitForAllPodsCondition(c, ns, opts, 1, "scheduled", podScheduledBeforeTimeout, func(pod *v1.Pod) (bool, error) {
        if pod.Spec.NodeName == "" {
            return false, nil
        }
        return true, nil
    })
}

// WaitForPodsWithLabel waits up to podListTimeout for getting pods with a certain label
func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
    for t := time.Now(); time.Since(t) < podListTimeout; time.Sleep(poll) {
        options := metav1.ListOptions{LabelSelector: label.String()}
        pods, err = c.CoreV1().Pods(ns).List(context.TODO(), options)
        if err != nil {
            return
        }
        if len(pods.Items) > 0 {
            break
        }
    }
    if pods == nil || len(pods.Items) == 0 {
        err = fmt.Errorf("Timeout while waiting for pods with label %v", label)
    }
    return
func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (*v1.PodList, error) {
    opts := metav1.ListOptions{LabelSelector: label.String()}
    return WaitForAllPodsCondition(c, ns, opts, 1, "existent", podListTimeout, func(pod *v1.Pod) (bool, error) {
        return true, nil
    })
}

// WaitForPodsWithLabelRunningReady waits for the given number of matching pods to become running and ready.
// Return the list of matching pods.
func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label labels.Selector, num int, timeout time.Duration) (pods *v1.PodList, err error) {
    var current int
    err = wait.Poll(poll, timeout,
        func() (bool, error) {
            pods, err = WaitForPodsWithLabel(c, ns, label)
            if err != nil {
                e2elog.Logf("Failed to list pods: %v", err)
                return false, err
            }
            current = 0
            for _, pod := range pods.Items {
                if flag, err := testutils.PodRunningReady(&pod); err == nil && flag == true {
                    current++
                }
            }
            if current != num {
                e2elog.Logf("Got %v pods running and ready, expect: %v", current, num)
                return false, nil
            }
            return true, nil
        })
    return pods, err
    opts := metav1.ListOptions{LabelSelector: label.String()}
    return WaitForAllPodsCondition(c, ns, opts, 1, "running and ready", podListTimeout, testutils.PodRunningReady)
}

// WaitForNRestartablePods tries to list restarting pods using ps until it finds `expect` of them,
@ -540,33 +622,101 @@ func WaitForNRestartablePods(ps *testutils.PodStore, expect int, timeout time.Du
// invalid container configuration. In this case, the container will remain in a waiting state with a specific
// reason set, which should match the given reason.
func WaitForPodContainerToFail(c clientset.Interface, namespace, podName string, containerIndex int, reason string, timeout time.Duration) error {
    return wait.PollImmediate(poll, timeout, podContainerFailed(c, namespace, podName, containerIndex, reason))
    conditionDesc := fmt.Sprintf("container %d failed with reason %s", containerIndex, reason)
    return WaitForPodCondition(c, namespace, podName, conditionDesc, timeout, func(pod *v1.Pod) (bool, error) {
        switch pod.Status.Phase {
        case v1.PodPending:
            if len(pod.Status.ContainerStatuses) == 0 {
                return false, nil
            }
            containerStatus := pod.Status.ContainerStatuses[containerIndex]
            if containerStatus.State.Waiting != nil && containerStatus.State.Waiting.Reason == reason {
                return true, nil
            }
            return false, nil
        case v1.PodFailed, v1.PodRunning, v1.PodSucceeded:
            return false, fmt.Errorf("pod was expected to be pending, but it is in the state: %s", pod.Status.Phase)
        }
        return false, nil
    })
}
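A usage sketch for this helper (client, namespace, and pod name are assumptions): wait for the first container of a pod with a broken configuration to surface the expected waiting reason.

    err := WaitForPodContainerToFail(c, "demo", "bad-config-pod", 0,
        "CreateContainerConfigError", 30*time.Second)
    if err != nil {
        // Either the pod left Pending unexpectedly or the reason never showed up in time.
    }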

// WaitForPodContainerStarted waits for the given Pod container to start, after a successful run of the startupProbe.
func WaitForPodContainerStarted(c clientset.Interface, namespace, podName string, containerIndex int, timeout time.Duration) error {
    return wait.PollImmediate(poll, timeout, podContainerStarted(c, namespace, podName, containerIndex))
    conditionDesc := fmt.Sprintf("container %d started", containerIndex)
    return WaitForPodCondition(c, namespace, podName, conditionDesc, timeout, func(pod *v1.Pod) (bool, error) {
        if containerIndex > len(pod.Status.ContainerStatuses)-1 {
            return false, nil
        }
        containerStatus := pod.Status.ContainerStatuses[containerIndex]
        return *containerStatus.Started, nil
    })
}

// WaitForPodFailedReason waits for a pod failure reason in status, for example "SysctlForbidden".
func WaitForPodFailedReason(c clientset.Interface, pod *v1.Pod, reason string, timeout time.Duration) error {
    waitErr := wait.PollImmediate(poll, timeout, func() (bool, error) {
        pod, err := c.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        if pod.Status.Reason == reason {
            return true, nil
    conditionDesc := fmt.Sprintf("failed with reason %s", reason)
    return WaitForPodCondition(c, pod.Namespace, pod.Name, conditionDesc, timeout, func(pod *v1.Pod) (bool, error) {
        switch pod.Status.Phase {
        case v1.PodSucceeded:
            return true, errors.New("pod succeeded unexpectedly")
        case v1.PodFailed:
            if pod.Status.Reason == reason {
                return true, nil
            } else {
                return true, fmt.Errorf("pod failed with reason %s", pod.Status.Reason)
            }
        }
        return false, nil
    })
    if waitErr != nil {
        return fmt.Errorf("error waiting for pod failure status: %v", waitErr)
    }
    return nil
}

// WaitForContainerRunning waits for the given Pod container to have a state of running
func WaitForContainerRunning(c clientset.Interface, namespace, podName, containerName string, timeout time.Duration) error {
    return wait.PollImmediate(poll, timeout, isContainerRunning(c, namespace, podName, containerName))
    conditionDesc := fmt.Sprintf("container %s running", containerName)
    return WaitForPodCondition(c, namespace, podName, conditionDesc, timeout, func(pod *v1.Pod) (bool, error) {
        for _, statuses := range [][]v1.ContainerStatus{pod.Status.ContainerStatuses, pod.Status.InitContainerStatuses, pod.Status.EphemeralContainerStatuses} {
            for _, cs := range statuses {
                if cs.Name == containerName {
                    return cs.State.Running != nil, nil
                }
            }
        }
        return false, nil
    })
}

// handleWaitingAPIError handles an error from an API request in the context of a Wait function.
// If the error is retryable, sleep the recommended delay and ignore the error.
// If the error is terminal, return it.
func handleWaitingAPIError(err error, retryNotFound bool, taskFormat string, taskArgs ...interface{}) (bool, error) {
    taskDescription := fmt.Sprintf(taskFormat, taskArgs...)
    if retryNotFound && apierrors.IsNotFound(err) {
        e2elog.Logf("Ignoring NotFound error while " + taskDescription)
        return false, nil
    }
    if retry, delay := shouldRetry(err); retry {
        e2elog.Logf("Retryable error while %s, retrying after %v: %v", taskDescription, delay, err)
        if delay > 0 {
            time.Sleep(delay)
        }
        return false, nil
    }
    e2elog.Logf("Encountered non-retryable error while %s: %v", taskDescription, err)
    return false, err
}

// Decide whether to retry an API request. Optionally include a delay to retry after.
func shouldRetry(err error) (retry bool, retryAfter time.Duration) {
    // if the error sends the Retry-After header, we respect it as an explicit confirmation we should retry.
    if delay, shouldRetry := apierrors.SuggestsClientDelay(err); shouldRetry {
        return shouldRetry, time.Duration(delay) * time.Second
    }

    // these errors indicate a transient error that should be retried.
    if apierrors.IsTimeout(err) || apierrors.IsTooManyRequests(err) {
        return true, 0
    }

    return false, 0
}
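The retry decision can be exercised in isolation; a sketch using an apimachinery error that carries a Retry-After hint (the message text is an assumption):

    // NewTooManyRequests returns a 429 StatusError with retryAfterSeconds set.
    err := apierrors.NewTooManyRequests("throttled by apiserver", 3)
    if retry, delay := shouldRetry(err); retry {
        e2elog.Logf("retrying after %v", delay) // 3s, taken from the error's Retry-After hint
    }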

2 vendor/k8s.io/kubernetes/test/e2e/framework/pods.go generated vendored
@ -36,7 +36,7 @@ import (
    v1core "k8s.io/client-go/kubernetes/typed/core/v1"
    "k8s.io/kubectl/pkg/util/podutils"

    "github.com/onsi/ginkgo"
    "github.com/onsi/ginkgo/v2"
    "github.com/onsi/gomega"

    // TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
192 vendor/k8s.io/kubernetes/test/e2e/framework/psp.go generated vendored
@ -1,192 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
    "context"
    "fmt"
    "strings"
    "sync"

    v1 "k8s.io/api/core/v1"
    policyv1beta1 "k8s.io/api/policy/v1beta1"
    rbacv1 "k8s.io/api/rbac/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apiserver/pkg/authentication/serviceaccount"
    clientset "k8s.io/client-go/kubernetes"
    imageutils "k8s.io/kubernetes/test/utils/image"

    "github.com/onsi/ginkgo"

    // TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
    e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
)

const (
    podSecurityPolicyPrivileged = "e2e-test-privileged-psp"

    // allowAny is the wildcard used to allow any profile.
    allowAny = "*"

    // allowedProfilesAnnotationKey specifies the allowed seccomp profiles.
    allowedProfilesAnnotationKey = "seccomp.security.alpha.kubernetes.io/allowedProfileNames"
)

var (
    isPSPEnabledOnce sync.Once
    isPSPEnabled     bool
)

// privilegedPSP creates a PodSecurityPolicy that allows everything.
func privilegedPSP(name string) *policyv1beta1.PodSecurityPolicy {
    allowPrivilegeEscalation := true
    return &policyv1beta1.PodSecurityPolicy{
        ObjectMeta: metav1.ObjectMeta{
            Name:        name,
            Annotations: map[string]string{allowedProfilesAnnotationKey: allowAny},
        },
        Spec: policyv1beta1.PodSecurityPolicySpec{
            Privileged:               true,
            AllowPrivilegeEscalation: &allowPrivilegeEscalation,
            AllowedCapabilities:      []v1.Capability{"*"},
            Volumes:                  []policyv1beta1.FSType{policyv1beta1.All},
            HostNetwork:              true,
            HostPorts:                []policyv1beta1.HostPortRange{{Min: 0, Max: 65535}},
            HostIPC:                  true,
            HostPID:                  true,
            RunAsUser: policyv1beta1.RunAsUserStrategyOptions{
                Rule: policyv1beta1.RunAsUserStrategyRunAsAny,
            },
            SELinux: policyv1beta1.SELinuxStrategyOptions{
                Rule: policyv1beta1.SELinuxStrategyRunAsAny,
            },
            SupplementalGroups: policyv1beta1.SupplementalGroupsStrategyOptions{
                Rule: policyv1beta1.SupplementalGroupsStrategyRunAsAny,
            },
            FSGroup: policyv1beta1.FSGroupStrategyOptions{
                Rule: policyv1beta1.FSGroupStrategyRunAsAny,
            },
            ReadOnlyRootFilesystem: false,
            AllowedUnsafeSysctls:   []string{"*"},
        },
    }
}

// IsPodSecurityPolicyEnabled returns true if PodSecurityPolicy is enabled. Otherwise false.
func IsPodSecurityPolicyEnabled(kubeClient clientset.Interface) bool {
    isPSPEnabledOnce.Do(func() {
        psps, err := kubeClient.PolicyV1beta1().PodSecurityPolicies().List(context.TODO(), metav1.ListOptions{})
        if err != nil {
            Logf("Error listing PodSecurityPolicies; assuming PodSecurityPolicy is disabled: %v", err)
            return
        }
        if psps == nil || len(psps.Items) == 0 {
            Logf("No PodSecurityPolicies found; assuming PodSecurityPolicy is disabled.")
            return
        }
        Logf("Found PodSecurityPolicies; testing pod creation to see if PodSecurityPolicy is enabled")
        testPod := &v1.Pod{
            ObjectMeta: metav1.ObjectMeta{GenerateName: "psp-test-pod-"},
            Spec:       v1.PodSpec{Containers: []v1.Container{{Name: "test", Image: imageutils.GetPauseImageName()}}},
        }
        dryRunPod, err := kubeClient.CoreV1().Pods("kube-system").Create(context.TODO(), testPod, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
        if err != nil {
            if strings.Contains(err.Error(), "PodSecurityPolicy") {
                Logf("PodSecurityPolicy error creating dryrun pod; assuming PodSecurityPolicy is enabled: %v", err)
                isPSPEnabled = true
            } else {
                Logf("Error creating dryrun pod; assuming PodSecurityPolicy is disabled: %v", err)
            }
            return
        }
        pspAnnotation, pspAnnotationExists := dryRunPod.Annotations["kubernetes.io/psp"]
        if !pspAnnotationExists {
            Logf("No PSP annotation exists on dry run pod; assuming PodSecurityPolicy is disabled")
            return
        }
        Logf("PSP annotation exists on dry run pod: %q; assuming PodSecurityPolicy is enabled", pspAnnotation)
        isPSPEnabled = true
    })
    return isPSPEnabled
}

var (
    privilegedPSPOnce sync.Once
)

// CreatePrivilegedPSPBinding creates the privileged PSP & role
func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string) {
    if !IsPodSecurityPolicyEnabled(kubeClient) {
        return
    }
    // Create the privileged PSP & role
    privilegedPSPOnce.Do(func() {
        _, err := kubeClient.PolicyV1beta1().PodSecurityPolicies().Get(context.TODO(), podSecurityPolicyPrivileged, metav1.GetOptions{})
        if !apierrors.IsNotFound(err) {
            // Privileged PSP was already created.
            ExpectNoError(err, "Failed to get PodSecurityPolicy %s", podSecurityPolicyPrivileged)
            return
        }

        psp := privilegedPSP(podSecurityPolicyPrivileged)
        _, err = kubeClient.PolicyV1beta1().PodSecurityPolicies().Create(context.TODO(), psp, metav1.CreateOptions{})
        if !apierrors.IsAlreadyExists(err) {
            ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged)
        }

        if e2eauth.IsRBACEnabled(kubeClient.RbacV1()) {
            // Create the Role to bind it to the namespace.
            _, err = kubeClient.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{
                ObjectMeta: metav1.ObjectMeta{Name: podSecurityPolicyPrivileged},
                Rules: []rbacv1.PolicyRule{{
                    APIGroups:     []string{"extensions"},
                    Resources:     []string{"podsecuritypolicies"},
                    ResourceNames: []string{podSecurityPolicyPrivileged},
                    Verbs:         []string{"use"},
                }},
            }, metav1.CreateOptions{})
            if !apierrors.IsAlreadyExists(err) {
                ExpectNoError(err, "Failed to create PSP role")
            }
        }
    })

    if e2eauth.IsRBACEnabled(kubeClient.RbacV1()) {
        ginkgo.By(fmt.Sprintf("Binding the %s PodSecurityPolicy to the default service account in %s",
            podSecurityPolicyPrivileged, namespace))
        err := e2eauth.BindClusterRoleInNamespace(kubeClient.RbacV1(),
            podSecurityPolicyPrivileged,
            namespace,
            rbacv1.Subject{
                Kind:      rbacv1.ServiceAccountKind,
                Namespace: namespace,
                Name:      "default",
            },
            rbacv1.Subject{
                Kind:     rbacv1.GroupKind,
                APIGroup: rbacv1.GroupName,
                Name:     "system:serviceaccounts:" + namespace,
            },
        )
        ExpectNoError(err)
        ExpectNoError(e2eauth.WaitForNamedAuthorizationUpdate(kubeClient.AuthorizationV1(),
            serviceaccount.MakeUsername(namespace, "default"), namespace, "use", podSecurityPolicyPrivileged,
            schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true))
    }
}
63 vendor/k8s.io/kubernetes/test/e2e/framework/pv/pv.go generated vendored
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package framework
package pv

import (
    "context"
@ -26,7 +26,7 @@ import (

    "k8s.io/kubernetes/test/e2e/storage/utils"

    "github.com/onsi/ginkgo"
    "github.com/onsi/ginkgo/v2"
    v1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/resource"
@ -34,6 +34,7 @@ import (
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/types"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/pkg/volume/util"
    "k8s.io/kubernetes/test/e2e/framework"
    e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
)
@ -45,14 +46,6 @@ const (
    // VolumeSelectorKey is the key for volume selector.
    VolumeSelectorKey = "e2e-pv-pool"

    // isDefaultStorageClassAnnotation represents a StorageClass annotation that
    // marks a class as the default StorageClass
    isDefaultStorageClassAnnotation = "storageclass.kubernetes.io/is-default-class"

    // betaIsDefaultStorageClassAnnotation is the beta version of IsDefaultStorageClassAnnotation.
    // TODO: remove Beta when no longer used
    betaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class"

    // volumeGidAnnotationKey is the key of the annotation on the PersistentVolume
    // object that specifies a supplemental GID.
    // it is copied from k8s.io/kubernetes/pkg/volume/util VolumeGidAnnotationKey
@ -80,13 +73,15 @@ type pvcval struct{}
// present. We must always Get the pvc object before referencing any of its values, eg.
// its VolumeName.
// Note: It's unsafe to add keys to a map in a loop. Their insertion in the map is
// unpredictable and can result in the same key being iterated over again.
//
// unpredictable and can result in the same key being iterated over again.
type PVCMap map[types.NamespacedName]pvcval

// PersistentVolumeConfig is consumed by MakePersistentVolume() to generate a PV object
// for varying storage options (NFS, ceph, glusterFS, etc.).
// (+optional) prebind holds a pre-bound PVC
// Example pvSource:
//
//	pvSource: api.PersistentVolumeSource{
//		NFS: &api.NFSVolumeSource{
//			...
@ -257,7 +252,8 @@ func DeletePVCandValidatePV(c clientset.Interface, timeouts *framework.TimeoutCo
// are deleted. Validates that the claim was deleted and the PV is in the expected Phase (Released,
// Available, Bound).
// Note: if there are more claims than pvs then some of the remaining claims may bind to just made
// available pvs.
//
// available pvs.
func DeletePVCandValidatePVGroup(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvols PVMap, claims PVCMap, expectPVPhase v1.PersistentVolumePhase) error {
    var boundPVs, deletedPVCs int

@ -347,8 +343,9 @@ func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim)
// namespace. If the "preBind" bool is true then pre-bind the PV to the PVC
// via the PV's ClaimRef. Return the pv and pvc to reflect the created objects.
// Note: in the pre-bind case the real PVC name, which is generated, is not
// known until after the PVC is instantiated. This is why the pvc is created
// before the pv.
//
// known until after the PVC is instantiated. This is why the pvc is created
// before the pv.
func CreatePVCPV(c clientset.Interface, timeouts *framework.TimeoutContext, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
    // make the pvc spec
    pvc := MakePersistentVolumeClaim(pvcConfig, ns)
@ -382,8 +379,9 @@ func CreatePVCPV(c clientset.Interface, timeouts *framework.TimeoutContext, pvCo
// via the PVC's VolumeName. Return the pv and pvc to reflect the created
// objects.
// Note: in the pre-bind case the real PV name, which is generated, is not
// known until after the PV is instantiated. This is why the pv is created
// before the pvc.
//
// known until after the PV is instantiated. This is why the pv is created
// before the pvc.
func CreatePVPVC(c clientset.Interface, timeouts *framework.TimeoutContext, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
    preBindMsg := ""
    if preBind {
@ -417,7 +415,8 @@ func CreatePVPVC(c clientset.Interface, timeouts *framework.TimeoutContext, pvCo
// entries for the resources that were successfully created. In other words, when the caller
// sees an error returned, it needs to decide what to do about entries in the maps.
// Note: when the test suite deletes the namespace orphaned pvcs and pods are deleted. However,
// orphaned pvs are not deleted and will remain after the suite completes.
//
// orphaned pvs are not deleted and will remain after the suite completes.
func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig) (PVMap, PVCMap, error) {
    pvMap := make(PVMap, numpvs)
    pvcMap := make(PVCMap, numpvcs)
@ -504,10 +503,11 @@ func WaitOnPVandPVC(c clientset.Interface, timeouts *framework.TimeoutContext, n

// WaitAndVerifyBinds searches for bound PVs and PVCs by examining pvols for non-nil claimRefs.
// NOTE: Each iteration waits for a maximum of 3 minutes per PV and, if the PV is bound,
// up to 3 minutes for the PVC. When the number of PVs != number of PVCs, this can lead
// to situations where the maximum wait times are reached several times in succession,
// extending test time. Thus, it is recommended to keep the delta between PVs and PVCs
// small.
//
// up to 3 minutes for the PVC. When the number of PVs != number of PVCs, this can lead
// to situations where the maximum wait times are reached several times in succession,
// extending test time. Thus, it is recommended to keep the delta between PVs and PVCs
// small.
func WaitAndVerifyBinds(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvols PVMap, claims PVCMap, testExpected bool) error {
    var actualBinds int
    expectedBinds := len(pvols)
@ -563,8 +563,9 @@ func makePvcKey(ns, name string) types.NamespacedName {
// If the PVC is nil then the PV is not defined with a ClaimRef. If no reclaimPolicy
// is assigned, assumes "Retain". Specs are expected to match the test's PVC.
// Note: the passed-in claim does not have a name until it is created and thus the PV's
// ClaimRef cannot be completely filled-in in this func. Therefore, the ClaimRef's name
// is added later in CreatePVCPV.
//
// ClaimRef cannot be completely filled-in in this func. Therefore, the ClaimRef's name
// is added later in CreatePVCPV.
func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume {
    var claimRef *v1.ObjectReference

@ -818,7 +819,7 @@ func GetDefaultStorageClassName(c clientset.Interface) (string, error) {
    }
    var scName string
    for _, sc := range list.Items {
        if isDefaultAnnotation(sc.ObjectMeta) {
        if util.IsDefaultAnnotation(sc.ObjectMeta) {
            if len(scName) != 0 {
                return "", fmt.Errorf("Multiple default storage classes found: %q and %q", scName, sc.Name)
            }
@ -832,20 +833,6 @@ func GetDefaultStorageClassName(c clientset.Interface) (string, error) {
    return scName, nil
}

// isDefaultAnnotation returns a boolean if the default storage class
// annotation is set
// TODO: remove Beta when no longer needed
func isDefaultAnnotation(obj metav1.ObjectMeta) bool {
    if obj.Annotations[isDefaultStorageClassAnnotation] == "true" {
        return true
    }
    if obj.Annotations[betaIsDefaultStorageClassAnnotation] == "true" {
        return true
    }

    return false
}

// SkipIfNoDefaultStorageClass skips tests if no default SC can be found.
func SkipIfNoDefaultStorageClass(c clientset.Interface) {
    _, err := GetDefaultStorageClassName(c)
55 vendor/k8s.io/kubernetes/test/e2e/framework/skipper/skipper.go generated vendored
@ -26,14 +26,13 @@ import (
    "runtime/debug"
    "strings"

    "github.com/onsi/ginkgo"
    "github.com/onsi/ginkgo/v2"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime/schema"
    utilversion "k8s.io/apimachinery/pkg/util/version"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/client-go/discovery"
    "k8s.io/client-go/dynamic"
    clientset "k8s.io/client-go/kubernetes"
@ -57,8 +56,18 @@ type SkipPanic struct {
    FullStackTrace string // A full stack trace starting at the source of the failure
}

const ginkgoSkipPanic = `
Your test was skipped.
Ginkgo panics to prevent subsequent assertions from running.
Normally Ginkgo rescues this panic so you shouldn't see it.
But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
To circumvent this, you should call
	defer GinkgoRecover()
at the top of the goroutine that caused this panic.
`

// String makes SkipPanic look like the old Ginkgo panic when printed.
func (SkipPanic) String() string { return ginkgo.GINKGO_PANIC }
func (SkipPanic) String() string { return ginkgoSkipPanic }

// Skip wraps ginkgo.Skip so that it panics with more useful
// information about why the test is being skipped. This function will
@ -89,7 +98,7 @@ func skip(message string, callerSkip ...int) {

// ginkgo adds a lot of test running infrastructure to the stack, so
// we filter those out
var stackSkipPattern = regexp.MustCompile(`onsi/ginkgo`)
var stackSkipPattern = regexp.MustCompile(`onsi/ginkgo/v2`)

func pruneStack(skip int) string {
    skip += 2 // one for pruneStack and one for debug.Stack
@ -128,16 +137,46 @@ func SkipUnlessAtLeast(value int, minValue int, message string) {
    }
}

// SkipUnlessFeatureGateEnabled skips if the feature is disabled
var featureGate featuregate.FeatureGate

// InitFeatureGates must be called in test suites that have a --feature-gates parameter.
// If not called, SkipUnlessFeatureGateEnabled and SkipIfFeatureGateEnabled will
// record a test failure.
func InitFeatureGates(defaults featuregate.FeatureGate, overrides map[string]bool) error {
    clone := defaults.DeepCopy()
    if err := clone.SetFromMap(overrides); err != nil {
        return err
    }
    featureGate = clone
    return nil
}
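A sketch of how a suite wires this up (parsedGates stands in for the suite's parsed --feature-gates value and is an assumption):

    // Assumed: parsedGates is a map[string]bool bound to --feature-gates,
    // e.g. via cliflag.NewMapStringBool(&parsedGates).
    if err := InitFeatureGates(utilfeature.DefaultFeatureGate, parsedGates); err != nil {
        framework.Failf("invalid --feature-gates: %v", err)
    }
    // From here on, the Skip helpers below consult the cloned gate set.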

// SkipUnlessFeatureGateEnabled skips if the feature is disabled.
//
// Beware that this only works in test suites that have a --feature-gate
// parameter and call InitFeatureGates. In test/e2e, the `Feature: XYZ` tag
// has to be used instead and invocations have to make sure that they
// only run tests that work with the given test cluster.
func SkipUnlessFeatureGateEnabled(gate featuregate.Feature) {
    if !utilfeature.DefaultFeatureGate.Enabled(gate) {
    if featureGate == nil {
        framework.Failf("Feature gate checking is not enabled, don't use SkipUnlessFeatureGateEnabled(%v). Instead use the Feature tag.", gate)
    }
    if !featureGate.Enabled(gate) {
        skipInternalf(1, "Only supported when %v feature is enabled", gate)
    }
}

// SkipIfFeatureGateEnabled skips if the feature is enabled
// SkipIfFeatureGateEnabled skips if the feature is enabled.
//
// Beware that this only works in test suites that have a --feature-gate
// parameter and call InitFeatureGates. In test/e2e, the `Feature: XYZ` tag
// has to be used instead and invocations have to make sure that they
// only run tests that work with the given test cluster.
func SkipIfFeatureGateEnabled(gate featuregate.Feature) {
    if utilfeature.DefaultFeatureGate.Enabled(gate) {
    if featureGate == nil {
        framework.Failf("Feature gate checking is not enabled, don't use SkipFeatureGateEnabled(%v). Instead use the Feature tag.", gate)
    }
    if featureGate.Enabled(gate) {
        skipInternalf(1, "Only supported when %v feature is disabled", gate)
    }
}
167 vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go generated vendored
@ -24,11 +24,15 @@ import (
    "fmt"
    "math"
    "os"
    "path"
    "sort"
    "strings"
    "time"

    "github.com/onsi/ginkgo/config"
    "github.com/onsi/ginkgo/v2"
    "github.com/onsi/ginkgo/v2/reporters"
    "github.com/onsi/ginkgo/v2/types"

    restclient "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
@ -52,24 +56,24 @@ const (
// into the code which uses the settings.
//
// The recommendation for those settings is:
// - They are stored in their own context structure or local
//   variables.
// - The standard `flag` package is used to register them.
//   The flag name should follow the pattern <part1>.<part2>....<partn>
//   where the prefix is unlikely to conflict with other tests or
//   standard packages and each part is in lower camel case. For
//   example, test/e2e/storage/csi/context.go could define
//   storage.csi.numIterations.
// - framework/config can be used to simplify the registration of
//   multiple options with a single function call:
//   var storageCSI {
//       NumIterations `default:"1" usage:"number of iterations"`
//   }
//   _ config.AddOptions(&storageCSI, "storage.csi")
// - The direct use of Viper in tests is possible, but discouraged because
//   it only works in test suites which use Viper (which is not
//   required) and the supported options cannot be
//   discovered by a test suite user.
//   - They are stored in their own context structure or local
//     variables.
//   - The standard `flag` package is used to register them.
//     The flag name should follow the pattern <part1>.<part2>....<partn>
//     where the prefix is unlikely to conflict with other tests or
//     standard packages and each part is in lower camel case. For
//     example, test/e2e/storage/csi/context.go could define
//     storage.csi.numIterations.
//   - framework/config can be used to simplify the registration of
//     multiple options with a single function call:
//     var storageCSI {
//     NumIterations `default:"1" usage:"number of iterations"`
//     }
//     _ config.AddOptions(&storageCSI, "storage.csi")
//   - The direct use of Viper in tests is possible, but discouraged because
//     it only works in test suites which use Viper (which is not
//     required) and the supported options cannot be
//     discovered by a test suite user.
//
// Test suite authors can use framework/viper to make all command line
// parameters also configurable via a configuration file.
@ -150,8 +154,6 @@ type TestContextType struct {
    DisableLogDump bool
    // Path to the GCS artifacts directory to dump logs from nodes. Logexporter gets enabled if this is non-empty.
    LogexporterGCSPath string
    // featureGates is a map of feature names to bools that enable or disable alpha/experimental features.
    FeatureGates map[string]bool
    // Node e2e specific test context
    NodeTestContextType

@ -188,6 +190,9 @@ type TestContextType struct {
    // RequireDevices makes it mandatory that the environment on which tests run exposes 1+ devices through device plugins.
    // With this enabled, e2e tests requiring devices for their operation can assume that if devices aren't reported, the test can fail.
    RequireDevices bool

    // Enable volume drivers which are disabled by default. See test/e2e/storage/in_tree_volumes.go for details.
    EnabledVolumeDrivers []string
}

// NodeKillerConfig describes configuration of NodeKiller -- a utility to
@ -261,6 +266,27 @@ type CloudConfig struct {
// TestContext should be used by all tests to access common context data.
var TestContext TestContextType

// stringArrayValue is used with flag.Var for a comma-separated list of strings placed into a string array.
type stringArrayValue struct {
    stringArray *[]string
}

func (v stringArrayValue) String() string {
    if v.stringArray != nil {
        return strings.Join(*v.stringArray, ",")
    }
    return ""
}

func (v stringArrayValue) Set(s string) error {
    if len(s) == 0 {
        *v.stringArray = []string{}
    } else {
        *v.stringArray = strings.Split(s, ",")
    }
    return nil
}
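A minimal sketch of the value type against the standard flag package (the flag-set name and target slice are assumptions):

    var drivers []string
    fs := flag.NewFlagSet("e2e", flag.ContinueOnError)
    fs.Var(&stringArrayValue{&drivers}, "enabled-volume-drivers", "comma-separated driver list")
    _ = fs.Parse([]string{"--enabled-volume-drivers=gcepd,nfs"})
    // drivers is now []string{"gcepd", "nfs"}; an empty value resets it to an empty slice.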

// ClusterIsIPv6 returns true if the cluster is IPv6
func (tc TestContextType) ClusterIsIPv6() bool {
    return tc.IPFamily == "ipv6"
@ -278,20 +304,11 @@ func (tc TestContextType) ClusterIsIPv6() bool {
// options themselves, copy flags from test/e2e/framework/config
// as shown in HandleFlags.
func RegisterCommonFlags(flags *flag.FlagSet) {
    // Turn on verbose by default to get spec names
    config.DefaultReporterConfig.Verbose = true

    // Turn on EmitSpecProgress to get spec progress (especially on interrupt)
    config.GinkgoConfig.EmitSpecProgress = true

    // Randomize specs as well as suites
    config.GinkgoConfig.RandomizeAllSpecs = true

    flags.StringVar(&TestContext.GatherKubeSystemResourceUsageData, "gather-resource-usage", "false", "If set to 'true' or 'all' framework will be monitoring resource usage of system all add-ons in (some) e2e tests, if set to 'master' framework will be monitoring master node only, if set to 'none' of 'false' monitoring will be turned off.")
    flags.BoolVar(&TestContext.GatherLogsSizes, "gather-logs-sizes", false, "If set to true framework will be monitoring logs sizes on all machines running e2e tests.")
    flags.IntVar(&TestContext.MaxNodesToGather, "max-nodes-to-gather-from", 20, "The maximum number of nodes to gather extended info from on test failure.")
    flags.StringVar(&TestContext.GatherMetricsAfterTest, "gather-metrics-at-teardown", "false", "If set to 'true' framework will gather metrics from all components after each test. If set to 'master' only master component metrics would be gathered.")
    flags.BoolVar(&TestContext.GatherSuiteMetricsAfterTest, "gather-suite-metrics-at-teardown", false, "If set to true framwork will gather metrics from all components after the whole test suite completes.")
    flags.BoolVar(&TestContext.GatherSuiteMetricsAfterTest, "gather-suite-metrics-at-teardown", false, "If set to true framework will gather metrics from all components after the whole test suite completes.")
    flags.BoolVar(&TestContext.IncludeClusterAutoscalerMetrics, "include-cluster-autoscaler", false, "If set to true, framework will include Cluster Autoscaler when gathering metrics.")
    flags.StringVar(&TestContext.OutputPrintType, "output-print-type", "json", "Format in which summaries should be printed: 'hr' for human readable, 'json' for JSON ones.")
    flags.BoolVar(&TestContext.DumpLogsOnFailure, "dump-logs-on-failure", true, "If set to true test will dump data about the namespace in which test was running.")
@ -303,8 +320,7 @@ func RegisterCommonFlags(flags *flag.FlagSet) {

    flags.StringVar(&TestContext.Host, "host", "", fmt.Sprintf("The host, or apiserver, to connect to. Will default to %s if this argument and --kubeconfig are not set.", defaultHost))
    flags.StringVar(&TestContext.ReportPrefix, "report-prefix", "", "Optional prefix for JUnit XML reports. Default is empty, which doesn't prepend anything to the default name.")
    flags.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.")
    flags.Var(cliflag.NewMapStringBool(&TestContext.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
    flags.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the JUnit XML reports and other tests results should be saved. Default is empty, which doesn't generate these reports. If ginkgo's -junit-report parameter is used, that parameter instead of -report-dir determines the location of a single JUnit report.")
    flags.StringVar(&TestContext.ContainerRuntimeEndpoint, "container-runtime-endpoint", "unix:///var/run/containerd/containerd.sock", "The container runtime endpoint of cluster VM instances.")
    flags.StringVar(&TestContext.ContainerRuntimeProcessName, "container-runtime-process-name", "dockerd", "The name of the container runtime process.")
    flags.StringVar(&TestContext.ContainerRuntimePidFile, "container-runtime-pid-file", "/var/run/docker.pid", "The pid file of the container runtime.")
@ -318,7 +334,7 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
    // suite to work with this legacy non-blocking taint.
    flags.StringVar(&TestContext.NonblockingTaints, "non-blocking-taints", `node-role.kubernetes.io/control-plane,node-role.kubernetes.io/master`, "Nodes with taints in this comma-delimited list will not block the test framework from starting tests. The default taint 'node-role.kubernetes.io/master' is DEPRECATED and will be removed from the list in a future release.")

    flags.BoolVar(&TestContext.ListImages, "list-images", false, "If true, will show list of images used for runnning tests.")
    flags.BoolVar(&TestContext.ListImages, "list-images", false, "If true, will show list of images used for running tests.")
    flags.BoolVar(&TestContext.ListConformanceTests, "list-conformance-tests", false, "If true, will show list of conformance tests.")
    flags.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.")

@ -328,6 +344,24 @@ func RegisterCommonFlags(flags *flag.FlagSet) {

    flags.StringVar(&TestContext.SnapshotControllerPodName, "snapshot-controller-pod-name", "", "The pod name to use for identifying the snapshot controller in the kube-system namespace.")
    flags.IntVar(&TestContext.SnapshotControllerHTTPPort, "snapshot-controller-http-port", 0, "The port to use for snapshot controller HTTP communication.")

    flags.Var(&stringArrayValue{&TestContext.EnabledVolumeDrivers}, "enabled-volume-drivers", "Comma-separated list of in-tree volume drivers to enable for testing. This is only needed for in-tree drivers disabled by default. An example is gcepd; see test/e2e/storage/in_tree_volumes.go for full details.")
}

func CreateGinkgoConfig() (types.SuiteConfig, types.ReporterConfig) {
    // fetch the current config
    suiteConfig, reporterConfig := ginkgo.GinkgoConfiguration()
    // Turn on EmitSpecProgress to get spec progress (especially on interrupt)
    suiteConfig.EmitSpecProgress = true
    // Randomize specs as well as suites
    suiteConfig.RandomizeAllSpecs = true
    // Turn on verbose by default to get spec names
    reporterConfig.Verbose = true
    // Disable skipped tests unless they are explicitly requested.
    if len(suiteConfig.FocusStrings) == 0 && len(suiteConfig.SkipStrings) == 0 {
        suiteConfig.SkipStrings = []string{`\[Flaky\]|\[Feature:.+\]`}
    }
    return suiteConfig, reporterConfig
}
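A sketch of the intended call site when bootstrapping a Ginkgo v2 suite (the function and suite names are assumptions; Ginkgo v2's RunSpecs accepts the two config structs as trailing arguments):

    func RunE2ETests(t *testing.T) {
        gomega.RegisterFailHandler(ginkgo.Fail)
        suiteConfig, reporterConfig := CreateGinkgoConfig()
        ginkgo.RunSpecs(t, "Kubernetes e2e suite", suiteConfig, reporterConfig)
    }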

// RegisterClusterFlags registers flags specific to the cluster e2e test suite.
@ -444,8 +478,21 @@ func GenerateSecureToken(tokenLen int) (string, error) {
}

// AfterReadingAllFlags makes changes to the context after all flags
// have been read.
// have been read and prepares the process for a test run.
func AfterReadingAllFlags(t *TestContextType) {
    // Reconfigure klog so that output goes to the GinkgoWriter instead
    // of stderr. The advantage is that it then gets interleaved properly
    // with output that goes to GinkgoWriter (By, Logf).

    // These flags are not exposed via the normal command line flag set,
    // therefore we have to use our own private one here.
    var fs flag.FlagSet
    klog.InitFlags(&fs)
    fs.Set("logtostderr", "false")
    fs.Set("alsologtostderr", "false")
    fs.Set("one_output", "true")
    klog.SetOutput(ginkgo.GinkgoWriter)

    // Only set a default host if one won't be supplied via kubeconfig
    if len(t.Host) == 0 && len(t.KubeConfig) == 0 {
        // Check if we can use the in-cluster config
@ -507,4 +554,54 @@ func AfterReadingAllFlags(t *TestContextType) {
        }
        os.Exit(1)
    }

    if TestContext.ReportDir != "" {
        ginkgo.ReportAfterSuite("Kubernetes e2e JUnit report", writeJUnitReport)
    }
}

// writeJUnitReport generates a JUnit file in the e2e report directory that is
// shorter than the one normally written by `ginkgo --junit-report`. This is
// needed because the full report can become too large for tools like Spyglass
// (https://github.com/kubernetes/kubernetes/issues/111510).
//
// Users who want the full report can use `--junit-report`.
func writeJUnitReport(report ginkgo.Report) {
    trimmedReport := report
    trimmedReport.SpecReports = nil
    for _, specReport := range report.SpecReports {
        // Remove details for any spec that hasn't failed. In Prow,
        // the test output captured in build-log.txt has all of this
        // information, so we don't need it in the XML.
        if specReport.State != types.SpecStateFailed {
            specReport.CapturedGinkgoWriterOutput = ""
            specReport.CapturedStdOutErr = ""
        }

        // Remove report entries generated by ginkgo.By("doing
        // something") because those are not useful (just have the
        // start time) and cause Spyglass to show an additional "open
        // stdout" button with a summary of the steps, which usually
        // doesn't help. We don't remove all entries because other
        // measurements also get reported this way.
        //
        // Removing the report entries is okay because message text was
        // already added to the test output when ginkgo.By was called.
        reportEntries := specReport.ReportEntries
        specReport.ReportEntries = nil
        for _, reportEntry := range reportEntries {
            if reportEntry.Name != "By Step" {
                specReport.ReportEntries = append(specReport.ReportEntries, reportEntry)
            }
        }

        trimmedReport.SpecReports = append(trimmedReport.SpecReports, specReport)
    }

    // With Ginkgo v1, we used to write one file per parallel node. Now
    // Ginkgo v2 automatically merges all results into a report for us. The
    // 01 suffix is kept in case that users expect files to be called
    // "junit_<prefix><number>.xml".
    junitReport := path.Join(TestContext.ReportDir, "junit_"+TestContext.ReportPrefix+"01.xml")
    reporters.GenerateJUnitReport(trimmedReport, junitReport)
}
2 vendor/k8s.io/kubernetes/test/e2e/framework/timeouts.go generated vendored
@ -31,7 +31,7 @@ const (
    pvReclaimTimeout      = 3 * time.Minute
    pvBoundTimeout        = 3 * time.Minute
    pvCreateTimeout       = 3 * time.Minute
    pvDeleteTimeout       = 3 * time.Minute
    pvDeleteTimeout       = 5 * time.Minute
    pvDeleteSlowTimeout   = 20 * time.Minute
    snapshotCreateTimeout = 5 * time.Minute
    snapshotDeleteTimeout = 5 * time.Minute
54 vendor/k8s.io/kubernetes/test/e2e/framework/util.go generated vendored
@ -35,7 +35,7 @@ import (
"syscall"
"time"

"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
gomegatypes "github.com/onsi/gomega/types"

@ -50,16 +50,13 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/component-base/featuregate"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"
@ -439,7 +436,7 @@ func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error {
return fmt.Errorf("Waiting for terminating namespaces to be deleted timed out")
}

//WaitForServiceEndpointsNum waits until the amount of endpoints that implement service to expectNum.
// WaitForServiceEndpointsNum waits until the amount of endpoints that implement service to expectNum.
func WaitForServiceEndpointsNum(c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
return wait.Poll(interval, timeout, func() (bool, error) {
Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum)
@ -489,10 +486,13 @@ type ClientConfigGetter func() (*restclient.Config, error)
func LoadConfig() (config *restclient.Config, err error) {
defer func() {
if err == nil && config != nil {
testDesc := ginkgo.CurrentGinkgoTestDescription()
if len(testDesc.ComponentTexts) > 0 {
componentTexts := strings.Join(testDesc.ComponentTexts, " ")
config.UserAgent = fmt.Sprintf("%s -- %s", rest.DefaultKubernetesUserAgent(), componentTexts)
testDesc := ginkgo.CurrentSpecReport()
if len(testDesc.ContainerHierarchyTexts) > 0 {
testName := strings.Join(testDesc.ContainerHierarchyTexts, " ")
if len(testDesc.LeafNodeText) > 0 {
testName = testName + " " + testDesc.LeafNodeText
}
config.UserAgent = fmt.Sprintf("%s -- %s", restclient.DefaultKubernetesUserAgent(), testName)
}
}
}()
@ -774,8 +774,6 @@ func (f *Framework) testContainerOutputMatcher(scenarioName string,
type ContainerType int

const (
// FeatureEphemeralContainers allows running an ephemeral container in pod namespaces to troubleshoot a running pod
FeatureEphemeralContainers featuregate.Feature = "EphemeralContainers"
// Containers is for normal containers
Containers ContainerType = 1 << iota
// InitContainers is for init containers
@ -788,11 +786,7 @@ const (
// types except for the ones guarded by feature gate.
// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
func allFeatureEnabledContainers() ContainerType {
containerType := AllContainers
if !utilfeature.DefaultFeatureGate.Enabled(FeatureEphemeralContainers) {
containerType &= ^EphemeralContainers
}
return containerType
return AllContainers
}

// ContainerVisitor is called with each container spec, and returns true
@ -1357,7 +1351,7 @@ func PrettyPrintJSON(metrics interface{}) string {
Logf("Error indenting: %v", err)
return ""
}
return string(formatted.Bytes())
return formatted.String()
}

// taintExists checks if the given taint exists in list of taints. Returns true if exists false otherwise.
@ -1372,18 +1366,22 @@ func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool {

// WatchEventSequenceVerifier ...
// manages a watch for a given resource, ensures that events take place in a given order, retries the test on failure
// testContext cancelation signal across API boundries, e.g: context.TODO()
// dc sets up a client to the API
// resourceType specify the type of resource
// namespace select a namespace
// resourceName the name of the given resource
// listOptions options used to find the resource, recommended to use listOptions.labelSelector
// expectedWatchEvents array of events which are expected to occur
// scenario the test itself
// retryCleanup a function to run which ensures that there are no dangling resources upon test failure
//
// testContext cancelation signal across API boundries, e.g: context.TODO()
// dc sets up a client to the API
// resourceType specify the type of resource
// namespace select a namespace
// resourceName the name of the given resource
// listOptions options used to find the resource, recommended to use listOptions.labelSelector
// expectedWatchEvents array of events which are expected to occur
// scenario the test itself
// retryCleanup a function to run which ensures that there are no dangling resources upon test failure
//
// this tooling relies on the test to return the events as they occur
// the entire scenario must be run to ensure that the desired watch events arrive in order (allowing for interweaving of watch events)
// if an expected watch event is missing we elect to clean up and run the entire scenario again
//
// if an expected watch event is missing we elect to clean up and run the entire scenario again
//
// we try the scenario three times to allow the sequencing to fail a couple of times
func WatchEventSequenceVerifier(ctx context.Context, dc dynamic.Interface, resourceType schema.GroupVersionResource, namespace string, resourceName string, listOptions metav1.ListOptions, expectedWatchEvents []watch.Event, scenario func(*watchtools.RetryWatcher) []watch.Event, retryCleanup func() error) {
listWatcher := &cache.ListWatch{
@ -1422,7 +1420,7 @@ retriesLoop:
break actualWatchEventsLoop
}
}
if foundExpectedWatchEvent == false {
if !foundExpectedWatchEvent {
errs.Insert(fmt.Sprintf("Watch event %v not found", expectedWatchEvent.Type))
}
totalValidWatchEvents++
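Aside: the CurrentGinkgoTestDescription to CurrentSpecReport migration above recurs throughout this commit; under Ginkgo v2 the spec name has to be reassembled from the container hierarchy plus the leaf text. A minimal sketch of that recipe, assuming it is called from inside a running spec (the helper name is illustrative):

import (
	"strings"

	"github.com/onsi/ginkgo/v2"
)

// specName rebuilds the human-readable test name that Ginkgo v1 exposed
// via CurrentGinkgoTestDescription(): the enclosing Describe/Context
// texts joined together, followed by the leaf It text.
func specName() string {
	report := ginkgo.CurrentSpecReport()
	name := strings.Join(report.ContainerHierarchyTexts, " ")
	if len(report.LeafNodeText) > 0 {
		name = name + " " + report.LeafNodeText
	}
	return name
}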
9
vendor/k8s.io/kubernetes/test/e2e/framework/volume/fixtures.go
generated
vendored
@ -60,7 +60,7 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"

"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
)

@ -606,15 +606,10 @@ func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64,
ec := &v1.EphemeralContainer{
EphemeralContainerCommon: v1.EphemeralContainerCommon(clientPod.Spec.Containers[0]),
}
ec.Resources = v1.ResourceRequirements{}
ec.Name = "volume-ephemeral-container"
err = f.PodClient().AddEphemeralContainerSync(clientPod, ec, timeouts.PodStart)
// The API server will return NotFound for the subresource when the feature is disabled
// BEGIN TODO: remove after EphemeralContainers feature gate is retired
if apierrors.IsNotFound(err) {
framework.Logf("Skipping ephemeral container re-test because feature is disabled (error: %q)", err)
return
}
// END TODO: remove after EphemeralContainers feature gate is retired
framework.ExpectNoError(err, "failed to add ephemeral container for re-test")
testVolumeContent(f, clientPod, ec.Name, fsGroup, fsType, tests)
}
4
vendor/k8s.io/kubernetes/test/e2e/storage/podlogs/podlogs.go
generated
vendored
@ -258,7 +258,9 @@ func CopyPodLogs(ctx context.Context, cs clientset.Interface, ns, podName string
// logsForPod starts reading the logs for a certain pod. If the pod has more than one
// container, opts.Container must be set. Reading stops when the context is done.
// The stream includes formatted error messages and ends with
// rpc error: code = Unknown desc = Error: No such container: 41a...
//
// rpc error: code = Unknown desc = Error: No such container: 41a...
//
// when the pod gets deleted while streaming.
func logsForPod(ctx context.Context, cs clientset.Interface, ns, pod string, opts *v1.PodLogOptions) (io.ReadCloser, error) {
return cs.CoreV1().Pods(ns).GetLogs(pod, opts).Stream(ctx)
22
vendor/k8s.io/kubernetes/test/e2e/storage/utils/create.go
generated
vendored
@ -48,11 +48,11 @@ import (
// or be built into the binary.
//
// LoadFromManifests has some limitations:
// - aliases are not supported (i.e. use serviceAccountName instead of the deprecated serviceAccount,
// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podspec-v1-core)
// and silently ignored
// - the latest stable API version for each item is used, regardless of what
// is specified in the manifest files
// - aliases are not supported (i.e. use serviceAccountName instead of the deprecated serviceAccount,
// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podspec-v1-core)
// and silently ignored
// - the latest stable API version for each item is used, regardless of what
// is specified in the manifest files
func LoadFromManifests(files ...string) ([]interface{}, error) {
var items []interface{}
err := visitManifests(func(data []byte) error {
@ -115,11 +115,11 @@ func visitManifests(cb func([]byte) error, files ...string) error {
// PatchItems has some limitations:
// - only some common items are supported, unknown ones trigger an error
// - only the latest stable API version for each item is supported
func PatchItems(f *framework.Framework, driverNamspace *v1.Namespace, items ...interface{}) error {
func PatchItems(f *framework.Framework, driverNamespace *v1.Namespace, items ...interface{}) error {
for _, item := range items {
// Uncomment when debugging the loading and patching of items.
// Logf("patching original content of %T:\n%s", item, PrettyPrint(item))
if err := patchItemRecursively(f, driverNamspace, item); err != nil {
if err := patchItemRecursively(f, driverNamespace, item); err != nil {
return err
}
}
@ -132,10 +132,10 @@ func PatchItems(f *framework.Framework, driverNamspace *v1.Namespace, items ...i
// It returns either a cleanup function or an error, but never both.
//
// Cleaning up after a test can be triggered in two ways:
// - the test invokes the returned cleanup function,
// usually in an AfterEach
// - the test suite terminates, potentially after
// skipping the test's AfterEach (https://github.com/onsi/ginkgo/issues/222)
// - the test invokes the returned cleanup function,
// usually in an AfterEach
// - the test suite terminates, potentially after
// skipping the test's AfterEach (https://github.com/onsi/ginkgo/issues/222)
//
// PatchItems has the some limitations as LoadFromManifests:
// - only some common items are supported, unknown ones trigger an error
21
vendor/k8s.io/kubernetes/test/e2e/storage/utils/deployment.go
generated
vendored
@ -23,7 +23,6 @@ import (
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2eframework "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
@ -34,22 +33,22 @@ import (
//
// All of that is optional, see PatchCSIOptions. Just beware
// that not renaming the CSI driver deployment can be problematic:
// - when multiple tests deploy the driver, they need
// to run sequentially
// - might conflict with manual deployments
// - when multiple tests deploy the driver, they need
// to run sequentially
// - might conflict with manual deployments
//
// This function is written so that it works for CSI driver deployments
// that follow these conventions:
// - driver and provisioner names are identical
// - the driver binary accepts a --drivername parameter
// - the paths inside the container are either fixed
// and don't need to be patch (for example, --csi-address=/csi/csi.sock is
// okay) or are specified directly in a parameter (for example,
// --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock)
// - driver and provisioner names are identical
// - the driver binary accepts a --drivername parameter
// - the paths inside the container are either fixed
// and don't need to be patch (for example, --csi-address=/csi/csi.sock is
// okay) or are specified directly in a parameter (for example,
// --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock)
//
// Driver deployments that are different will have to do the patching
// without this function, or skip patching entirely.
func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interface{}) error {
func PatchCSIDeployment(f *e2eframework.Framework, o PatchCSIOptions, object interface{}) error {
rename := o.OldDriverName != "" && o.NewDriverName != "" &&
o.OldDriverName != o.NewDriverName
2
vendor/k8s.io/kubernetes/test/e2e/storage/utils/framework.go
generated
vendored
@ -16,7 +16,7 @@ limitations under the License.

package utils

import "github.com/onsi/ginkgo"
import "github.com/onsi/ginkgo/v2"

// SIGDescribe annotates the test with the SIG label.
func SIGDescribe(text string, body func()) bool {
2
vendor/k8s.io/kubernetes/test/e2e/storage/utils/local.go
generated
vendored
@ -25,7 +25,7 @@ import (
"path/filepath"
"strings"

"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/v2"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
17
vendor/k8s.io/kubernetes/test/e2e/storage/utils/pod.go
generated
vendored
@ -26,7 +26,7 @@ import (
"strings"
"time"

"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -49,7 +49,7 @@ func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func()

ns := driverNamespace.Name

podEventLog := ginkgo.GinkgoWriter
var podEventLog io.Writer = ginkgo.GinkgoWriter
var podEventLogCloser io.Closer
to := podlogs.LogOutput{
StatusWriter: ginkgo.GinkgoWriter,
@ -57,14 +57,17 @@ func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func()
if framework.TestContext.ReportDir == "" {
to.LogWriter = ginkgo.GinkgoWriter
} else {
test := ginkgo.CurrentGinkgoTestDescription()
test := ginkgo.CurrentSpecReport()
// Clean up each individual component text such that
// it contains only characters that are valid as file
// name.
reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
var components []string
for _, component := range test.ComponentTexts {
components = append(components, reg.ReplaceAllString(component, "_"))
var testName []string
for _, text := range test.ContainerHierarchyTexts {
testName = append(testName, reg.ReplaceAllString(text, "_"))
if len(test.LeafNodeText) > 0 {
testName = append(testName, reg.ReplaceAllString(test.LeafNodeText, "_"))
}
}
// We end the prefix with a slash to ensure that all logs
// end up in a directory named after the current test.
@ -74,7 +77,7 @@ func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func()
// keeps each directory name smaller (the full test
// name at one point exceeded 256 characters, which was
// too much for some filesystems).
logDir := framework.TestContext.ReportDir + "/" + strings.Join(components, "/")
logDir := framework.TestContext.ReportDir + "/" + strings.Join(testName, "/")
to.LogPathPrefix = logDir + "/"

err := os.MkdirAll(logDir, 0755)
2
vendor/k8s.io/kubernetes/test/e2e/storage/utils/snapshot.go
generated
vendored
@ -21,7 +21,7 @@ import (
"fmt"
"time"

"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/v2"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
57
vendor/k8s.io/kubernetes/test/e2e/storage/utils/utils.go
generated
vendored
@ -28,18 +28,16 @@ import (
"strings"
"time"

"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"

v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
@ -65,11 +63,6 @@ const (
maxValidSize string = "10Ei"
)

const (
// ClusterRole name for e2e test Priveledged Pod Security Policy User
podSecurityPolicyPrivilegedClusterRoleName = "e2e-test-privileged-psp"
)

// VerifyFSGroupInPod verifies that the passed in filePath contains the expectedFSGroup
func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string, pod *v1.Pod) {
cmd := fmt.Sprintf("ls -l %s", filePath)
@ -417,54 +410,6 @@ func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginNa
return pod
}

// PrivilegedTestPSPClusterRoleBinding test Pod Security Policy Role bindings
func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface,
namespace string,
teardown bool,
saNames []string) {
bindingString := "Binding"
if teardown {
bindingString = "Unbinding"
}
roleBindingClient := client.RbacV1().RoleBindings(namespace)
for _, saName := range saNames {
ginkgo.By(fmt.Sprintf("%v priviledged Pod Security Policy to the service account %s", bindingString, saName))
binding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "psp-" + saName,
Namespace: namespace,
},
Subjects: []rbacv1.Subject{
{
Kind: rbacv1.ServiceAccountKind,
Name: saName,
Namespace: namespace,
},
},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: podSecurityPolicyPrivilegedClusterRoleName,
APIGroup: "rbac.authorization.k8s.io",
},
}

roleBindingClient.Delete(context.TODO(), binding.GetName(), metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) {
_, err := roleBindingClient.Get(context.TODO(), binding.GetName(), metav1.GetOptions{})
return apierrors.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for RBAC binding %s deletion: %v", binding.GetName(), err)

if teardown {
continue
}

_, err = roleBindingClient.Create(context.TODO(), binding, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)

}
}

func isSudoPresent(nodeIP string, provider string) bool {
framework.Logf("Checking if sudo command is present")
sshResult, err := e2essh.SSH("sudo --version", nodeIP, provider)
2
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/cluster-dns/dns-backend-rc.yaml
generated
vendored
@ -15,7 +15,7 @@ spec:
spec:
containers:
- name: dns-backend
image: k8s.gcr.io/example-dns-backend:v1
image: registry.k8s.io/example-dns-backend:v1
ports:
- name: backend-port
containerPort: 8000
@ -7,7 +7,7 @@ metadata:
spec:
containers:
- name: dns-frontend
image: k8s.gcr.io/example-dns-frontend:v1
image: registry.k8s.io/example-dns-frontend:v1
command:
- python
- client.py
4
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/kubectl/agnhost-primary-pod.yaml
generated
vendored
@ -8,7 +8,7 @@ metadata:
spec:
containers:
- name: primary
image: k8s.gcr.io/e2e-test-images/agnhost:2.32
image: registry.k8s.io/e2e-test-images/agnhost:2.32
env:
- name: PRIMARY
value: "true"
@ -21,7 +21,7 @@ spec:
- mountPath: /agnhost-primary-data
name: data
- name: sentinel
image: k8s.gcr.io/e2e-test-images/agnhost:2.32
image: registry.k8s.io/e2e-test-images/agnhost:2.32
env:
- name: SENTINEL
value: "true"
2
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/sample-device-plugin.yaml
generated
vendored
@ -32,7 +32,7 @@ spec:
hostPath:
path: /dev
containers:
- image: k8s.gcr.io/e2e-test-images/sample-device-plugin:1.3
- image: registry.k8s.io/e2e-test-images/sample-device-plugin:1.3
name: sample-device-plugin
env:
- name: PLUGIN_SOCK_DIR
@ -81,6 +81,6 @@ spec:
- name: root-mount
mountPath: /root
containers:
- image: "k8s.gcr.io/pause:3.7"
- image: "registry.k8s.io/pause:3.8"
name: pause
@ -14,7 +14,7 @@ spec:
spec:
containers:
- name: test-server
image: k8s.gcr.io/cassandra-e2e-test:0.1
image: registry.k8s.io/cassandra-e2e-test:0.1
imagePullPolicy: Always
ports:
- containerPort: 8080
@ -18,7 +18,7 @@ spec:
spec:
containers:
- name: etcd
image: k8s.gcr.io/etcd:3.2.24
image: registry.k8s.io/etcd:3.2.24
imagePullPolicy: Always
ports:
- containerPort: 2380
2
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/etcd/tester.yaml
generated
vendored
@ -14,7 +14,7 @@ spec:
spec:
containers:
- name: test-server
image: k8s.gcr.io/etcd-statefulset-e2e-test:0.0
image: registry.k8s.io/etcd-statefulset-e2e-test:0.0
imagePullPolicy: Always
ports:
- containerPort: 8080
@ -15,7 +15,7 @@ spec:
spec:
initContainers:
- name: install
image: k8s.gcr.io/galera-install:0.1
image: registry.k8s.io/galera-install:0.1
imagePullPolicy: Always
args:
- "--work-dir=/work-dir"
@ -44,7 +44,7 @@ spec:
mountPath: "/etc/mysql"
containers:
- name: mysql
image: k8s.gcr.io/mysql-galera:e2e
image: registry.k8s.io/mysql-galera:e2e
ports:
- containerPort: 3306
name: mysql
@ -58,7 +58,7 @@ spec:
- --defaults-file=/etc/mysql/my-galera.cnf
- --user=root
readinessProbe:
# TODO: If docker exec is buggy just use k8s.gcr.io/mysql-healthz:1.0
# TODO: If docker exec is buggy just use registry.k8s.io/mysql-healthz:1.0
exec:
command:
- sh
@ -14,7 +14,7 @@ spec:
spec:
containers:
- name: test-server
image: k8s.gcr.io/mysql-e2e-test:0.1
image: registry.k8s.io/mysql-e2e-test:0.1
imagePullPolicy: Always
ports:
- containerPort: 8080
@ -15,7 +15,7 @@ spec:
spec:
initContainers:
- name: install
image: k8s.gcr.io/e2e-test-images/pets/redis-installer:1.5
image: registry.k8s.io/e2e-test-images/pets/redis-installer:1.5
imagePullPolicy: Always
args:
- "--install-into=/opt"
@ -15,7 +15,7 @@ spec:
spec:
initContainers:
- name: install
image: k8s.gcr.io/e2e-test-images/pets/zookeeper-installer:1.5
image: registry.k8s.io/e2e-test-images/pets/zookeeper-installer:1.5
imagePullPolicy: Always
args:
- "--install-into=/opt"
@ -56,11 +56,11 @@ spec:
serviceAccount: hello-account
containers:
- name: hello
image: k8s.gcr.io/sig-storage/hello-populator:v1.0.1
image: registry.k8s.io/sig-storage/hello-populator:v1.0.1
imagePullPolicy: IfNotPresent
args:
- --mode=controller
- --image-name=k8s.gcr.io/sig-storage/hello-populator:v1.0.1
- --image-name=registry.k8s.io/sig-storage/hello-populator:v1.0.1
- --http-endpoint=:8080
ports:
- containerPort: 8080
@ -17,7 +17,7 @@ spec:
serviceAccount: volume-data-source-validator
containers:
- name: volume-data-source-validator
image: k8s.gcr.io/sig-storage/volume-data-source-validator:v1.0.0
image: registry.k8s.io/sig-storage/volume-data-source-validator:v1.0.0
args:
- "--v=5"
- "--leader-election=false"
@ -1,5 +1,5 @@
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-attacher/raw/v3.3.0/deploy/kubernetes//rbac.yaml
# for csi-driver-host-path v1.7.3
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-attacher/raw/v3.4.0/deploy/kubernetes//rbac.yaml
# for csi-driver-host-path v1.8.0
# by ./update-hostpath.sh
#
# This YAML file contains all RBAC objects that are necessary to run external
@ -1,5 +1,5 @@
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-health-monitor/raw/v0.4.0/deploy/kubernetes/external-health-monitor-controller/rbac.yaml
# for csi-driver-host-path v1.7.3
# for csi-driver-host-path v1.8.0
# by ./update-hostpath.sh
#
# This YAML file contains all RBAC objects that are necessary to run external
@ -1,5 +1,5 @@
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-provisioner/raw/v3.0.0/deploy/kubernetes//rbac.yaml
# for csi-driver-host-path v1.7.3
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-provisioner/raw/v3.1.0/deploy/kubernetes//rbac.yaml
# for csi-driver-host-path v1.8.0
# by ./update-hostpath.sh
#
# This YAML file contains all RBAC objects that are necessary to run external
@ -1,5 +1,5 @@
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-resizer/raw/v1.3.0/deploy/kubernetes//rbac.yaml
# for csi-driver-host-path v1.7.3
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-resizer/raw/v1.4.0/deploy/kubernetes//rbac.yaml
# for csi-driver-host-path v1.8.0
# by ./update-hostpath.sh
#
# This YAML file contains all RBAC objects that are necessary to run external
@ -1,5 +1,5 @@
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-snapshotter/raw/v5.0.0-rc1/deploy/kubernetes/csi-snapshotter/rbac-csi-snapshotter.yaml
# for csi-driver-host-path master
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-snapshotter/raw/v5.0.1/deploy/kubernetes/csi-snapshotter/rbac-csi-snapshotter.yaml
# for csi-driver-host-path v1.8.0
# by ./update-hostpath.sh
#
# Together with the RBAC file for external-provisioner, this YAML file
@ -21,7 +21,7 @@ spec:
serviceAccountName: csi-gce-pd-controller-sa
containers:
- name: csi-snapshotter
image: k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.3
image: registry.k8s.io/sig-storage/csi-snapshotter:v5.0.1
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
@ -39,7 +39,7 @@ spec:
- name: socket-dir
mountPath: /csi
- name: csi-provisioner
image: k8s.gcr.io/sig-storage/csi-provisioner:v2.1.0
image: registry.k8s.io/sig-storage/csi-provisioner:v3.1.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
@ -73,7 +73,7 @@ spec:
- name: socket-dir
mountPath: /csi
- name: csi-attacher
image: k8s.gcr.io/sig-storage/csi-attacher:v3.1.0
image: registry.k8s.io/sig-storage/csi-attacher:v3.4.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
@ -102,7 +102,7 @@ spec:
- name: socket-dir
mountPath: /csi
- name: csi-resizer
image: k8s.gcr.io/sig-storage/csi-resizer:v1.1.0
image: registry.k8s.io/sig-storage/csi-resizer:v1.4.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
@ -131,7 +131,7 @@ spec:
- name: socket-dir
mountPath: /csi
- name: gce-pd-driver
image: k8s.gcr.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.2.2
image: registry.k8s.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.4.0
args:
- "--v=5"
- "--endpoint=unix:/csi/csi.sock"
4
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml
generated
vendored
@ -13,7 +13,7 @@ spec:
spec:
containers:
- name: csi-driver-registrar
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.1.0
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.1.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
@ -48,7 +48,7 @@ spec:
- name: gce-pd-driver
securityContext:
privileged: true
image: k8s.gcr.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.2.2
image: registry.k8s.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.2.2
args:
- "--v=5"
- "--endpoint=unix:/csi/csi.sock"
2
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/README.md
generated
vendored
@ -1,4 +1,4 @@
The files in this directory are exact copys of "kubernetes-latest" in
https://github.com/kubernetes-csi/csi-driver-host-path/tree/v1.7.3/deploy/
https://github.com/kubernetes-csi/csi-driver-host-path/tree/v1.8.0/deploy/

Do not edit manually. Run ./update-hostpath.sh to refresh the content.
@ -218,7 +218,7 @@ spec:
serviceAccountName: csi-hostpathplugin-sa
containers:
- name: hostpath
image: k8s.gcr.io/sig-storage/hostpathplugin:v1.7.3
image: registry.k8s.io/sig-storage/hostpathplugin:v1.7.3
args:
- "--drivername=hostpath.csi.k8s.io"
- "--v=5"
@ -261,7 +261,7 @@ spec:
name: dev-dir

- name: csi-external-health-monitor-controller
image: k8s.gcr.io/sig-storage/csi-external-health-monitor-controller:v0.4.0
image: registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.4.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
@ -275,7 +275,7 @@ spec:
mountPath: /csi

- name: node-driver-registrar
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.0
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -303,13 +303,13 @@ spec:
volumeMounts:
- mountPath: /csi
name: socket-dir
image: k8s.gcr.io/sig-storage/livenessprobe:v2.4.0
image: registry.k8s.io/sig-storage/livenessprobe:v2.6.0
args:
- --csi-address=/csi/csi.sock
- --health-port=9898

- name: csi-attacher
image: k8s.gcr.io/sig-storage/csi-attacher:v3.3.0
image: registry.k8s.io/sig-storage/csi-attacher:v3.4.0
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -323,7 +323,7 @@ spec:
name: socket-dir

- name: csi-provisioner
image: k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0
image: registry.k8s.io/sig-storage/csi-provisioner:v3.1.0
args:
- -v=5
- --csi-address=/csi/csi.sock
@ -338,7 +338,7 @@ spec:
name: socket-dir

- name: csi-resizer
image: k8s.gcr.io/sig-storage/csi-resizer:v1.3.0
image: registry.k8s.io/sig-storage/csi-resizer:v1.4.0
args:
- -v=5
- -csi-address=/csi/csi.sock
@ -352,7 +352,7 @@ spec:
name: socket-dir

- name: csi-snapshotter
image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.1
image: registry.k8s.io/sig-storage/csi-snapshotter:v5.0.1
args:
- -v=5
- --csi-address=/csi/csi.sock
@ -64,7 +64,7 @@ spec:
topologyKey: kubernetes.io/hostname
containers:
- name: socat
image: alpine/socat:1.0.3
image: docker.io/alpine/socat:1.7.4.3-r0
args:
- tcp-listen:10000,fork,reuseaddr
- unix-connect:/csi/csi.sock
@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-attacher
image: k8s.gcr.io/sig-storage/csi-attacher:v3.3.0
image: registry.k8s.io/sig-storage/csi-attacher:v3.3.0
args:
- --v=5
- --csi-address=$(ADDRESS)
@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-resizer
image: k8s.gcr.io/sig-storage/csi-resizer:v1.3.0
image: registry.k8s.io/sig-storage/csi-resizer:v1.3.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-snapshotter
image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.1
image: registry.k8s.io/sig-storage/csi-snapshotter:v4.2.1
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-provisioner
image: k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0
image: registry.k8s.io/sig-storage/csi-provisioner:v3.0.0
args:
- "--csi-address=$(ADDRESS)"
# Topology support is needed for the pod rescheduling test
@ -34,7 +34,7 @@ spec:
- mountPath: /csi
name: socket-dir
- name: driver-registrar
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.3.0
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -53,7 +53,7 @@ spec:
- mountPath: /registration
name: registration-dir
- name: mock
image: k8s.gcr.io/sig-storage/hostpathplugin:v1.7.3
image: registry.k8s.io/sig-storage/hostpathplugin:v1.7.3
args:
- "--drivername=mock.storage.k8s.io"
- "--nodeid=$(KUBE_NODE_NAME)"
@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-provisioner
image: k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0
image: registry.k8s.io/sig-storage/csi-provisioner:v3.0.0
args:
- "--csi-address=$(ADDRESS)"
# Topology support is needed for the pod rescheduling test
@ -35,7 +35,7 @@ spec:
- mountPath: /csi
name: socket-dir
- name: driver-registrar
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.3.0
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -53,7 +53,7 @@ spec:
- mountPath: /registration
name: registration-dir
- name: mock
image: k8s.gcr.io/sig-storage/hostpathplugin:v1.7.3
image: registry.k8s.io/sig-storage/hostpathplugin:v1.7.3
args:
- -v=5
- -nodeid=$(KUBE_NODE_NAME)
@ -77,7 +77,7 @@ spec:
# test for directories or create them. It needs additional privileges
# for that.
- name: busybox
image: k8s.gcr.io/e2e-test-images/busybox:1.29-1
image: registry.k8s.io/e2e-test-images/busybox:1.29-2
securityContext:
privileged: true
command:
3
vendor/k8s.io/kubernetes/test/utils/admission_webhook.go
generated
vendored
@ -86,7 +86,8 @@ func AdmissionWebhookHandler(t *testing.T, admit func(*v1beta1.AdmissionReview)
}

// LocalhostCert was generated from crypto/tls/generate_cert.go with the following command:
// go run generate_cert.go --rsa-bits 2048 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h
//
// go run generate_cert.go --rsa-bits 2048 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h
var LocalhostCert = []byte(`-----BEGIN CERTIFICATE-----
MIIDGDCCAgCgAwIBAgIQTKCKn99d5HhQVCLln2Q+eTANBgkqhkiG9w0BAQsFADAS
MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw
12
vendor/k8s.io/kubernetes/test/utils/image/csi_manifest.go
generated
vendored
@ -24,20 +24,20 @@ import (
"strings"

"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/kubernetes/test/e2e/testing-manifests"
e2etestingmanifests "k8s.io/kubernetes/test/e2e/testing-manifests"
)

// All of the image tags are of the format k8s.gcr.io/sig-storage/hostpathplugin:v1.7.3.
// All of the image tags are of the format registry.k8s.io/sig-storage/hostpathplugin:v1.7.3.
var imageRE = regexp.MustCompile(`^(.*)/([^/:]*):(.*)$`)

// appendCSIImageConfigs extracts image repo, name and version from
// the YAML files under test/e2e/testing-manifests/storage-csi and
// creates new config entries for them.
func appendCSIImageConfigs(configs map[int]Config) {
embeddedFS := testing_manifests.GetE2ETestingManifestsFS().EmbeddedFS
func appendCSIImageConfigs(configs map[ImageID]Config) {
embeddedFS := e2etestingmanifests.GetE2ETestingManifestsFS().EmbeddedFS

// We add our images with index numbers that start after the highest existing number.
index := 0
// We add our images with ImageID numbers that start after the highest existing number.
index := ImageID(0)
for i := range configs {
if i > index {
index = i
71
vendor/k8s.io/kubernetes/test/utils/image/manifest.go
generated
vendored
@ -123,16 +123,16 @@ func readFromURL(url string, writer io.Writer) error {
var (
initRegistry = RegistryList{
GcAuthenticatedRegistry: "gcr.io/authenticated-image-pulling",
PromoterE2eRegistry: "k8s.gcr.io/e2e-test-images",
BuildImageRegistry: "k8s.gcr.io/build-image",
PromoterE2eRegistry: "registry.k8s.io/e2e-test-images",
BuildImageRegistry: "registry.k8s.io/build-image",
InvalidRegistry: "invalid.registry.k8s.io/invalid",
GcEtcdRegistry: "k8s.gcr.io",
GcRegistry: "k8s.gcr.io",
SigStorageRegistry: "k8s.gcr.io/sig-storage",
GcEtcdRegistry: "registry.k8s.io",
GcRegistry: "registry.k8s.io",
SigStorageRegistry: "registry.k8s.io/sig-storage",
PrivateRegistry: "gcr.io/k8s-authenticated-test",
MicrosoftRegistry: "mcr.microsoft.com",
DockerLibraryRegistry: "docker.io/library",
CloudProviderGcpRegistry: "k8s.gcr.io/cloud-provider-gcp",
CloudProviderGcpRegistry: "registry.k8s.io/cloud-provider-gcp",
}

registry = initReg()
@ -141,9 +141,11 @@ var (
imageConfigs, originalImageConfigs = initImageConfigs(registry)
)

type ImageID int

const (
// None is to be used for unset/default images
None = iota
None ImageID = iota
// Agnhost image
Agnhost
// AgnhostPrivate image
@ -162,10 +164,8 @@ const (
CudaVectorAdd
// CudaVectorAdd2 image
CudaVectorAdd2
// DebianIptables Image
DebianIptables
// EchoServer image
EchoServer
// DistrolessIptables Image
DistrolessIptables
// Etcd image
Etcd
// GlusterDynamicProvisioner image
@ -229,9 +229,9 @@ const (
WindowsServer
)

func initImageConfigs(list RegistryList) (map[int]Config, map[int]Config) {
configs := map[int]Config{}
configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.39"}
func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config) {
configs := map[ImageID]Config{}
configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.40"}
configs[AgnhostPrivate] = Config{list.PrivateRegistry, "agnhost", "2.6"}
configs[AuthenticatedAlpine] = Config{list.GcAuthenticatedRegistry, "alpine", "3.7"}
configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"}
@ -240,9 +240,8 @@ func initImageConfigs(list RegistryList) (map[int]Config, map[int]Config) {
configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.29-2"}
configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"}
configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.2"}
configs[DebianIptables] = Config{list.BuildImageRegistry, "debian-iptables", "bullseye-v1.3.0"}
configs[EchoServer] = Config{list.PromoterE2eRegistry, "echoserver", "2.4"}
configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.3-0"}
configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.1.1"}
configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.4-0"}
configs[GlusterDynamicProvisioner] = Config{list.PromoterE2eRegistry, "glusterdynamic-provisioner", "v1.3"}
configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-2"}
configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-2"}
@ -256,11 +255,11 @@ func initImageConfigs(list RegistryList) (map[int]Config, map[int]Config) {
configs[NginxNew] = Config{list.PromoterE2eRegistry, "nginx", "1.15-2"}
configs[NodePerfNpbEp] = Config{list.PromoterE2eRegistry, "node-perf/npb-ep", "1.2"}
configs[NodePerfNpbIs] = Config{list.PromoterE2eRegistry, "node-perf/npb-is", "1.2"}
configs[NodePerfTfWideDeep] = Config{list.PromoterE2eRegistry, "node-perf/tf-wide-deep", "1.1"}
configs[NodePerfTfWideDeep] = Config{list.PromoterE2eRegistry, "node-perf/tf-wide-deep", "1.2"}
configs[Nonewprivs] = Config{list.PromoterE2eRegistry, "nonewprivs", "1.3"}
configs[NonRoot] = Config{list.PromoterE2eRegistry, "nonroot", "1.2"}
// Pause - when these values are updated, also update cmd/kubelet/app/options/container_runtime.go
configs[Pause] = Config{list.GcRegistry, "pause", "3.7"}
configs[Pause] = Config{list.GcRegistry, "pause", "3.8"}
configs[Perl] = Config{list.PromoterE2eRegistry, "perl", "5.26"}
configs[PrometheusDummyExporter] = Config{list.GcRegistry, "prometheus-dummy-exporter", "v0.1.0"}
configs[PrometheusToSd] = Config{list.GcRegistry, "prometheus-to-sd", "v0.5.0"}
@ -274,7 +273,7 @@ func initImageConfigs(list RegistryList) (map[int]Config, map[int]Config) {
configs[VolumeRBDServer] = Config{list.PromoterE2eRegistry, "volume/rbd", "1.0.4"}
configs[WindowsServer] = Config{list.MicrosoftRegistry, "windows", "1809"}

// This adds more config entries. Those have no pre-defined index number,
// This adds more config entries. Those have no pre-defined ImageID number,
// but will be used via ReplaceRegistryInImageURL when deploying
// CSI drivers (test/e2e/storage/util/create.go).
appendCSIImageConfigs(configs)
@ -290,8 +289,8 @@ func initImageConfigs(list RegistryList) (map[int]Config, map[int]Config) {

// GetMappedImageConfigs returns the images if they were mapped to the provided
// image repository.
func GetMappedImageConfigs(originalImageConfigs map[int]Config, repo string) map[int]Config {
configs := make(map[int]Config)
func GetMappedImageConfigs(originalImageConfigs map[ImageID]Config, repo string) map[ImageID]Config {
configs := make(map[ImageID]Config)
for i, config := range originalImageConfigs {
switch i {
case InvalidRegistryImage, AuthenticatedAlpine,
@ -303,7 +302,7 @@ func GetMappedImageConfigs(originalImageConfigs map[int]Config, repo string) map
continue
}

// Build a new tag with a the index, a hash of the image spec (to be unique) and
// Build a new tag with the ImageID, a hash of the image spec (to be unique) and
// shorten and make the pull spec "safe" so it will fit in the tag
configs[i] = getRepositoryMappedConfig(i, config, repo)
}
@ -316,11 +315,11 @@ var (
)

// getRepositoryMappedConfig maps an existing image to the provided repo, generating a
// tag that is unique with the input config. The tag will contain the index, a hash of
// tag that is unique with the input config. The tag will contain the imageID, a hash of
// the image spec (to be unique) and shorten and make the pull spec "safe" so it will
// fit in the tag to allow a human to recognize the value. If index is -1, then no
// index will be added to the tag.
func getRepositoryMappedConfig(index int, config Config, repo string) Config {
// fit in the tag to allow a human to recognize the value. If imageID is None, then no
// imageID will be added to the tag.
func getRepositoryMappedConfig(imageID ImageID, config Config, repo string) Config {
parts := strings.SplitN(repo, "/", 2)
registry, name := parts[0], parts[1]

@ -337,10 +336,10 @@ func getRepositoryMappedConfig(index int, config Config, repo string) Config {
shortName = shortName[len(shortName)-maxLength:]
}
var version string
if index == -1 {
if imageID == None {
version = fmt.Sprintf("e2e-%s-%s", shortName, hash)
} else {
version = fmt.Sprintf("e2e-%d-%s-%s", index, shortName, hash)
version = fmt.Sprintf("e2e-%d-%s-%s", imageID, shortName, hash)
}

return Config{
@ -351,22 +350,22 @@ func getRepositoryMappedConfig(index int, config Config, repo string) Config {
}

// GetOriginalImageConfigs returns the configuration before any mapping rules.
func GetOriginalImageConfigs() map[int]Config {
func GetOriginalImageConfigs() map[ImageID]Config {
return originalImageConfigs
}

// GetImageConfigs returns the map of imageConfigs
func GetImageConfigs() map[int]Config {
func GetImageConfigs() map[ImageID]Config {
return imageConfigs
}

// GetConfig returns the Config object for an image
func GetConfig(image int) Config {
func GetConfig(image ImageID) Config {
return imageConfigs[image]
}

// GetE2EImage returns the fully qualified URI to an image (including version)
func GetE2EImage(image int) string {
func GetE2EImage(image ImageID) string {
return fmt.Sprintf("%s/%s:%s", imageConfigs[image].registry, imageConfigs[image].name, imageConfigs[image].version)
}

@ -394,10 +393,10 @@ func replaceRegistryInImageURLWithList(imageURL string, reg RegistryList) (strin
registryAndUser := strings.Join(parts[:countParts-1], "/")

if repo := os.Getenv("KUBE_TEST_REPO"); len(repo) > 0 {
index := -1
imageID := None
for i, v := range originalImageConfigs {
if v.GetE2EImage() == imageURL {
index = i
imageID = i
break
}
}
@ -405,7 +404,7 @@ func replaceRegistryInImageURLWithList(imageURL string, reg RegistryList) (strin
if len(last) == 1 {
return "", fmt.Errorf("image %q is required to be in an image:tag format", imageURL)
}
config := getRepositoryMappedConfig(index, Config{
config := getRepositoryMappedConfig(imageID, Config{
registry: parts[0],
name: strings.Join([]string{strings.Join(parts[1:countParts-1], "/"), last[0]}, "/"),
version: last[1],
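Aside: with the map key tightened from a bare int to the ImageID type above, call sites keep the same shape; only the identifier type is stronger. A short usage sketch, assuming a test file that imports this package (the helper and pod name are illustrative, not from this commit):

import (
	v1 "k8s.io/api/core/v1"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

// pausePod builds a pod spec whose image is resolved through the typed
// ImageID lookup instead of a hard-coded registry path, so registry
// migrations like k8s.gcr.io -> registry.k8s.io happen in one place.
func pausePod() *v1.Pod {
	return &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:  "pause",
				Image: imageutils.GetE2EImage(imageutils.Pause), // e.g. registry.k8s.io/pause:3.8
			}},
		},
	}
}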
4
vendor/k8s.io/kubernetes/test/utils/runners.go
generated
vendored
@ -1319,7 +1319,7 @@ func MakePodSpec() v1.PodSpec {
return v1.PodSpec{
Containers: []v1.Container{{
Name: "pause",
Image: "k8s.gcr.io/pause:3.7",
Image: "registry.k8s.io/pause:3.8",
Ports: []v1.ContainerPort{{ContainerPort: 80}},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
@ -1741,7 +1741,7 @@ type DaemonConfig struct {

func (config *DaemonConfig) Run() error {
if config.Image == "" {
config.Image = "registry.k8s.io/pause:3.8"
}
nameLabel := map[string]string{
"name": config.Name + "-daemon",