rebase: update replaced k8s.io modules to v0.33.0
Signed-off-by: Niels de Vos <ndevos@ibm.com>
Committed by: mergify[bot]
Parent: dd77e72800
Commit: 107407b44b
e2e/vendor/k8s.io/kubernetes/test/e2e/framework/.import-restrictions (generated, vendored): 1 changed line

@@ -46,7 +46,6 @@ rules:
"github.com/google/gnostic-models/openapiv3",
"github.com/google/go-cmp/cmp",
"github.com/google/go-cmp/cmp/cmpopts",
"github.com/google/gofuzz",
"github.com/google/uuid",
"github.com/imdario/mergo",
"github.com/prometheus/client_golang/",
e2e/vendor/k8s.io/kubernetes/test/e2e/framework/OWNERS (generated, vendored): 3 changed lines

@@ -4,17 +4,16 @@ approvers:
- andrewsykim
- pohly
- oomichi
- neolit123
- SataQiu
reviewers:
- sig-testing-reviewers
- andrewsykim
- pohly
- oomichi
- neolit123
- SataQiu
labels:
- area/e2e-test-framework
emeritus_approvers:
- fabriziopandini
- timothysc
- neolit123
e2e/vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper.go (generated, vendored): 101 changed lines

@@ -41,11 +41,6 @@ type Feature string
// "Linux" or "Windows".
type Environment string

// NodeFeature is the name of a feature that a node must support. To be
// removed, see
// https://github.com/kubernetes/enhancements/tree/master/keps/sig-testing/3041-node-conformance-and-features#nodefeature.
type NodeFeature string

type Valid[T comparable] struct {
items sets.Set[T]
frozen bool

@@ -76,14 +71,13 @@ func (v *Valid[T]) Freeze() {
v.frozen = true
}

// These variables contain the parameters that [WithFeature], [WithEnvironment]
// and [WithNodeFeatures] accept. The framework itself has no pre-defined
// These variables contain the parameters that [WithFeature] and [WithEnvironment] accept.
// The framework itself has no pre-defined
// constants. Test suites and tests may define their own and then add them here
// before calling these With functions.
var (
ValidFeatures Valid[Feature]
ValidEnvironments Valid[Environment]
ValidNodeFeatures Valid[NodeFeature]
)

var errInterface = reflect.TypeOf((*error)(nil)).Elem()

@@ -97,7 +91,12 @@ func IgnoreNotFound(in any) any {
inType := reflect.TypeOf(in)
inValue := reflect.ValueOf(in)
return reflect.MakeFunc(inType, func(args []reflect.Value) []reflect.Value {
out := inValue.Call(args)
var out []reflect.Value
if inType.IsVariadic() {
out = inValue.CallSlice(args)
} else {
out = inValue.Call(args)
}
if len(out) > 0 {
lastValue := out[len(out)-1]
last := lastValue.Interface()

@@ -209,9 +208,18 @@ func registerInSuite(ginkgoCall func(string, ...interface{}) bool, args []interf
case label:
fullLabel := strings.Join(arg.parts, ":")
addLabel(fullLabel)
if arg.extraFeature != "" {
texts = append(texts, fmt.Sprintf("[%s]", arg.extraFeature))
ginkgoArgs = append(ginkgoArgs, ginkgo.Label("Feature:"+arg.extraFeature))
if arg.alphaBetaLevel != "" {
texts = append(texts, fmt.Sprintf("[%[1]s]", arg.alphaBetaLevel))
ginkgoArgs = append(ginkgoArgs, ginkgo.Label(arg.alphaBetaLevel))
}
if arg.offByDefault {
texts = append(texts, "[Feature:OffByDefault]")
ginkgoArgs = append(ginkgoArgs, ginkgo.Label("Feature:OffByDefault"))
// Alphas are always off by default but we may want to select
// betas based on defaulted-ness.
if arg.alphaBetaLevel == "Beta" {
ginkgoArgs = append(ginkgoArgs, ginkgo.Label("BetaOffByDefault"))
}
}
if fullLabel == "Serial" {
ginkgoArgs = append(ginkgoArgs, ginkgo.Serial)

@@ -306,6 +314,12 @@ func validateText(location types.CodeLocation, text string, labels []string) {
// Okay, was also set as label.
continue
}
// TODO: we currently only set this as a text value
// We should probably reflect it into labels, but that could break some
// existing jobs and we're still setting on an exact plan
if tag == "Feature:OffByDefault" {
continue
}
if deprecatedTags.Has(tag) {
recordTextBug(location, fmt.Sprintf("[%s] in plain text is deprecated and must be added through With%s instead", tag, tag))
}

@@ -351,7 +365,8 @@ func withFeature(name Feature) interface{} {
}

// WithFeatureGate specifies that a certain test or group of tests depends on a
// feature gate being enabled. The return value must be passed as additional
// feature gate and the corresponding API group (if there is one)
// being enabled. The return value must be passed as additional
// argument to [framework.It], [framework.Describe], [framework.Context].
//
// The feature gate must be listed in

@@ -360,9 +375,21 @@ func withFeature(name Feature) interface{} {
// also need to be removed.
//
// [Alpha] resp. [Beta] get added to the test name automatically depending
// on the current stability level of the feature. Feature:Alpha resp.
// Feature:Beta get added to the Ginkgo labels because this is a special
// requirement for how the cluster needs to be configured.
// on the current stability level of the feature, to emulate historic
// usage of those tags.
//
// For label filtering, Alpha resp. Beta get added to the Ginkgo labels.
//
// [Feature:OffByDefault] gets added to support skipping a test with
// a dependency on an alpha or beta feature gate in jobs which use the
// traditional \[Feature:.*\] skip regular expression.
//
// Feature:OffByDefault is also available for label filtering.
//
// BetaOffByDefault is also added *only as a label* when the feature gate is
// an off by default beta feature. This can be used to include/exclude based
// on beta + defaulted-ness. Alpha has no equivalent because all alphas are
// off by default.
//
// If the test can run in any cluster that has alpha resp. beta features and
// API groups enabled, then annotating it with just WithFeatureGate is

@@ -391,7 +418,8 @@ func withFeatureGate(featureGate featuregate.Feature) interface{} {
}

l := newLabel("FeatureGate", string(featureGate))
l.extraFeature = level
l.offByDefault = !spec.Default
l.alphaBetaLevel = level
return l
}

@@ -416,28 +444,6 @@ func withEnvironment(name Environment) interface{} {
return newLabel("Environment", string(name))
}

// WithNodeFeature specifies that a certain test or group of tests only works
// if the node supports a certain feature. The return value must be passed as
// additional argument to [framework.It], [framework.Describe],
// [framework.Context].
//
// The environment must be listed in ValidNodeFeatures.
func WithNodeFeature(name NodeFeature) interface{} {
return withNodeFeature(name)
}

// WithNodeFeature is a shorthand for the corresponding package function.
func (f *Framework) WithNodeFeature(name NodeFeature) interface{} {
return withNodeFeature(name)
}

func withNodeFeature(name NodeFeature) interface{} {
if !ValidNodeFeatures.items.Has(name) {
RecordBug(NewBug(fmt.Sprintf("WithNodeFeature: unknown environment %q", name), 2))
}
return newLabel("NodeFeature", string(name))
}

// WithConformace specifies that a certain test or group of tests must pass in
// all conformant Kubernetes clusters. The return value must be passed as
// additional argument to [framework.It], [framework.Describe],

@@ -559,13 +565,19 @@ func withFlaky() interface{} {
type label struct {
// parts get concatenated with ":" to build the full label.
parts []string
// extra is an optional feature name. It gets added as [<extraFeature>]
// to the test name and as Feature:<extraFeature> to the labels.
extraFeature string
// explanation gets set for each label to help developers
// who pass a label to a ginkgo function. They need to use
// the corresponding framework function instead.
explanation string

// TODO: the fields below are only used for FeatureGates, we may want to refactor

// alphaBetaLevel is "Alpha", "Beta" or empty for GA features
// It gets added as [<level>] [Feature:<level>]
// to the test name and as Feature:<level> to the labels.
alphaBetaLevel string
// set based on featuregate default state
offByDefault bool
}

func newLabel(parts ...string) label {

@@ -588,7 +600,10 @@ func TagsEqual(a, b interface{}) bool {
if !ok {
return false
}
if al.extraFeature != bl.extraFeature {
if al.alphaBetaLevel != bl.alphaBetaLevel {
return false
}
if al.offByDefault != bl.offByDefault {
return false
}
return slices.Equal(al.parts, bl.parts)
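For orientation (not part of the upstream diff), this is roughly how a test would consume the relabelled feature-gate helper described above. framework.It and framework.WithFeatureGate are the functions documented in the hunk; the gate name and test body below are placeholders, so treat this as a hedged sketch rather than the framework's prescribed usage.

    package sketch // illustrative only

    import (
    	"context"

    	"k8s.io/component-base/featuregate"
    	"k8s.io/kubernetes/test/e2e/framework"
    )

    // The gate name is hypothetical; WithFeatureGate expects the gate to be
    // registered in the test binary, as the doc comment above notes.
    var _ = framework.It("works while the gate is enabled",
    	framework.WithFeatureGate(featuregate.Feature("SomeBetaGate")),
    	func(ctx context.Context) {
    		// Test body. For an off-by-default beta gate the test name gains
    		// "[Beta] [Feature:OffByDefault]" and the labels Beta,
    		// Feature:OffByDefault and BetaOffByDefault, per the hunk above.
    	})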
e2e/vendor/k8s.io/kubernetes/test/e2e/framework/node/helper.go (generated, vendored): 13 changed lines

@@ -188,10 +188,15 @@ func AddExtendedResource(ctx context.Context, clientSet clientset.Interface, nod
extendedResourceList := v1.ResourceList{
extendedResource: extendedResourceQuantity,
}
patchPayload, err := json.Marshal(v1.Node{
Status: v1.NodeStatus{
Capacity: extendedResourceList,
Allocatable: extendedResourceList,

// This is a workaround for the fact that we shouldn't marshal a Node struct to JSON
// because it wipes out some fields from node status like the daemonEndpoints and
// nodeInfo which should not be changed at this time. We need to use a map instead.
// See https://github.com/kubernetes/kubernetes/issues/131229
patchPayload, err := json.Marshal(map[string]any{
"status": map[string]any{
"capacity": extendedResourceList,
"allocatable": extendedResourceList,
},
})
framework.ExpectNoError(err, "Failed to marshal node JSON")
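As a quick illustration of why the hunk above switches to a plain map (a standalone sketch, not framework code): marshalling only the map keeps the patch limited to the status fields being changed, instead of serializing a whole v1.Node. The extended resource name is made up.

    package main

    import (
    	"encoding/json"
    	"fmt"

    	v1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
    	// Hypothetical extended resource used only for this example.
    	list := v1.ResourceList{"example.com/dongle": resource.MustParse("2")}
    	payload, _ := json.Marshal(map[string]any{
    		"status": map[string]any{
    			"capacity":    list,
    			"allocatable": list,
    		},
    	})
    	// Prints only the fields being patched:
    	// {"status":{"allocatable":{"example.com/dongle":"2"},"capacity":{"example.com/dongle":"2"}}}
    	fmt.Println(string(payload))
    }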
e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/create.go (generated, vendored): 3 changed lines

@@ -25,6 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
)

@@ -176,7 +177,7 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
}

podName := "pod-" + string(uuid.NewUUID())
if podConfig.FsGroup == nil && !NodeOSDistroIs("windows") {
if podConfig.FsGroup == nil && !framework.NodeOSDistroIs("windows") {
podConfig.FsGroup = func(i int64) *int64 {
return &i
}(1000)
e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/dial.go (generated, vendored): 15 changed lines

@@ -36,6 +36,7 @@ import (
"k8s.io/client-go/tools/portforward"
"k8s.io/client-go/transport/spdy"
"k8s.io/klog/v2"
"k8s.io/kubernetes/test/e2e/framework"
)

// NewTransport creates a transport which uses the port forward dialer.

@@ -91,6 +92,20 @@ func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Con
}
dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL())

tunnelingDialer, err := portforward.NewSPDYOverWebsocketDialer(req.URL(), restConfig)
if err != nil {
return nil, err
}
// First attempt tunneling (websocket) dialer, then fallback to spdy dialer.
dialer = portforward.NewFallbackDialer(tunnelingDialer, dialer, func(err error) bool {
if httpstream.IsUpgradeFailure(err) || httpstream.IsHTTPSProxyError(err) {
framework.Logf("fallback to secondary dialer from primary dialer err: %v", err)
return true
}
framework.Logf("unexpected error trying to use websockets for portforward: %v", err)
return false
})

streamConn, _, err := dialer.Dial(portforward.PortForwardProtocolV1Name)
if err != nil {
return nil, fmt.Errorf("dialer failed: %w", err)
e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/exec_util.go (generated, vendored): 67 changed lines

@@ -19,15 +19,19 @@ package pod
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"net/url"
"strings"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/client-go/kubernetes/scheme"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
clientexec "k8s.io/client-go/util/exec"
"k8s.io/kubernetes/test/e2e/framework"

"github.com/onsi/gomega"

@@ -77,8 +81,8 @@ func ExecWithOptionsContext(ctx context.Context, f *framework.Framework, options
}, scheme.ParameterCodec)

var stdout, stderr bytes.Buffer
framework.Logf("ExecWithOptions: execute(POST %s)", req.URL())
err := execute(ctx, "POST", req.URL(), f.ClientConfig(), options.Stdin, &stdout, &stderr, tty)
framework.Logf("ExecWithOptions: execute(%s)", req.URL())
err := execute(ctx, req.URL(), f.ClientConfig(), options.Stdin, &stdout, &stderr, tty)

if options.PreserveWhitespace {
return stdout.String(), stderr.String(), err

@@ -141,11 +145,66 @@ func ExecShellInPodWithFullOutput(ctx context.Context, f *framework.Framework, p
return execCommandInPodWithFullOutput(ctx, f, podName, "/bin/sh", "-c", cmd)
}

func execute(ctx context.Context, method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error {
exec, err := remotecommand.NewSPDYExecutor(config, method, url)
// VerifyExecInPodSucceed verifies shell cmd in target pod succeed
func VerifyExecInPodSucceed(ctx context.Context, f *framework.Framework, pod *v1.Pod, shExec string) error {
stdout, stderr, err := ExecShellInPodWithFullOutput(ctx, f, pod.Name, shExec)
if err != nil {
var exitError clientexec.CodeExitError
if errors.As(err, &exitError) {
exitCode := exitError.ExitStatus()
return fmt.Errorf("%q should succeed, but failed with exit code %d and error message %w\nstdout: %s\nstderr: %s",
shExec, exitCode, exitError, stdout, stderr)
} else {
return fmt.Errorf("%q should succeed, but failed with error message %w\nstdout: %s\nstderr: %s",
shExec, err, stdout, stderr)
}
}
return nil
}

// VerifyExecInPodFail verifies shell cmd in target pod fail with certain exit code
func VerifyExecInPodFail(ctx context.Context, f *framework.Framework, pod *v1.Pod, shExec string, exitCode int) error {
stdout, stderr, err := ExecShellInPodWithFullOutput(ctx, f, pod.Name, shExec)
if err != nil {
var exitError clientexec.CodeExitError
if errors.As(err, &exitError) {
actualExitCode := exitError.ExitStatus()
if actualExitCode == exitCode {
return nil
}
return fmt.Errorf("%q should fail with exit code %d, but failed with exit code %d and error message %w\nstdout: %s\nstderr: %s",
shExec, exitCode, actualExitCode, exitError, stdout, stderr)
} else {
return fmt.Errorf("%q should fail with exit code %d, but failed with error message %w\nstdout: %s\nstderr: %s",
shExec, exitCode, err, stdout, stderr)
}
}
return fmt.Errorf("%q should fail with exit code %d, but exit without error", shExec, exitCode)
}

func execute(ctx context.Context, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error {
// WebSocketExecutor executor is default
// WebSocketExecutor must be "GET" method as described in RFC 6455 Sec. 4.1 (page 17).
websocketExec, err := remotecommand.NewWebSocketExecutor(config, "GET", url.String())
if err != nil {
return err
}
spdyExec, err := remotecommand.NewSPDYExecutor(config, "POST", url)
if err != nil {
return err
}
exec, err := remotecommand.NewFallbackExecutor(websocketExec, spdyExec, func(err error) bool {
if httpstream.IsUpgradeFailure(err) || httpstream.IsHTTPSProxyError(err) {
framework.Logf("fallback to secondary dialer from primary dialer err: %v", err)
return true
}
framework.Logf("unexpected error trying to use websockets for pod exec: %v", err)
return false
})
if err != nil {
return err
}

return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
Stdin: stdin,
Stdout: stdout,
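The two Verify helpers added above are the ones an end-to-end test typically calls directly. A minimal usage sketch follows, assuming the conventional e2epod import alias and placeholder mount paths.

    package sketch // illustrative only

    import (
    	"context"

    	v1 "k8s.io/api/core/v1"
    	"k8s.io/kubernetes/test/e2e/framework"
    	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    )

    func checkMount(ctx context.Context, f *framework.Framework, pod *v1.Pod) {
    	// Expected to succeed: the mount point exists inside the pod.
    	framework.ExpectNoError(e2epod.VerifyExecInPodSucceed(ctx, f, pod, "test -d /mnt/volume1"))
    	// Expected to fail with exit code 1: the path does not exist.
    	framework.ExpectNoError(e2epod.VerifyExecInPodFail(ctx, f, pod, "test -d /no/such/dir", 1))
    }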
e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/output/output.go (generated, vendored): 2 changed lines

@@ -176,7 +176,7 @@ func MatchMultipleContainerOutputs(
createdPod := podClient.Create(ctx, pod)
defer func() {
ginkgo.By("delete the pod")
podClient.DeleteSync(ctx, createdPod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
podClient.DeleteSync(ctx, createdPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
}()

// Wait for client pod to complete.
e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/resize.go (generated, vendored): 324 changed lines

@@ -28,7 +28,10 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
helpers "k8s.io/component-helpers/resource"
"k8s.io/kubectl/pkg/util/podutils"
kubecm "k8s.io/kubernetes/pkg/kubelet/cm"
kubeqos "k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"

@@ -98,11 +101,13 @@ func (cr *ContainerResources) ResourceRequirements() *v1.ResourceRequirements {
}

type ResizableContainerInfo struct {
Name string
Resources *ContainerResources
CPUPolicy *v1.ResourceResizeRestartPolicy
MemPolicy *v1.ResourceResizeRestartPolicy
RestartCount int32
Name string
Resources *ContainerResources
CPUPolicy *v1.ResourceResizeRestartPolicy
MemPolicy *v1.ResourceResizeRestartPolicy
RestartCount int32
RestartPolicy v1.ContainerRestartPolicy
InitCtr bool
}

type containerPatch struct {

@@ -142,21 +147,6 @@ func getTestResourceInfo(tcInfo ResizableContainerInfo) (res v1.ResourceRequirem
return res, resizePol
}

func InitDefaultResizePolicy(containers []ResizableContainerInfo) {
noRestart := v1.NotRequired
setDefaultPolicy := func(ci *ResizableContainerInfo) {
if ci.CPUPolicy == nil {
ci.CPUPolicy = &noRestart
}
if ci.MemPolicy == nil {
ci.MemPolicy = &noRestart
}
}
for i := range containers {
setDefaultPolicy(&containers[i])
}
}

func makeResizableContainer(tcInfo ResizableContainerInfo) v1.Container {
cmd := "grep Cpus_allowed_list /proc/self/status | cut -f2 && sleep 1d"
res, resizePol := getTestResourceInfo(tcInfo)

@@ -169,17 +159,17 @@ func makeResizableContainer(tcInfo ResizableContainerInfo) v1.Container {
Resources: res,
ResizePolicy: resizePol,
}
if tcInfo.RestartPolicy != "" {
tc.RestartPolicy = &tcInfo.RestartPolicy
}

return tc
}

func MakePodWithResizableContainers(ns, name, timeStamp string, tcInfo []ResizableContainerInfo) *v1.Pod {
var testContainers []v1.Container
testInitContainers, testContainers := separateContainers(tcInfo)

for _, ci := range tcInfo {
tc := makeResizableContainer(ci)
testContainers = append(testContainers, tc)
}
minGracePeriodSeconds := int64(0)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,

@@ -189,54 +179,120 @@ func MakePodWithResizableContainers(ns, name, timeStamp string, tcInfo []Resizab
},
},
Spec: v1.PodSpec{
OS: &v1.PodOS{Name: v1.Linux},
Containers: testContainers,
RestartPolicy: v1.RestartPolicyOnFailure,
OS: &v1.PodOS{Name: v1.Linux},
InitContainers: testInitContainers,
Containers: testContainers,
RestartPolicy: v1.RestartPolicyOnFailure,
TerminationGracePeriodSeconds: &minGracePeriodSeconds,
},
}
return pod
}

func VerifyPodResizePolicy(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
// separateContainers splits the input into initContainers and normal containers.
func separateContainers(tcInfo []ResizableContainerInfo) ([]v1.Container, []v1.Container) {
var initContainers, containers []v1.Container

for _, ci := range tcInfo {
tc := makeResizableContainer(ci)
if ci.InitCtr {
initContainers = append(initContainers, tc)
} else {
containers = append(containers, tc)
}
}

return initContainers, containers
}

// separateContainerStatuses splits the input into initContainerStatuses and containerStatuses.
func separateContainerStatuses(tcInfo []ResizableContainerInfo) ([]v1.ContainerStatus, []v1.ContainerStatus) {
var containerStatuses, initContainerStatuses []v1.ContainerStatus

for _, ci := range tcInfo {
ctrStatus := v1.ContainerStatus{
Name: ci.Name,
RestartCount: ci.RestartCount,
}
if ci.InitCtr {
initContainerStatuses = append(initContainerStatuses, ctrStatus)
} else {
containerStatuses = append(containerStatuses, ctrStatus)
}
}

return initContainerStatuses, containerStatuses
}

func VerifyPodResizePolicy(gotPod *v1.Pod, wantInfo []ResizableContainerInfo) {
ginkgo.GinkgoHelper()
gomega.Expect(gotPod.Spec.Containers).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
for i, wantCtr := range wantCtrs {
gotCtr := &gotPod.Spec.Containers[i]
ctr := makeResizableContainer(wantCtr)
gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name))
gomega.Expect(gotCtr.ResizePolicy).To(gomega.Equal(ctr.ResizePolicy))

gotCtrs := append(append([]v1.Container{}, gotPod.Spec.Containers...), gotPod.Spec.InitContainers...)
var wantCtrs []v1.Container
for _, ci := range wantInfo {
wantCtrs = append(wantCtrs, makeResizableContainer(ci))
}
gomega.Expect(gotCtrs).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
for _, wantCtr := range wantCtrs {
for _, gotCtr := range gotCtrs {
if wantCtr.Name != gotCtr.Name {
continue
}
gomega.Expect(v1.Container{Name: gotCtr.Name, ResizePolicy: gotCtr.ResizePolicy}).To(gomega.Equal(v1.Container{Name: wantCtr.Name, ResizePolicy: wantCtr.ResizePolicy}))
}
}
}

func VerifyPodResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
func VerifyPodResources(gotPod *v1.Pod, wantInfo []ResizableContainerInfo) {
ginkgo.GinkgoHelper()
gomega.Expect(gotPod.Spec.Containers).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
for i, wantCtr := range wantCtrs {
gotCtr := &gotPod.Spec.Containers[i]
ctr := makeResizableContainer(wantCtr)
gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name))
gomega.Expect(gotCtr.Resources).To(gomega.Equal(ctr.Resources))

gotCtrs := append(append([]v1.Container{}, gotPod.Spec.Containers...), gotPod.Spec.InitContainers...)
var wantCtrs []v1.Container
for _, ci := range wantInfo {
wantCtrs = append(wantCtrs, makeResizableContainer(ci))
}
gomega.Expect(gotCtrs).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
for _, wantCtr := range wantCtrs {
for _, gotCtr := range gotCtrs {
if wantCtr.Name != gotCtr.Name {
continue
}
gomega.Expect(v1.Container{Name: gotCtr.Name, Resources: gotCtr.Resources}).To(gomega.Equal(v1.Container{Name: wantCtr.Name, Resources: wantCtr.Resources}))
}
}
}

func VerifyPodStatusResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) error {
func VerifyPodStatusResources(gotPod *v1.Pod, wantInfo []ResizableContainerInfo) error {
ginkgo.GinkgoHelper()

wantInitCtrs, wantCtrs := separateContainers(wantInfo)
var errs []error
if err := verifyPodContainersStatusResources(gotPod.Status.InitContainerStatuses, wantInitCtrs); err != nil {
errs = append(errs, err)
}
if err := verifyPodContainersStatusResources(gotPod.Status.ContainerStatuses, wantCtrs); err != nil {
errs = append(errs, err)
}

return utilerrors.NewAggregate(errs)
}

func verifyPodContainersStatusResources(gotCtrStatuses []v1.ContainerStatus, wantCtrs []v1.Container) error {
ginkgo.GinkgoHelper()

var errs []error

if len(gotPod.Status.ContainerStatuses) != len(wantCtrs) {
if len(gotCtrStatuses) != len(wantCtrs) {
return fmt.Errorf("expectation length mismatch: got %d statuses, want %d",
len(gotPod.Status.ContainerStatuses), len(wantCtrs))
len(gotCtrStatuses), len(wantCtrs))
}
for i, wantCtr := range wantCtrs {
gotCtrStatus := &gotPod.Status.ContainerStatuses[i]
ctr := makeResizableContainer(wantCtr)
if gotCtrStatus.Name != ctr.Name {
errs = append(errs, fmt.Errorf("container status %d name %q != expected name %q", i, gotCtrStatus.Name, ctr.Name))
gotCtrStatus := gotCtrStatuses[i]
if gotCtrStatus.Name != wantCtr.Name {
errs = append(errs, fmt.Errorf("container status %d name %q != expected name %q", i, gotCtrStatus.Name, wantCtr.Name))
continue
}
if err := framework.Gomega().Expect(*gotCtrStatus.Resources).To(gomega.Equal(ctr.Resources)); err != nil {
errs = append(errs, fmt.Errorf("container[%s] status resources mismatch: %w", ctr.Name, err))
if err := framework.Gomega().Expect(*gotCtrStatus.Resources).To(gomega.Equal(wantCtr.Resources)); err != nil {
errs = append(errs, fmt.Errorf("container[%s] status resources mismatch: %w", wantCtr.Name, err))
}
}

@@ -266,7 +322,7 @@ func VerifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework
tc := makeResizableContainer(ci)
if tc.Resources.Limits != nil || tc.Resources.Requests != nil {
var expectedCPUShares int64
var expectedCPULimitString, expectedMemLimitString string
var expectedMemLimitString string
expectedMemLimitInBytes := tc.Resources.Limits.Memory().Value()
cpuRequest := tc.Resources.Requests.Cpu()
cpuLimit := tc.Resources.Limits.Cpu()

@@ -275,17 +331,10 @@ func VerifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework
} else {
expectedCPUShares = int64(kubecm.MilliCPUToShares(cpuRequest.MilliValue()))
}
cpuQuota := kubecm.MilliCPUToQuota(cpuLimit.MilliValue(), kubecm.QuotaPeriod)
if cpuLimit.IsZero() {
cpuQuota = -1
}
expectedCPULimitString = strconv.FormatInt(cpuQuota, 10)

expectedCPULimits := GetCPULimitCgroupExpectations(cpuLimit)
expectedMemLimitString = strconv.FormatInt(expectedMemLimitInBytes, 10)
if *podOnCgroupv2Node {
if expectedCPULimitString == "-1" {
expectedCPULimitString = "max"
}
expectedCPULimitString = fmt.Sprintf("%s %s", expectedCPULimitString, CPUPeriod)
if expectedMemLimitString == "0" {
expectedMemLimitString = "max"
}

@@ -293,45 +342,110 @@ func VerifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework
// https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2254-cgroup-v2#phase-1-convert-from-cgroups-v1-settings-to-v2
expectedCPUShares = int64(1 + ((expectedCPUShares-2)*9999)/262142)
}

if expectedMemLimitString != "0" {
errs = append(errs, VerifyCgroupValue(f, pod, ci.Name, cgroupMemLimit, expectedMemLimitString))
}
errs = append(errs, VerifyCgroupValue(f, pod, ci.Name, cgroupCPULimit, expectedCPULimitString))
errs = append(errs, VerifyCgroupValue(f, pod, ci.Name, cgroupCPULimit, expectedCPULimits...))
errs = append(errs, VerifyCgroupValue(f, pod, ci.Name, cgroupCPURequest, strconv.FormatInt(expectedCPUShares, 10)))
// TODO(vinaykul,InPlacePodVerticalScaling): Verify oom_score_adj when runc adds support for updating it
// See https://github.com/opencontainers/runc/pull/4669
}
}
return utilerrors.NewAggregate(errs)
}

func verifyContainerRestarts(pod *v1.Pod, expectedContainers []ResizableContainerInfo) error {
func verifyPodRestarts(f *framework.Framework, pod *v1.Pod, wantInfo []ResizableContainerInfo) error {
ginkgo.GinkgoHelper()

expectContainerRestarts := map[string]int32{}
for _, ci := range expectedContainers {
expectContainerRestarts[ci.Name] = ci.RestartCount
initCtrStatuses, ctrStatuses := separateContainerStatuses(wantInfo)
errs := []error{}
if err := verifyContainerRestarts(f, pod, pod.Status.InitContainerStatuses, initCtrStatuses); err != nil {
errs = append(errs, err)
}
if err := verifyContainerRestarts(f, pod, pod.Status.ContainerStatuses, ctrStatuses); err != nil {
errs = append(errs, err)
}

return utilerrors.NewAggregate(errs)
}

func verifyContainerRestarts(f *framework.Framework, pod *v1.Pod, gotStatuses []v1.ContainerStatus, wantStatuses []v1.ContainerStatus) error {
ginkgo.GinkgoHelper()

if len(gotStatuses) != len(wantStatuses) {
return fmt.Errorf("expectation length mismatch: got %d statuses, want %d",
len(gotStatuses), len(wantStatuses))
}

errs := []error{}
for _, cs := range pod.Status.ContainerStatuses {
expectedRestarts := expectContainerRestarts[cs.Name]
if cs.RestartCount != expectedRestarts {
errs = append(errs, fmt.Errorf("unexpected number of restarts for container %s: got %d, want %d", cs.Name, cs.RestartCount, expectedRestarts))
for i, gotStatus := range gotStatuses {
if gotStatus.RestartCount != wantStatuses[i].RestartCount {
errs = append(errs, fmt.Errorf("unexpected number of restarts for container %s: got %d, want %d", gotStatus.Name, gotStatus.RestartCount, wantStatuses[i].RestartCount))
} else if gotStatus.RestartCount > 0 {
err := verifyOomScoreAdj(f, pod, gotStatus.Name)
if err != nil {
errs = append(errs, err)
}
}
}
return utilerrors.NewAggregate(errs)
}

func WaitForPodResizeActuation(ctx context.Context, f *framework.Framework, podClient *PodClient, pod *v1.Pod) *v1.Pod {
func verifyOomScoreAdj(f *framework.Framework, pod *v1.Pod, containerName string) error {
container := FindContainerInPod(pod, containerName)
if container == nil {
return fmt.Errorf("failed to find container %s in pod %s", containerName, pod.Name)
}

node, err := f.ClientSet.CoreV1().Nodes().Get(context.Background(), pod.Spec.NodeName, metav1.GetOptions{})
if err != nil {
return err
}

nodeMemoryCapacity := node.Status.Capacity[v1.ResourceMemory]
oomScoreAdj := kubeqos.GetContainerOOMScoreAdjust(pod, container, int64(nodeMemoryCapacity.Value()))
expectedOomScoreAdj := strconv.FormatInt(int64(oomScoreAdj), 10)

return VerifyOomScoreAdjValue(f, pod, container.Name, expectedOomScoreAdj)
}

func WaitForPodResizeActuation(ctx context.Context, f *framework.Framework, podClient *PodClient, pod *v1.Pod, expectedContainers []ResizableContainerInfo) *v1.Pod {
ginkgo.GinkgoHelper()
// Wait for resize to complete.
framework.ExpectNoError(WaitForPodCondition(ctx, f.ClientSet, pod.Namespace, pod.Name, "resize status cleared", f.Timeouts.PodStart,
func(pod *v1.Pod) (bool, error) {
if pod.Status.Resize == v1.PodResizeStatusInfeasible {

framework.ExpectNoError(framework.Gomega().
Eventually(ctx, framework.RetryNotFound(framework.GetObject(f.ClientSet.CoreV1().Pods(pod.Namespace).Get, pod.Name, metav1.GetOptions{}))).
WithTimeout(f.Timeouts.PodStart).
Should(framework.MakeMatcher(func(pod *v1.Pod) (func() string, error) {
if helpers.IsPodResizeInfeasible(pod) {
// This is a terminal resize state
return false, fmt.Errorf("resize is infeasible")
return func() string {
return "resize is infeasible"
}, nil
}
return pod.Status.Resize == "", nil
}), "pod should finish resizing")
// TODO: Replace this check with a combination of checking the status.observedGeneration
// and the resize status when available.
if resourceErrs := VerifyPodStatusResources(pod, expectedContainers); resourceErrs != nil {
return func() string {
return fmt.Sprintf("container status resources don't match expected: %v", formatErrors(resourceErrs))
}, nil
}
// Wait for kubelet to clear the resize status conditions.
for _, c := range pod.Status.Conditions {
if c.Type == v1.PodResizePending || c.Type == v1.PodResizeInProgress {
return func() string {
return fmt.Sprintf("resize status %v is still present in the pod status", c)
}, nil
}
}
// Wait for the pod to be ready.
if !podutils.IsPodReady(pod) {
return func() string { return "pod is not ready" }, nil
}
return nil, nil
})),
)

resizedPod, err := framework.GetObject(podClient.Get, pod.Name, metav1.GetOptions{})(ctx)
framework.ExpectNoError(err, "failed to get resized pod")

@@ -341,19 +455,6 @@ func WaitForPodResizeActuation(ctx context.Context, f *framework.Framework, podC
func ExpectPodResized(ctx context.Context, f *framework.Framework, resizedPod *v1.Pod, expectedContainers []ResizableContainerInfo) {
ginkgo.GinkgoHelper()

// Put each error on a new line for readability.
formatErrors := func(err error) error {
var agg utilerrors.Aggregate
if !errors.As(err, &agg) {
return err
}

errStrings := make([]string, len(agg.Errors()))
for i, err := range agg.Errors() {
errStrings[i] = err.Error()
}
return fmt.Errorf("[\n%s\n]", strings.Join(errStrings, ",\n"))
}
// Verify Pod Containers Cgroup Values
var errs []error
if cgroupErrs := VerifyPodContainersCgroupValues(ctx, f, resizedPod, expectedContainers); cgroupErrs != nil {

@@ -362,10 +463,17 @@ func ExpectPodResized(ctx context.Context, f *framework.Framework, resizedPod *v
if resourceErrs := VerifyPodStatusResources(resizedPod, expectedContainers); resourceErrs != nil {
errs = append(errs, fmt.Errorf("container status resources don't match expected: %w", formatErrors(resourceErrs)))
}
if restartErrs := verifyContainerRestarts(resizedPod, expectedContainers); restartErrs != nil {
if restartErrs := verifyPodRestarts(f, resizedPod, expectedContainers); restartErrs != nil {
errs = append(errs, fmt.Errorf("container restart counts don't match expected: %w", formatErrors(restartErrs)))
}

// Verify Pod Resize conditions are empty.
for _, condition := range resizedPod.Status.Conditions {
if condition.Type == v1.PodResizeInProgress || condition.Type == v1.PodResizePending {
errs = append(errs, fmt.Errorf("unexpected resize condition type %s found in pod status", condition.Type))
}
}

if len(errs) > 0 {
resizedPod.ManagedFields = nil // Suppress managed fields in error output.
framework.ExpectNoError(formatErrors(utilerrors.NewAggregate(errs)),

@@ -395,3 +503,35 @@ func ResizeContainerPatch(containers []ResizableContainerInfo) (string, error) {

return string(patchBytes), nil
}

// UpdateExpectedContainerRestarts updates the RestartCounts in expectedContainers by
// adding them to the existing RestartCounts in the containerStatuses of the provided pod.
// This reduces the flakiness of the RestartCount assertions by grabbing the current
// restart count right before the resize operation, and verify the expected increment (0 or 1)
// rather than the absolute count.
func UpdateExpectedContainerRestarts(ctx context.Context, pod *v1.Pod, expectedContainers []ResizableContainerInfo) []ResizableContainerInfo {
initialRestarts := make(map[string]int32)
newExpectedContainers := []ResizableContainerInfo{}
for _, ctr := range pod.Status.ContainerStatuses {
initialRestarts[ctr.Name] = ctr.RestartCount
}
for i, ctr := range expectedContainers {
newExpectedContainers = append(newExpectedContainers, expectedContainers[i])
newExpectedContainers[i].RestartCount += initialRestarts[ctr.Name]
}
return newExpectedContainers
}

func formatErrors(err error) error {
// Put each error on a new line for readability.
var agg utilerrors.Aggregate
if !errors.As(err, &agg) {
return err
}

errStrings := make([]string, len(agg.Errors()))
for i, err := range agg.Errors() {
errStrings[i] = err.Error()
}
return fmt.Errorf("[\n%s\n]", strings.Join(errStrings, ",\n"))
}
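A sketch of how the resize helpers above chain together in a test; the function names and argument order follow the signatures in this hunk, while the wrapper function, the e2epod alias, and the elided patch step are illustrative assumptions.

    package sketch // illustrative only

    import (
    	"context"

    	v1 "k8s.io/api/core/v1"
    	"k8s.io/kubernetes/test/e2e/framework"
    	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    )

    func resizeAndVerify(ctx context.Context, f *framework.Framework, podClient *e2epod.PodClient, pod *v1.Pod, want []e2epod.ResizableContainerInfo) {
    	// Make the expected restart counts relative to the counts observed right
    	// before the resize, as UpdateExpectedContainerRestarts documents.
    	want = e2epod.UpdateExpectedContainerRestarts(ctx, pod, want)

    	// ... apply the resize patch here, e.g. built with e2epod.ResizeContainerPatch(want) ...

    	// Wait until the kubelet has actuated the resize, then assert cgroup values,
    	// status resources, restart counts and resize conditions in one place.
    	resized := e2epod.WaitForPodResizeActuation(ctx, f, podClient, pod, want)
    	e2epod.ExpectPodResized(ctx, f, resized, want)
    }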
e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/resource.go (generated, vendored): 38 changed lines

@@ -59,12 +59,6 @@ func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}

// PodsCreated returns a pod list matched by the given name.
func PodsCreated(ctx context.Context, c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
return PodsCreatedByLabel(ctx, c, ns, name, replicas, label)
}

// PodsCreatedByLabel returns a created pod list matched by the given label.
func PodsCreatedByLabel(ctx context.Context, c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) {
timeout := 2 * time.Minute

@@ -95,26 +89,32 @@ func PodsCreatedByLabel(ctx context.Context, c clientset.Interface, ns, name str
}

// VerifyPods checks if the specified pod is responding.
func VerifyPods(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
return podRunningMaybeResponding(ctx, c, ns, name, wantName, replicas, true)
}

// VerifyPodsRunning checks if the specified pod is running.
func VerifyPodsRunning(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
return podRunningMaybeResponding(ctx, c, ns, name, wantName, replicas, false)
}

func podRunningMaybeResponding(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, replicas int32, checkResponding bool) error {
pods, err := PodsCreated(ctx, c, ns, name, replicas)
func VerifyPods(ctx context.Context, c clientset.Interface, ns, name string, selector labels.Selector, wantName bool, replicas int32) error {
pods, err := PodsCreatedByLabel(ctx, c, ns, name, replicas, selector)
if err != nil {
return err
}

return podsRunningMaybeResponding(ctx, c, ns, name, selector, pods, wantName, true)
}

// VerifyPodsRunning checks if the specified pod is running.
func VerifyPodsRunning(ctx context.Context, c clientset.Interface, ns, name string, selector labels.Selector, wantName bool, replicas int32) error {
pods, err := PodsCreatedByLabel(ctx, c, ns, name, replicas, selector)
if err != nil {
return err
}

return podsRunningMaybeResponding(ctx, c, ns, name, selector, pods, wantName, false)
}

func podsRunningMaybeResponding(ctx context.Context, c clientset.Interface, ns string, name string, selector labels.Selector, pods *v1.PodList, wantName bool, checkResponding bool) error {
e := podsRunning(ctx, c, pods)
if len(e) > 0 {
return fmt.Errorf("failed to wait for pods running: %v", e)
}
if checkResponding {
return WaitForPodsResponding(ctx, c, ns, name, wantName, podRespondingTimeout, pods)
return WaitForPodsResponding(ctx, c, ns, name, selector, wantName, podRespondingTimeout, pods)
}
return nil
}

@@ -172,7 +172,7 @@ func LogPodStates(pods []v1.Pod) {
if pod.DeletionGracePeriodSeconds != nil {
grace = fmt.Sprintf("%ds", *pod.DeletionGracePeriodSeconds)
}
framework.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
framework.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]v",
maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.NodeName, maxPhaseW, pod.Status.Phase, maxGraceW, grace, pod.Status.Conditions)
}
framework.Logf("") // Final empty line helps for readability.
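Callers of VerifyPods and VerifyPodsRunning now have to supply the label selector that the removed PodsCreated wrapper used to build internally. A small adapter sketch, reusing the same {"name": name} selector shown in the removed code:

    package sketch // illustrative only

    import (
    	"context"

    	"k8s.io/apimachinery/pkg/labels"
    	clientset "k8s.io/client-go/kubernetes"
    	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    )

    func verifyRunningByName(ctx context.Context, c clientset.Interface, ns, name string, replicas int32) error {
    	// The selector the old PodsCreated helper built implicitly.
    	sel := labels.SelectorFromSet(labels.Set{"name": name})
    	return e2epod.VerifyPodsRunning(ctx, c, ns, name, sel, true, replicas)
    }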
e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/utils.go (generated, vendored): 129 changed lines

@@ -17,14 +17,16 @@ limitations under the License.
package pod

import (
"flag"
"fmt"
"strconv"
"strings"

"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
kubecm "k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
psaapi "k8s.io/pod-security-admission/api"

@@ -32,21 +34,20 @@ import (
"k8s.io/utils/pointer"
)

// NodeOSDistroIs returns true if the distro is the same as `--node-os-distro`
// the package framework/pod can't import the framework package (see #81245)
// we need to check if the --node-os-distro=windows is set and the framework package
// is the one that's parsing the flags, as a workaround this method is looking for the same flag again
// TODO: replace with `framework.NodeOSDistroIs` when #81245 is complete
func NodeOSDistroIs(distro string) bool {
var nodeOsDistro *flag.Flag = flag.Lookup("node-os-distro")
if nodeOsDistro != nil && nodeOsDistro.Value.String() == distro {
return true
}
return false
}

// This command runs an infinite loop, sleeping for 1 second in each iteration.
// It sets up a trap to exit gracefully when a TERM signal is received.
//
// This is useful for testing scenarios where the container is terminated
// with a zero exit code.
const InfiniteSleepCommand = "trap exit TERM; while true; do sleep 1; done"

// This command will cause the shell to remain in a sleep state indefinitely,
// and it won't exit unless it receives a KILL signal.
//
// This is useful for testing scenarios where the container is terminated
// with a non-zero exit code.
const InfiniteSleepCommandWithoutGracefulShutdown = "while true; do sleep 100000; done"

// GenerateScriptCmd generates the corresponding command lines to execute a command.
func GenerateScriptCmd(command string) []string {
return []string{"/bin/sh", "-c", command}

@@ -72,7 +73,7 @@ func GetDefaultTestImageID() imageutils.ImageID {
// If the Node OS is windows, currently we return Agnhost image for Windows node
// due to the issue of #https://github.com/kubernetes-sigs/windows-testing/pull/35.
func GetTestImage(id imageutils.ImageID) string {
if NodeOSDistroIs("windows") {
if framework.NodeOSDistroIs("windows") {
return imageutils.GetE2EImage(imageutils.Agnhost)
}
return imageutils.GetE2EImage(id)

@@ -82,7 +83,7 @@ func GetTestImage(id imageutils.ImageID) string {
// If the Node OS is windows, currently we return Agnhost image for Windows node
// due to the issue of #https://github.com/kubernetes-sigs/windows-testing/pull/35.
func GetTestImageID(id imageutils.ImageID) imageutils.ImageID {
if NodeOSDistroIs("windows") {
if framework.NodeOSDistroIs("windows") {
return imageutils.Agnhost
}
return id

@@ -92,7 +93,7 @@ func GetTestImageID(id imageutils.ImageID) imageutils.ImageID {
// If the Node OS is windows, we return nill due to issue with invalid permissions set on projected volumes
// https://github.com/kubernetes/kubernetes/issues/102849
func GetDefaultNonRootUser() *int64 {
if NodeOSDistroIs("windows") {
if framework.NodeOSDistroIs("windows") {
return nil
}
return pointer.Int64(DefaultNonRootUser)

@@ -102,7 +103,7 @@ func GetDefaultNonRootUser() *int64 {
// If the Node OS is windows, currently we will ignore the inputs and return nil.
// TODO: Will modify it after windows has its own security context
func GeneratePodSecurityContext(fsGroup *int64, seLinuxOptions *v1.SELinuxOptions) *v1.PodSecurityContext {
if NodeOSDistroIs("windows") {
if framework.NodeOSDistroIs("windows") {
return nil
}
return &v1.PodSecurityContext{

@@ -115,7 +116,7 @@ func GeneratePodSecurityContext(fsGroup *int64, seLinuxOptions *v1.SELinuxOption
// If the Node OS is windows, currently we will ignore the inputs and return nil.
// TODO: Will modify it after windows has its own security context
func GenerateContainerSecurityContext(level psaapi.Level) *v1.SecurityContext {
if NodeOSDistroIs("windows") {
if framework.NodeOSDistroIs("windows") {
return nil
}

@@ -139,7 +140,7 @@ func GenerateContainerSecurityContext(level psaapi.Level) *v1.SecurityContext {
// GetLinuxLabel returns the default SELinuxLabel based on OS.
// If the node OS is windows, it will return nil
func GetLinuxLabel() *v1.SELinuxOptions {
if NodeOSDistroIs("windows") {
if framework.NodeOSDistroIs("windows") {
return nil
}
return &v1.SELinuxOptions{

@@ -162,7 +163,7 @@ func GetRestrictedPodSecurityContext() *v1.PodSecurityContext {
SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault},
}

if NodeOSDistroIs("windows") {
if framework.NodeOSDistroIs("windows") {
psc.WindowsOptions = &v1.WindowsSecurityContextOptions{}
psc.WindowsOptions.RunAsUserName = pointer.String(DefaultNonRootUserName)
}

@@ -205,7 +206,7 @@ func MixinRestrictedPodSecurity(pod *v1.Pod) error {
if pod.Spec.SecurityContext.SeccompProfile == nil {
pod.Spec.SecurityContext.SeccompProfile = &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault}
}
if NodeOSDistroIs("windows") && pod.Spec.SecurityContext.WindowsOptions == nil {
if framework.NodeOSDistroIs("windows") && pod.Spec.SecurityContext.WindowsOptions == nil {
pod.Spec.SecurityContext.WindowsOptions = &v1.WindowsSecurityContextOptions{}
pod.Spec.SecurityContext.WindowsOptions.RunAsUserName = pointer.String(DefaultNonRootUserName)
}

@@ -258,6 +259,21 @@ func FindPodConditionByType(podStatus *v1.PodStatus, conditionType v1.PodConditi
return nil
}

// FindContainerInPod finds the container in a pod by its name
func FindContainerInPod(pod *v1.Pod, containerName string) *v1.Container {
for _, container := range pod.Spec.InitContainers {
if container.Name == containerName {
return &container
}
}
for _, container := range pod.Spec.Containers {
if container.Name == containerName {
return &container
}
}
return nil
}

// FindContainerStatusInPod finds a container status by its name in the provided pod
func FindContainerStatusInPod(pod *v1.Pod, containerName string) *v1.ContainerStatus {
for _, containerStatus := range pod.Status.InitContainerStatuses {

@@ -279,19 +295,40 @@ func FindContainerStatusInPod(pod *v1.Pod, containerName string) *v1.ContainerSt
}

// VerifyCgroupValue verifies that the given cgroup path has the expected value in
// the specified container of the pod. It execs into the container to retrive the
// cgroup value and compares it against the expected value.
func VerifyCgroupValue(f *framework.Framework, pod *v1.Pod, cName, cgPath, expectedCgValue string) error {
// the specified container of the pod. It execs into the container to retrieve the
// cgroup value, and ensures that the retrieved cgroup value is equivalent to at
// least one of the values in expectedCgValues.
func VerifyCgroupValue(f *framework.Framework, pod *v1.Pod, cName, cgPath string, expectedCgValues ...string) error {
cmd := fmt.Sprintf("head -n 1 %s", cgPath)
framework.Logf("Namespace %s Pod %s Container %s - looking for cgroup value %s in path %s",
pod.Namespace, pod.Name, cName, expectedCgValue, cgPath)
framework.Logf("Namespace %s Pod %s Container %s - looking for one of the expected cgroup values %s in path %s",
pod.Namespace, pod.Name, cName, expectedCgValues, cgPath)
cgValue, _, err := ExecCommandInContainerWithFullOutput(f, pod.Name, cName, "/bin/sh", "-c", cmd)
if err != nil {
return fmt.Errorf("failed to find expected value %q in container cgroup %q", expectedCgValue, cgPath)
return fmt.Errorf("failed to find one of the expected cgroup values %q in container cgroup %q", expectedCgValues, cgPath)
}
cgValue = strings.Trim(cgValue, "\n")
if cgValue != expectedCgValue {
return fmt.Errorf("cgroup value %q not equal to expected %q", cgValue, expectedCgValue)

if err := framework.Gomega().Expect(cgValue).To(gomega.BeElementOf(expectedCgValues)); err != nil {
return fmt.Errorf("value of cgroup %q for container %q should match one of the expectations: %w", cgPath, cName, err)
}

return nil
}

// VerifyOomScoreAdjValue verifies that oom_score_adj for pid 1 (pidof init/systemd -> app)
// has the expected value in specified container of the pod. It execs into the container,
// reads the oom_score_adj value from procfs, and compares it against the expected value.
func VerifyOomScoreAdjValue(f *framework.Framework, pod *v1.Pod, cName, expectedOomScoreAdj string) error {
cmd := "cat /proc/1/oom_score_adj"
framework.Logf("Namespace %s Pod %s Container %s - looking for oom_score_adj value %s",
pod.Namespace, pod.Name, cName, expectedOomScoreAdj)
oomScoreAdj, _, err := ExecCommandInContainerWithFullOutput(f, pod.Name, cName, "/bin/sh", "-c", cmd)
if err != nil {
return fmt.Errorf("failed to find expected value %s for container app process", expectedOomScoreAdj)
}
oomScoreAdj = strings.Trim(oomScoreAdj, "\n")
if oomScoreAdj != expectedOomScoreAdj {
return fmt.Errorf("oom_score_adj value %s not equal to expected %s", oomScoreAdj, expectedOomScoreAdj)
}
return nil
}

@@ -307,3 +344,35 @@ func IsPodOnCgroupv2Node(f *framework.Framework, pod *v1.Pod) bool {
}
return len(out) != 0
}

// TODO: Remove the rounded cpu limit values when https://github.com/opencontainers/runc/issues/4622
// is fixed.
func GetCPULimitCgroupExpectations(cpuLimit *resource.Quantity) []string {
var expectedCPULimits []string
milliCPULimit := cpuLimit.MilliValue()

cpuQuota := kubecm.MilliCPUToQuota(milliCPULimit, kubecm.QuotaPeriod)
if cpuLimit.IsZero() {
cpuQuota = -1
}
expectedCPULimits = append(expectedCPULimits, getExpectedCPULimitFromCPUQuota(cpuQuota))

if milliCPULimit%10 != 0 && cpuQuota != -1 {
roundedCPULimit := (milliCPULimit/10 + 1) * 10
cpuQuotaRounded := kubecm.MilliCPUToQuota(roundedCPULimit, kubecm.QuotaPeriod)
expectedCPULimits = append(expectedCPULimits, getExpectedCPULimitFromCPUQuota(cpuQuotaRounded))
}

return expectedCPULimits
}

func getExpectedCPULimitFromCPUQuota(cpuQuota int64) string {
expectedCPULimitString := strconv.FormatInt(cpuQuota, 10)
if *podOnCgroupv2Node {
if expectedCPULimitString == "-1" {
expectedCPULimitString = "max"
}
expectedCPULimitString = fmt.Sprintf("%s %s", expectedCPULimitString, CPUPeriod)
}
return expectedCPULimitString
}
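A worked example of the rounding GetCPULimitCgroupExpectations performs (a standalone sketch; the quota helper below only mirrors the milliCPU-to-quota conversion with the default 100000µs period and is not the kubelet's implementation):

    package main

    import "fmt"

    const quotaPeriod = 100000 // microseconds, the default CFS period

    func milliCPUToQuota(milliCPU int64) int64 {
    	return milliCPU * quotaPeriod / 1000
    }

    func main() {
    	milli := int64(125) // a 125m CPU limit
    	expected := []int64{milliCPUToQuota(milli)} // 12500
    	if milli%10 != 0 {
    		// runc may round the limit up to the next 10m, so the quota for 130m
    		// (13000) is accepted as well, per the TODO above.
    		expected = append(expected, milliCPUToQuota((milli/10+1)*10))
    	}
    	fmt.Println(expected) // [12500 13000]
    }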
81
e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/wait.go
generated
vendored
81
e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pod/wait.go
generated
vendored
@ -84,6 +84,27 @@ func BeRunningNoRetries() types.GomegaMatcher {
)
}

// BeRunningReadyNoRetries verifies that a pod starts running and has a ready
// condition of status true. It's a permanent failure when the pod enters some
// other permanent phase.
func BeRunningReadyNoRetries() types.GomegaMatcher {
return gomega.And(
// This additional matcher checks for the final error condition.
gcustom.MakeMatcher(func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase {
case v1.PodFailed, v1.PodSucceeded:
return false, gomega.StopTrying(fmt.Sprintf("Expected pod to reach phase %q, got final phase %q instead:\n%s", v1.PodRunning, pod.Status.Phase, format.Object(pod, 1)))
default:
return true, nil
}
}),
BeInPhase(v1.PodRunning),
gcustom.MakeMatcher(func(pod *v1.Pod) (bool, error) {
return podutils.IsPodReady(pod), nil
}).WithMessage("Expected pod to have a ready condition of status true"),
)
}

// BeInPhase matches if pod.status.phase is the expected phase.
func BeInPhase(phase v1.PodPhase) types.GomegaMatcher {
// A simple implementation of this would be:
@ -285,7 +306,7 @@ func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns stri

}

// WaitForPodCondition waits a pods to be matched to the given condition.
// WaitForPodCondition waits for a pod to be matched to the given condition.
// The condition callback may use gomega.StopTrying to abort early.
func WaitForPodCondition(ctx context.Context, c clientset.Interface, ns, podName, conditionDesc string, timeout time.Duration, condition podCondition) error {
return framework.Gomega().
@ -305,6 +326,45 @@ func WaitForPodCondition(ctx context.Context, c clientset.Interface, ns, podName
}))
}

// WaitForPodObservedGeneration waits for a pod to have the given observed generation.
func WaitForPodObservedGeneration(ctx context.Context, c clientset.Interface, ns, podName string, expectedGeneration int64, timeout time.Duration) error {
return framework.Gomega().
Eventually(ctx, framework.RetryNotFound(framework.GetObject(c.CoreV1().Pods(ns).Get, podName, metav1.GetOptions{}))).
WithTimeout(timeout).
Should(framework.MakeMatcher(func(pod *v1.Pod) (func() string, error) {
if pod.Status.ObservedGeneration == expectedGeneration {
return nil, nil
}
return func() string {
return fmt.Sprintf("expected pod generation to be %d, got %d instead:\n", expectedGeneration, pod.Status.ObservedGeneration)
}, nil
}))
}

// WaitForPodConditionObservedGeneration waits for a pod condition to have the given observed generation.
func WaitForPodConditionObservedGeneration(ctx context.Context, c clientset.Interface, ns, podName string, conditionType v1.PodConditionType, expectedGeneration int64, timeout time.Duration) error {
return framework.Gomega().
Eventually(ctx, framework.RetryNotFound(framework.GetObject(c.CoreV1().Pods(ns).Get, podName, metav1.GetOptions{}))).
WithTimeout(timeout).
Should(framework.MakeMatcher(func(pod *v1.Pod) (func() string, error) {
for _, condition := range pod.Status.Conditions {
if condition.Type == conditionType {
if condition.ObservedGeneration == expectedGeneration {
return nil, nil
} else {
return func() string {
return fmt.Sprintf("expected condition %s generation to be %d, got %d instead:\n", conditionType, expectedGeneration, condition.ObservedGeneration)
}, nil
}
}
}

return func() string {
return fmt.Sprintf("could not find condition %s:\n", conditionType)
}, nil
}))
}
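A short usage sketch (hypothetical test code, not part of this change) for the new observed-generation helpers; the pod variable, generation value, and timeout are assumptions:

```go
// Assumes the usual e2e aliases: e2epod for .../framework/pod, f for the Framework.
gen := pod.Generation // generation recorded after the spec update the test just made
framework.ExpectNoError(e2epod.WaitForPodObservedGeneration(
	ctx, f.ClientSet, f.Namespace.Name, pod.Name, gen, f.Timeouts.PodStart))
framework.ExpectNoError(e2epod.WaitForPodConditionObservedGeneration(
	ctx, f.ClientSet, f.Namespace.Name, pod.Name, v1.PodReady, gen, f.Timeouts.PodStart))
```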

// Range determines how many items must exist and how many must match a certain
// condition. Values <= 0 are ignored.
// TODO (?): move to test/e2e/framework/range
@ -526,6 +586,16 @@ func WaitTimeoutForPodRunningInNamespace(ctx context.Context, c clientset.Interf
Should(BeRunningNoRetries())
}

// WaitTimeoutForPodRunningReadyInNamespace waits the given timeout duration for the specified pod to become running
// and have a ready condition of status true.
// It does not need to exist yet when this function gets called and the pod is not expected to be recreated
// when it succeeds or fails.
func WaitTimeoutForPodRunningReadyInNamespace(ctx context.Context, c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return framework.Gomega().Eventually(ctx, framework.RetryNotFound(framework.GetObject(c.CoreV1().Pods(namespace).Get, podName, metav1.GetOptions{}))).
WithTimeout(timeout).
Should(BeRunningReadyNoRetries())
}

// WaitForPodRunningInNamespace waits default amount of time (podStartTimeout) for the specified pod to become running.
// Returns an error if timeout occurs first, or pod goes in to failed state.
func WaitForPodRunningInNamespace(ctx context.Context, c clientset.Interface, pod *v1.Pod) error {
@ -604,13 +674,12 @@ func WaitForPodNotFoundInNamespace(ctx context.Context, c clientset.Interface, p
}

// WaitForPodsResponding waits for the pods to response.
func WaitForPodsResponding(ctx context.Context, c clientset.Interface, ns string, controllerName string, wantName bool, timeout time.Duration, pods *v1.PodList) error {
func WaitForPodsResponding(ctx context.Context, c clientset.Interface, ns string, controllerName string, selector labels.Selector, wantName bool, timeout time.Duration, pods *v1.PodList) error {
if timeout == 0 {
timeout = podRespondingTimeout
}
ginkgo.By("trying to dial each unique pod")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": controllerName}))
options := metav1.ListOptions{LabelSelector: label.String()}
options := metav1.ListOptions{LabelSelector: selector.String()}

type response struct {
podName string
@ -801,7 +870,7 @@ func WaitForPodScheduled(ctx context.Context, c clientset.Interface, namespace,
func WaitForPodContainerStarted(ctx context.Context, c clientset.Interface, namespace, podName string, containerIndex int, timeout time.Duration) error {
conditionDesc := fmt.Sprintf("container %d started", containerIndex)
return WaitForPodCondition(ctx, c, namespace, podName, conditionDesc, timeout, func(pod *v1.Pod) (bool, error) {
if containerIndex > len(pod.Status.ContainerStatuses)-1 {
if containerIndex >= len(pod.Status.ContainerStatuses) {
return false, nil
}
containerStatus := pod.Status.ContainerStatuses[containerIndex]
@ -813,7 +882,7 @@ func WaitForPodContainerStarted(ctx context.Context, c clientset.Interface, name
func WaitForPodInitContainerStarted(ctx context.Context, c clientset.Interface, namespace, podName string, initContainerIndex int, timeout time.Duration) error {
conditionDesc := fmt.Sprintf("init container %d started", initContainerIndex)
return WaitForPodCondition(ctx, c, namespace, podName, conditionDesc, timeout, func(pod *v1.Pod) (bool, error) {
if initContainerIndex > len(pod.Status.InitContainerStatuses)-1 {
if initContainerIndex >= len(pod.Status.InitContainerStatuses) {
return false, nil
}
initContainerStatus := pod.Status.InitContainerStatuses[initContainerIndex]
39
e2e/vendor/k8s.io/kubernetes/test/e2e/framework/pv/wait.go
generated
vendored
@ -68,3 +68,42 @@ func WaitForPersistentVolumeClaimModified(ctx context.Context, c clientset.Inter
}, nil
}))
}

// WaitForPersistentVolumeClaimModificationFailure waits the given timeout duration for the specified claim to have
// failed to bind to the invalid volume attributes class.
// Returns an error if timeout occurs first.
func WaitForPersistentVolumeClaimModificationFailure(ctx context.Context, c clientset.Interface, claim *v1.PersistentVolumeClaim, timeout time.Duration) error {
desiredClass := ptr.Deref(claim.Spec.VolumeAttributesClassName, "")

var match = func(claim *v1.PersistentVolumeClaim) bool {
for _, condition := range claim.Status.Conditions {
if condition.Type != v1.PersistentVolumeClaimVolumeModifyVolumeError {
return false
}
}

// check if claim's current volume attributes class is NOT desired one, and has appropriate ModifyVolumeStatus
currentClass := ptr.Deref(claim.Status.CurrentVolumeAttributesClassName, "")
return claim.Status.Phase == v1.ClaimBound &&
desiredClass != currentClass && claim.Status.ModifyVolumeStatus != nil &&
(claim.Status.ModifyVolumeStatus.Status == v1.PersistentVolumeClaimModifyVolumeInProgress ||
claim.Status.ModifyVolumeStatus.Status == v1.PersistentVolumeClaimModifyVolumeInfeasible)
}

if match(claim) {
return nil
}

return framework.Gomega().
Eventually(ctx, framework.GetObject(c.CoreV1().PersistentVolumeClaims(claim.Namespace).Get, claim.Name, metav1.GetOptions{})).
WithTimeout(timeout).
Should(framework.MakeMatcher(func(claim *v1.PersistentVolumeClaim) (func() string, error) {
if match(claim) {
return nil, nil
}

return func() string {
return fmt.Sprintf("expected claim's status to NOT be modified with the given VolumeAttirbutesClass %s, got instead:\n%s", desiredClass, format.Object(claim, 1))
}, nil
}))
}
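A hedged usage sketch for the new failure-path wait; the claim variable, package alias, and timeout come from hypothetical test context:

```go
// Assumes e2epv aliases .../framework/pv and claim requested an invalid VolumeAttributesClass.
err := e2epv.WaitForPersistentVolumeClaimModificationFailure(ctx, f.ClientSet, claim, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err, "PVC %s should stay on its old class and report a ModifyVolumeStatus", claim.Name)
```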
2
e2e/vendor/k8s.io/kubernetes/test/e2e/framework/ssh/ssh.go
generated
vendored
@ -66,7 +66,7 @@ func GetSigner(provider string) (ssh.Signer, error) {
// support.
keyfile := ""
switch provider {
case "gce", "gke", "kubemark":
case "gce", "kubemark":
keyfile = os.Getenv("GCE_SSH_KEY")
if keyfile == "" {
keyfile = os.Getenv("GCE_SSH_PRIVATE_KEY_FILE")
119
e2e/vendor/k8s.io/kubernetes/test/e2e/framework/util.go
generated
vendored
@ -53,12 +53,6 @@ import (
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
watchtools "k8s.io/client-go/tools/watch"
netutils "k8s.io/utils/net"
)

const (
// TODO(justinsb): Avoid hardcoding this.
awsMasterIP = "172.20.0.9"
)

// DEPRECATED constants. Use the timeouts in framework.Framework instead.
@ -135,7 +129,7 @@ const (

var (
// ProvidersWithSSH are those providers where each node is accessible with SSH
ProvidersWithSSH = []string{"gce", "gke", "aws", "local", "azure"}
ProvidersWithSSH = []string{"gce", "aws", "local", "azure"}
)

// RunID is a unique identifier of the e2e run.
@ -416,29 +410,12 @@ func CheckTestingNSDeletedExcept(ctx context.Context, c clientset.Interface, ski
return fmt.Errorf("Waiting for terminating namespaces to be deleted timed out")
}

// WaitForServiceEndpointsNum waits until the amount of endpoints that implement service to expectNum.
// Some components use EndpointSlices other Endpoints, we must verify that both objects meet the requirements.
// WaitForServiceEndpointsNum waits until there are EndpointSlices for serviceName
// containing a total of expectNum endpoints. (If the service is dual-stack, expectNum
// must count the endpoints of both IP families.)
func WaitForServiceEndpointsNum(ctx context.Context, c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
return wait.PollUntilContextTimeout(ctx, interval, timeout, false, func(ctx context.Context) (bool, error) {
Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum)
endpoint, err := c.CoreV1().Endpoints(namespace).Get(ctx, serviceName, metav1.GetOptions{})
if err != nil {
Logf("Unexpected error trying to get Endpoints for %s : %v", serviceName, err)
return false, nil
}

if countEndpointsNum(endpoint) != expectNum {
Logf("Unexpected number of Endpoints, got %d, expected %d", countEndpointsNum(endpoint), expectNum)
return false, nil
}

// Endpoints are single family but EndpointSlices can have dual stack addresses,
// so we verify the number of addresses that matches the same family on both.
addressType := discoveryv1.AddressTypeIPv4
if isIPv6Endpoint(endpoint) {
addressType = discoveryv1.AddressTypeIPv6
}

esList, err := c.DiscoveryV1().EndpointSlices(namespace).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", discoveryv1.LabelServiceName, serviceName)})
if err != nil {
Logf("Unexpected error trying to get EndpointSlices for %s : %v", serviceName, err)
@ -450,44 +427,18 @@ func WaitForServiceEndpointsNum(ctx context.Context, c clientset.Interface, name
return false, nil
}

if countEndpointsSlicesNum(esList, addressType) != expectNum {
Logf("Unexpected number of Endpoints on Slices, got %d, expected %d", countEndpointsSlicesNum(esList, addressType), expectNum)
if countEndpointsSlicesNum(esList) != expectNum {
Logf("Unexpected number of Endpoints on Slices, got %d, expected %d", countEndpointsSlicesNum(esList), expectNum)
return false, nil
}
return true, nil
})
}

func countEndpointsNum(e *v1.Endpoints) int {
num := 0
for _, sub := range e.Subsets {
num += len(sub.Addresses)
}
return num
}

// isIPv6Endpoint returns true if the Endpoint uses IPv6 addresses
func isIPv6Endpoint(e *v1.Endpoints) bool {
for _, sub := range e.Subsets {
for _, addr := range sub.Addresses {
if len(addr.IP) == 0 {
continue
}
// Endpoints are single family, so it is enough to check only one address
return netutils.IsIPv6String(addr.IP)
}
}
// default to IPv4 an Endpoint without IP addresses
return false
}

func countEndpointsSlicesNum(epList *discoveryv1.EndpointSliceList, addressType discoveryv1.AddressType) int {
func countEndpointsSlicesNum(epList *discoveryv1.EndpointSliceList) int {
// EndpointSlices can contain the same address on multiple Slices
addresses := sets.Set[string]{}
for _, epSlice := range epList.Items {
if epSlice.AddressType != addressType {
continue
}
for _, ep := range epSlice.Endpoints {
if len(ep.Addresses) > 0 {
addresses.Insert(ep.Addresses[0])
@ -678,38 +629,6 @@ func GetNodeExternalIPs(node *v1.Node) (ips []string) {
return
}

// getControlPlaneAddresses returns the externalIP, internalIP and hostname fields of control plane nodes.
// If any of these is unavailable, empty slices are returned.
func getControlPlaneAddresses(ctx context.Context, c clientset.Interface) ([]string, []string, []string) {
var externalIPs, internalIPs, hostnames []string

// Populate the internal IPs.
eps, err := c.CoreV1().Endpoints(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
if err != nil {
Failf("Failed to get kubernetes endpoints: %v", err)
}
for _, subset := range eps.Subsets {
for _, address := range subset.Addresses {
if address.IP != "" {
internalIPs = append(internalIPs, address.IP)
}
}
}

// Populate the external IP/hostname.
hostURL, err := url.Parse(TestContext.Host)
if err != nil {
Failf("Failed to parse hostname: %v", err)
}
if netutils.ParseIPSloppy(hostURL.Host) != nil {
externalIPs = append(externalIPs, hostURL.Host)
} else {
hostnames = append(hostnames, hostURL.Host)
}

return externalIPs, internalIPs, hostnames
}

// GetControlPlaneNodes returns a list of control plane nodes
func GetControlPlaneNodes(ctx context.Context, c clientset.Interface) *v1.NodeList {
allNodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
@ -737,30 +656,6 @@ func GetControlPlaneNodes(ctx context.Context, c clientset.Interface) *v1.NodeLi
return &cpNodes
}

// GetControlPlaneAddresses returns all IP addresses on which the kubelet can reach the control plane.
// It may return internal and external IPs, even if we expect for
// e.g. internal IPs to be used (issue #56787), so that we can be
// sure to block the control plane fully during tests.
func GetControlPlaneAddresses(ctx context.Context, c clientset.Interface) []string {
externalIPs, internalIPs, _ := getControlPlaneAddresses(ctx, c)

ips := sets.NewString()
switch TestContext.Provider {
case "gce", "gke":
for _, ip := range externalIPs {
ips.Insert(ip)
}
for _, ip := range internalIPs {
ips.Insert(ip)
}
case "aws":
ips.Insert(awsMasterIP)
default:
Failf("This test is not supported for provider %s and should be disabled", TestContext.Provider)
}
return ips.List()
}

// PrettyPrintJSON converts metrics to JSON format.
func PrettyPrintJSON(metrics interface{}) string {
output := &bytes.Buffer{}
87
e2e/vendor/k8s.io/kubernetes/test/e2e/framework/volume/fixtures.go
generated
vendored
@ -53,14 +53,12 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
clientexec "k8s.io/client-go/util/exec"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
uexec "k8s.io/utils/exec"

"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
@ -179,18 +177,18 @@ func NewNFSServerWithNodeName(ctx context.Context, cs clientset.Interface, names
// Restart the passed-in nfs-server by issuing a `rpc.nfsd 1` command in the
// pod's (only) container. This command changes the number of nfs server threads from
// (presumably) zero back to 1, and therefore allows nfs to open connections again.
func RestartNFSServer(f *framework.Framework, serverPod *v1.Pod) {
func RestartNFSServer(ctx context.Context, f *framework.Framework, serverPod *v1.Pod) {
const startcmd = "rpc.nfsd 1"
_, _, err := PodExec(f, serverPod, startcmd)
_, _, err := e2epod.ExecShellInPodWithFullOutput(ctx, f, serverPod.Name, startcmd)
framework.ExpectNoError(err)
}

// Stop the passed-in nfs-server by issuing a `rpc.nfsd 0` command in the
// pod's (only) container. This command changes the number of nfs server threads to 0,
// thus closing all open nfs connections.
func StopNFSServer(f *framework.Framework, serverPod *v1.Pod) {
func StopNFSServer(ctx context.Context, f *framework.Framework, serverPod *v1.Pod) {
const stopcmd = "rpc.nfsd 0 && for i in $(seq 200); do rpcinfo -p | grep -q nfs || break; sleep 1; done"
_, _, err := PodExec(f, serverPod, stopcmd)
_, _, err := e2epod.ExecShellInPodWithFullOutput(ctx, f, serverPod.Name, stopcmd)
framework.ExpectNoError(err)
}

@ -501,7 +499,7 @@ func runVolumeTesterPod(ctx context.Context, client clientset.Interface, timeout
return clientPod, nil
}

func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string, fsGroup *int64, fsType string, tests []Test) {
func testVolumeContent(ctx context.Context, f *framework.Framework, pod *v1.Pod, containerName string, fsGroup *int64, fsType string, tests []Test) {
ginkgo.By("Checking that text file contents are perfect.")
for i, test := range tests {
if test.Mode == v1.PersistentVolumeBlock {
@ -512,7 +510,8 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string
framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)

// Check that it's a real block device
CheckVolumeModeOfPath(f, pod, test.Mode, deviceName)
err = CheckVolumeModeOfPath(ctx, f, pod, test.Mode, deviceName)
framework.ExpectNoError(err, "failed: getting the right privileges in the block device %v", deviceName)
} else {
// Filesystem: check content
fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
@ -522,7 +521,8 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string

// Check that a directory has been mounted
dirName := filepath.Dir(fileName)
CheckVolumeModeOfPath(f, pod, test.Mode, dirName)
err = CheckVolumeModeOfPath(ctx, f, pod, test.Mode, dirName)
framework.ExpectNoError(err, "failed: getting the right privileges in the directory %v", dirName)

if !framework.NodeOSDistroIs("windows") {
// Filesystem: check fsgroup
@ -576,7 +576,7 @@ func testVolumeClient(ctx context.Context, f *framework.Framework, config TestCo
framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, clientPod.Name, clientPod.Namespace, timeouts.PodDelete))
}()

testVolumeContent(f, clientPod, "", fsGroup, fsType, tests)
testVolumeContent(ctx, f, clientPod, "", fsGroup, fsType, tests)

ginkgo.By("Repeating the test on an ephemeral container (if enabled)")
ec := &v1.EphemeralContainer{
@ -587,7 +587,7 @@ func testVolumeClient(ctx context.Context, f *framework.Framework, config TestCo
err = e2epod.NewPodClient(f).AddEphemeralContainerSync(ctx, clientPod, ec, timeouts.PodStart)
// The API server will return NotFound for the subresource when the feature is disabled
framework.ExpectNoError(err, "failed to add ephemeral container for re-test")
testVolumeContent(f, clientPod, ec.Name, fsGroup, fsType, tests)
testVolumeContent(ctx, f, clientPod, ec.Name, fsGroup, fsType, tests)
}

// InjectContent inserts index.html with given content into given volume. It does so by
@ -630,7 +630,7 @@ func InjectContent(ctx context.Context, f *framework.Framework, config TestConfi

// Check that the data have been really written in this pod.
// This tests non-persistent volume types
testVolumeContent(f, injectorPod, "", fsGroup, fsType, tests)
testVolumeContent(ctx, f, injectorPod, "", fsGroup, fsType, tests)
}

// generateWriteCmd is used by generateWriteBlockCmd and generateWriteFileCmd
@ -665,64 +665,27 @@ func generateWriteFileCmd(content, fullPath string) []string {
}

// CheckVolumeModeOfPath check mode of volume
func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
func CheckVolumeModeOfPath(ctx context.Context, f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) error {
if volMode == v1.PersistentVolumeBlock {
// Check if block exists
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -b %s", path))
if err := e2epod.VerifyExecInPodSucceed(ctx, f, pod, fmt.Sprintf("test -b %s", path)); err != nil {
return err
}

// Double check that it's not directory
VerifyExecInPodFail(f, pod, fmt.Sprintf("test -d %s", path), 1)
if err := e2epod.VerifyExecInPodFail(ctx, f, pod, fmt.Sprintf("test -d %s", path), 1); err != nil {
return err
}
} else {
// Check if directory exists
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -d %s", path))
if err := e2epod.VerifyExecInPodSucceed(ctx, f, pod, fmt.Sprintf("test -d %s", path)); err != nil {
return err
}

// Double check that it's not block
VerifyExecInPodFail(f, pod, fmt.Sprintf("test -b %s", path), 1)
}
}

// PodExec runs f.ExecCommandInContainerWithFullOutput to execute a shell cmd in target pod
// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
// is resolved. Otherwise there will be dependency issue.
func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string, error) {
return e2epod.ExecCommandInContainerWithFullOutput(f, pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
}

// VerifyExecInPodSucceed verifies shell cmd in target pod succeed
// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
// is resolved. Otherwise there will be dependency issue.
func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, shExec string) {
stdout, stderr, err := PodExec(f, pod, shExec)
if err != nil {
if exiterr, ok := err.(uexec.CodeExitError); ok {
exitCode := exiterr.ExitStatus()
framework.ExpectNoError(err,
"%q should succeed, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, exiterr, stdout, stderr)
} else {
framework.ExpectNoError(err,
"%q should succeed, but failed with error message %q\nstdout: %s\nstderr: %s",
shExec, err, stdout, stderr)
if err := e2epod.VerifyExecInPodFail(ctx, f, pod, fmt.Sprintf("test -b %s", path), 1); err != nil {
return err
}
}
}

// VerifyExecInPodFail verifies shell cmd in target pod fail with certain exit code
// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
// is resolved. Otherwise there will be dependency issue.
func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exitCode int) {
stdout, stderr, err := PodExec(f, pod, shExec)
if err != nil {
if exiterr, ok := err.(clientexec.ExitError); ok {
actualExitCode := exiterr.ExitStatus()
gomega.Expect(actualExitCode).To(gomega.Equal(exitCode),
"%q should fail with exit code %d, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, actualExitCode, exiterr, stdout, stderr)
} else {
framework.ExpectNoError(err,
"%q should fail with exit code %d, but failed with error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, err, stdout, stderr)
}
}
gomega.Expect(err).To(gomega.HaveOccurred(), "%q should fail with exit code %d, but exit without error", shExec, exitCode)
return nil
}
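Because CheckVolumeModeOfPath now returns an error instead of asserting internally, callers are expected to check it themselves. A hypothetical caller (the pod, path, and package alias are assumptions) might look like:

```go
// Assumes e2evolume aliases .../framework/volume; devicePath comes from the surrounding test.
err := e2evolume.CheckVolumeModeOfPath(ctx, f, pod, v1.PersistentVolumeBlock, devicePath)
framework.ExpectNoError(err, "volume mode check failed for %s", devicePath)
```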
2
e2e/vendor/k8s.io/kubernetes/test/e2e/storage/podlogs/podlogs.go
generated
vendored
@ -313,7 +313,7 @@ func WatchPods(ctx context.Context, cs clientset.Interface, ns string, to io.Wri
}
buffer := new(bytes.Buffer)
fmt.Fprintf(buffer,
"%s pod: %s: %s/%s %s: %s %s\n",
"%s pod: %s: %s/%s %s: %s %v\n",
time.Now().Format(timeFormat),
e.Type,
pod.Namespace,
7
e2e/vendor/k8s.io/kubernetes/test/e2e/storage/utils/local.go
generated
vendored
@ -27,6 +27,7 @@ import (
"strings"

"github.com/onsi/ginkgo/v2"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
@ -215,7 +216,7 @@ func (l *ltrMgr) cleanupLocalVolumeDirectory(ctx context.Context, ltr *LocalTest
func (l *ltrMgr) setupLocalVolumeDirectoryLink(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
hostDirBackend := hostDir + "-backend"
cmd := fmt.Sprintf("mkdir %s && ln -s %s %s", hostDirBackend, hostDirBackend, hostDir)
cmd := fmt.Sprintf("mkdir -p %s && ln -s %s %s", hostDirBackend, hostDirBackend, hostDir)
err := l.hostExec.IssueCommand(ctx, cmd, node)
framework.ExpectNoError(err)
return &LocalTestResource{
@ -235,7 +236,7 @@ func (l *ltrMgr) cleanupLocalVolumeDirectoryLink(ctx context.Context, ltr *Local

func (l *ltrMgr) setupLocalVolumeDirectoryBindMounted(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
cmd := fmt.Sprintf("mkdir %s && mount --bind %s %s", hostDir, hostDir, hostDir)
cmd := fmt.Sprintf("mkdir -p %s && mount --bind %s %s", hostDir, hostDir, hostDir)
err := l.hostExec.IssueCommand(ctx, cmd, node)
framework.ExpectNoError(err)
return &LocalTestResource{
@ -255,7 +256,7 @@ func (l *ltrMgr) cleanupLocalVolumeDirectoryBindMounted(ctx context.Context, ltr
func (l *ltrMgr) setupLocalVolumeDirectoryLinkBindMounted(ctx context.Context, node *v1.Node, parameters map[string]string) *LocalTestResource {
hostDir := l.getTestDir()
hostDirBackend := hostDir + "-backend"
cmd := fmt.Sprintf("mkdir %s && mount --bind %s %s && ln -s %s %s", hostDirBackend, hostDirBackend, hostDirBackend, hostDirBackend, hostDir)
cmd := fmt.Sprintf("mkdir -p %s && mount --bind %s %s && ln -s %s %s", hostDirBackend, hostDirBackend, hostDirBackend, hostDirBackend, hostDir)
err := l.hostExec.IssueCommand(ctx, cmd, node)
framework.ExpectNoError(err)
return &LocalTestResource{
89
e2e/vendor/k8s.io/kubernetes/test/e2e/storage/utils/utils.go
generated
vendored
@ -64,9 +64,9 @@ const (
)

// VerifyFSGroupInPod verifies that the passed in filePath contains the expectedFSGroup
func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string, pod *v1.Pod) {
func VerifyFSGroupInPod(ctx context.Context, f *framework.Framework, filePath, expectedFSGroup string, pod *v1.Pod) {
cmd := fmt.Sprintf("ls -l %s", filePath)
stdout, stderr, err := e2evolume.PodExec(f, pod, cmd)
stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, cmd)
framework.ExpectNoError(err)
framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
fsGroupResult := strings.Fields(stdout)[3]
@ -91,7 +91,7 @@ func TestKubeletRestartsAndRestoresMount(ctx context.Context, c clientset.Interf
seed := time.Now().UTC().UnixNano()

ginkgo.By("Writing to the volume.")
CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
CheckWriteToPath(ctx, f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)

ginkgo.By("Restarting kubelet")
KubeletCommand(ctx, KRestart, c, clientPod)
@ -100,7 +100,7 @@ func TestKubeletRestartsAndRestoresMount(ctx context.Context, c clientset.Interf
time.Sleep(20 * time.Second)

ginkgo.By("Testing that written file is accessible.")
CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
CheckReadFromPath(ctx, f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)

framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, volumePath)
}
@ -111,7 +111,7 @@ func TestKubeletRestartsAndRestoresMap(ctx context.Context, c clientset.Interfac
seed := time.Now().UTC().UnixNano()

ginkgo.By("Writing to the volume.")
CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)
CheckWriteToPath(ctx, f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)

ginkgo.By("Restarting kubelet")
KubeletCommand(ctx, KRestart, c, clientPod)
@ -120,7 +120,7 @@ func TestKubeletRestartsAndRestoresMap(ctx context.Context, c clientset.Interfac
time.Sleep(20 * time.Second)

ginkgo.By("Testing that written pv is accessible.")
CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)
CheckReadFromPath(ctx, f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)

framework.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, volumePath)
}
@ -151,7 +151,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(ctx context.Context, c clie
ginkgo.By("Writing to the volume.")
byteLen := 64
seed := time.Now().UTC().UnixNano()
CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
CheckWriteToPath(ctx, f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)

// This command is to make sure kubelet is started after test finishes no matter it fails or not.
ginkgo.DeferCleanup(KubeletCommand, KStart, c, clientPod)
@ -201,7 +201,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(ctx context.Context, c clie
gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))

ginkgo.By("Testing that written file is accessible in the second pod.")
CheckReadFromPath(f, secondPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
CheckReadFromPath(ctx, f, secondPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
err = c.CoreV1().Pods(secondPod.Namespace).Delete(context.TODO(), secondPod.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "when deleting the second pod")
err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, secondPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
@ -446,28 +446,37 @@ func isSudoPresent(ctx context.Context, nodeIP string, provider string) bool {
}

// CheckReadWriteToPath check that path can be read and written
func CheckReadWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
func CheckReadWriteToPath(ctx context.Context, f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
if volMode == v1.PersistentVolumeBlock {
// random -> file1
e2evolume.VerifyExecInPodSucceed(f, pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
err := e2epod.VerifyExecInPodSucceed(ctx, f, pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
framework.ExpectNoError(err, "Failed to write to file1")
// file1 -> dev (write to dev)
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
err = e2epod.VerifyExecInPodSucceed(ctx, f, pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
framework.ExpectNoError(err, "Failed to write to block volume")
// dev -> file2 (read from dev)
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
err = e2epod.VerifyExecInPodSucceed(ctx, f, pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
framework.ExpectNoError(err, "Failed to read from block volume")
// file1 == file2 (check contents)
e2evolume.VerifyExecInPodSucceed(f, pod, "diff /tmp/file1 /tmp/file2")
err = e2epod.VerifyExecInPodSucceed(ctx, f, pod, "diff /tmp/file1 /tmp/file2")
framework.ExpectNoError(err, "Failed to compare file1 and file2")
// Clean up temp files
e2evolume.VerifyExecInPodSucceed(f, pod, "rm -f /tmp/file1 /tmp/file2")
err = e2epod.VerifyExecInPodSucceed(ctx, f, pod, "rm -f /tmp/file1 /tmp/file2")
framework.ExpectNoError(err, "Failed to clean up temp files")

// Check that writing file to block volume fails
e2evolume.VerifyExecInPodFail(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
err = e2epod.VerifyExecInPodFail(ctx, f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
framework.ExpectNoError(err, "Expected write to block volume to fail")
} else {
// text -> file1 (write to file)
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
err := e2epod.VerifyExecInPodSucceed(ctx, f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
framework.ExpectNoError(err, "Failed to write to file1")
// grep file1 (read from file and check contents)
e2evolume.VerifyExecInPodSucceed(f, pod, readFile("Hello word.", path))
err = e2epod.VerifyExecInPodSucceed(ctx, f, pod, readFile("Hello word.", path))
framework.ExpectNoError(err, "Failed to read from file1")
// Check that writing to directory as block volume fails
e2evolume.VerifyExecInPodFail(f, pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
err = e2epod.VerifyExecInPodFail(ctx, f, pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
framework.ExpectNoError(err, "Expected write to directory to fail")
}
}

@ -481,9 +490,9 @@ func readFile(content, path string) string {
// genBinDataFromSeed generate binData with random seed
func genBinDataFromSeed(len int, seed int64) []byte {
binData := make([]byte, len)
rand.Seed(seed)
r := rand.New(rand.NewSource(seed))

_, err := rand.Read(binData)
_, err := r.Read(binData)
if err != nil {
fmt.Printf("Error: %v\n", err)
}
@ -497,7 +506,7 @@ func genBinDataFromSeed(len int, seed int64) []byte {
// directIO to function correctly, is to read whole sector(s) for Block-mode
// PVCs (normally a sector is 512 bytes), or memory pages for files (commonly
// 4096 bytes).
func CheckReadFromPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, directIO bool, path string, len int, seed int64) {
func CheckReadFromPath(ctx context.Context, f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, directIO bool, path string, len int, seed int64) {
var pathForVolMode string
var iflag string

@ -513,8 +522,10 @@ func CheckReadFromPath(f *framework.Framework, pod *v1.Pod, volMode v1.Persisten

sum := sha256.Sum256(genBinDataFromSeed(len, seed))

e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum", pathForVolMode, iflag, len))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, iflag, len, sum))
err := e2epod.VerifyExecInPodSucceed(ctx, f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum", pathForVolMode, iflag, len))
framework.ExpectNoError(err, "Failed to read from %s", pathForVolMode)
err = e2epod.VerifyExecInPodSucceed(ctx, f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, iflag, len, sum))
framework.ExpectNoError(err, "Failed to read from %s", pathForVolMode)
}

// CheckWriteToPath that file can be properly written.
@ -522,7 +533,7 @@ func CheckReadFromPath(f *framework.Framework, pod *v1.Pod, volMode v1.Persisten
// Note: nocache does not work with (default) BusyBox Pods. To read without
// caching, enable directIO with CheckReadFromPath and check the hints about
// the len requirements.
func CheckWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, nocache bool, path string, len int, seed int64) {
func CheckWriteToPath(ctx context.Context, f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, nocache bool, path string, len int, seed int64) {
var pathForVolMode string
var oflag string

@ -538,13 +549,15 @@ func CheckWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.Persistent

encoded := base64.StdEncoding.EncodeToString(genBinDataFromSeed(len, seed))

e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s %s bs=%d count=1", encoded, pathForVolMode, oflag, len))
err := e2epod.VerifyExecInPodSucceed(ctx, f, pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
framework.ExpectNoError(err, "Failed to generate sha256sum of encoded data")
err = e2epod.VerifyExecInPodSucceed(ctx, f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s %s bs=%d count=1", encoded, pathForVolMode, oflag, len))
framework.ExpectNoError(err, "Failed to write to %s", pathForVolMode)
}

// GetSectorSize returns the sector size of the device.
func GetSectorSize(f *framework.Framework, pod *v1.Pod, device string) int {
stdout, _, err := e2evolume.PodExec(f, pod, fmt.Sprintf("blockdev --getss %s", device))
func GetSectorSize(ctx context.Context, f *framework.Framework, pod *v1.Pod, device string) int {
stdout, _, err := e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, fmt.Sprintf("blockdev --getss %s", device))
framework.ExpectNoError(err, "Failed to get sector size of %s", device)
ss, err := strconv.Atoi(stdout)
framework.ExpectNoError(err, "Sector size returned by blockdev command isn't integer value.")
@ -722,23 +735,23 @@ func WaitForGVRFinalizer(ctx context.Context, c dynamic.Interface, gvr schema.Gr
return err
}

// VerifyFilePathGidInPod verfies expected GID of the target filepath
func VerifyFilePathGidInPod(f *framework.Framework, filePath, expectedGid string, pod *v1.Pod) {
// VerifyFilePathGIDInPod verfies expected GID of the target filepath
func VerifyFilePathGIDInPod(ctx context.Context, f *framework.Framework, filePath, expectedGID string, pod *v1.Pod) {
cmd := fmt.Sprintf("ls -l %s", filePath)
stdout, stderr, err := e2evolume.PodExec(f, pod, cmd)
stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, cmd)
framework.ExpectNoError(err)
framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
ll := strings.Fields(stdout)
framework.Logf("stdout split: %v, expected gid: %v", ll, expectedGid)
gomega.Expect(ll[3]).To(gomega.Equal(expectedGid))
framework.Logf("stdout split: %v, expected gid: %v", ll, expectedGID)
gomega.Expect(ll[3]).To(gomega.Equal(expectedGID))
}

// ChangeFilePathGidInPod changes the GID of the target filepath.
func ChangeFilePathGidInPod(f *framework.Framework, filePath, targetGid string, pod *v1.Pod) {
cmd := fmt.Sprintf("chgrp %s %s", targetGid, filePath)
_, _, err := e2evolume.PodExec(f, pod, cmd)
// ChangeFilePathGIDInPod changes the GID of the target filepath.
func ChangeFilePathGIDInPod(ctx context.Context, f *framework.Framework, filePath, targetGID string, pod *v1.Pod) {
cmd := fmt.Sprintf("chgrp %s %s", targetGID, filePath)
_, _, err := e2epod.ExecShellInPodWithFullOutput(ctx, f, pod.Name, cmd)
framework.ExpectNoError(err)
VerifyFilePathGidInPod(f, filePath, targetGid, pod)
VerifyFilePathGIDInPod(ctx, f, filePath, targetGID, pod)
}

// DeleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found"
7
e2e/vendor/k8s.io/kubernetes/test/e2e/storage/utils/volume_group_snapshot.go
generated
vendored
@ -33,15 +33,16 @@ const (
// VolumeGroupSnapshot is the group snapshot api
VolumeGroupSnapshotAPIGroup = "groupsnapshot.storage.k8s.io"
// VolumeGroupSnapshotAPIVersion is the group snapshot api version
VolumeGroupSnapshotAPIVersion = "groupsnapshot.storage.k8s.io/v1alpha1"
VolumeGroupSnapshotAPIVersion = "groupsnapshot.storage.k8s.io/v1beta1"
)

var (

// VolumeGroupSnapshotGVR is GroupVersionResource for volumegroupsnapshots
VolumeGroupSnapshotGVR = schema.GroupVersionResource{Group: VolumeGroupSnapshotAPIGroup, Version: "v1alpha1", Resource: "volumegroupsnapshots"}
VolumeGroupSnapshotGVR = schema.GroupVersionResource{Group: VolumeGroupSnapshotAPIGroup, Version: "v1beta1", Resource: "volumegroupsnapshots"}
// VolumeGroupSnapshotClassGVR is GroupVersionResource for volumegroupsnapshotsclasses
VolumeGroupSnapshotClassGVR = schema.GroupVersionResource{Group: VolumeGroupSnapshotAPIGroup, Version: "v1alpha1", Resource: "volumegroupsnapshotclasses"}
VolumeGroupSnapshotClassGVR = schema.GroupVersionResource{Group: VolumeGroupSnapshotAPIGroup, Version: "v1beta1", Resource: "volumegroupsnapshotclasses"}
VolumeGroupSnapshotContentGVR = schema.GroupVersionResource{Group: VolumeGroupSnapshotAPIGroup, Version: "v1beta1", Resource: "volumegroupsnapshotcontents"}
)

// WaitForVolumeGroupSnapshotReady waits for a VolumeGroupSnapshot to be ready to use or until timeout occurs, whichever comes first.
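Since the group-snapshot GVRs moved to v1beta1, a dynamic-client lookup against the new version might look like the sketch below; the client variable, namespace, and object name are illustrative, not taken from the diff:

```go
// utils.VolumeGroupSnapshotGVR now points at groupsnapshot.storage.k8s.io/v1beta1.
vgs, err := dynamicClient.Resource(utils.VolumeGroupSnapshotGVR).
	Namespace(ns).Get(ctx, "my-group-snapshot", metav1.GetOptions{})
framework.ExpectNoError(err)
framework.Logf("group snapshot status: %v", vgs.Object["status"])
```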
41
e2e/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/dra/dra-test-driver-proxy.yaml
generated
vendored
@ -46,40 +46,35 @@ spec:
topologyKey: kubernetes.io/hostname

containers:
- name: registrar
- name: pause
image: registry.k8s.io/sig-storage/hostpathplugin:v1.7.3
args:
- "--v=5"
- "--endpoint=/plugins_registry/dra-test-driver-reg.sock"
- "--proxy-endpoint=tcp://:9000"
command:
- /bin/sh
- -c
- while true; do sleep 10000; done
volumeMounts:
- mountPath: /plugins_registry
name: registration-dir

- name: plugin
image: registry.k8s.io/sig-storage/hostpathplugin:v1.7.3
args:
- "--v=5"
- "--endpoint=/dra/dra-test-driver.sock"
- "--proxy-endpoint=tcp://:9001"
securityContext:
privileged: true
volumeMounts:
- mountPath: /dra
- mountPath: /var/lib/kubelet/plugins/test-driver.dra.k8s.io
name: socket-dir
- mountPath: /var/lib/kubelet/plugins_registry
name: registration-dir
- mountPath: /cdi
name: cdi-dir
securityContext:
privileged: true

volumes:
- hostPath:
path: /var/lib/kubelet/plugins
# There's no way to remove this. Conceptually, it may have to
# survive Pod restarts and there's no way to tell the kubelet
# that this isn't the case for this Pod.
path: /var/lib/kubelet/plugins/test-driver.dra.k8s.io
type: DirectoryOrCreate
name: socket-dir
- hostPath:
path: /var/run/cdi
type: DirectoryOrCreate
name: cdi-dir
- hostPath:
path: /var/lib/kubelet/plugins_registry
type: DirectoryOrCreate
name: registration-dir
- hostPath:
path: /var/run/cdi
type: DirectoryOrCreate
name: cdi-dir
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/814"
api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1150"
controller-gen.kubebuilder.io/version: v0.15.0
name: volumegroupsnapshotclasses.groupsnapshot.storage.k8s.io
spec:
@ -31,7 +31,7 @@ spec:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1alpha1
name: v1beta1
schema:
openAPIV3Schema:
description: |-

@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1068"
api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1150"
controller-gen.kubebuilder.io/version: v0.15.0
name: volumegroupsnapshotcontents.groupsnapshot.storage.k8s.io
spec:
@ -53,7 +53,7 @@ spec:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1alpha1
name: v1beta1
schema:
openAPIV3Schema:
description: |-
@ -237,8 +237,6 @@ spec:
- message: both volumeGroupSnapshotRef.name and volumeGroupSnapshotRef.namespace
must be set
rule: has(self.name) && has(self.__namespace__)
- message: volumeGroupSnapshotRef is immutable
rule: self == oldSelf
required:
- deletionPolicy
- driver
@ -257,8 +255,9 @@ spec:
The format of this field is a Unix nanoseconds time encoded as an int64.
On Unix, the command date +%s%N returns the current time in nanoseconds
since 1970-01-01 00:00:00 UTC.
format: int64
type: integer
This field is the source for the CreationTime field in VolumeGroupSnapshotStatus
format: date-time
type: string
error:
description: |-
Error is the last observed error during group snapshot creation, if any.
@ -276,42 +275,6 @@ spec:
format: date-time
type: string
type: object
pvVolumeSnapshotContentList:
description: |-
PVVolumeSnapshotContentList is the list of pairs of PV and
VolumeSnapshotContent for this group snapshot
The maximum number of allowed snapshots in the group is 100.
items:
description: |-
PVVolumeSnapshotContentPair represent a pair of PV names and
VolumeSnapshotContent names
properties:
persistentVolumeRef:
description: PersistentVolumeRef is a reference to the persistent
volume resource
properties:
name:
description: |-
Name of the referent.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?
type: string
type: object
x-kubernetes-map-type: atomic
volumeSnapshotContentRef:
description: VolumeSnapshotContentRef is a reference to the
volume snapshot content resource
properties:
name:
description: |-
Name of the referent.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?
type: string
type: object
x-kubernetes-map-type: atomic
type: object
type: array
readyToUse:
description: |-
ReadyToUse indicates if all the individual snapshots in the group are ready to be
@ -325,6 +288,32 @@ spec:
If a storage system does not provide such an id, the
CSI driver can choose to return the VolumeGroupSnapshot name.
type: string
volumeSnapshotHandlePairList:
description: |-
VolumeSnapshotHandlePairList is a list of CSI "volume_id" and "snapshot_id"
pair returned by the CSI driver to identify snapshots and their source volumes
on the storage system.
items:
description: VolumeSnapshotHandlePair defines a pair of a source
volume handle and a snapshot handle
properties:
snapshotHandle:
description: |-
SnapshotHandle is a unique id returned by the CSI driver to identify a volume
snapshot on the storage system
Required.
type: string
volumeHandle:
description: |-
VolumeHandle is a unique id returned by the CSI driver to identify a volume
on the storage system
Required.
type: string
required:
- snapshotHandle
- volumeHandle
type: object
type: array
type: object
required:
- spec
@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1068"
api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1150"
controller-gen.kubebuilder.io/version: v0.15.0
name: volumegroupsnapshots.groupsnapshot.storage.k8s.io
spec:
@ -43,7 +43,7 @@ spec:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1alpha1
name: v1beta1
schema:
openAPIV3Schema:
description: |-
@ -198,6 +198,7 @@ spec:
The format of this field is a Unix nanoseconds time encoded as an int64.
On Unix, the command date +%s%N returns the current time in nanoseconds
since 1970-01-01 00:00:00 UTC.
This field is updated based on the CreationTime field in VolumeGroupSnapshotContentStatus
format: date-time
type: string
error:
@ -221,41 +222,6 @@ spec:
format: date-time
type: string
type: object
pvcVolumeSnapshotRefList:
description: |-
VolumeSnapshotRefList is the list of PVC and VolumeSnapshot pairs that
is part of this group snapshot.
The maximum number of allowed snapshots in the group is 100.
items:
description: PVCVolumeSnapshotPair defines a pair of a PVC reference
and a Volume Snapshot Reference
properties:
persistentVolumeClaimRef:
description: PersistentVolumeClaimRef is a reference to the
PVC this pair is referring to
properties:
name:
description: |-
Name of the referent.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?
type: string
type: object
x-kubernetes-map-type: atomic
volumeSnapshotRef:
description: VolumeSnapshotRef is a reference to the VolumeSnapshot
this pair is referring to
properties:
name:
description: |-
Name of the referent.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?
type: string
type: object
x-kubernetes-map-type: atomic
type: object
type: array
readyToUse:
description: |-
ReadyToUse indicates if all the individual snapshots in the group are ready
@ -219,7 +219,7 @@ spec:
serviceAccountName: csi-hostpathplugin-sa
containers:
- name: hostpath
image: registry.k8s.io/sig-storage/hostpathplugin:v1.14.0
image: registry.k8s.io/sig-storage/hostpathplugin:v1.16.1
args:
- "--drivername=hostpath.csi.k8s.io"
- "--v=5"
@ -276,7 +276,7 @@ spec:
mountPath: /csi

- name: node-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.13.0
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -304,13 +304,13 @@ spec:
volumeMounts:
- mountPath: /csi
name: socket-dir
image: registry.k8s.io/sig-storage/livenessprobe:v2.12.0
image: registry.k8s.io/sig-storage/livenessprobe:v2.15.0
args:
- --csi-address=/csi/csi.sock
- --health-port=9898

- name: csi-attacher
image: registry.k8s.io/sig-storage/csi-attacher:v4.6.1
image: registry.k8s.io/sig-storage/csi-attacher:v4.8.0
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -324,7 +324,7 @@ spec:
name: socket-dir

- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v5.0.1
image: registry.k8s.io/sig-storage/csi-provisioner:v5.1.0
args:
- -v=5
- --csi-address=/csi/csi.sock
@ -340,7 +340,7 @@ spec:
name: socket-dir

- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.11.1
image: registry.k8s.io/sig-storage/csi-resizer:v1.13.1
args:
- -v=5
- -csi-address=/csi/csi.sock
@ -354,11 +354,11 @@ spec:
name: socket-dir

- name: csi-snapshotter
image: registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1
image: registry.k8s.io/sig-storage/csi-snapshotter:v8.2.0
args:
- -v=5
- --csi-address=/csi/csi.sock
- --enable-volume-group-snapshots=true
- --feature-gates=CSIVolumeGroupSnapshot=true
securityContext:
# This is necessary only for systems with SELinux, where
# non-privileged sidecar containers cannot access unix domain socket
@ -278,10 +278,10 @@ run_tests() {
kubectl apply -f test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml || exit 1
kubectl apply -f test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml || exit 1

kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/v8.0.0/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml || exit 1
curl -s https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/release-8.1/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml | \
awk '/--leader-election=true/ {print; print " - \"--enable-volume-group-snapshots=true\""; next}1' | \
kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/refs/tags/v8.2.0/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml || exit 1
curl -s https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/refs/tags/v8.2.0/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml | \
awk '/--leader-election=true/ {print; print " - \"--feature-gates=CSIVolumeGroupSnapshot=true\""; next}1' | \
sed 's|image: registry.k8s.io/sig-storage/snapshot-controller:v8.0.1|image: registry.k8s.io/sig-storage/snapshot-controller:v8.2.0|' | \
kubectl apply -f - || exit 1

@ -13,7 +13,7 @@ spec:
spec:
containers:
- name: csi-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.13.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
@ -48,7 +48,7 @@ spec:
- name: gce-pd-driver
securityContext:
privileged: true
image: registry.k8s.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.2.2
image: registry.k8s.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.4.0
args:
- "--v=5"
- "--endpoint=unix:/csi/csi.sock"
@ -219,7 +219,7 @@ spec:
serviceAccountName: csi-hostpathplugin-sa
containers:
- name: hostpath
image: registry.k8s.io/sig-storage/hostpathplugin:v1.15.0
image: registry.k8s.io/sig-storage/hostpathplugin:v1.16.1
args:
- "--drivername=hostpath.csi.k8s.io"
- "--v=5"
@ -354,7 +354,7 @@ spec:
name: socket-dir

- name: csi-snapshotter
image: registry.k8s.io/sig-storage/csi-snapshotter:v8.1.0
image: registry.k8s.io/sig-storage/csi-snapshotter:v8.2.0
args:
- -v=5
- --csi-address=/csi/csi.sock
@ -66,7 +66,7 @@ spec:
topologyKey: kubernetes.io/hostname
containers:
- name: socat
image: registry.k8s.io/sig-storage/hostpathplugin:v1.15.0
image: registry.k8s.io/sig-storage/hostpathplugin:v1.16.1
command:
- socat
args:
@ -53,7 +53,7 @@ spec:
- mountPath: /registration
name: registration-dir
- name: mock
image: registry.k8s.io/sig-storage/hostpathplugin:v1.15.0
image: registry.k8s.io/sig-storage/hostpathplugin:v1.16.1
args:
- "--drivername=mock.storage.k8s.io"
- "--nodeid=$(KUBE_NODE_NAME)"
@ -53,7 +53,7 @@ spec:
- mountPath: /registration
name: registration-dir
- name: mock
image: registry.k8s.io/sig-storage/hostpathplugin:v1.15.0
image: registry.k8s.io/sig-storage/hostpathplugin:v1.16.1
args:
- -v=5
- -nodeid=$(KUBE_NODE_NAME)
@ -77,7 +77,7 @@ spec:
# test for directories or create them. It needs additional privileges
# for that.
- name: busybox
image: registry.k8s.io/e2e-test-images/busybox:1.29-2
image: registry.k8s.io/e2e-test-images/busybox:1.36.1-1
securityContext:
privileged: true
command:
4
e2e/vendor/k8s.io/kubernetes/test/utils/image/manifest.go
generated
vendored
@ -223,8 +223,8 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config
configs[APIServer] = Config{list.PromoterE2eRegistry, "sample-apiserver", "1.29.2"}
configs[AppArmorLoader] = Config{list.PromoterE2eRegistry, "apparmor-loader", "1.4"}
configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.36.1-1"}
configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.6.8"}
configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.16-0"}
configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.7.4"}
configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.21-0"}
configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"}
configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"}
configs[InvalidRegistryImage] = Config{list.InvalidRegistry, "alpine", "3.1"}
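For context on how entries in this table are consumed, a minimal Go sketch (an assumption about a typical caller, not part of this change) that resolves the BusyBox entry into a full reference such as registry.k8s.io/e2e-test-images/busybox:1.36.1-1 using the package's GetE2EImage helper:

    package example

    import (
        imageutils "k8s.io/kubernetes/test/utils/image"
    )

    // busyboxImage returns the fully qualified BusyBox image reference built from
    // the registry, name and version registered in initImageConfigs.
    func busyboxImage() string {
        return imageutils.GetE2EImage(imageutils.BusyBox)
    }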