Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 10:53:34 +00:00
rebase: update K8s packages to v0.32.1

Update K8s packages in go.mod to v0.32.1

Signed-off-by: Praveen M <m.praveen@ibm.com>
1 vendor/k8s.io/kubernetes/test/e2e/framework/.import-restrictions generated vendored
@@ -35,7 +35,6 @@ rules:
  - selectorRegexp: ^github.com/|^gopkg.in
    allowedPrefixes: [
      "gopkg.in/inf.v0",
      "gopkg.in/yaml.v2",
      "gopkg.in/evanphx/json-patch.v4",
      "github.com/blang/semver/",
      "github.com/davecgh/go-spew/spew",
3 vendor/k8s.io/kubernetes/test/e2e/framework/debug/resource_usage_gatherer.go generated vendored
@@ -21,6 +21,7 @@ import (
    "bytes"
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "math"
    "regexp"
@@ -595,7 +596,7 @@ func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constrai
        }
    }
    if len(violatedConstraints) > 0 {
        return &summary, fmt.Errorf(strings.Join(violatedConstraints, "\n"))
        return &summary, errors.New(strings.Join(violatedConstraints, "\n"))
    }
    return &summary, nil
}
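For context on the change above, here is a minimal standalone sketch (not part of the diff): joining arbitrary constraint messages into fmt.Errorf treats any '%' in them as formatting verbs, which is exactly what the switch to errors.New avoids.

package main

import (
    "errors"
    "fmt"
    "strings"
)

func main() {
    violated := []string{
        "container foo used 120% of its CPU limit",
        "container bar used 95% of its memory limit",
    }
    msg := strings.Join(violated, "\n")

    // Old pattern: msg is used as a format string, so "% o", "% C" etc. are
    // parsed as verbs and the output gets mangled (go vet also flags this).
    fmt.Println(fmt.Errorf(msg))

    // New pattern: errors.New treats the joined message as plain text.
    fmt.Println(errors.New(msg))
}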
10 vendor/k8s.io/kubernetes/test/e2e/framework/expect.go generated vendored
@@ -294,12 +294,22 @@ func (f *FailureError) backtrace() {
var ErrFailure error = FailureError{}

// ExpectNoError checks if "err" is set, and if so, fails assertion while logging the error.
//
// As in [gomega.Expect], the explain parameters can be used to provide
// additional information in case of a failure in one of these two ways:
// - A single string is used as first line of the failure message directly.
// - A string with additional parameters is passed through [fmt.Sprintf].
func ExpectNoError(err error, explain ...interface{}) {
    ExpectNoErrorWithOffset(1, err, explain...)
}

// ExpectNoErrorWithOffset checks if "err" is set, and if so, fails assertion while logging the error at "offset" levels above its caller
// (for example, for call chain f -> g -> ExpectNoErrorWithOffset(1, ...) error would be logged for "f").
//
// As in [gomega.Expect], the explain parameters can be used to provide
// additional information in case of a failure in one of these two ways:
// - A single string is used as first line of the failure message directly.
// - A string with additional parameters is passed through [fmt.Sprintf].
func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
    if err == nil {
        return
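A brief usage sketch of the two documented forms of the explain parameters (not from the diff; the package name and the createTestVolume helper are made up for illustration):

package e2esketch

import "k8s.io/kubernetes/test/e2e/framework"

// createTestVolume stands in for whatever call is being checked.
func createTestVolume(name string) error { return nil }

func checkCreate(name string) {
    err := createTestVolume(name)

    // Form 1: a single string becomes the first line of the failure message.
    framework.ExpectNoError(err, "failed to create test volume")

    // Form 2: a format string plus arguments, passed through fmt.Sprintf.
    framework.ExpectNoError(err, "failed to create test volume %q", name)
}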
2 vendor/k8s.io/kubernetes/test/e2e/framework/flake_reporting_util.go generated vendored
@@ -57,7 +57,7 @@ func (f *FlakeReport) RecordFlakeIfError(err error, optionalDescription ...inter
    if desc != "" {
        msg = fmt.Sprintf("%v (Description: %v)", msg, desc)
    }
    Logf(msg)
    Logf("%s", msg)
    f.lock.Lock()
    defer f.lock.Unlock()
    f.Flakes = append(f.Flakes, msg)
4 vendor/k8s.io/kubernetes/test/e2e/framework/framework.go generated vendored
@@ -311,7 +311,7 @@ func printSummaries(summaries []TestDataSummary, testBaseName string) {
    switch TestContext.OutputPrintType {
    case "hr":
        if TestContext.ReportDir == "" {
            Logf(summaries[i].PrintHumanReadable())
            Logf("%s", summaries[i].PrintHumanReadable())
        } else {
            // TODO: learn to extract test name and append it to the kind instead of timestamp.
            filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".txt")
@@ -393,7 +393,7 @@ func (f *Framework) AfterEach(ctx context.Context) {
        for namespaceKey, namespaceErr := range nsDeletionErrors {
            messages = append(messages, fmt.Sprintf("Couldn't delete ns: %q: %s (%#v)", namespaceKey, namespaceErr, namespaceErr))
        }
        Failf(strings.Join(messages, ","))
        Fail(strings.Join(messages, ","))
    }
}()

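The two hunks above follow the same rule as the other files in this commit: a message assembled at runtime must not double as a format string. A small caller-side sketch (not from the diff; the package and function names are illustrative, and framework.Fail/Logf are the framework's exported equivalents of the in-package calls shown above):

package e2esketch

import (
    "strings"

    "k8s.io/kubernetes/test/e2e/framework"
)

func reportNamespaceErrors(messages []string) {
    msg := strings.Join(messages, ",")

    // Logf(msg) and Failf(msg) would parse any '%' inside msg as a verb;
    // Logf("%s", msg) and Fail(msg) treat the pre-built string as data.
    framework.Logf("%s", msg)
    framework.Fail(msg)
}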
55 vendor/k8s.io/kubernetes/test/e2e/framework/node/helper.go generated vendored
@ -18,19 +18,22 @@ package node
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
"github.com/onsi/gomega"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
"k8s.io/client-go/util/retry"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -166,3 +169,51 @@ func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsARM64 checks whether the k8s Node has arm64 arch.
|
||||
func IsARM64(node *v1.Node) bool {
|
||||
arch, ok := node.Labels["kubernetes.io/arch"]
|
||||
if ok {
|
||||
return arch == "arm64"
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// AddExtendedResource adds a fake resource to the Node.
|
||||
func AddExtendedResource(ctx context.Context, clientSet clientset.Interface, nodeName string, extendedResourceName v1.ResourceName, extendedResourceQuantity resource.Quantity) {
|
||||
extendedResource := v1.ResourceName(extendedResourceName)
|
||||
|
||||
ginkgo.By("Adding a custom resource")
|
||||
extendedResourceList := v1.ResourceList{
|
||||
extendedResource: extendedResourceQuantity,
|
||||
}
|
||||
patchPayload, err := json.Marshal(v1.Node{
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: extendedResourceList,
|
||||
Allocatable: extendedResourceList,
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "Failed to marshal node JSON")
|
||||
|
||||
_, err = clientSet.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, []byte(patchPayload), metav1.PatchOptions{}, "status")
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
// RemoveExtendedResource removes a fake resource from the Node.
|
||||
func RemoveExtendedResource(ctx context.Context, clientSet clientset.Interface, nodeName string, extendedResourceName v1.ResourceName) {
|
||||
extendedResource := v1.ResourceName(extendedResourceName)
|
||||
|
||||
ginkgo.By("Removing a custom resource")
|
||||
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
node, err := clientSet.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get node %s: %w", nodeName, err)
|
||||
}
|
||||
delete(node.Status.Capacity, extendedResource)
|
||||
delete(node.Status.Allocatable, extendedResource)
|
||||
_, err = clientSet.CoreV1().Nodes().UpdateStatus(ctx, node, metav1.UpdateOptions{})
|
||||
return err
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
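A short sketch of how the new extended-resource helpers are meant to be paired (not from the diff; the resource name example.com/dongle and the surrounding wrapper are made up):

package e2esketch

import (
    "context"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    clientset "k8s.io/client-go/kubernetes"
    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

const fakeResource v1.ResourceName = "example.com/dongle"

// withFakeNodeResource advertises a fake resource in the node status for the
// duration of a test and removes it again afterwards.
func withFakeNodeResource(ctx context.Context, c clientset.Interface, nodeName string, test func()) {
    e2enode.AddExtendedResource(ctx, c, nodeName, fakeResource, resource.MustParse("5"))
    defer e2enode.RemoveExtendedResource(ctx, c, nodeName, fakeResource)

    test()
}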
14 vendor/k8s.io/kubernetes/test/e2e/framework/node/resource.go generated vendored
@@ -128,7 +128,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
            conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
    }
    if !silent {
        framework.Logf(msg)
        framework.Logf("%s", msg)
    }
    return false
}
@@ -495,6 +495,16 @@ func hasNonblockingTaint(node *v1.Node, nonblockingTaints string) bool {
    return false
}

// GetNodeHeartbeatTime returns the timestamp of the last status update of the node.
func GetNodeHeartbeatTime(node *v1.Node) metav1.Time {
    for _, condition := range node.Status.Conditions {
        if condition.Type == v1.NodeReady {
            return condition.LastHeartbeatTime
        }
    }
    return metav1.Time{}
}

// PodNodePairs return podNode pairs for all pods in a namespace
func PodNodePairs(ctx context.Context, c clientset.Interface, ns string) ([]PodNode, error) {
    var result []PodNode
@@ -822,6 +832,6 @@ func verifyThatTaintIsGone(ctx context.Context, c clientset.Interface, nodeName
    // TODO use wrapper methods in expect.go after removing core e2e dependency on node
    gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
    if taintExists(nodeUpdated.Spec.Taints, taint) {
        framework.Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
        framework.Fail("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
    }
}
18 vendor/k8s.io/kubernetes/test/e2e/framework/node/wait.go generated vendored
@@ -22,6 +22,7 @@ import (
    "regexp"
    "time"

    "github.com/onsi/gomega"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
@@ -160,6 +161,23 @@ func WaitForNodeSchedulable(ctx context.Context, c clientset.Interface, name str
    return false
}

// WaitForNodeHeartbeatAfter waits up to timeout for node to send the next
// heartbeat after the given timestamp.
//
// To ensure the node status is posted by a restarted kubelet process,
// after should be retrieved by [GetNodeHeartbeatTime] while the kubelet is down.
func WaitForNodeHeartbeatAfter(ctx context.Context, c clientset.Interface, name string, after metav1.Time, timeout time.Duration) {
    framework.Logf("Waiting up to %v for node %s to send a heartbeat after %v", timeout, name, after)
    gomega.Eventually(ctx, func() (time.Time, error) {
        node, err := c.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            framework.Logf("Couldn't get node %s", name)
            return time.Time{}, err
        }
        return GetNodeHeartbeatTime(node).Time, nil
    }, timeout, poll).Should(gomega.BeTemporally(">", after.Time), "Node %s didn't send a heartbeat", name)
}

// CheckReady waits up to timeout for cluster to has desired size and
// there is no not-ready nodes in it. By cluster size we mean number of schedulable Nodes.
func CheckReady(ctx context.Context, c clientset.Interface, size int, timeout time.Duration) ([]v1.Node, error) {
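The heartbeat helpers added in resource.go and wait.go are designed to be used together, roughly as in this sketch (not from the diff; restartKubelet is a hypothetical stand-in for the node test's restart logic, and the five-minute timeout is arbitrary):

package e2esketch

import (
    "context"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/test/e2e/framework"
    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

func waitForKubeletHeartbeat(ctx context.Context, c clientset.Interface, nodeName string, restartKubelet func()) {
    node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
    framework.ExpectNoError(err, "failed to get node %s", nodeName)

    // Capture the most recent heartbeat before the kubelet comes back ...
    before := e2enode.GetNodeHeartbeatTime(node)

    restartKubelet()

    // ... and require a strictly newer heartbeat once it is running again.
    e2enode.WaitForNodeHeartbeatAfter(ctx, c, nodeName, before, 5*time.Minute)
}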
7 vendor/k8s.io/kubernetes/test/e2e/framework/pod/exec_util.go generated vendored
@@ -58,8 +58,6 @@ func ExecWithOptionsContext(ctx context.Context, f *framework.Framework, options
    if !options.Quiet {
        framework.Logf("ExecWithOptions %+v", options)
    }
    config, err := framework.LoadConfig()
    framework.ExpectNoError(err, "failed to load restclient config")

    const tty = false

@@ -68,8 +66,7 @@ func ExecWithOptionsContext(ctx context.Context, f *framework.Framework, options
        Resource("pods").
        Name(options.PodName).
        Namespace(options.Namespace).
        SubResource("exec").
        Param("container", options.ContainerName)
        SubResource("exec")
    req.VersionedParams(&v1.PodExecOptions{
        Container: options.ContainerName,
        Command:   options.Command,
@@ -81,7 +78,7 @@ func ExecWithOptionsContext(ctx context.Context, f *framework.Framework, options

    var stdout, stderr bytes.Buffer
    framework.Logf("ExecWithOptions: execute(POST %s)", req.URL())
    err = execute(ctx, "POST", req.URL(), config, options.Stdin, &stdout, &stderr, tty)
    err := execute(ctx, "POST", req.URL(), f.ClientConfig(), options.Stdin, &stdout, &stderr, tty)

    if options.PreserveWhitespace {
        return stdout.String(), stderr.String(), err
70 vendor/k8s.io/kubernetes/test/e2e/framework/pod/output/output.go generated vendored
@ -157,6 +157,16 @@ func MatchContainerOutput(
|
||||
containerName string,
|
||||
expectedOutput []string,
|
||||
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) error {
|
||||
|
||||
return MatchMultipleContainerOutputs(ctx, f, pod, map[string][]string{containerName: expectedOutput}, matcher)
|
||||
}
|
||||
|
||||
func MatchMultipleContainerOutputs(
|
||||
ctx context.Context,
|
||||
f *framework.Framework,
|
||||
pod *v1.Pod,
|
||||
expectedOutputs map[string][]string, // map of container name -> expected outputs
|
||||
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) error {
|
||||
ns := pod.ObjectMeta.Namespace
|
||||
if ns == "" {
|
||||
ns = f.Namespace.Name
|
||||
@ -193,24 +203,26 @@ func MatchContainerOutput(
|
||||
return fmt.Errorf("expected pod %q success: %v", createdPod.Name, podErr)
|
||||
}
|
||||
|
||||
framework.Logf("Trying to get logs from node %s pod %s container %s: %v",
|
||||
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
|
||||
for cName, expectedOutput := range expectedOutputs {
|
||||
framework.Logf("Trying to get logs from node %s pod %s container %s: %v",
|
||||
podStatus.Spec.NodeName, podStatus.Name, cName, err)
|
||||
|
||||
// Sometimes the actual containers take a second to get started, try to get logs for 60s
|
||||
logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, ns, podStatus.Name, containerName)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to get logs from node %q pod %q container %q. %v",
|
||||
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
|
||||
return fmt.Errorf("failed to get logs from %s for %s: %w", podStatus.Name, containerName, err)
|
||||
}
|
||||
|
||||
for _, expected := range expectedOutput {
|
||||
m := matcher(expected)
|
||||
matches, err := m.Match(logs)
|
||||
// Sometimes the actual containers take a second to get started, try to get logs for 60s
|
||||
logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, ns, podStatus.Name, cName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("expected %q in container output: %w", expected, err)
|
||||
} else if !matches {
|
||||
return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
|
||||
framework.Logf("Failed to get logs from node %q pod %q container %q. %v",
|
||||
podStatus.Spec.NodeName, podStatus.Name, cName, err)
|
||||
return fmt.Errorf("failed to get logs from %s for %s: %w", podStatus.Name, cName, err)
|
||||
}
|
||||
|
||||
for _, expected := range expectedOutput {
|
||||
m := matcher(expected)
|
||||
matches, err := m.Match(logs)
|
||||
if err != nil {
|
||||
return fmt.Errorf("expected %q in container output: %w", expected, err)
|
||||
} else if !matches {
|
||||
return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -228,7 +240,11 @@ func TestContainerOutput(ctx context.Context, f *framework.Framework, scenarioNa
|
||||
// for all of the containers in the podSpec to move into the 'Success' status, and tests
|
||||
// the specified container log against the given expected output using a regexp matcher.
|
||||
func TestContainerOutputRegexp(ctx context.Context, f *framework.Framework, scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
|
||||
TestContainerOutputMatcher(ctx, f, scenarioName, pod, containerIndex, expectedOutput, gomega.MatchRegexp)
|
||||
TestContainerOutputsRegexp(ctx, f, scenarioName, pod, map[int][]string{containerIndex: expectedOutput})
|
||||
}
|
||||
|
||||
func TestContainerOutputsRegexp(ctx context.Context, f *framework.Framework, scenarioName string, pod *v1.Pod, expectedOutputs map[int][]string) {
|
||||
TestContainerOutputsMatcher(ctx, f, scenarioName, pod, expectedOutputs, gomega.MatchRegexp)
|
||||
}
|
||||
|
||||
// TestContainerOutputMatcher runs the given pod in the given namespace and waits
|
||||
@ -246,3 +262,23 @@ func TestContainerOutputMatcher(ctx context.Context, f *framework.Framework,
|
||||
}
|
||||
framework.ExpectNoError(MatchContainerOutput(ctx, f, pod, pod.Spec.Containers[containerIndex].Name, expectedOutput, matcher))
|
||||
}
|
||||
|
||||
func TestContainerOutputsMatcher(ctx context.Context, f *framework.Framework,
|
||||
scenarioName string,
|
||||
pod *v1.Pod,
|
||||
expectedOutputs map[int][]string,
|
||||
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) {
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating a pod to test %v", scenarioName))
|
||||
|
||||
expectedNameOutputs := make(map[string][]string, len(expectedOutputs))
|
||||
for containerIndex, expectedOutput := range expectedOutputs {
|
||||
expectedOutput := expectedOutput
|
||||
if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) {
|
||||
framework.Failf("Invalid container index: %d", containerIndex)
|
||||
}
|
||||
expectedNameOutputs[pod.Spec.Containers[containerIndex].Name] = expectedOutput
|
||||
}
|
||||
framework.ExpectNoError(MatchMultipleContainerOutputs(ctx, f, pod, expectedNameOutputs, matcher))
|
||||
|
||||
}
|
||||
|
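The new map-based entry points above let one pod exercise several containers in a single call. A usage sketch (not from the diff; the scenario name and expected strings are made up, and the pod spec is assumed to be built elsewhere in the test):

package e2esketch

import (
    "context"

    v1 "k8s.io/api/core/v1"
    "k8s.io/kubernetes/test/e2e/framework"
    e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)

func checkBothContainers(ctx context.Context, f *framework.Framework, pod *v1.Pod) {
    // Container index -> expected regexps, instead of one call per container.
    e2eoutput.TestContainerOutputsRegexp(ctx, f, "multi-container output", pod, map[int][]string{
        0: {"hello from container 0"},
        1: {"hello from container 1"},
    })
}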
9 vendor/k8s.io/kubernetes/test/e2e/framework/pod/pod_client.go generated vendored
@@ -266,7 +266,7 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
    }
    // If the image policy is not PullAlways, the image must be in the pre-pull list and
    // pre-pulled.
    gomega.Expect(ImagePrePullList.Has(c.Image)).To(gomega.BeTrue(), "Image %q is not in the pre-pull list, consider adding it to PrePulledImages in test/e2e/common/util.go or NodePrePullImageList in test/e2e_node/image_list.go", c.Image)
    gomega.Expect(ImagePrePullList.Has(c.Image)).To(gomega.BeTrueBecause("Image %q is not in the pre-pull list, consider adding it to PrePulledImages in test/e2e/common/util.go or NodePrePullImageList in test/e2e_node/image_list.go", c.Image))
    // Do not pull images during the tests because the images in pre-pull list should have
    // been prepulled.
    c.ImagePullPolicy = v1.PullNever
@@ -308,8 +308,13 @@ func (c *PodClient) WaitForFinish(ctx context.Context, name string, timeout time

// WaitForErrorEventOrSuccess waits for pod to succeed or an error event for that pod.
func (c *PodClient) WaitForErrorEventOrSuccess(ctx context.Context, pod *v1.Pod) (*v1.Event, error) {
    return c.WaitForErrorEventOrSuccessWithTimeout(ctx, pod, framework.PodStartTimeout)
}

// WaitForErrorEventOrSuccessWithTimeout waits for pod to succeed or an error event for that pod for a specified time
func (c *PodClient) WaitForErrorEventOrSuccessWithTimeout(ctx context.Context, pod *v1.Pod, timeout time.Duration) (*v1.Event, error) {
    var ev *v1.Event
    err := wait.PollUntilContextTimeout(ctx, framework.Poll, framework.PodStartTimeout, false, func(ctx context.Context) (bool, error) {
    err := wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, false, func(ctx context.Context) (bool, error) {
        evnts, err := c.f.ClientSet.CoreV1().Events(pod.Namespace).Search(scheme.Scheme, pod)
        if err != nil {
            return false, fmt.Errorf("error in listing events: %w", err)
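A sketch of the new timeout-aware variant (not from the diff; it assumes the framework's usual NewPodClient constructor, an already-created pod, and an arbitrary two-minute bound):

package e2esketch

import (
    "context"
    "time"

    v1 "k8s.io/api/core/v1"
    "k8s.io/kubernetes/test/e2e/framework"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

func expectEventOrSuccessSoon(ctx context.Context, f *framework.Framework, pod *v1.Pod) *v1.Event {
    podClient := e2epod.NewPodClient(f)
    // Use a tighter bound than the default framework.PodStartTimeout.
    ev, err := podClient.WaitForErrorEventOrSuccessWithTimeout(ctx, pod, 2*time.Minute)
    framework.ExpectNoError(err, "waiting for pod %s to succeed or emit an error event", pod.Name)
    return ev
}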
397 vendor/k8s.io/kubernetes/test/e2e/framework/pod/resize.go generated vendored Normal file
@ -0,0 +1,397 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package pod
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
kubecm "k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
"github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
CgroupCPUPeriod string = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"
|
||||
CgroupCPUShares string = "/sys/fs/cgroup/cpu/cpu.shares"
|
||||
CgroupCPUQuota string = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
|
||||
CgroupMemLimit string = "/sys/fs/cgroup/memory/memory.limit_in_bytes"
|
||||
Cgroupv2MemLimit string = "/sys/fs/cgroup/memory.max"
|
||||
Cgroupv2MemRequest string = "/sys/fs/cgroup/memory.min"
|
||||
Cgroupv2CPULimit string = "/sys/fs/cgroup/cpu.max"
|
||||
Cgroupv2CPURequest string = "/sys/fs/cgroup/cpu.weight"
|
||||
CPUPeriod string = "100000"
|
||||
MinContainerRuntimeVersion string = "1.6.9"
|
||||
)
|
||||
|
||||
var (
|
||||
podOnCgroupv2Node *bool
|
||||
)
|
||||
|
||||
type ContainerResources struct {
|
||||
CPUReq string
|
||||
CPULim string
|
||||
MemReq string
|
||||
MemLim string
|
||||
EphStorReq string
|
||||
EphStorLim string
|
||||
ExtendedResourceReq string
|
||||
ExtendedResourceLim string
|
||||
}
|
||||
|
||||
func (cr *ContainerResources) ResourceRequirements() *v1.ResourceRequirements {
|
||||
if cr == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var lim, req v1.ResourceList
|
||||
if cr.CPULim != "" || cr.MemLim != "" || cr.EphStorLim != "" {
|
||||
lim = make(v1.ResourceList)
|
||||
}
|
||||
if cr.CPUReq != "" || cr.MemReq != "" || cr.EphStorReq != "" {
|
||||
req = make(v1.ResourceList)
|
||||
}
|
||||
if cr.CPULim != "" {
|
||||
lim[v1.ResourceCPU] = resource.MustParse(cr.CPULim)
|
||||
}
|
||||
if cr.MemLim != "" {
|
||||
lim[v1.ResourceMemory] = resource.MustParse(cr.MemLim)
|
||||
}
|
||||
if cr.EphStorLim != "" {
|
||||
lim[v1.ResourceEphemeralStorage] = resource.MustParse(cr.EphStorLim)
|
||||
}
|
||||
if cr.CPUReq != "" {
|
||||
req[v1.ResourceCPU] = resource.MustParse(cr.CPUReq)
|
||||
}
|
||||
if cr.MemReq != "" {
|
||||
req[v1.ResourceMemory] = resource.MustParse(cr.MemReq)
|
||||
}
|
||||
if cr.EphStorReq != "" {
|
||||
req[v1.ResourceEphemeralStorage] = resource.MustParse(cr.EphStorReq)
|
||||
}
|
||||
return &v1.ResourceRequirements{Limits: lim, Requests: req}
|
||||
}
|
||||
|
||||
type ResizableContainerInfo struct {
|
||||
Name string
|
||||
Resources *ContainerResources
|
||||
CPUPolicy *v1.ResourceResizeRestartPolicy
|
||||
MemPolicy *v1.ResourceResizeRestartPolicy
|
||||
RestartCount int32
|
||||
}
|
||||
|
||||
type containerPatch struct {
|
||||
Name string `json:"name"`
|
||||
Resources struct {
|
||||
Requests struct {
|
||||
CPU string `json:"cpu,omitempty"`
|
||||
Memory string `json:"memory,omitempty"`
|
||||
EphStor string `json:"ephemeral-storage,omitempty"`
|
||||
} `json:"requests"`
|
||||
Limits struct {
|
||||
CPU string `json:"cpu,omitempty"`
|
||||
Memory string `json:"memory,omitempty"`
|
||||
EphStor string `json:"ephemeral-storage,omitempty"`
|
||||
} `json:"limits"`
|
||||
} `json:"resources"`
|
||||
}
|
||||
|
||||
type patchSpec struct {
|
||||
Spec struct {
|
||||
Containers []containerPatch `json:"containers"`
|
||||
} `json:"spec"`
|
||||
}
|
||||
|
||||
func getTestResourceInfo(tcInfo ResizableContainerInfo) (res v1.ResourceRequirements, resizePol []v1.ContainerResizePolicy) {
|
||||
if tcInfo.Resources != nil {
|
||||
res = *tcInfo.Resources.ResourceRequirements()
|
||||
}
|
||||
if tcInfo.CPUPolicy != nil {
|
||||
cpuPol := v1.ContainerResizePolicy{ResourceName: v1.ResourceCPU, RestartPolicy: *tcInfo.CPUPolicy}
|
||||
resizePol = append(resizePol, cpuPol)
|
||||
}
|
||||
if tcInfo.MemPolicy != nil {
|
||||
memPol := v1.ContainerResizePolicy{ResourceName: v1.ResourceMemory, RestartPolicy: *tcInfo.MemPolicy}
|
||||
resizePol = append(resizePol, memPol)
|
||||
}
|
||||
return res, resizePol
|
||||
}
|
||||
|
||||
func InitDefaultResizePolicy(containers []ResizableContainerInfo) {
|
||||
noRestart := v1.NotRequired
|
||||
setDefaultPolicy := func(ci *ResizableContainerInfo) {
|
||||
if ci.CPUPolicy == nil {
|
||||
ci.CPUPolicy = &noRestart
|
||||
}
|
||||
if ci.MemPolicy == nil {
|
||||
ci.MemPolicy = &noRestart
|
||||
}
|
||||
}
|
||||
for i := range containers {
|
||||
setDefaultPolicy(&containers[i])
|
||||
}
|
||||
}
|
||||
|
||||
func makeResizableContainer(tcInfo ResizableContainerInfo) v1.Container {
|
||||
cmd := "grep Cpus_allowed_list /proc/self/status | cut -f2 && sleep 1d"
|
||||
res, resizePol := getTestResourceInfo(tcInfo)
|
||||
|
||||
tc := v1.Container{
|
||||
Name: tcInfo.Name,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/sh"},
|
||||
Args: []string{"-c", cmd},
|
||||
Resources: res,
|
||||
ResizePolicy: resizePol,
|
||||
}
|
||||
|
||||
return tc
|
||||
}
|
||||
|
||||
func MakePodWithResizableContainers(ns, name, timeStamp string, tcInfo []ResizableContainerInfo) *v1.Pod {
|
||||
var testContainers []v1.Container
|
||||
|
||||
for _, ci := range tcInfo {
|
||||
tc := makeResizableContainer(ci)
|
||||
testContainers = append(testContainers, tc)
|
||||
}
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: ns,
|
||||
Labels: map[string]string{
|
||||
"time": timeStamp,
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
OS: &v1.PodOS{Name: v1.Linux},
|
||||
Containers: testContainers,
|
||||
RestartPolicy: v1.RestartPolicyOnFailure,
|
||||
},
|
||||
}
|
||||
return pod
|
||||
}
|
||||
|
||||
func VerifyPodResizePolicy(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
|
||||
ginkgo.GinkgoHelper()
|
||||
gomega.Expect(gotPod.Spec.Containers).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
|
||||
for i, wantCtr := range wantCtrs {
|
||||
gotCtr := &gotPod.Spec.Containers[i]
|
||||
ctr := makeResizableContainer(wantCtr)
|
||||
gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name))
|
||||
gomega.Expect(gotCtr.ResizePolicy).To(gomega.Equal(ctr.ResizePolicy))
|
||||
}
|
||||
}
|
||||
|
||||
func VerifyPodResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
|
||||
ginkgo.GinkgoHelper()
|
||||
gomega.Expect(gotPod.Spec.Containers).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
|
||||
for i, wantCtr := range wantCtrs {
|
||||
gotCtr := &gotPod.Spec.Containers[i]
|
||||
ctr := makeResizableContainer(wantCtr)
|
||||
gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name))
|
||||
gomega.Expect(gotCtr.Resources).To(gomega.Equal(ctr.Resources))
|
||||
}
|
||||
}
|
||||
|
||||
func VerifyPodStatusResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) error {
|
||||
ginkgo.GinkgoHelper()
|
||||
|
||||
var errs []error
|
||||
|
||||
if len(gotPod.Status.ContainerStatuses) != len(wantCtrs) {
|
||||
return fmt.Errorf("expectation length mismatch: got %d statuses, want %d",
|
||||
len(gotPod.Status.ContainerStatuses), len(wantCtrs))
|
||||
}
|
||||
for i, wantCtr := range wantCtrs {
|
||||
gotCtrStatus := &gotPod.Status.ContainerStatuses[i]
|
||||
ctr := makeResizableContainer(wantCtr)
|
||||
if gotCtrStatus.Name != ctr.Name {
|
||||
errs = append(errs, fmt.Errorf("container status %d name %q != expected name %q", i, gotCtrStatus.Name, ctr.Name))
|
||||
continue
|
||||
}
|
||||
if err := framework.Gomega().Expect(*gotCtrStatus.Resources).To(gomega.Equal(ctr.Resources)); err != nil {
|
||||
errs = append(errs, fmt.Errorf("container[%s] status resources mismatch: %w", ctr.Name, err))
|
||||
}
|
||||
}
|
||||
|
||||
return utilerrors.NewAggregate(errs)
|
||||
}
|
||||
|
||||
func VerifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework, pod *v1.Pod, tcInfo []ResizableContainerInfo) error {
|
||||
ginkgo.GinkgoHelper()
|
||||
if podOnCgroupv2Node == nil {
|
||||
value := IsPodOnCgroupv2Node(f, pod)
|
||||
podOnCgroupv2Node = &value
|
||||
}
|
||||
cgroupMemLimit := Cgroupv2MemLimit
|
||||
cgroupCPULimit := Cgroupv2CPULimit
|
||||
cgroupCPURequest := Cgroupv2CPURequest
|
||||
if !*podOnCgroupv2Node {
|
||||
cgroupMemLimit = CgroupMemLimit
|
||||
cgroupCPULimit = CgroupCPUQuota
|
||||
cgroupCPURequest = CgroupCPUShares
|
||||
}
|
||||
|
||||
var errs []error
|
||||
for _, ci := range tcInfo {
|
||||
if ci.Resources == nil {
|
||||
continue
|
||||
}
|
||||
tc := makeResizableContainer(ci)
|
||||
if tc.Resources.Limits != nil || tc.Resources.Requests != nil {
|
||||
var expectedCPUShares int64
|
||||
var expectedCPULimitString, expectedMemLimitString string
|
||||
expectedMemLimitInBytes := tc.Resources.Limits.Memory().Value()
|
||||
cpuRequest := tc.Resources.Requests.Cpu()
|
||||
cpuLimit := tc.Resources.Limits.Cpu()
|
||||
if cpuRequest.IsZero() && !cpuLimit.IsZero() {
|
||||
expectedCPUShares = int64(kubecm.MilliCPUToShares(cpuLimit.MilliValue()))
|
||||
} else {
|
||||
expectedCPUShares = int64(kubecm.MilliCPUToShares(cpuRequest.MilliValue()))
|
||||
}
|
||||
cpuQuota := kubecm.MilliCPUToQuota(cpuLimit.MilliValue(), kubecm.QuotaPeriod)
|
||||
if cpuLimit.IsZero() {
|
||||
cpuQuota = -1
|
||||
}
|
||||
expectedCPULimitString = strconv.FormatInt(cpuQuota, 10)
|
||||
expectedMemLimitString = strconv.FormatInt(expectedMemLimitInBytes, 10)
|
||||
if *podOnCgroupv2Node {
|
||||
if expectedCPULimitString == "-1" {
|
||||
expectedCPULimitString = "max"
|
||||
}
|
||||
expectedCPULimitString = fmt.Sprintf("%s %s", expectedCPULimitString, CPUPeriod)
|
||||
if expectedMemLimitString == "0" {
|
||||
expectedMemLimitString = "max"
|
||||
}
|
||||
// convert cgroup v1 cpu.shares value to cgroup v2 cpu.weight value
|
||||
// https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2254-cgroup-v2#phase-1-convert-from-cgroups-v1-settings-to-v2
|
||||
expectedCPUShares = int64(1 + ((expectedCPUShares-2)*9999)/262142)
|
||||
}
|
||||
if expectedMemLimitString != "0" {
|
||||
errs = append(errs, VerifyCgroupValue(f, pod, ci.Name, cgroupMemLimit, expectedMemLimitString))
|
||||
}
|
||||
errs = append(errs, VerifyCgroupValue(f, pod, ci.Name, cgroupCPULimit, expectedCPULimitString))
|
||||
errs = append(errs, VerifyCgroupValue(f, pod, ci.Name, cgroupCPURequest, strconv.FormatInt(expectedCPUShares, 10)))
|
||||
}
|
||||
}
|
||||
return utilerrors.NewAggregate(errs)
|
||||
}
|
||||
|
||||
func verifyContainerRestarts(pod *v1.Pod, expectedContainers []ResizableContainerInfo) error {
|
||||
ginkgo.GinkgoHelper()
|
||||
|
||||
expectContainerRestarts := map[string]int32{}
|
||||
for _, ci := range expectedContainers {
|
||||
expectContainerRestarts[ci.Name] = ci.RestartCount
|
||||
}
|
||||
|
||||
errs := []error{}
|
||||
for _, cs := range pod.Status.ContainerStatuses {
|
||||
expectedRestarts := expectContainerRestarts[cs.Name]
|
||||
if cs.RestartCount != expectedRestarts {
|
||||
errs = append(errs, fmt.Errorf("unexpected number of restarts for container %s: got %d, want %d", cs.Name, cs.RestartCount, expectedRestarts))
|
||||
}
|
||||
}
|
||||
return utilerrors.NewAggregate(errs)
|
||||
}
|
||||
|
||||
func WaitForPodResizeActuation(ctx context.Context, f *framework.Framework, podClient *PodClient, pod *v1.Pod) *v1.Pod {
|
||||
ginkgo.GinkgoHelper()
|
||||
// Wait for resize to complete.
|
||||
framework.ExpectNoError(WaitForPodCondition(ctx, f.ClientSet, pod.Namespace, pod.Name, "resize status cleared", f.Timeouts.PodStart,
|
||||
func(pod *v1.Pod) (bool, error) {
|
||||
if pod.Status.Resize == v1.PodResizeStatusInfeasible {
|
||||
// This is a terminal resize state
|
||||
return false, fmt.Errorf("resize is infeasible")
|
||||
}
|
||||
return pod.Status.Resize == "", nil
|
||||
}), "pod should finish resizing")
|
||||
|
||||
resizedPod, err := framework.GetObject(podClient.Get, pod.Name, metav1.GetOptions{})(ctx)
|
||||
framework.ExpectNoError(err, "failed to get resized pod")
|
||||
return resizedPod
|
||||
}
|
||||
|
||||
func ExpectPodResized(ctx context.Context, f *framework.Framework, resizedPod *v1.Pod, expectedContainers []ResizableContainerInfo) {
|
||||
ginkgo.GinkgoHelper()
|
||||
|
||||
// Put each error on a new line for readability.
|
||||
formatErrors := func(err error) error {
|
||||
var agg utilerrors.Aggregate
|
||||
if !errors.As(err, &agg) {
|
||||
return err
|
||||
}
|
||||
|
||||
errStrings := make([]string, len(agg.Errors()))
|
||||
for i, err := range agg.Errors() {
|
||||
errStrings[i] = err.Error()
|
||||
}
|
||||
return fmt.Errorf("[\n%s\n]", strings.Join(errStrings, ",\n"))
|
||||
}
|
||||
// Verify Pod Containers Cgroup Values
|
||||
var errs []error
|
||||
if cgroupErrs := VerifyPodContainersCgroupValues(ctx, f, resizedPod, expectedContainers); cgroupErrs != nil {
|
||||
errs = append(errs, fmt.Errorf("container cgroup values don't match expected: %w", formatErrors(cgroupErrs)))
|
||||
}
|
||||
if resourceErrs := VerifyPodStatusResources(resizedPod, expectedContainers); resourceErrs != nil {
|
||||
errs = append(errs, fmt.Errorf("container status resources don't match expected: %w", formatErrors(resourceErrs)))
|
||||
}
|
||||
if restartErrs := verifyContainerRestarts(resizedPod, expectedContainers); restartErrs != nil {
|
||||
errs = append(errs, fmt.Errorf("container restart counts don't match expected: %w", formatErrors(restartErrs)))
|
||||
}
|
||||
|
||||
if len(errs) > 0 {
|
||||
resizedPod.ManagedFields = nil // Suppress managed fields in error output.
|
||||
framework.ExpectNoError(formatErrors(utilerrors.NewAggregate(errs)),
|
||||
"Verifying pod resources resize state. Pod: %s", framework.PrettyPrintJSON(resizedPod))
|
||||
}
|
||||
}
|
||||
|
||||
// ResizeContainerPatch generates a patch string to resize the pod container.
|
||||
func ResizeContainerPatch(containers []ResizableContainerInfo) (string, error) {
|
||||
var patch patchSpec
|
||||
|
||||
for _, container := range containers {
|
||||
var cPatch containerPatch
|
||||
cPatch.Name = container.Name
|
||||
cPatch.Resources.Requests.CPU = container.Resources.CPUReq
|
||||
cPatch.Resources.Requests.Memory = container.Resources.MemReq
|
||||
cPatch.Resources.Limits.CPU = container.Resources.CPULim
|
||||
cPatch.Resources.Limits.Memory = container.Resources.MemLim
|
||||
|
||||
patch.Spec.Containers = append(patch.Spec.Containers, cPatch)
|
||||
}
|
||||
|
||||
patchBytes, err := json.Marshal(patch)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return string(patchBytes), nil
|
||||
}
|
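The cgroup arithmetic in VerifyPodContainersCgroupValues above is easy to check in isolation. This standalone sketch (not part of the diff) restates the CPU-request-to-cpu.shares conversion performed by kubecm.MilliCPUToShares (1024 shares per CPU, minimum 2) and applies the KEP-2254 shares-to-cpu.weight formula quoted in the new file:

package main

import "fmt"

// milliCPUToShares restates the kubelet's cgroup v1 conversion:
// 1024 shares per CPU, never less than 2.
func milliCPUToShares(milliCPU int64) int64 {
    shares := milliCPU * 1024 / 1000
    if shares < 2 {
        return 2
    }
    return shares
}

// sharesToCgroupV2Weight maps cpu.shares to cpu.weight as in KEP-2254:
// https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2254-cgroup-v2
func sharesToCgroupV2Weight(shares int64) int64 {
    return 1 + ((shares-2)*9999)/262142
}

func main() {
    for _, milli := range []int64{100, 500, 1000, 2000} {
        shares := milliCPUToShares(milli)
        fmt.Printf("request=%dm -> cpu.shares=%d -> cpu.weight=%d\n",
            milli, shares, sharesToCgroupV2Weight(shares))
    }
}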
32 vendor/k8s.io/kubernetes/test/e2e/framework/pod/utils.go generated vendored
@ -19,11 +19,13 @@ package pod
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
"github.com/onsi/gomega"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
psaapi "k8s.io/pod-security-admission/api"
|
||||
psapolicy "k8s.io/pod-security-admission/policy"
|
||||
@ -275,3 +277,33 @@ func FindContainerStatusInPod(pod *v1.Pod, containerName string) *v1.ContainerSt
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// VerifyCgroupValue verifies that the given cgroup path has the expected value in
|
||||
// the specified container of the pod. It execs into the container to retrive the
|
||||
// cgroup value and compares it against the expected value.
|
||||
func VerifyCgroupValue(f *framework.Framework, pod *v1.Pod, cName, cgPath, expectedCgValue string) error {
|
||||
cmd := fmt.Sprintf("head -n 1 %s", cgPath)
|
||||
framework.Logf("Namespace %s Pod %s Container %s - looking for cgroup value %s in path %s",
|
||||
pod.Namespace, pod.Name, cName, expectedCgValue, cgPath)
|
||||
cgValue, _, err := ExecCommandInContainerWithFullOutput(f, pod.Name, cName, "/bin/sh", "-c", cmd)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to find expected value %q in container cgroup %q", expectedCgValue, cgPath)
|
||||
}
|
||||
cgValue = strings.Trim(cgValue, "\n")
|
||||
if cgValue != expectedCgValue {
|
||||
return fmt.Errorf("cgroup value %q not equal to expected %q", cgValue, expectedCgValue)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsPodOnCgroupv2Node checks whether the pod is running on cgroupv2 node.
|
||||
// TODO: Deduplicate this function with NPD cluster e2e test:
|
||||
// https://github.com/kubernetes/kubernetes/blob/2049360379bcc5d6467769cef112e6e492d3d2f0/test/e2e/node/node_problem_detector.go#L369
|
||||
func IsPodOnCgroupv2Node(f *framework.Framework, pod *v1.Pod) bool {
|
||||
cmd := "mount -t cgroup2"
|
||||
out, _, err := ExecCommandInContainerWithFullOutput(f, pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", cmd)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return len(out) != 0
|
||||
}
|
||||
|
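A sketch of how the helpers above combine (not from the diff; the 128Mi expectation is illustrative): pick the cgroup path for the node's cgroup version, then compare the raw string read from the container.

package e2esketch

import (
    v1 "k8s.io/api/core/v1"
    "k8s.io/kubernetes/test/e2e/framework"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

func checkMemoryLimitCgroup(f *framework.Framework, pod *v1.Pod) error {
    path := e2epod.CgroupMemLimit // cgroup v1: memory.limit_in_bytes
    if e2epod.IsPodOnCgroupv2Node(f, pod) {
        path = e2epod.Cgroupv2MemLimit // cgroup v2: memory.max
    }
    // 128Mi expressed in bytes, since the framework compares raw strings.
    return e2epod.VerifyCgroupValue(f, pod, pod.Spec.Containers[0].Name, path, "134217728")
}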
14 vendor/k8s.io/kubernetes/test/e2e/framework/pod/wait.go generated vendored
@@ -475,7 +475,7 @@ func WaitForPodSuccessInNamespaceTimeout(ctx context.Context, c clientset.Interf
        ginkgo.By("Saw pod success")
        return true, nil
    case v1.PodFailed:
        return true, gomega.StopTrying(fmt.Sprintf("pod %q failed with status: %+v", podName, pod.Status))
        return true, gomega.StopTrying(fmt.Sprintf("pod %q failed with status: \n%s", podName, format.Object(pod.Status, 1)))
    default:
        return false, nil
    }
@@ -809,6 +809,18 @@ func WaitForPodContainerStarted(ctx context.Context, c clientset.Interface, name
    })
}

// WaitForPodInitContainerStarted waits for the given Pod init container to start.
func WaitForPodInitContainerStarted(ctx context.Context, c clientset.Interface, namespace, podName string, initContainerIndex int, timeout time.Duration) error {
    conditionDesc := fmt.Sprintf("init container %d started", initContainerIndex)
    return WaitForPodCondition(ctx, c, namespace, podName, conditionDesc, timeout, func(pod *v1.Pod) (bool, error) {
        if initContainerIndex > len(pod.Status.InitContainerStatuses)-1 {
            return false, nil
        }
        initContainerStatus := pod.Status.InitContainerStatuses[initContainerIndex]
        return *initContainerStatus.Started, nil
    })
}

// WaitForPodFailedReason wait for pod failed reason in status, for example "SysctlForbidden".
func WaitForPodFailedReason(ctx context.Context, c clientset.Interface, pod *v1.Pod, reason string, timeout time.Duration) error {
    conditionDesc := fmt.Sprintf("failed with reason %s", reason)
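A usage sketch for the new init-container wait (not from the diff; the two-minute timeout is arbitrary):

package e2esketch

import (
    "context"
    "time"

    v1 "k8s.io/api/core/v1"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/test/e2e/framework"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

func waitForFirstInitContainer(ctx context.Context, c clientset.Interface, pod *v1.Pod) {
    err := e2epod.WaitForPodInitContainerStarted(ctx, c, pod.Namespace, pod.Name, 0, 2*time.Minute)
    framework.ExpectNoError(err, "init container 0 of pod %s did not start", pod.Name)
}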
70 vendor/k8s.io/kubernetes/test/e2e/framework/pv/wait.go generated vendored Normal file
@ -0,0 +1,70 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package pv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/utils/format"
|
||||
"k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
// WaitForPersistentVolumeClaimModified waits the given timeout duration for the specified claim to become bound with the
|
||||
// desired volume attributes class.
|
||||
// Returns an error if timeout occurs first.
|
||||
func WaitForPersistentVolumeClaimModified(ctx context.Context, c clientset.Interface, claim *v1.PersistentVolumeClaim, timeout time.Duration) error {
|
||||
desiredClass := ptr.Deref(claim.Spec.VolumeAttributesClassName, "")
|
||||
|
||||
var match = func(claim *v1.PersistentVolumeClaim) bool {
|
||||
for _, condition := range claim.Status.Conditions {
|
||||
// conditions that indicate the claim is being modified
|
||||
// or has an error when modifying the volume
|
||||
if condition.Type == v1.PersistentVolumeClaimVolumeModifyVolumeError ||
|
||||
condition.Type == v1.PersistentVolumeClaimVolumeModifyingVolume {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// check if claim is bound with the desired volume attributes class
|
||||
currentClass := ptr.Deref(claim.Status.CurrentVolumeAttributesClassName, "")
|
||||
return claim.Status.Phase == v1.ClaimBound &&
|
||||
desiredClass == currentClass && claim.Status.ModifyVolumeStatus == nil
|
||||
}
|
||||
|
||||
if match(claim) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return framework.Gomega().
|
||||
Eventually(ctx, framework.GetObject(c.CoreV1().PersistentVolumeClaims(claim.Namespace).Get, claim.Name, metav1.GetOptions{})).
|
||||
WithTimeout(timeout).
|
||||
Should(framework.MakeMatcher(func(claim *v1.PersistentVolumeClaim) (func() string, error) {
|
||||
if match(claim) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return func() string {
|
||||
return fmt.Sprintf("expected claim's status to be modified with the given VolumeAttirbutesClass %s, got instead:\n%s", desiredClass, format.Object(claim, 1))
|
||||
}, nil
|
||||
}))
|
||||
}
|
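A usage sketch for the new wait helper (not from the diff; it assumes the test has already patched spec.volumeAttributesClassName on a bound claim, and the five-minute timeout is arbitrary):

package e2esketch

import (
    "context"
    "time"

    v1 "k8s.io/api/core/v1"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/test/e2e/framework"
    e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
)

func expectVolumeAttributesClassApplied(ctx context.Context, c clientset.Interface, claim *v1.PersistentVolumeClaim) {
    // Succeeds once the status reports the requested class and no pending ModifyVolume operation.
    err := e2epv.WaitForPersistentVolumeClaimModified(ctx, c, claim, 5*time.Minute)
    framework.ExpectNoError(err, "claim %s/%s was not modified to the requested VolumeAttributesClass",
        claim.Namespace, claim.Name)
}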
16 vendor/k8s.io/kubernetes/test/e2e/framework/skipper/skipper.go generated vendored
@@ -46,10 +46,13 @@ func Skipf(format string, args ...interface{}) {
    panic("unreachable")
}

// Skip is an alias for ginkgo.Skip.
var Skip = ginkgo.Skip

// SkipUnlessAtLeast skips if the value is less than the minValue.
func SkipUnlessAtLeast(value int, minValue int, message string) {
    if value < minValue {
        skipInternalf(1, message)
        skipInternalf(1, "%s", message)
    }
}

@@ -130,6 +133,17 @@ func SkipUnlessMultizone(ctx context.Context, c clientset.Interface) {
    }
}

// SkipUnlessAtLeastNZones skips if the cluster does not have n multizones.
func SkipUnlessAtLeastNZones(ctx context.Context, c clientset.Interface, n int) {
    zones, err := e2enode.GetClusterZones(ctx, c)
    if err != nil {
        skipInternalf(1, "Error listing cluster zones")
    }
    if zones.Len() < n {
        skipInternalf(1, "Requires >= %d zones", n)
    }
}

// SkipIfMultizone skips if the cluster has multizone.
func SkipIfMultizone(ctx context.Context, c clientset.Interface) {
    zones, err := e2enode.GetClusterZones(ctx, c)
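A one-line usage sketch for the new zone check (not from the diff; three zones is an arbitrary example):

package e2esketch

import (
    "context"

    clientset "k8s.io/client-go/kubernetes"
    e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
)

// requireThreeZones skips the calling spec unless the cluster spans at least three zones.
func requireThreeZones(ctx context.Context, c clientset.Interface) {
    e2eskipper.SkipUnlessAtLeastNZones(ctx, c, 3)
}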
3 vendor/k8s.io/kubernetes/test/e2e/framework/ssh/ssh.go generated vendored
@@ -68,6 +68,9 @@ func GetSigner(provider string) (ssh.Signer, error) {
    switch provider {
    case "gce", "gke", "kubemark":
        keyfile = os.Getenv("GCE_SSH_KEY")
        if keyfile == "" {
            keyfile = os.Getenv("GCE_SSH_PRIVATE_KEY_FILE")
        }
        if keyfile == "" {
            keyfile = "google_compute_engine"
        }
3 vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go generated vendored
@@ -277,6 +277,8 @@ type NodeTestContextType struct {
    ExtraEnvs map[string]string
    // StandaloneMode indicates whether the test is running kubelet in a standalone mode.
    StandaloneMode bool
    // CriProxyEnabled indicates whether enable CRI API proxy for failure injection.
    CriProxyEnabled bool
}

// CloudConfig holds the cloud configuration for e2e test suites.
@@ -508,6 +510,7 @@ func AfterReadingAllFlags(t *TestContextType) {
    gomega.SetDefaultConsistentlyPollingInterval(t.timeouts.Poll)
    gomega.SetDefaultEventuallyTimeout(t.timeouts.PodStart)
    gomega.SetDefaultConsistentlyDuration(t.timeouts.PodStartShort)
    gomega.EnforceDefaultTimeoutsWhenUsingContexts()

    // ginkgo.PreviewSpecs will expand all nodes and thus may find new bugs.
    report := ginkgo.PreviewSpecs("Kubernetes e2e test statistics")
45 vendor/k8s.io/kubernetes/test/e2e/framework/util.go generated vendored
@ -128,6 +128,9 @@ const (
|
||||
|
||||
// SnapshotDeleteTimeout is how long for snapshot to delete snapshotContent.
|
||||
SnapshotDeleteTimeout = 5 * time.Minute
|
||||
|
||||
// ControlPlaneLabel is valid label for kubeadm based clusters like kops ONLY
|
||||
ControlPlaneLabel = "node-role.kubernetes.io/control-plane"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -582,7 +585,9 @@ func StartCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err e
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
Logf("Asynchronously running '%s %s'", cmd.Path, strings.Join(cmd.Args, " "))
|
||||
// cmd.Args contains command itself as 0th argument, so it's sufficient to
|
||||
// print 1st and latter arguments
|
||||
Logf("Asynchronously running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " "))
|
||||
err = cmd.Start()
|
||||
return
|
||||
}
|
||||
@ -662,6 +667,17 @@ func RunCmdEnv(env []string, command string, args ...string) (string, string, er
|
||||
return stdout, stderr, nil
|
||||
}
|
||||
|
||||
// GetNodeExternalIPs returns a list of external ip address(es) if any for a node
|
||||
func GetNodeExternalIPs(node *v1.Node) (ips []string) {
|
||||
for j := range node.Status.Addresses {
|
||||
nodeAddress := &node.Status.Addresses[j]
|
||||
if nodeAddress.Type == v1.NodeExternalIP && nodeAddress.Address != "" {
|
||||
ips = append(ips, nodeAddress.Address)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// getControlPlaneAddresses returns the externalIP, internalIP and hostname fields of control plane nodes.
|
||||
// If any of these is unavailable, empty slices are returned.
|
||||
func getControlPlaneAddresses(ctx context.Context, c clientset.Interface) ([]string, []string, []string) {
|
||||
@ -694,6 +710,33 @@ func getControlPlaneAddresses(ctx context.Context, c clientset.Interface) ([]str
|
||||
return externalIPs, internalIPs, hostnames
|
||||
}
|
||||
|
||||
// GetControlPlaneNodes returns a list of control plane nodes
|
||||
func GetControlPlaneNodes(ctx context.Context, c clientset.Interface) *v1.NodeList {
|
||||
allNodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
|
||||
ExpectNoError(err, "error reading all nodes")
|
||||
|
||||
var cpNodes v1.NodeList
|
||||
|
||||
for _, node := range allNodes.Items {
|
||||
// Check for the control plane label
|
||||
if _, hasLabel := node.Labels[ControlPlaneLabel]; hasLabel {
|
||||
cpNodes.Items = append(cpNodes.Items, node)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check for the specific taint
|
||||
for _, taint := range node.Spec.Taints {
|
||||
// NOTE the taint key is the same as the control plane label
|
||||
if taint.Key == ControlPlaneLabel && taint.Effect == v1.TaintEffectNoSchedule {
|
||||
cpNodes.Items = append(cpNodes.Items, node)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &cpNodes
|
||||
}
|
||||
|
||||
// GetControlPlaneAddresses returns all IP addresses on which the kubelet can reach the control plane.
|
||||
// It may return internal and external IPs, even if we expect for
|
||||
// e.g. internal IPs to be used (issue #56787), so that we can be
|
||||
|
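A sketch combining the two new helpers above (not from the diff): list the nodes that carry the control-plane label or taint, then collect any external IPs they expose.

package e2esketch

import (
    "context"

    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/test/e2e/framework"
)

func controlPlaneExternalIPs(ctx context.Context, c clientset.Interface) []string {
    var ips []string
    nodes := framework.GetControlPlaneNodes(ctx, c)
    for i := range nodes.Items {
        ips = append(ips, framework.GetNodeExternalIPs(&nodes.Items[i])...)
    }
    return ips
}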
18 vendor/k8s.io/kubernetes/test/e2e/framework/volume/fixtures.go generated vendored
@@ -176,6 +176,24 @@ func NewNFSServerWithNodeName(ctx context.Context, cs clientset.Interface, names
    return config, pod, host
}

// Restart the passed-in nfs-server by issuing a `rpc.nfsd 1` command in the
// pod's (only) container. This command changes the number of nfs server threads from
// (presumably) zero back to 1, and therefore allows nfs to open connections again.
func RestartNFSServer(f *framework.Framework, serverPod *v1.Pod) {
    const startcmd = "rpc.nfsd 1"
    _, _, err := PodExec(f, serverPod, startcmd)
    framework.ExpectNoError(err)
}

// Stop the passed-in nfs-server by issuing a `rpc.nfsd 0` command in the
// pod's (only) container. This command changes the number of nfs server threads to 0,
// thus closing all open nfs connections.
func StopNFSServer(f *framework.Framework, serverPod *v1.Pod) {
    const stopcmd = "rpc.nfsd 0 && for i in $(seq 200); do rpcinfo -p | grep -q nfs || break; sleep 1; done"
    _, _, err := PodExec(f, serverPod, stopcmd)
    framework.ExpectNoError(err)
}

// CreateStorageServer is a wrapper for startVolumeServer(). A storage server config is passed in, and a pod pointer
// and ip address string are returned.
// Note: Expect() is called so no error is returned.