Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 10:53:34 +00:00
rebase: update K8s packages to v0.32.1
Update K8s packages in go.mod to v0.32.1

Signed-off-by: Praveen M <m.praveen@ibm.com>
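A bump like this is normally a coordinated edit of the k8s.io modules in go.mod, after which the vendor/ tree is regenerated (typically with `go mod vendor`). The exact set of modules touched by this commit is not shown on this page, so the following go.mod excerpt is only an illustrative sketch of what the change usually looks like:

    require (
        k8s.io/api v0.32.1
        k8s.io/apimachinery v0.32.1
        k8s.io/client-go v0.32.1
        k8s.io/cloud-provider v0.32.1
        k8s.io/kubelet v0.32.1
        k8s.io/mount-utils v0.32.1
        k8s.io/pod-security-admission v0.32.1
    )

The vendored test/e2e framework files below change as a side effect of that regeneration.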
1 vendor/k8s.io/kubernetes/test/e2e/framework/.import-restrictions (generated, vendored)

@@ -35,7 +35,6 @@ rules:
- selectorRegexp: ^github.com/|^gopkg.in
allowedPrefixes: [
"gopkg.in/inf.v0",
"gopkg.in/yaml.v2",
"gopkg.in/evanphx/json-patch.v4",
"github.com/blang/semver/",
"github.com/davecgh/go-spew/spew",
3 vendor/k8s.io/kubernetes/test/e2e/framework/debug/resource_usage_gatherer.go (generated, vendored)

@@ -21,6 +21,7 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
"fmt"
"math"
"regexp"
@@ -595,7 +596,7 @@ func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constrai
}
}
if len(violatedConstraints) > 0 {
- return &summary, fmt.Errorf(strings.Join(violatedConstraints, "\n"))
+ return &summary, errors.New(strings.Join(violatedConstraints, "\n"))
}
return &summary, nil
}
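The fmt.Errorf-to-errors.New switch above matters because the joined constraint text is not a format string: any literal '%' in it would be misinterpreted, and newer go vet versions flag non-constant format strings. A minimal, self-contained illustration (not part of the vendored code):

	package main

	import (
		"errors"
		"fmt"
	)

	func main() {
		msg := "constraint violated: memory usage at 90%"
		// Treating msg as a format string mangles the trailing '%' and trips
		// go vet's "non-constant format string" check.
		fmt.Println(fmt.Errorf(msg)) // constraint violated: memory usage at 90%!(NOVERB)
		// errors.New uses the string verbatim.
		fmt.Println(errors.New(msg)) // constraint violated: memory usage at 90%
	}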
10 vendor/k8s.io/kubernetes/test/e2e/framework/expect.go (generated, vendored)

@@ -294,12 +294,22 @@ func (f *FailureError) backtrace() {
var ErrFailure error = FailureError{}

// ExpectNoError checks if "err" is set, and if so, fails assertion while logging the error.
//
// As in [gomega.Expect], the explain parameters can be used to provide
// additional information in case of a failure in one of these two ways:
// - A single string is used as first line of the failure message directly.
// - A string with additional parameters is passed through [fmt.Sprintf].
func ExpectNoError(err error, explain ...interface{}) {
ExpectNoErrorWithOffset(1, err, explain...)
}

// ExpectNoErrorWithOffset checks if "err" is set, and if so, fails assertion while logging the error at "offset" levels above its caller
// (for example, for call chain f -> g -> ExpectNoErrorWithOffset(1, ...) error would be logged for "f").
//
// As in [gomega.Expect], the explain parameters can be used to provide
// additional information in case of a failure in one of these two ways:
// - A single string is used as first line of the failure message directly.
// - A string with additional parameters is passed through [fmt.Sprintf].
func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
if err == nil {
return
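A short sketch of the two documented forms of the explain parameters; the helper and its arguments are hypothetical, shown only to illustrate the call pattern:

	package example

	import "k8s.io/kubernetes/test/e2e/framework"

	// createPVC is a hypothetical helper; err would come from a real API call.
	func createPVC(err error, pvcName, ns string) {
		// Form 1: a single string becomes the first line of the failure message.
		framework.ExpectNoError(err, "failed to create test PVC")
		// Form 2: a format string plus arguments is passed through fmt.Sprintf.
		framework.ExpectNoError(err, "failed to create PVC %q in namespace %q", pvcName, ns)
	}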
2 vendor/k8s.io/kubernetes/test/e2e/framework/flake_reporting_util.go (generated, vendored)

@@ -57,7 +57,7 @@ func (f *FlakeReport) RecordFlakeIfError(err error, optionalDescription ...inter
if desc != "" {
msg = fmt.Sprintf("%v (Description: %v)", msg, desc)
}
- Logf(msg)
+ Logf("%s", msg)
f.lock.Lock()
defer f.lock.Unlock()
f.Flakes = append(f.Flakes, msg)
4 vendor/k8s.io/kubernetes/test/e2e/framework/framework.go (generated, vendored)

@@ -311,7 +311,7 @@ func printSummaries(summaries []TestDataSummary, testBaseName string) {
switch TestContext.OutputPrintType {
case "hr":
if TestContext.ReportDir == "" {
- Logf(summaries[i].PrintHumanReadable())
+ Logf("%s", summaries[i].PrintHumanReadable())
} else {
// TODO: learn to extract test name and append it to the kind instead of timestamp.
filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".txt")
@@ -393,7 +393,7 @@ func (f *Framework) AfterEach(ctx context.Context) {
for namespaceKey, namespaceErr := range nsDeletionErrors {
messages = append(messages, fmt.Sprintf("Couldn't delete ns: %q: %s (%#v)", namespaceKey, namespaceErr, namespaceErr))
}
- Failf(strings.Join(messages, ","))
+ Fail(strings.Join(messages, ","))
}
}()
55 vendor/k8s.io/kubernetes/test/e2e/framework/node/helper.go (generated, vendored)

@@ -18,19 +18,22 @@ package node

import (
"context"
"encoding/json"
"fmt"
"time"

"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
testutils "k8s.io/kubernetes/test/utils"
"k8s.io/client-go/util/retry"

"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
)

const (
@@ -166,3 +169,51 @@ func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool {
}
return false
}

// IsARM64 checks whether the k8s Node has arm64 arch.
func IsARM64(node *v1.Node) bool {
arch, ok := node.Labels["kubernetes.io/arch"]
if ok {
return arch == "arm64"
}

return false
}

// AddExtendedResource adds a fake resource to the Node.
func AddExtendedResource(ctx context.Context, clientSet clientset.Interface, nodeName string, extendedResourceName v1.ResourceName, extendedResourceQuantity resource.Quantity) {
extendedResource := v1.ResourceName(extendedResourceName)

ginkgo.By("Adding a custom resource")
extendedResourceList := v1.ResourceList{
extendedResource: extendedResourceQuantity,
}
patchPayload, err := json.Marshal(v1.Node{
Status: v1.NodeStatus{
Capacity: extendedResourceList,
Allocatable: extendedResourceList,
},
})
framework.ExpectNoError(err, "Failed to marshal node JSON")

_, err = clientSet.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, []byte(patchPayload), metav1.PatchOptions{}, "status")
framework.ExpectNoError(err)
}

// RemoveExtendedResource removes a fake resource from the Node.
func RemoveExtendedResource(ctx context.Context, clientSet clientset.Interface, nodeName string, extendedResourceName v1.ResourceName) {
extendedResource := v1.ResourceName(extendedResourceName)

ginkgo.By("Removing a custom resource")
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
node, err := clientSet.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get node %s: %w", nodeName, err)
}
delete(node.Status.Capacity, extendedResource)
delete(node.Status.Allocatable, extendedResource)
_, err = clientSet.CoreV1().Nodes().UpdateStatus(ctx, node, metav1.UpdateOptions{})
return err
})
framework.ExpectNoError(err)
}
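A hedged sketch of how a test could use the new extended-resource helpers; the wrapper function and the resource name are hypothetical, only the e2enode calls come from the vendored code above:

	package example

	import (
		"context"

		v1 "k8s.io/api/core/v1"
		"k8s.io/apimachinery/pkg/api/resource"
		clientset "k8s.io/client-go/kubernetes"
		e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	)

	// advertiseFakeResource patches a fake resource onto the node status for the
	// duration of a test and returns a cleanup function.
	func advertiseFakeResource(ctx context.Context, cs clientset.Interface, nodeName string) func() {
		resName := v1.ResourceName("example.com/fakecpu") // assumed resource name
		e2enode.AddExtendedResource(ctx, cs, nodeName, resName, resource.MustParse("1"))
		return func() { e2enode.RemoveExtendedResource(ctx, cs, nodeName, resName) }
	}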
14 vendor/k8s.io/kubernetes/test/e2e/framework/node/resource.go (generated, vendored)

@@ -128,7 +128,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
}
if !silent {
- framework.Logf(msg)
+ framework.Logf("%s", msg)
}
return false
}
@@ -495,6 +495,16 @@ func hasNonblockingTaint(node *v1.Node, nonblockingTaints string) bool {
return false
}

// GetNodeHeartbeatTime returns the timestamp of the last status update of the node.
func GetNodeHeartbeatTime(node *v1.Node) metav1.Time {
for _, condition := range node.Status.Conditions {
if condition.Type == v1.NodeReady {
return condition.LastHeartbeatTime
}
}
return metav1.Time{}
}

// PodNodePairs return podNode pairs for all pods in a namespace
func PodNodePairs(ctx context.Context, c clientset.Interface, ns string) ([]PodNode, error) {
var result []PodNode
@@ -822,6 +832,6 @@ func verifyThatTaintIsGone(ctx context.Context, c clientset.Interface, nodeName
// TODO use wrapper methods in expect.go after removing core e2e dependency on node
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
if taintExists(nodeUpdated.Spec.Taints, taint) {
- framework.Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
+ framework.Fail("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
}
}
18 vendor/k8s.io/kubernetes/test/e2e/framework/node/wait.go (generated, vendored)

@@ -22,6 +22,7 @@ import (
"regexp"
"time"

+ "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
@@ -160,6 +161,23 @@ func WaitForNodeSchedulable(ctx context.Context, c clientset.Interface, name str
return false
}

// WaitForNodeHeartbeatAfter waits up to timeout for node to send the next
// heartbeat after the given timestamp.
//
// To ensure the node status is posted by a restarted kubelet process,
// after should be retrieved by [GetNodeHeartbeatTime] while the kubelet is down.
func WaitForNodeHeartbeatAfter(ctx context.Context, c clientset.Interface, name string, after metav1.Time, timeout time.Duration) {
framework.Logf("Waiting up to %v for node %s to send a heartbeat after %v", timeout, name, after)
gomega.Eventually(ctx, func() (time.Time, error) {
node, err := c.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
if err != nil {
framework.Logf("Couldn't get node %s", name)
return time.Time{}, err
}
return GetNodeHeartbeatTime(node).Time, nil
}, timeout, poll).Should(gomega.BeTemporally(">", after.Time), "Node %s didn't send a heartbeat", name)
}

// CheckReady waits up to timeout for cluster to has desired size and
// there is no not-ready nodes in it. By cluster size we mean number of schedulable Nodes.
func CheckReady(ctx context.Context, c clientset.Interface, size int, timeout time.Duration) ([]v1.Node, error) {
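A sketch of the intended pairing of GetNodeHeartbeatTime and WaitForNodeHeartbeatAfter around a kubelet restart (the storage utilities further down use exactly this pattern); the wrapper function itself is hypothetical:

	package example

	import (
		"context"
		"time"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		clientset "k8s.io/client-go/kubernetes"
		"k8s.io/kubernetes/test/e2e/framework"
		e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	)

	// waitForFreshHeartbeat records the last heartbeat while the kubelet is down,
	// then (after the caller restarts it) requires a strictly newer heartbeat.
	func waitForFreshHeartbeat(ctx context.Context, c clientset.Interface, nodeName string, restart func(), timeout time.Duration) {
		node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
		framework.ExpectNoError(err)
		before := e2enode.GetNodeHeartbeatTime(node)
		restart() // start the kubelet again, e.g. via SSH
		e2enode.WaitForNodeHeartbeatAfter(ctx, c, nodeName, before, timeout)
	}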
7 vendor/k8s.io/kubernetes/test/e2e/framework/pod/exec_util.go (generated, vendored)

@@ -58,8 +58,6 @@ func ExecWithOptionsContext(ctx context.Context, f *framework.Framework, options
if !options.Quiet {
framework.Logf("ExecWithOptions %+v", options)
}
- config, err := framework.LoadConfig()
- framework.ExpectNoError(err, "failed to load restclient config")

const tty = false

@@ -68,8 +66,7 @@ func ExecWithOptionsContext(ctx context.Context, f *framework.Framework, options
Resource("pods").
Name(options.PodName).
Namespace(options.Namespace).
- SubResource("exec").
- Param("container", options.ContainerName)
+ SubResource("exec")
req.VersionedParams(&v1.PodExecOptions{
Container: options.ContainerName,
Command: options.Command,
@@ -81,7 +78,7 @@ func ExecWithOptionsContext(ctx context.Context, f *framework.Framework, options

var stdout, stderr bytes.Buffer
framework.Logf("ExecWithOptions: execute(POST %s)", req.URL())
- err = execute(ctx, "POST", req.URL(), config, options.Stdin, &stdout, &stderr, tty)
+ err := execute(ctx, "POST", req.URL(), f.ClientConfig(), options.Stdin, &stdout, &stderr, tty)

if options.PreserveWhitespace {
return stdout.String(), stderr.String(), err
70 vendor/k8s.io/kubernetes/test/e2e/framework/pod/output/output.go (generated, vendored)

@@ -157,6 +157,16 @@ func MatchContainerOutput(
containerName string,
expectedOutput []string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) error {
return MatchMultipleContainerOutputs(ctx, f, pod, map[string][]string{containerName: expectedOutput}, matcher)
}

func MatchMultipleContainerOutputs(
ctx context.Context,
f *framework.Framework,
pod *v1.Pod,
expectedOutputs map[string][]string, // map of container name -> expected outputs
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) error {
ns := pod.ObjectMeta.Namespace
if ns == "" {
ns = f.Namespace.Name
@@ -193,24 +203,26 @@ func MatchContainerOutput(
return fmt.Errorf("expected pod %q success: %v", createdPod.Name, podErr)
}

framework.Logf("Trying to get logs from node %s pod %s container %s: %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
for cName, expectedOutput := range expectedOutputs {
framework.Logf("Trying to get logs from node %s pod %s container %s: %v",
podStatus.Spec.NodeName, podStatus.Name, cName, err)

// Sometimes the actual containers take a second to get started, try to get logs for 60s
logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, ns, podStatus.Name, containerName)
if err != nil {
framework.Logf("Failed to get logs from node %q pod %q container %q. %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
return fmt.Errorf("failed to get logs from %s for %s: %w", podStatus.Name, containerName, err)
}

for _, expected := range expectedOutput {
m := matcher(expected)
matches, err := m.Match(logs)
// Sometimes the actual containers take a second to get started, try to get logs for 60s
logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, ns, podStatus.Name, cName)
if err != nil {
return fmt.Errorf("expected %q in container output: %w", expected, err)
} else if !matches {
return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
framework.Logf("Failed to get logs from node %q pod %q container %q. %v",
podStatus.Spec.NodeName, podStatus.Name, cName, err)
return fmt.Errorf("failed to get logs from %s for %s: %w", podStatus.Name, cName, err)
}

for _, expected := range expectedOutput {
m := matcher(expected)
matches, err := m.Match(logs)
if err != nil {
return fmt.Errorf("expected %q in container output: %w", expected, err)
} else if !matches {
return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
}
}
}

@@ -228,7 +240,11 @@ func TestContainerOutput(ctx context.Context, f *framework.Framework, scenarioNa
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a regexp matcher.
func TestContainerOutputRegexp(ctx context.Context, f *framework.Framework, scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
- TestContainerOutputMatcher(ctx, f, scenarioName, pod, containerIndex, expectedOutput, gomega.MatchRegexp)
+ TestContainerOutputsRegexp(ctx, f, scenarioName, pod, map[int][]string{containerIndex: expectedOutput})
}

func TestContainerOutputsRegexp(ctx context.Context, f *framework.Framework, scenarioName string, pod *v1.Pod, expectedOutputs map[int][]string) {
TestContainerOutputsMatcher(ctx, f, scenarioName, pod, expectedOutputs, gomega.MatchRegexp)
}

// TestContainerOutputMatcher runs the given pod in the given namespace and waits
@@ -246,3 +262,23 @@ func TestContainerOutputMatcher(ctx context.Context, f *framework.Framework,
}
framework.ExpectNoError(MatchContainerOutput(ctx, f, pod, pod.Spec.Containers[containerIndex].Name, expectedOutput, matcher))
}

func TestContainerOutputsMatcher(ctx context.Context, f *framework.Framework,
scenarioName string,
pod *v1.Pod,
expectedOutputs map[int][]string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) {

ginkgo.By(fmt.Sprintf("Creating a pod to test %v", scenarioName))

expectedNameOutputs := make(map[string][]string, len(expectedOutputs))
for containerIndex, expectedOutput := range expectedOutputs {
expectedOutput := expectedOutput
if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) {
framework.Failf("Invalid container index: %d", containerIndex)
}
expectedNameOutputs[pod.Spec.Containers[containerIndex].Name] = expectedOutput
}
framework.ExpectNoError(MatchMultipleContainerOutputs(ctx, f, pod, expectedNameOutputs, matcher))
}
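A brief sketch of calling the new map-based helper from a spec; the pod layout and the expected patterns are assumptions, only the e2eoutput call comes from the code above:

	package example

	import (
		"context"

		v1 "k8s.io/api/core/v1"
		"k8s.io/kubernetes/test/e2e/framework"
		e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
	)

	// checkTwoContainers verifies the logs of two containers of one pod in a single call.
	func checkTwoContainers(ctx context.Context, f *framework.Framework, pod *v1.Pod) {
		e2eoutput.TestContainerOutputsRegexp(ctx, f, "multi-container output", pod, map[int][]string{
			0: {"HOSTNAME=.+"}, // container 0 is assumed to print its environment
			1: {"PATH=.+"},
		})
	}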
9 vendor/k8s.io/kubernetes/test/e2e/framework/pod/pod_client.go (generated, vendored)

@@ -266,7 +266,7 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
}
// If the image policy is not PullAlways, the image must be in the pre-pull list and
// pre-pulled.
- gomega.Expect(ImagePrePullList.Has(c.Image)).To(gomega.BeTrue(), "Image %q is not in the pre-pull list, consider adding it to PrePulledImages in test/e2e/common/util.go or NodePrePullImageList in test/e2e_node/image_list.go", c.Image)
+ gomega.Expect(ImagePrePullList.Has(c.Image)).To(gomega.BeTrueBecause("Image %q is not in the pre-pull list, consider adding it to PrePulledImages in test/e2e/common/util.go or NodePrePullImageList in test/e2e_node/image_list.go", c.Image))
// Do not pull images during the tests because the images in pre-pull list should have
// been prepulled.
c.ImagePullPolicy = v1.PullNever
@@ -308,8 +308,13 @@ func (c *PodClient) WaitForFinish(ctx context.Context, name string, timeout time

// WaitForErrorEventOrSuccess waits for pod to succeed or an error event for that pod.
func (c *PodClient) WaitForErrorEventOrSuccess(ctx context.Context, pod *v1.Pod) (*v1.Event, error) {
return c.WaitForErrorEventOrSuccessWithTimeout(ctx, pod, framework.PodStartTimeout)
}

// WaitForErrorEventOrSuccessWithTimeout waits for pod to succeed or an error event for that pod for a specified time
func (c *PodClient) WaitForErrorEventOrSuccessWithTimeout(ctx context.Context, pod *v1.Pod, timeout time.Duration) (*v1.Event, error) {
var ev *v1.Event
- err := wait.PollUntilContextTimeout(ctx, framework.Poll, framework.PodStartTimeout, false, func(ctx context.Context) (bool, error) {
+ err := wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, false, func(ctx context.Context) (bool, error) {
evnts, err := c.f.ClientSet.CoreV1().Events(pod.Namespace).Search(scheme.Scheme, pod)
if err != nil {
return false, fmt.Errorf("error in listing events: %w", err)
397 vendor/k8s.io/kubernetes/test/e2e/framework/pod/resize.go (generated, vendored, new file)

@@ -0,0 +1,397 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pod

import (
"context"
"encoding/json"
"errors"
"fmt"
"strconv"
"strings"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
kubecm "k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"

"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
)

const (
CgroupCPUPeriod string = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"
CgroupCPUShares string = "/sys/fs/cgroup/cpu/cpu.shares"
CgroupCPUQuota string = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
CgroupMemLimit string = "/sys/fs/cgroup/memory/memory.limit_in_bytes"
Cgroupv2MemLimit string = "/sys/fs/cgroup/memory.max"
Cgroupv2MemRequest string = "/sys/fs/cgroup/memory.min"
Cgroupv2CPULimit string = "/sys/fs/cgroup/cpu.max"
Cgroupv2CPURequest string = "/sys/fs/cgroup/cpu.weight"
CPUPeriod string = "100000"
MinContainerRuntimeVersion string = "1.6.9"
)

var (
podOnCgroupv2Node *bool
)

type ContainerResources struct {
CPUReq string
CPULim string
MemReq string
MemLim string
EphStorReq string
EphStorLim string
ExtendedResourceReq string
ExtendedResourceLim string
}

func (cr *ContainerResources) ResourceRequirements() *v1.ResourceRequirements {
if cr == nil {
return nil
}

var lim, req v1.ResourceList
if cr.CPULim != "" || cr.MemLim != "" || cr.EphStorLim != "" {
lim = make(v1.ResourceList)
}
if cr.CPUReq != "" || cr.MemReq != "" || cr.EphStorReq != "" {
req = make(v1.ResourceList)
}
if cr.CPULim != "" {
lim[v1.ResourceCPU] = resource.MustParse(cr.CPULim)
}
if cr.MemLim != "" {
lim[v1.ResourceMemory] = resource.MustParse(cr.MemLim)
}
if cr.EphStorLim != "" {
lim[v1.ResourceEphemeralStorage] = resource.MustParse(cr.EphStorLim)
}
if cr.CPUReq != "" {
req[v1.ResourceCPU] = resource.MustParse(cr.CPUReq)
}
if cr.MemReq != "" {
req[v1.ResourceMemory] = resource.MustParse(cr.MemReq)
}
if cr.EphStorReq != "" {
req[v1.ResourceEphemeralStorage] = resource.MustParse(cr.EphStorReq)
}
return &v1.ResourceRequirements{Limits: lim, Requests: req}
}

type ResizableContainerInfo struct {
Name string
Resources *ContainerResources
CPUPolicy *v1.ResourceResizeRestartPolicy
MemPolicy *v1.ResourceResizeRestartPolicy
RestartCount int32
}

type containerPatch struct {
Name string `json:"name"`
Resources struct {
Requests struct {
CPU string `json:"cpu,omitempty"`
Memory string `json:"memory,omitempty"`
EphStor string `json:"ephemeral-storage,omitempty"`
} `json:"requests"`
Limits struct {
CPU string `json:"cpu,omitempty"`
Memory string `json:"memory,omitempty"`
EphStor string `json:"ephemeral-storage,omitempty"`
} `json:"limits"`
} `json:"resources"`
}

type patchSpec struct {
Spec struct {
Containers []containerPatch `json:"containers"`
} `json:"spec"`
}

func getTestResourceInfo(tcInfo ResizableContainerInfo) (res v1.ResourceRequirements, resizePol []v1.ContainerResizePolicy) {
if tcInfo.Resources != nil {
res = *tcInfo.Resources.ResourceRequirements()
}
if tcInfo.CPUPolicy != nil {
cpuPol := v1.ContainerResizePolicy{ResourceName: v1.ResourceCPU, RestartPolicy: *tcInfo.CPUPolicy}
resizePol = append(resizePol, cpuPol)
}
if tcInfo.MemPolicy != nil {
memPol := v1.ContainerResizePolicy{ResourceName: v1.ResourceMemory, RestartPolicy: *tcInfo.MemPolicy}
resizePol = append(resizePol, memPol)
}
return res, resizePol
}

func InitDefaultResizePolicy(containers []ResizableContainerInfo) {
noRestart := v1.NotRequired
setDefaultPolicy := func(ci *ResizableContainerInfo) {
if ci.CPUPolicy == nil {
ci.CPUPolicy = &noRestart
}
if ci.MemPolicy == nil {
ci.MemPolicy = &noRestart
}
}
for i := range containers {
setDefaultPolicy(&containers[i])
}
}

func makeResizableContainer(tcInfo ResizableContainerInfo) v1.Container {
cmd := "grep Cpus_allowed_list /proc/self/status | cut -f2 && sleep 1d"
res, resizePol := getTestResourceInfo(tcInfo)

tc := v1.Container{
Name: tcInfo.Name,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh"},
Args: []string{"-c", cmd},
Resources: res,
ResizePolicy: resizePol,
}

return tc
}

func MakePodWithResizableContainers(ns, name, timeStamp string, tcInfo []ResizableContainerInfo) *v1.Pod {
var testContainers []v1.Container

for _, ci := range tcInfo {
tc := makeResizableContainer(ci)
testContainers = append(testContainers, tc)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: ns,
Labels: map[string]string{
"time": timeStamp,
},
},
Spec: v1.PodSpec{
OS: &v1.PodOS{Name: v1.Linux},
Containers: testContainers,
RestartPolicy: v1.RestartPolicyOnFailure,
},
}
return pod
}

func VerifyPodResizePolicy(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
ginkgo.GinkgoHelper()
gomega.Expect(gotPod.Spec.Containers).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
for i, wantCtr := range wantCtrs {
gotCtr := &gotPod.Spec.Containers[i]
ctr := makeResizableContainer(wantCtr)
gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name))
gomega.Expect(gotCtr.ResizePolicy).To(gomega.Equal(ctr.ResizePolicy))
}
}

func VerifyPodResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
ginkgo.GinkgoHelper()
gomega.Expect(gotPod.Spec.Containers).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
for i, wantCtr := range wantCtrs {
gotCtr := &gotPod.Spec.Containers[i]
ctr := makeResizableContainer(wantCtr)
gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name))
gomega.Expect(gotCtr.Resources).To(gomega.Equal(ctr.Resources))
}
}

func VerifyPodStatusResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) error {
ginkgo.GinkgoHelper()

var errs []error

if len(gotPod.Status.ContainerStatuses) != len(wantCtrs) {
return fmt.Errorf("expectation length mismatch: got %d statuses, want %d",
len(gotPod.Status.ContainerStatuses), len(wantCtrs))
}
for i, wantCtr := range wantCtrs {
gotCtrStatus := &gotPod.Status.ContainerStatuses[i]
ctr := makeResizableContainer(wantCtr)
if gotCtrStatus.Name != ctr.Name {
errs = append(errs, fmt.Errorf("container status %d name %q != expected name %q", i, gotCtrStatus.Name, ctr.Name))
continue
}
if err := framework.Gomega().Expect(*gotCtrStatus.Resources).To(gomega.Equal(ctr.Resources)); err != nil {
errs = append(errs, fmt.Errorf("container[%s] status resources mismatch: %w", ctr.Name, err))
}
}

return utilerrors.NewAggregate(errs)
}

func VerifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework, pod *v1.Pod, tcInfo []ResizableContainerInfo) error {
ginkgo.GinkgoHelper()
if podOnCgroupv2Node == nil {
value := IsPodOnCgroupv2Node(f, pod)
podOnCgroupv2Node = &value
}
cgroupMemLimit := Cgroupv2MemLimit
cgroupCPULimit := Cgroupv2CPULimit
cgroupCPURequest := Cgroupv2CPURequest
if !*podOnCgroupv2Node {
cgroupMemLimit = CgroupMemLimit
cgroupCPULimit = CgroupCPUQuota
cgroupCPURequest = CgroupCPUShares
}

var errs []error
for _, ci := range tcInfo {
if ci.Resources == nil {
continue
}
tc := makeResizableContainer(ci)
if tc.Resources.Limits != nil || tc.Resources.Requests != nil {
var expectedCPUShares int64
var expectedCPULimitString, expectedMemLimitString string
expectedMemLimitInBytes := tc.Resources.Limits.Memory().Value()
cpuRequest := tc.Resources.Requests.Cpu()
cpuLimit := tc.Resources.Limits.Cpu()
if cpuRequest.IsZero() && !cpuLimit.IsZero() {
expectedCPUShares = int64(kubecm.MilliCPUToShares(cpuLimit.MilliValue()))
} else {
expectedCPUShares = int64(kubecm.MilliCPUToShares(cpuRequest.MilliValue()))
}
cpuQuota := kubecm.MilliCPUToQuota(cpuLimit.MilliValue(), kubecm.QuotaPeriod)
if cpuLimit.IsZero() {
cpuQuota = -1
}
expectedCPULimitString = strconv.FormatInt(cpuQuota, 10)
expectedMemLimitString = strconv.FormatInt(expectedMemLimitInBytes, 10)
if *podOnCgroupv2Node {
if expectedCPULimitString == "-1" {
expectedCPULimitString = "max"
}
expectedCPULimitString = fmt.Sprintf("%s %s", expectedCPULimitString, CPUPeriod)
if expectedMemLimitString == "0" {
expectedMemLimitString = "max"
}
// convert cgroup v1 cpu.shares value to cgroup v2 cpu.weight value
// https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2254-cgroup-v2#phase-1-convert-from-cgroups-v1-settings-to-v2
expectedCPUShares = int64(1 + ((expectedCPUShares-2)*9999)/262142)
}
if expectedMemLimitString != "0" {
errs = append(errs, VerifyCgroupValue(f, pod, ci.Name, cgroupMemLimit, expectedMemLimitString))
}
errs = append(errs, VerifyCgroupValue(f, pod, ci.Name, cgroupCPULimit, expectedCPULimitString))
errs = append(errs, VerifyCgroupValue(f, pod, ci.Name, cgroupCPURequest, strconv.FormatInt(expectedCPUShares, 10)))
}
}
return utilerrors.NewAggregate(errs)
}

func verifyContainerRestarts(pod *v1.Pod, expectedContainers []ResizableContainerInfo) error {
ginkgo.GinkgoHelper()

expectContainerRestarts := map[string]int32{}
for _, ci := range expectedContainers {
expectContainerRestarts[ci.Name] = ci.RestartCount
}

errs := []error{}
for _, cs := range pod.Status.ContainerStatuses {
expectedRestarts := expectContainerRestarts[cs.Name]
if cs.RestartCount != expectedRestarts {
errs = append(errs, fmt.Errorf("unexpected number of restarts for container %s: got %d, want %d", cs.Name, cs.RestartCount, expectedRestarts))
}
}
return utilerrors.NewAggregate(errs)
}

func WaitForPodResizeActuation(ctx context.Context, f *framework.Framework, podClient *PodClient, pod *v1.Pod) *v1.Pod {
ginkgo.GinkgoHelper()
// Wait for resize to complete.
framework.ExpectNoError(WaitForPodCondition(ctx, f.ClientSet, pod.Namespace, pod.Name, "resize status cleared", f.Timeouts.PodStart,
func(pod *v1.Pod) (bool, error) {
if pod.Status.Resize == v1.PodResizeStatusInfeasible {
// This is a terminal resize state
return false, fmt.Errorf("resize is infeasible")
}
return pod.Status.Resize == "", nil
}), "pod should finish resizing")

resizedPod, err := framework.GetObject(podClient.Get, pod.Name, metav1.GetOptions{})(ctx)
framework.ExpectNoError(err, "failed to get resized pod")
return resizedPod
}

func ExpectPodResized(ctx context.Context, f *framework.Framework, resizedPod *v1.Pod, expectedContainers []ResizableContainerInfo) {
ginkgo.GinkgoHelper()

// Put each error on a new line for readability.
formatErrors := func(err error) error {
var agg utilerrors.Aggregate
if !errors.As(err, &agg) {
return err
}

errStrings := make([]string, len(agg.Errors()))
for i, err := range agg.Errors() {
errStrings[i] = err.Error()
}
return fmt.Errorf("[\n%s\n]", strings.Join(errStrings, ",\n"))
}
// Verify Pod Containers Cgroup Values
var errs []error
if cgroupErrs := VerifyPodContainersCgroupValues(ctx, f, resizedPod, expectedContainers); cgroupErrs != nil {
errs = append(errs, fmt.Errorf("container cgroup values don't match expected: %w", formatErrors(cgroupErrs)))
}
if resourceErrs := VerifyPodStatusResources(resizedPod, expectedContainers); resourceErrs != nil {
errs = append(errs, fmt.Errorf("container status resources don't match expected: %w", formatErrors(resourceErrs)))
}
if restartErrs := verifyContainerRestarts(resizedPod, expectedContainers); restartErrs != nil {
errs = append(errs, fmt.Errorf("container restart counts don't match expected: %w", formatErrors(restartErrs)))
}

if len(errs) > 0 {
resizedPod.ManagedFields = nil // Suppress managed fields in error output.
framework.ExpectNoError(formatErrors(utilerrors.NewAggregate(errs)),
"Verifying pod resources resize state. Pod: %s", framework.PrettyPrintJSON(resizedPod))
}
}

// ResizeContainerPatch generates a patch string to resize the pod container.
func ResizeContainerPatch(containers []ResizableContainerInfo) (string, error) {
var patch patchSpec

for _, container := range containers {
var cPatch containerPatch
cPatch.Name = container.Name
cPatch.Resources.Requests.CPU = container.Resources.CPUReq
cPatch.Resources.Requests.Memory = container.Resources.MemReq
cPatch.Resources.Limits.CPU = container.Resources.CPULim
cPatch.Resources.Limits.Memory = container.Resources.MemLim

patch.Spec.Containers = append(patch.Spec.Containers, cPatch)
}

patchBytes, err := json.Marshal(patch)
if err != nil {
return "", err
}

return string(patchBytes), nil
}
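The cpu.shares-to-cpu.weight conversion used in VerifyPodContainersCgroupValues maps the cgroup v1 range [2, 262144] onto the cgroup v2 range [1, 10000], per the KEP linked in the code. A tiny standalone check of a few values (the sample numbers are my own arithmetic, not taken from the diff):

	package main

	import "fmt"

	// sharesToWeight mirrors the conversion used in resize.go:
	// cgroup v1 cpu.shares (2..262144) -> cgroup v2 cpu.weight (1..10000).
	func sharesToWeight(shares int64) int64 {
		return 1 + ((shares-2)*9999)/262142
	}

	func main() {
		fmt.Println(sharesToWeight(2))      // 1     (minimum)
		fmt.Println(sharesToWeight(1024))   // 39    (shares for a 1-CPU request)
		fmt.Println(sharesToWeight(262144)) // 10000 (maximum)
	}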
32 vendor/k8s.io/kubernetes/test/e2e/framework/pod/utils.go (generated, vendored)

@@ -19,11 +19,13 @@ package pod
import (
"flag"
"fmt"
"strings"

"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"

v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
psaapi "k8s.io/pod-security-admission/api"
psapolicy "k8s.io/pod-security-admission/policy"
@@ -275,3 +277,33 @@ func FindContainerStatusInPod(pod *v1.Pod, containerName string) *v1.ContainerSt
}
return nil
}

// VerifyCgroupValue verifies that the given cgroup path has the expected value in
// the specified container of the pod. It execs into the container to retrive the
// cgroup value and compares it against the expected value.
func VerifyCgroupValue(f *framework.Framework, pod *v1.Pod, cName, cgPath, expectedCgValue string) error {
cmd := fmt.Sprintf("head -n 1 %s", cgPath)
framework.Logf("Namespace %s Pod %s Container %s - looking for cgroup value %s in path %s",
pod.Namespace, pod.Name, cName, expectedCgValue, cgPath)
cgValue, _, err := ExecCommandInContainerWithFullOutput(f, pod.Name, cName, "/bin/sh", "-c", cmd)
if err != nil {
return fmt.Errorf("failed to find expected value %q in container cgroup %q", expectedCgValue, cgPath)
}
cgValue = strings.Trim(cgValue, "\n")
if cgValue != expectedCgValue {
return fmt.Errorf("cgroup value %q not equal to expected %q", cgValue, expectedCgValue)
}
return nil
}

// IsPodOnCgroupv2Node checks whether the pod is running on cgroupv2 node.
// TODO: Deduplicate this function with NPD cluster e2e test:
// https://github.com/kubernetes/kubernetes/blob/2049360379bcc5d6467769cef112e6e492d3d2f0/test/e2e/node/node_problem_detector.go#L369
func IsPodOnCgroupv2Node(f *framework.Framework, pod *v1.Pod) bool {
cmd := "mount -t cgroup2"
out, _, err := ExecCommandInContainerWithFullOutput(f, pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", cmd)
if err != nil {
return false
}
return len(out) != 0
}
14 vendor/k8s.io/kubernetes/test/e2e/framework/pod/wait.go (generated, vendored)

@@ -475,7 +475,7 @@ func WaitForPodSuccessInNamespaceTimeout(ctx context.Context, c clientset.Interf
ginkgo.By("Saw pod success")
return true, nil
case v1.PodFailed:
- return true, gomega.StopTrying(fmt.Sprintf("pod %q failed with status: %+v", podName, pod.Status))
+ return true, gomega.StopTrying(fmt.Sprintf("pod %q failed with status: \n%s", podName, format.Object(pod.Status, 1)))
default:
return false, nil
}
@@ -809,6 +809,18 @@ func WaitForPodContainerStarted(ctx context.Context, c clientset.Interface, name
})
}

// WaitForPodInitContainerStarted waits for the given Pod init container to start.
func WaitForPodInitContainerStarted(ctx context.Context, c clientset.Interface, namespace, podName string, initContainerIndex int, timeout time.Duration) error {
conditionDesc := fmt.Sprintf("init container %d started", initContainerIndex)
return WaitForPodCondition(ctx, c, namespace, podName, conditionDesc, timeout, func(pod *v1.Pod) (bool, error) {
if initContainerIndex > len(pod.Status.InitContainerStatuses)-1 {
return false, nil
}
initContainerStatus := pod.Status.InitContainerStatuses[initContainerIndex]
return *initContainerStatus.Started, nil
})
}

// WaitForPodFailedReason wait for pod failed reason in status, for example "SysctlForbidden".
func WaitForPodFailedReason(ctx context.Context, c clientset.Interface, pod *v1.Pod, reason string, timeout time.Duration) error {
conditionDesc := fmt.Sprintf("failed with reason %s", reason)
70 vendor/k8s.io/kubernetes/test/e2e/framework/pv/wait.go (generated, vendored, new file)

@@ -0,0 +1,70 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pv

import (
"context"
"fmt"
"time"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/utils/format"
"k8s.io/utils/ptr"
)

// WaitForPersistentVolumeClaimModified waits the given timeout duration for the specified claim to become bound with the
// desired volume attributes class.
// Returns an error if timeout occurs first.
func WaitForPersistentVolumeClaimModified(ctx context.Context, c clientset.Interface, claim *v1.PersistentVolumeClaim, timeout time.Duration) error {
desiredClass := ptr.Deref(claim.Spec.VolumeAttributesClassName, "")

var match = func(claim *v1.PersistentVolumeClaim) bool {
for _, condition := range claim.Status.Conditions {
// conditions that indicate the claim is being modified
// or has an error when modifying the volume
if condition.Type == v1.PersistentVolumeClaimVolumeModifyVolumeError ||
condition.Type == v1.PersistentVolumeClaimVolumeModifyingVolume {
return false
}
}

// check if claim is bound with the desired volume attributes class
currentClass := ptr.Deref(claim.Status.CurrentVolumeAttributesClassName, "")
return claim.Status.Phase == v1.ClaimBound &&
desiredClass == currentClass && claim.Status.ModifyVolumeStatus == nil
}

if match(claim) {
return nil
}

return framework.Gomega().
Eventually(ctx, framework.GetObject(c.CoreV1().PersistentVolumeClaims(claim.Namespace).Get, claim.Name, metav1.GetOptions{})).
WithTimeout(timeout).
Should(framework.MakeMatcher(func(claim *v1.PersistentVolumeClaim) (func() string, error) {
if match(claim) {
return nil, nil
}

return func() string {
return fmt.Sprintf("expected claim's status to be modified with the given VolumeAttirbutesClass %s, got instead:\n%s", desiredClass, format.Object(claim, 1))
}, nil
}))
}
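A hedged sketch of how a VolumeAttributesClass e2e test could use the new helper after patching a claim's spec.volumeAttributesClassName; the wrapper and the 5-minute timeout are assumptions, only the e2epv call comes from the code above:

	package example

	import (
		"context"
		"time"

		v1 "k8s.io/api/core/v1"
		clientset "k8s.io/client-go/kubernetes"
		"k8s.io/kubernetes/test/e2e/framework"
		e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	)

	// expectModified blocks until the claim's status reflects the requested
	// VolumeAttributesClass, or fails the spec on timeout.
	func expectModified(ctx context.Context, c clientset.Interface, claim *v1.PersistentVolumeClaim) {
		err := e2epv.WaitForPersistentVolumeClaimModified(ctx, c, claim, 5*time.Minute)
		framework.ExpectNoError(err, "claim was not modified to the requested VolumeAttributesClass")
	}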
16 vendor/k8s.io/kubernetes/test/e2e/framework/skipper/skipper.go (generated, vendored)

@@ -46,10 +46,13 @@ func Skipf(format string, args ...interface{}) {
panic("unreachable")
}

// Skip is an alias for ginkgo.Skip.
var Skip = ginkgo.Skip

// SkipUnlessAtLeast skips if the value is less than the minValue.
func SkipUnlessAtLeast(value int, minValue int, message string) {
if value < minValue {
- skipInternalf(1, message)
+ skipInternalf(1, "%s", message)
}
}

@@ -130,6 +133,17 @@ func SkipUnlessMultizone(ctx context.Context, c clientset.Interface) {
}
}

// SkipUnlessAtLeastNZones skips if the cluster does not have n multizones.
func SkipUnlessAtLeastNZones(ctx context.Context, c clientset.Interface, n int) {
zones, err := e2enode.GetClusterZones(ctx, c)
if err != nil {
skipInternalf(1, "Error listing cluster zones")
}
if zones.Len() < n {
skipInternalf(1, "Requires >= %d zones", n)
}
}

// SkipIfMultizone skips if the cluster has multizone.
func SkipIfMultizone(ctx context.Context, c clientset.Interface) {
zones, err := e2enode.GetClusterZones(ctx, c)
3 vendor/k8s.io/kubernetes/test/e2e/framework/ssh/ssh.go (generated, vendored)

@@ -68,6 +68,9 @@ func GetSigner(provider string) (ssh.Signer, error) {
switch provider {
case "gce", "gke", "kubemark":
keyfile = os.Getenv("GCE_SSH_KEY")
if keyfile == "" {
keyfile = os.Getenv("GCE_SSH_PRIVATE_KEY_FILE")
}
if keyfile == "" {
keyfile = "google_compute_engine"
}
3 vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go (generated, vendored)

@@ -277,6 +277,8 @@ type NodeTestContextType struct {
ExtraEnvs map[string]string
// StandaloneMode indicates whether the test is running kubelet in a standalone mode.
StandaloneMode bool
// CriProxyEnabled indicates whether enable CRI API proxy for failure injection.
CriProxyEnabled bool
}

// CloudConfig holds the cloud configuration for e2e test suites.
@@ -508,6 +510,7 @@ func AfterReadingAllFlags(t *TestContextType) {
gomega.SetDefaultConsistentlyPollingInterval(t.timeouts.Poll)
gomega.SetDefaultEventuallyTimeout(t.timeouts.PodStart)
gomega.SetDefaultConsistentlyDuration(t.timeouts.PodStartShort)
gomega.EnforceDefaultTimeoutsWhenUsingContexts()

// ginkgo.PreviewSpecs will expand all nodes and thus may find new bugs.
report := ginkgo.PreviewSpecs("Kubernetes e2e test statistics")
45 vendor/k8s.io/kubernetes/test/e2e/framework/util.go (generated, vendored)

@@ -128,6 +128,9 @@ const (

// SnapshotDeleteTimeout is how long for snapshot to delete snapshotContent.
SnapshotDeleteTimeout = 5 * time.Minute

// ControlPlaneLabel is valid label for kubeadm based clusters like kops ONLY
ControlPlaneLabel = "node-role.kubernetes.io/control-plane"
)

var (
@@ -582,7 +585,9 @@ func StartCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err e
if err != nil {
return
}
- Logf("Asynchronously running '%s %s'", cmd.Path, strings.Join(cmd.Args, " "))
+ // cmd.Args contains command itself as 0th argument, so it's sufficient to
+ // print 1st and latter arguments
+ Logf("Asynchronously running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " "))
err = cmd.Start()
return
}
@@ -662,6 +667,17 @@ func RunCmdEnv(env []string, command string, args ...string) (string, string, er
return stdout, stderr, nil
}

// GetNodeExternalIPs returns a list of external ip address(es) if any for a node
func GetNodeExternalIPs(node *v1.Node) (ips []string) {
for j := range node.Status.Addresses {
nodeAddress := &node.Status.Addresses[j]
if nodeAddress.Type == v1.NodeExternalIP && nodeAddress.Address != "" {
ips = append(ips, nodeAddress.Address)
}
}
return
}

// getControlPlaneAddresses returns the externalIP, internalIP and hostname fields of control plane nodes.
// If any of these is unavailable, empty slices are returned.
func getControlPlaneAddresses(ctx context.Context, c clientset.Interface) ([]string, []string, []string) {
@@ -694,6 +710,33 @@ func getControlPlaneAddresses(ctx context.Context, c clientset.Interface) ([]str
return externalIPs, internalIPs, hostnames
}

// GetControlPlaneNodes returns a list of control plane nodes
func GetControlPlaneNodes(ctx context.Context, c clientset.Interface) *v1.NodeList {
allNodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
ExpectNoError(err, "error reading all nodes")

var cpNodes v1.NodeList

for _, node := range allNodes.Items {
// Check for the control plane label
if _, hasLabel := node.Labels[ControlPlaneLabel]; hasLabel {
cpNodes.Items = append(cpNodes.Items, node)
continue
}

// Check for the specific taint
for _, taint := range node.Spec.Taints {
// NOTE the taint key is the same as the control plane label
if taint.Key == ControlPlaneLabel && taint.Effect == v1.TaintEffectNoSchedule {
cpNodes.Items = append(cpNodes.Items, node)
continue
}
}
}

return &cpNodes
}

// GetControlPlaneAddresses returns all IP addresses on which the kubelet can reach the control plane.
// It may return internal and external IPs, even if we expect for
// e.g. internal IPs to be used (issue #56787), so that we can be
18 vendor/k8s.io/kubernetes/test/e2e/framework/volume/fixtures.go (generated, vendored)

@@ -176,6 +176,24 @@ func NewNFSServerWithNodeName(ctx context.Context, cs clientset.Interface, names
return config, pod, host
}

// Restart the passed-in nfs-server by issuing a `rpc.nfsd 1` command in the
// pod's (only) container. This command changes the number of nfs server threads from
// (presumably) zero back to 1, and therefore allows nfs to open connections again.
func RestartNFSServer(f *framework.Framework, serverPod *v1.Pod) {
const startcmd = "rpc.nfsd 1"
_, _, err := PodExec(f, serverPod, startcmd)
framework.ExpectNoError(err)
}

// Stop the passed-in nfs-server by issuing a `rpc.nfsd 0` command in the
// pod's (only) container. This command changes the number of nfs server threads to 0,
// thus closing all open nfs connections.
func StopNFSServer(f *framework.Framework, serverPod *v1.Pod) {
const stopcmd = "rpc.nfsd 0 && for i in $(seq 200); do rpcinfo -p | grep -q nfs || break; sleep 1; done"
_, _, err := PodExec(f, serverPod, stopcmd)
framework.ExpectNoError(err)
}

// CreateStorageServer is a wrapper for startVolumeServer(). A storage server config is passed in, and a pod pointer
// and ip address string are returned.
// Note: Expect() is called so no error is returned.
81 vendor/k8s.io/kubernetes/test/e2e/storage/utils/pod.go (generated, vendored)

@@ -24,7 +24,6 @@ import (
"path"
"regexp"
"strings"
"time"

"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
@@ -99,74 +98,50 @@ func StartPodLogs(ctx context.Context, f *framework.Framework, driverNamespace *

// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
// for the desired statues..
// - First issues the command via `systemctl`
// - If `systemctl` returns stderr "command not found, issues the command via `service`
// - If `service` also returns stderr "command not found", the test is aborted.
// Allowed kubeletOps are `KStart`, `KStop`, and `KRestart`
func KubeletCommand(ctx context.Context, kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
command := ""
systemctlPresent := false
kubeletPid := ""

nodeIP, err := getHostAddress(ctx, c, pod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"

framework.Logf("Checking if systemctl command is present")
sshResult, err := e2essh.SSH(ctx, "systemctl --version", nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
if !strings.Contains(sshResult.Stderr, "command not found") {
command = fmt.Sprintf("systemctl %s kubelet", string(kOp))
systemctlPresent = true
} else {
command = fmt.Sprintf("service kubelet %s", string(kOp))
}

commandTemplate := "systemctl %s kubelet"
sudoPresent := isSudoPresent(ctx, nodeIP, framework.TestContext.Provider)
if sudoPresent {
command = fmt.Sprintf("sudo %s", command)
commandTemplate = "sudo " + commandTemplate
}

if kOp == KRestart {
kubeletPid = getKubeletMainPid(ctx, nodeIP, sudoPresent, systemctlPresent)
runCmd := func(cmd string) {
command := fmt.Sprintf(commandTemplate, cmd)
framework.Logf("Attempting `%s`", command)
sshResult, err := e2essh.SSH(ctx, command, nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
e2essh.LogResult(sshResult)
gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to [%s] kubelet:\n%#v", cmd, sshResult)
}

framework.Logf("Attempting `%s`", command)
sshResult, err = e2essh.SSH(ctx, command, nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
e2essh.LogResult(sshResult)
gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult)

if kOp == KStop || kOp == KRestart {
runCmd("stop")
}
if kOp == KStop {
if ok := e2enode.WaitForNodeToBeNotReady(ctx, c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName)
}
return
}
if kOp == KRestart {
// Wait for a minute to check if kubelet Pid is getting changed
isPidChanged := false
for start := time.Now(); time.Since(start) < 1*time.Minute; time.Sleep(2 * time.Second) {
if ctx.Err() != nil {
framework.Fail("timed out waiting for Kubelet POD change")
}
kubeletPidAfterRestart := getKubeletMainPid(ctx, nodeIP, sudoPresent, systemctlPresent)
if kubeletPid != kubeletPidAfterRestart {
isPidChanged = true
break
}
}
if !isPidChanged {
framework.Fail("Kubelet PID remained unchanged after restarting Kubelet")
}

framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
time.Sleep(30 * time.Second)
if kOp == KStart && getKubeletRunning(ctx, nodeIP) {
framework.Logf("Kubelet is already running on node %q", pod.Spec.NodeName)
// Just skip. Or we cannot get a new heartbeat in time.
return
}
if kOp == KStart || kOp == KRestart {
// For kubelet start and restart operations, Wait until Node becomes Ready
if ok := e2enode.WaitForNodeToBeReady(ctx, c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
}

node, err := c.CoreV1().Nodes().Get(ctx, pod.Spec.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
heartbeatTime := e2enode.GetNodeHeartbeatTime(node)

runCmd("start")
// Wait for next heartbeat, which must be sent by the new kubelet process.
e2enode.WaitForNodeHeartbeatAfter(ctx, c, pod.Spec.NodeName, heartbeatTime, NodeStateTimeout)
// Then wait until Node with new process becomes Ready.
if ok := e2enode.WaitForNodeToBeReady(ctx, c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
}
}
}
26
vendor/k8s.io/kubernetes/test/e2e/storage/utils/utils.go
generated
vendored
26
vendor/k8s.io/kubernetes/test/e2e/storage/utils/utils.go
generated
vendored
@ -73,24 +73,16 @@ func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string
	gomega.Expect(expectedFSGroup).To(gomega.Equal(fsGroupResult), "Expected fsGroup of %s, got %s", expectedFSGroup, fsGroupResult)
}

// getKubeletMainPid return the Main PID of the Kubelet Process
func getKubeletMainPid(ctx context.Context, nodeIP string, sudoPresent bool, systemctlPresent bool) string {
	command := ""
	if systemctlPresent {
		command = "systemctl status kubelet | grep 'Main PID'"
	} else {
		command = "service kubelet status | grep 'Main PID'"
	}
	if sudoPresent {
		command = fmt.Sprintf("sudo %s", command)
	}
// getKubeletRunning return if the kubelet is running or not
func getKubeletRunning(ctx context.Context, nodeIP string) bool {
	command := "systemctl show kubelet --property ActiveState --value"
	framework.Logf("Attempting `%s`", command)
	sshResult, err := e2essh.SSH(ctx, command, nodeIP, framework.TestContext.Provider)
	framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", nodeIP))
	e2essh.LogResult(sshResult)
	gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to get kubelet PID")
	gomega.Expect(sshResult.Stdout).NotTo(gomega.BeEmpty(), "Kubelet Main PID should not be Empty")
	return sshResult.Stdout
	gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to get kubelet status")
	gomega.Expect(sshResult.Stdout).NotTo(gomega.BeEmpty(), "Kubelet status should not be Empty")
	return strings.TrimSpace(sshResult.Stdout) == "active"
}

// TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts
@ -104,6 +96,9 @@ func TestKubeletRestartsAndRestoresMount(ctx context.Context, c clientset.Interf
	ginkgo.By("Restarting kubelet")
	KubeletCommand(ctx, KRestart, c, clientPod)

	ginkgo.By("Wait 20s for the volume to become stable")
	time.Sleep(20 * time.Second)

	ginkgo.By("Testing that written file is accessible.")
	CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)

@ -121,6 +116,9 @@ func TestKubeletRestartsAndRestoresMap(ctx context.Context, c clientset.Interfac
	ginkgo.By("Restarting kubelet")
	KubeletCommand(ctx, KRestart, c, clientPod)

	ginkgo.By("Wait 20s for the volume to become stable")
	time.Sleep(20 * time.Second)

	ginkgo.By("Testing that written pv is accessible.")
	CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)
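A minimal sketch (not part of this diff; the helper name and timeout are assumptions) of how a caller could poll getKubeletRunning until the kubelet's systemd unit reports "active":

// waitForKubeletActive is a hypothetical helper that repeatedly runs the
// ActiveState check above and returns an error if the kubelet does not come
// back within the given timeout.
func waitForKubeletActive(ctx context.Context, nodeIP string, timeout time.Duration) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
		if getKubeletRunning(ctx, nodeIP) {
			return nil
		}
	}
	return fmt.Errorf("kubelet on node %s did not become active within %v", nodeIP, timeout)
}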
102
vendor/k8s.io/kubernetes/test/e2e/storage/utils/volume_group_snapshot.go
generated
vendored
Normal file
102
vendor/k8s.io/kubernetes/test/e2e/storage/utils/volume_group_snapshot.go
generated
vendored
Normal file
@ -0,0 +1,102 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/storage/names"
	"k8s.io/client-go/dynamic"
	"k8s.io/kubernetes/test/e2e/framework"
)

const (
	// VolumeGroupSnapshot is the group snapshot api
	VolumeGroupSnapshotAPIGroup = "groupsnapshot.storage.k8s.io"
	// VolumeGroupSnapshotAPIVersion is the group snapshot api version
	VolumeGroupSnapshotAPIVersion = "groupsnapshot.storage.k8s.io/v1alpha1"
)

var (

	// VolumeGroupSnapshotGVR is GroupVersionResource for volumegroupsnapshots
	VolumeGroupSnapshotGVR = schema.GroupVersionResource{Group: VolumeGroupSnapshotAPIGroup, Version: "v1alpha1", Resource: "volumegroupsnapshots"}
	// VolumeGroupSnapshotClassGVR is GroupVersionResource for volumegroupsnapshotclasses
	VolumeGroupSnapshotClassGVR = schema.GroupVersionResource{Group: VolumeGroupSnapshotAPIGroup, Version: "v1alpha1", Resource: "volumegroupsnapshotclasses"}
)

// WaitForVolumeGroupSnapshotReady waits for a VolumeGroupSnapshot to be ready to use or until timeout occurs, whichever comes first.
func WaitForVolumeGroupSnapshotReady(ctx context.Context, c dynamic.Interface, ns string, volumeGroupSnapshotName string, poll, timeout time.Duration) error {
	framework.Logf("Waiting up to %v for VolumeGroupSnapshot %s to become ready", timeout, volumeGroupSnapshotName)

	if successful := WaitUntil(poll, timeout, func() bool {
		volumeGroupSnapshot, err := c.Resource(VolumeGroupSnapshotGVR).Namespace(ns).Get(ctx, volumeGroupSnapshotName, metav1.GetOptions{})
		if err != nil {
			framework.Logf("Failed to get group snapshot %q, retrying in %v. Error: %v", volumeGroupSnapshotName, poll, err)
			return false
		}

		status := volumeGroupSnapshot.Object["status"]
		if status == nil {
			framework.Logf("VolumeGroupSnapshot %s found but is not ready.", volumeGroupSnapshotName)
			return false
		}
		value := status.(map[string]interface{})
		if value["readyToUse"] == true {
			framework.Logf("VolumeSnapshot %s found and is ready", volumeGroupSnapshotName)
			return true
		}

		framework.Logf("VolumeSnapshot %s found but is not ready.", volumeGroupSnapshotName)
		return false
	}); successful {
		return nil
	}

	return fmt.Errorf("VolumeSnapshot %s is not ready within %v", volumeGroupSnapshotName, timeout)
}

func GenerateVolumeGroupSnapshotClassSpec(
	snapshotter string,
	parameters map[string]string,
	ns string,
) *unstructured.Unstructured {
	deletionPolicy, ok := parameters["deletionPolicy"]
	if !ok {
		deletionPolicy = "Delete"
	}
	volumeGroupSnapshotClass := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":       "VolumeGroupSnapshotClass",
			"apiVersion": VolumeGroupSnapshotAPIVersion,
			"metadata": map[string]interface{}{
				// Name must be unique, so let's base it on namespace name and use GenerateName
				"name": names.SimpleNameGenerator.GenerateName(ns),
			},
			"driver":         snapshotter,
			"parameters":     parameters,
			"deletionPolicy": deletionPolicy,
		},
	}

	return volumeGroupSnapshotClass
}
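A minimal usage sketch (not part of this diff) combining the two helpers in the new file; the driver name, namespace, snapshot name, and polling values below are illustrative assumptions:

// exampleGroupSnapshotFlow is hypothetical: create a VolumeGroupSnapshotClass
// from the generated spec, then wait for an already-created
// VolumeGroupSnapshot to become ready.
func exampleGroupSnapshotFlow(ctx context.Context, dc dynamic.Interface) error {
	class := GenerateVolumeGroupSnapshotClassSpec("hostpath.csi.k8s.io", map[string]string{}, "default")
	// VolumeGroupSnapshotClass is cluster-scoped, so no Namespace() call is needed.
	if _, err := dc.Resource(VolumeGroupSnapshotClassGVR).Create(ctx, class, metav1.CreateOptions{}); err != nil {
		return err
	}
	// "my-group-snapshot" is assumed to have been created elsewhere in the test.
	return WaitForVolumeGroupSnapshotReady(ctx, dc, "default", "my-group-snapshot", 5*time.Second, 5*time.Minute)
}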
21
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/cluster-dns/dns-backend-rc.yaml
generated
vendored
21
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/cluster-dns/dns-backend-rc.yaml
generated
vendored
@ -1,21 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: dns-backend
  labels:
    name: dns-backend
spec:
  replicas: 1
  selector:
    name: dns-backend
  template:
    metadata:
      labels:
        name: dns-backend
    spec:
      containers:
      - name: dns-backend
        image: registry.k8s.io/example-dns-backend:v1
        ports:
        - name: backend-port
          containerPort: 8000
@ -1,9 +0,0 @@
kind: Service
apiVersion: v1
metadata:
  name: dns-backend
spec:
  ports:
  - port: 8000
  selector:
    name: dns-backend
16
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/cluster-dns/dns-frontend-pod.yaml
generated
vendored
16
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/cluster-dns/dns-frontend-pod.yaml
generated
vendored
@ -1,16 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: dns-frontend
  labels:
    name: dns-frontend
spec:
  containers:
  - name: dns-frontend
    image: registry.k8s.io/example-dns-frontend:v1
    command:
    - python
    - client.py
    - http://dns-backend.development.svc.cluster.local:8000
    imagePullPolicy: Always
  restartPolicy: Never
11
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/dra/OWNERS
generated
vendored
Normal file
11
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/dra/OWNERS
generated
vendored
Normal file
@ -0,0 +1,11 @@
# See the OWNERS docs at https://go.k8s.io/owners

approvers:
- klueska
- pohly
reviewers:
- klueska
- pohly
- bart0sh
labels:
- sig/node
85
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/dra/dra-test-driver-proxy.yaml
generated
vendored
Normal file
85
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/dra/dra-test-driver-proxy.yaml
generated
vendored
Normal file
@ -0,0 +1,85 @@
|
||||
# This YAML file deploys the csi-driver-host-path on a number of nodes such
|
||||
# that it proxies all connections from kubelet (plugin registration and dynamic
|
||||
# resource allocation). The actual handling of those connections then happens
|
||||
# inside the e2e.test binary via test/e2e/storage/drivers/proxy. This approach
|
||||
# has the advantage that no separate container image with the test driver is
|
||||
# needed and that tests have full control over the driver, for example for
|
||||
# error injection.
|
||||
#
|
||||
# The csi-driver-host-path image is used because:
|
||||
# - it has the necessary proxy mode (https://github.com/kubernetes-csi/csi-driver-host-path/commit/65480fc74d550a9a5aa81e850955cc20403857b1)
|
||||
# - its base image contains a shell (useful for creating files)
|
||||
# - the image is already a dependency of e2e.test
|
||||
|
||||
kind: ReplicaSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: dra-test-driver
|
||||
labels:
|
||||
app.kubernetes.io/instance: test-driver.dra.k8s.io
|
||||
app.kubernetes.io/part-of: dra-test-driver
|
||||
app.kubernetes.io/name: dra-test-driver-kubelet-plugin
|
||||
app.kubernetes.io/component: kubelet-plugin
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/instance: test-driver.dra.k8s.io
|
||||
app.kubernetes.io/part-of: dra-test-driver
|
||||
app.kubernetes.io/name: dra-test-driver-kubelet-plugin
|
||||
app.kubernetes.io/component: kubelet-plugin
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/instance: test-driver.dra.k8s.io
|
||||
app.kubernetes.io/part-of: dra-test-driver
|
||||
app.kubernetes.io/name: dra-test-driver-kubelet-plugin
|
||||
app.kubernetes.io/component: kubelet-plugin
|
||||
spec:
|
||||
# Ensure that all pods run on distinct nodes.
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/instance: test-driver.dra.k8s.io
|
||||
topologyKey: kubernetes.io/hostname
|
||||
|
||||
containers:
|
||||
- name: registrar
|
||||
image: registry.k8s.io/sig-storage/hostpathplugin:v1.7.3
|
||||
args:
|
||||
- "--v=5"
|
||||
- "--endpoint=/plugins_registry/dra-test-driver-reg.sock"
|
||||
- "--proxy-endpoint=tcp://:9000"
|
||||
volumeMounts:
|
||||
- mountPath: /plugins_registry
|
||||
name: registration-dir
|
||||
|
||||
- name: plugin
|
||||
image: registry.k8s.io/sig-storage/hostpathplugin:v1.7.3
|
||||
args:
|
||||
- "--v=5"
|
||||
- "--endpoint=/dra/dra-test-driver.sock"
|
||||
- "--proxy-endpoint=tcp://:9001"
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- mountPath: /dra
|
||||
name: socket-dir
|
||||
- mountPath: /cdi
|
||||
name: cdi-dir
|
||||
|
||||
volumes:
|
||||
- hostPath:
|
||||
path: /var/lib/kubelet/plugins
|
||||
type: DirectoryOrCreate
|
||||
name: socket-dir
|
||||
- hostPath:
|
||||
path: /var/run/cdi
|
||||
type: DirectoryOrCreate
|
||||
name: cdi-dir
|
||||
- hostPath:
|
||||
path: /var/lib/kubelet/plugins_registry
|
||||
type: DirectoryOrCreate
|
||||
name: registration-dir
|
2
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/embed.go
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/embed.go
generated
vendored
@ -22,7 +22,7 @@ import (
	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
)

//go:embed cluster-dns flexvolume guestbook kubectl sample-device-plugin scheduling/nvidia-driver-installer.yaml statefulset storage-csi
//go:embed dra flexvolume guestbook kubectl sample-device-plugin gpu statefulset storage-csi
var e2eTestingManifestsFS embed.FS

func GetE2ETestingManifestsFS() e2etestfiles.EmbeddedFileSource {
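For reference, a sketch (not part of this diff) of reading one of the newly embedded files directly from the embed.FS declared above; the relative path is an assumption based on the directories named in the //go:embed directive:

// Illustrative only: embed.FS paths are relative to the package directory, so
// the dra OWNERS file added in this change would be addressed as "dra/OWNERS".
data, err := e2eTestingManifestsFS.ReadFile("dra/OWNERS")
if err != nil {
	// handle the error in whatever way the calling test prefers
}
_ = data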
57
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/gpu/gce/nvidia-gpu-device-plugin.yaml
generated
vendored
Normal file
57
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/gpu/gce/nvidia-gpu-device-plugin.yaml
generated
vendored
Normal file
@ -0,0 +1,57 @@
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: nvidia-gpu-device-plugin
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: nvidia-gpu-device-plugin
|
||||
addonmanager.kubernetes.io/mode: EnsureExists
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: nvidia-gpu-device-plugin
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: nvidia-gpu-device-plugin
|
||||
spec:
|
||||
priorityClassName: system-node-critical
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: cloud.google.com/gke-accelerator
|
||||
operator: Exists
|
||||
tolerations:
|
||||
- operator: "Exists"
|
||||
effect: "NoExecute"
|
||||
- operator: "Exists"
|
||||
effect: "NoSchedule"
|
||||
volumes:
|
||||
- name: device-plugin
|
||||
hostPath:
|
||||
path: /var/lib/kubelet/device-plugins
|
||||
- name: dev
|
||||
hostPath:
|
||||
path: /dev
|
||||
containers:
|
||||
- image: "registry.k8s.io/nvidia-gpu-device-plugin@sha256:4b036e8844920336fa48f36edeb7d4398f426d6a934ba022848deed2edbf09aa"
|
||||
command: ["/usr/bin/nvidia-gpu-device-plugin", "-logtostderr"]
|
||||
name: nvidia-gpu-device-plugin
|
||||
resources:
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 10Mi
|
||||
limits:
|
||||
cpu: 50m
|
||||
memory: 10Mi
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- name: device-plugin
|
||||
mountPath: /device-plugin
|
||||
- name: dev
|
||||
mountPath: /dev
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
@ -0,0 +1,94 @@
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/814"
|
||||
controller-gen.kubebuilder.io/version: v0.15.0
|
||||
name: volumegroupsnapshotclasses.groupsnapshot.storage.k8s.io
|
||||
spec:
|
||||
group: groupsnapshot.storage.k8s.io
|
||||
names:
|
||||
kind: VolumeGroupSnapshotClass
|
||||
listKind: VolumeGroupSnapshotClassList
|
||||
plural: volumegroupsnapshotclasses
|
||||
shortNames:
|
||||
- vgsclass
|
||||
- vgsclasses
|
||||
singular: volumegroupsnapshotclass
|
||||
scope: Cluster
|
||||
versions:
|
||||
- additionalPrinterColumns:
|
||||
- jsonPath: .driver
|
||||
name: Driver
|
||||
type: string
|
||||
- description: Determines whether a VolumeGroupSnapshotContent created through
|
||||
the VolumeGroupSnapshotClass should be deleted when its bound VolumeGroupSnapshot
|
||||
is deleted.
|
||||
jsonPath: .deletionPolicy
|
||||
name: DeletionPolicy
|
||||
type: string
|
||||
- jsonPath: .metadata.creationTimestamp
|
||||
name: Age
|
||||
type: date
|
||||
name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: |-
|
||||
VolumeGroupSnapshotClass specifies parameters that a underlying storage system
|
||||
uses when creating a volume group snapshot. A specific VolumeGroupSnapshotClass
|
||||
is used by specifying its name in a VolumeGroupSnapshot object.
|
||||
VolumeGroupSnapshotClasses are non-namespaced.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
deletionPolicy:
|
||||
description: |-
|
||||
DeletionPolicy determines whether a VolumeGroupSnapshotContent created
|
||||
through the VolumeGroupSnapshotClass should be deleted when its bound
|
||||
VolumeGroupSnapshot is deleted.
|
||||
Supported values are "Retain" and "Delete".
|
||||
"Retain" means that the VolumeGroupSnapshotContent and its physical group
|
||||
snapshot on underlying storage system are kept.
|
||||
"Delete" means that the VolumeGroupSnapshotContent and its physical group
|
||||
snapshot on underlying storage system are deleted.
|
||||
Required.
|
||||
enum:
|
||||
- Delete
|
||||
- Retain
|
||||
type: string
|
||||
driver:
|
||||
description: |-
|
||||
Driver is the name of the storage driver expected to handle this VolumeGroupSnapshotClass.
|
||||
Required.
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
parameters:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
Parameters is a key-value map with storage driver specific parameters for
|
||||
creating group snapshots.
|
||||
These values are opaque to Kubernetes and are passed directly to the driver.
|
||||
type: object
|
||||
required:
|
||||
- deletionPolicy
|
||||
- driver
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources: {}
|
@ -0,0 +1,335 @@
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1068"
|
||||
controller-gen.kubebuilder.io/version: v0.15.0
|
||||
name: volumegroupsnapshotcontents.groupsnapshot.storage.k8s.io
|
||||
spec:
|
||||
group: groupsnapshot.storage.k8s.io
|
||||
names:
|
||||
kind: VolumeGroupSnapshotContent
|
||||
listKind: VolumeGroupSnapshotContentList
|
||||
plural: volumegroupsnapshotcontents
|
||||
shortNames:
|
||||
- vgsc
|
||||
- vgscs
|
||||
singular: volumegroupsnapshotcontent
|
||||
scope: Cluster
|
||||
versions:
|
||||
- additionalPrinterColumns:
|
||||
- description: Indicates if all the individual snapshots in the group are ready
|
||||
to be used to restore a group of volumes.
|
||||
jsonPath: .status.readyToUse
|
||||
name: ReadyToUse
|
||||
type: boolean
|
||||
- description: Determines whether this VolumeGroupSnapshotContent and its physical
|
||||
group snapshot on the underlying storage system should be deleted when its
|
||||
bound VolumeGroupSnapshot is deleted.
|
||||
jsonPath: .spec.deletionPolicy
|
||||
name: DeletionPolicy
|
||||
type: string
|
||||
- description: Name of the CSI driver used to create the physical group snapshot
|
||||
on the underlying storage system.
|
||||
jsonPath: .spec.driver
|
||||
name: Driver
|
||||
type: string
|
||||
- description: Name of the VolumeGroupSnapshotClass from which this group snapshot
|
||||
was (or will be) created.
|
||||
jsonPath: .spec.volumeGroupSnapshotClassName
|
||||
name: VolumeGroupSnapshotClass
|
||||
type: string
|
||||
- description: Namespace of the VolumeGroupSnapshot object to which this VolumeGroupSnapshotContent
|
||||
object is bound.
|
||||
jsonPath: .spec.volumeGroupSnapshotRef.namespace
|
||||
name: VolumeGroupSnapshotNamespace
|
||||
type: string
|
||||
- description: Name of the VolumeGroupSnapshot object to which this VolumeGroupSnapshotContent
|
||||
object is bound.
|
||||
jsonPath: .spec.volumeGroupSnapshotRef.name
|
||||
name: VolumeGroupSnapshot
|
||||
type: string
|
||||
- jsonPath: .metadata.creationTimestamp
|
||||
name: Age
|
||||
type: date
|
||||
name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: |-
|
||||
VolumeGroupSnapshotContent represents the actual "on-disk" group snapshot object
|
||||
in the underlying storage system
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: |-
|
||||
Spec defines properties of a VolumeGroupSnapshotContent created by the underlying storage system.
|
||||
Required.
|
||||
properties:
|
||||
deletionPolicy:
|
||||
description: |-
|
||||
DeletionPolicy determines whether this VolumeGroupSnapshotContent and the
|
||||
physical group snapshot on the underlying storage system should be deleted
|
||||
when the bound VolumeGroupSnapshot is deleted.
|
||||
Supported values are "Retain" and "Delete".
|
||||
"Retain" means that the VolumeGroupSnapshotContent and its physical group
|
||||
snapshot on underlying storage system are kept.
|
||||
"Delete" means that the VolumeGroupSnapshotContent and its physical group
|
||||
snapshot on underlying storage system are deleted.
|
||||
For dynamically provisioned group snapshots, this field will automatically
|
||||
be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field
|
||||
defined in the corresponding VolumeGroupSnapshotClass.
|
||||
For pre-existing snapshots, users MUST specify this field when creating the
|
||||
VolumeGroupSnapshotContent object.
|
||||
Required.
|
||||
enum:
|
||||
- Delete
|
||||
- Retain
|
||||
type: string
|
||||
driver:
|
||||
description: |-
|
||||
Driver is the name of the CSI driver used to create the physical group snapshot on
|
||||
the underlying storage system.
|
||||
This MUST be the same as the name returned by the CSI GetPluginName() call for
|
||||
that driver.
|
||||
Required.
|
||||
type: string
|
||||
source:
|
||||
description: |-
|
||||
Source specifies whether the snapshot is (or should be) dynamically provisioned
|
||||
or already exists, and just requires a Kubernetes object representation.
|
||||
This field is immutable after creation.
|
||||
Required.
|
||||
properties:
|
||||
groupSnapshotHandles:
|
||||
description: |-
|
||||
GroupSnapshotHandles specifies the CSI "group_snapshot_id" of a pre-existing
|
||||
group snapshot and a list of CSI "snapshot_id" of pre-existing snapshots
|
||||
on the underlying storage system for which a Kubernetes object
|
||||
representation was (or should be) created.
|
||||
This field is immutable.
|
||||
properties:
|
||||
volumeGroupSnapshotHandle:
|
||||
description: |-
|
||||
VolumeGroupSnapshotHandle specifies the CSI "group_snapshot_id" of a pre-existing
|
||||
group snapshot on the underlying storage system for which a Kubernetes object
|
||||
representation was (or should be) created.
|
||||
This field is immutable.
|
||||
Required.
|
||||
type: string
|
||||
volumeSnapshotHandles:
|
||||
description: |-
|
||||
VolumeSnapshotHandles is a list of CSI "snapshot_id" of pre-existing
|
||||
snapshots on the underlying storage system for which Kubernetes objects
|
||||
representation were (or should be) created.
|
||||
This field is immutable.
|
||||
Required.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- volumeGroupSnapshotHandle
|
||||
- volumeSnapshotHandles
|
||||
type: object
|
||||
x-kubernetes-validations:
|
||||
- message: groupSnapshotHandles is immutable
|
||||
rule: self == oldSelf
|
||||
volumeHandles:
|
||||
description: |-
|
||||
VolumeHandles is a list of volume handles on the backend to be snapshotted
|
||||
together. It is specified for dynamic provisioning of the VolumeGroupSnapshot.
|
||||
This field is immutable.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-validations:
|
||||
- message: volumeHandles is immutable
|
||||
rule: self == oldSelf
|
||||
type: object
|
||||
x-kubernetes-validations:
|
||||
- message: volumeHandles is required once set
|
||||
rule: '!has(oldSelf.volumeHandles) || has(self.volumeHandles)'
|
||||
- message: groupSnapshotHandles is required once set
|
||||
rule: '!has(oldSelf.groupSnapshotHandles) || has(self.groupSnapshotHandles)'
|
||||
- message: exactly one of volumeHandles and groupSnapshotHandles must
|
||||
be set
|
||||
rule: (has(self.volumeHandles) && !has(self.groupSnapshotHandles))
|
||||
|| (!has(self.volumeHandles) && has(self.groupSnapshotHandles))
|
||||
volumeGroupSnapshotClassName:
|
||||
description: |-
|
||||
VolumeGroupSnapshotClassName is the name of the VolumeGroupSnapshotClass from
|
||||
which this group snapshot was (or will be) created.
|
||||
Note that after provisioning, the VolumeGroupSnapshotClass may be deleted or
|
||||
recreated with different set of values, and as such, should not be referenced
|
||||
post-snapshot creation.
|
||||
For dynamic provisioning, this field must be set.
|
||||
This field may be unset for pre-provisioned snapshots.
|
||||
type: string
|
||||
volumeGroupSnapshotRef:
|
||||
description: |-
|
||||
VolumeGroupSnapshotRef specifies the VolumeGroupSnapshot object to which this
|
||||
VolumeGroupSnapshotContent object is bound.
|
||||
VolumeGroupSnapshot.Spec.VolumeGroupSnapshotContentName field must reference to
|
||||
this VolumeGroupSnapshotContent's name for the bidirectional binding to be valid.
|
||||
For a pre-existing VolumeGroupSnapshotContent object, name and namespace of the
|
||||
VolumeGroupSnapshot object MUST be provided for binding to happen.
|
||||
This field is immutable after creation.
|
||||
Required.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: API version of the referent.
|
||||
type: string
|
||||
fieldPath:
|
||||
description: |-
|
||||
If referring to a piece of an object instead of an entire object, this string
|
||||
should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
|
||||
For example, if the object reference is to a container within a pod, this would take on a value like:
|
||||
"spec.containers{name}" (where "name" refers to the name of the container that triggered
|
||||
the event) or if no container name is specified "spec.containers[2]" (container with
|
||||
index 2 in this pod). This syntax is chosen only to have some well-defined way of
|
||||
referencing a part of an object.
|
||||
TODO: this design is not final and this field is subject to change in the future.
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
Kind of the referent.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
name:
|
||||
description: |-
|
||||
Name of the referent.
|
||||
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
type: string
|
||||
namespace:
|
||||
description: |-
|
||||
Namespace of the referent.
|
||||
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
|
||||
type: string
|
||||
resourceVersion:
|
||||
description: |-
|
||||
Specific resourceVersion to which this reference is made, if any.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
|
||||
type: string
|
||||
uid:
|
||||
description: |-
|
||||
UID of the referent.
|
||||
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
|
||||
type: string
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
x-kubernetes-validations:
|
||||
- message: both volumeGroupSnapshotRef.name and volumeGroupSnapshotRef.namespace
|
||||
must be set
|
||||
rule: has(self.name) && has(self.__namespace__)
|
||||
- message: volumeGroupSnapshotRef is immutable
|
||||
rule: self == oldSelf
|
||||
required:
|
||||
- deletionPolicy
|
||||
- driver
|
||||
- source
|
||||
- volumeGroupSnapshotRef
|
||||
type: object
|
||||
status:
|
||||
description: status represents the current information of a group snapshot.
|
||||
properties:
|
||||
creationTime:
|
||||
description: |-
|
||||
CreationTime is the timestamp when the point-in-time group snapshot is taken
|
||||
by the underlying storage system.
|
||||
If not specified, it indicates the creation time is unknown.
|
||||
If not specified, it means the readiness of a group snapshot is unknown.
|
||||
The format of this field is a Unix nanoseconds time encoded as an int64.
|
||||
On Unix, the command date +%s%N returns the current time in nanoseconds
|
||||
since 1970-01-01 00:00:00 UTC.
|
||||
format: int64
|
||||
type: integer
|
||||
error:
|
||||
description: |-
|
||||
Error is the last observed error during group snapshot creation, if any.
|
||||
Upon success after retry, this error field will be cleared.
|
||||
properties:
|
||||
message:
|
||||
description: |-
|
||||
message is a string detailing the encountered error during snapshot
|
||||
creation if specified.
|
||||
NOTE: message may be logged, and it should not contain sensitive
|
||||
information.
|
||||
type: string
|
||||
time:
|
||||
description: time is the timestamp when the error was encountered.
|
||||
format: date-time
|
||||
type: string
|
||||
type: object
|
||||
pvVolumeSnapshotContentList:
|
||||
description: |-
|
||||
PVVolumeSnapshotContentList is the list of pairs of PV and
|
||||
VolumeSnapshotContent for this group snapshot
|
||||
The maximum number of allowed snapshots in the group is 100.
|
||||
items:
|
||||
description: |-
|
||||
PVVolumeSnapshotContentPair represent a pair of PV names and
|
||||
VolumeSnapshotContent names
|
||||
properties:
|
||||
persistentVolumeRef:
|
||||
description: PersistentVolumeRef is a reference to the persistent
|
||||
volume resource
|
||||
properties:
|
||||
name:
|
||||
description: |-
|
||||
Name of the referent.
|
||||
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
TODO: Add other useful fields. apiVersion, kind, uid?
|
||||
type: string
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
volumeSnapshotContentRef:
|
||||
description: VolumeSnapshotContentRef is a reference to the
|
||||
volume snapshot content resource
|
||||
properties:
|
||||
name:
|
||||
description: |-
|
||||
Name of the referent.
|
||||
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
TODO: Add other useful fields. apiVersion, kind, uid?
|
||||
type: string
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
type: object
|
||||
type: array
|
||||
readyToUse:
|
||||
description: |-
|
||||
ReadyToUse indicates if all the individual snapshots in the group are ready to be
|
||||
used to restore a group of volumes.
|
||||
ReadyToUse becomes true when ReadyToUse of all individual snapshots become true.
|
||||
type: boolean
|
||||
volumeGroupSnapshotHandle:
|
||||
description: |-
|
||||
VolumeGroupSnapshotHandle is a unique id returned by the CSI driver
|
||||
to identify the VolumeGroupSnapshot on the storage system.
|
||||
If a storage system does not provide such an id, the
|
||||
CSI driver can choose to return the VolumeGroupSnapshot name.
|
||||
type: string
|
||||
type: object
|
||||
required:
|
||||
- spec
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
273
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml
generated
vendored
Normal file
273
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml
generated
vendored
Normal file
@ -0,0 +1,273 @@
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1068"
|
||||
controller-gen.kubebuilder.io/version: v0.15.0
|
||||
name: volumegroupsnapshots.groupsnapshot.storage.k8s.io
|
||||
spec:
|
||||
group: groupsnapshot.storage.k8s.io
|
||||
names:
|
||||
kind: VolumeGroupSnapshot
|
||||
listKind: VolumeGroupSnapshotList
|
||||
plural: volumegroupsnapshots
|
||||
shortNames:
|
||||
- vgs
|
||||
singular: volumegroupsnapshot
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- additionalPrinterColumns:
|
||||
- description: Indicates if all the individual snapshots in the group are ready
|
||||
to be used to restore a group of volumes.
|
||||
jsonPath: .status.readyToUse
|
||||
name: ReadyToUse
|
||||
type: boolean
|
||||
- description: The name of the VolumeGroupSnapshotClass requested by the VolumeGroupSnapshot.
|
||||
jsonPath: .spec.volumeGroupSnapshotClassName
|
||||
name: VolumeGroupSnapshotClass
|
||||
type: string
|
||||
- description: Name of the VolumeGroupSnapshotContent object to which the VolumeGroupSnapshot
|
||||
object intends to bind to. Please note that verification of binding actually
|
||||
requires checking both VolumeGroupSnapshot and VolumeGroupSnapshotContent
|
||||
to ensure both are pointing at each other. Binding MUST be verified prior
|
||||
to usage of this object.
|
||||
jsonPath: .status.boundVolumeGroupSnapshotContentName
|
||||
name: VolumeGroupSnapshotContent
|
||||
type: string
|
||||
- description: Timestamp when the point-in-time group snapshot was taken by the
|
||||
underlying storage system.
|
||||
jsonPath: .status.creationTime
|
||||
name: CreationTime
|
||||
type: date
|
||||
- jsonPath: .metadata.creationTimestamp
|
||||
name: Age
|
||||
type: date
|
||||
name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: |-
|
||||
VolumeGroupSnapshot is a user's request for creating either a point-in-time
|
||||
group snapshot or binding to a pre-existing group snapshot.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: |-
|
||||
Spec defines the desired characteristics of a group snapshot requested by a user.
|
||||
Required.
|
||||
properties:
|
||||
source:
|
||||
description: |-
|
||||
Source specifies where a group snapshot will be created from.
|
||||
This field is immutable after creation.
|
||||
Required.
|
||||
properties:
|
||||
selector:
|
||||
description: |-
|
||||
Selector is a label query over persistent volume claims that are to be
|
||||
grouped together for snapshotting.
|
||||
This labelSelector will be used to match the label added to a PVC.
|
||||
If the label is added or removed to a volume after a group snapshot
|
||||
is created, the existing group snapshots won't be modified.
|
||||
Once a VolumeGroupSnapshotContent is created and the sidecar starts to process
|
||||
it, the volume list will not change with retries.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of label selector
|
||||
requirements. The requirements are ANDed.
|
||||
items:
|
||||
description: |-
|
||||
A label selector requirement is a selector that contains values, a key, and an operator that
|
||||
relates the key and values.
|
||||
properties:
|
||||
key:
|
||||
description: key is the label key that the selector
|
||||
applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: |-
|
||||
operator represents a key's relationship to a set of values.
|
||||
Valid operators are In, NotIn, Exists and DoesNotExist.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
values is an array of string values. If the operator is In or NotIn,
|
||||
the values array must be non-empty. If the operator is Exists or DoesNotExist,
|
||||
the values array must be empty. This array is replaced during a strategic
|
||||
merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
|
||||
map is equivalent to an element of matchExpressions, whose key field is "key", the
|
||||
operator is "In", and the values array contains only "value". The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
x-kubernetes-validations:
|
||||
- message: selector is immutable
|
||||
rule: self == oldSelf
|
||||
volumeGroupSnapshotContentName:
|
||||
description: |-
|
||||
VolumeGroupSnapshotContentName specifies the name of a pre-existing VolumeGroupSnapshotContent
|
||||
object representing an existing volume group snapshot.
|
||||
This field should be set if the volume group snapshot already exists and
|
||||
only needs a representation in Kubernetes.
|
||||
This field is immutable.
|
||||
type: string
|
||||
x-kubernetes-validations:
|
||||
- message: volumeGroupSnapshotContentName is immutable
|
||||
rule: self == oldSelf
|
||||
type: object
|
||||
x-kubernetes-validations:
|
||||
- message: selector is required once set
|
||||
rule: '!has(oldSelf.selector) || has(self.selector)'
|
||||
- message: volumeGroupSnapshotContentName is required once set
|
||||
rule: '!has(oldSelf.volumeGroupSnapshotContentName) || has(self.volumeGroupSnapshotContentName)'
|
||||
- message: exactly one of selector and volumeGroupSnapshotContentName
|
||||
must be set
|
||||
rule: (has(self.selector) && !has(self.volumeGroupSnapshotContentName))
|
||||
|| (!has(self.selector) && has(self.volumeGroupSnapshotContentName))
|
||||
volumeGroupSnapshotClassName:
|
||||
description: |-
|
||||
VolumeGroupSnapshotClassName is the name of the VolumeGroupSnapshotClass
|
||||
requested by the VolumeGroupSnapshot.
|
||||
VolumeGroupSnapshotClassName may be left nil to indicate that the default
|
||||
class will be used.
|
||||
Empty string is not allowed for this field.
|
||||
type: string
|
||||
x-kubernetes-validations:
|
||||
- message: volumeGroupSnapshotClassName must not be the empty string
|
||||
when set
|
||||
rule: size(self) > 0
|
||||
required:
|
||||
- source
|
||||
type: object
|
||||
status:
|
||||
description: |-
|
||||
Status represents the current information of a group snapshot.
|
||||
Consumers must verify binding between VolumeGroupSnapshot and
|
||||
VolumeGroupSnapshotContent objects is successful (by validating that both
|
||||
VolumeGroupSnapshot and VolumeGroupSnapshotContent point to each other) before
|
||||
using this object.
|
||||
properties:
|
||||
boundVolumeGroupSnapshotContentName:
|
||||
description: |-
|
||||
BoundVolumeGroupSnapshotContentName is the name of the VolumeGroupSnapshotContent
|
||||
object to which this VolumeGroupSnapshot object intends to bind to.
|
||||
If not specified, it indicates that the VolumeGroupSnapshot object has not
|
||||
been successfully bound to a VolumeGroupSnapshotContent object yet.
|
||||
NOTE: To avoid possible security issues, consumers must verify binding between
|
||||
VolumeGroupSnapshot and VolumeGroupSnapshotContent objects is successful
|
||||
(by validating that both VolumeGroupSnapshot and VolumeGroupSnapshotContent
|
||||
point at each other) before using this object.
|
||||
type: string
|
||||
creationTime:
|
||||
description: |-
|
||||
CreationTime is the timestamp when the point-in-time group snapshot is taken
|
||||
by the underlying storage system.
|
||||
If not specified, it may indicate that the creation time of the group snapshot
|
||||
is unknown.
|
||||
The format of this field is a Unix nanoseconds time encoded as an int64.
|
||||
On Unix, the command date +%s%N returns the current time in nanoseconds
|
||||
since 1970-01-01 00:00:00 UTC.
|
||||
format: date-time
|
||||
type: string
|
||||
error:
|
||||
description: |-
|
||||
Error is the last observed error during group snapshot creation, if any.
|
||||
This field could be helpful to upper level controllers (i.e., application
|
||||
controller) to decide whether they should continue on waiting for the group
|
||||
snapshot to be created based on the type of error reported.
|
||||
The snapshot controller will keep retrying when an error occurs during the
|
||||
group snapshot creation. Upon success, this error field will be cleared.
|
||||
properties:
|
||||
message:
|
||||
description: |-
|
||||
message is a string detailing the encountered error during snapshot
|
||||
creation if specified.
|
||||
NOTE: message may be logged, and it should not contain sensitive
|
||||
information.
|
||||
type: string
|
||||
time:
|
||||
description: time is the timestamp when the error was encountered.
|
||||
format: date-time
|
||||
type: string
|
||||
type: object
|
||||
pvcVolumeSnapshotRefList:
|
||||
description: |-
|
||||
VolumeSnapshotRefList is the list of PVC and VolumeSnapshot pairs that
|
||||
is part of this group snapshot.
|
||||
The maximum number of allowed snapshots in the group is 100.
|
||||
items:
|
||||
description: PVCVolumeSnapshotPair defines a pair of a PVC reference
|
||||
and a Volume Snapshot Reference
|
||||
properties:
|
||||
persistentVolumeClaimRef:
|
||||
description: PersistentVolumeClaimRef is a reference to the
|
||||
PVC this pair is referring to
|
||||
properties:
|
||||
name:
|
||||
description: |-
|
||||
Name of the referent.
|
||||
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
TODO: Add other useful fields. apiVersion, kind, uid?
|
||||
type: string
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
volumeSnapshotRef:
|
||||
description: VolumeSnapshotRef is a reference to the VolumeSnapshot
|
||||
this pair is referring to
|
||||
properties:
|
||||
name:
|
||||
description: |-
|
||||
Name of the referent.
|
||||
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
TODO: Add other useful fields. apiVersion, kind, uid?
|
||||
type: string
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
type: object
|
||||
type: array
|
||||
readyToUse:
|
||||
description: |-
|
||||
ReadyToUse indicates if all the individual snapshots in the group are ready
|
||||
to be used to restore a group of volumes.
|
||||
ReadyToUse becomes true when ReadyToUse of all individual snapshots become true.
|
||||
If not specified, it means the readiness of a group snapshot is unknown.
|
||||
type: boolean
|
||||
type: object
|
||||
required:
|
||||
- spec
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
397
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/csi-hostpath-plugin.yaml
generated
vendored
Normal file
397
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/csi-hostpath-plugin.yaml
generated
vendored
Normal file
@ -0,0 +1,397 @@
|
||||
# All of the individual sidecar RBAC roles get bound
|
||||
# to this account.
|
||||
kind: ServiceAccount
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: csi-hostpathplugin-sa
|
||||
namespace: default
|
||||
labels:
|
||||
app.kubernetes.io/instance: hostpath.csi.k8s.io
|
||||
app.kubernetes.io/part-of: csi-driver-host-path
|
||||
app.kubernetes.io/name: csi-hostpathplugin
|
||||
app.kubernetes.io/component: serviceaccount
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/instance: hostpath.csi.k8s.io
|
||||
app.kubernetes.io/part-of: csi-driver-host-path
|
||||
app.kubernetes.io/name: csi-hostpathplugin
|
||||
app.kubernetes.io/component: attacher-cluster-role
|
||||
name: csi-hostpathplugin-attacher-cluster-role
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: external-attacher-runner
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-hostpathplugin-sa
|
||||
namespace: default
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/instance: hostpath.csi.k8s.io
|
||||
app.kubernetes.io/part-of: csi-driver-host-path
|
||||
app.kubernetes.io/name: csi-hostpathplugin
|
||||
app.kubernetes.io/component: health-monitor-controller-cluster-role
|
||||
name: csi-hostpathplugin-health-monitor-controller-cluster-role
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: external-health-monitor-controller-runner
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-hostpathplugin-sa
|
||||
namespace: default
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/instance: hostpath.csi.k8s.io
|
||||
app.kubernetes.io/part-of: csi-driver-host-path
|
||||
app.kubernetes.io/name: csi-hostpathplugin
|
||||
app.kubernetes.io/component: provisioner-cluster-role
|
||||
name: csi-hostpathplugin-provisioner-cluster-role
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: external-provisioner-runner
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-hostpathplugin-sa
|
||||
namespace: default
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/instance: hostpath.csi.k8s.io
|
||||
app.kubernetes.io/part-of: csi-driver-host-path
|
||||
app.kubernetes.io/name: csi-hostpathplugin
|
||||
app.kubernetes.io/component: resizer-cluster-role
|
||||
name: csi-hostpathplugin-resizer-cluster-role
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: external-resizer-runner
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-hostpathplugin-sa
|
||||
namespace: default
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/instance: hostpath.csi.k8s.io
|
||||
app.kubernetes.io/part-of: csi-driver-host-path
|
||||
app.kubernetes.io/name: csi-hostpathplugin
|
||||
app.kubernetes.io/component: snapshotter-cluster-role
|
||||
name: csi-hostpathplugin-snapshotter-cluster-role
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: external-snapshotter-runner
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-hostpathplugin-sa
|
||||
namespace: default
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/instance: hostpath.csi.k8s.io
|
||||
app.kubernetes.io/part-of: csi-driver-host-path
|
||||
app.kubernetes.io/name: csi-hostpathplugin
|
||||
app.kubernetes.io/component: attacher-role
|
||||
name: csi-hostpathplugin-attacher-role
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: external-attacher-cfg
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-hostpathplugin-sa
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/instance: hostpath.csi.k8s.io
|
||||
app.kubernetes.io/part-of: csi-driver-host-path
|
||||
app.kubernetes.io/name: csi-hostpathplugin
|
||||
app.kubernetes.io/component: health-monitor-controller-role
|
||||
name: csi-hostpathplugin-health-monitor-controller-role
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: external-health-monitor-controller-cfg
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-hostpathplugin-sa
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/instance: hostpath.csi.k8s.io
|
||||
app.kubernetes.io/part-of: csi-driver-host-path
|
||||
app.kubernetes.io/name: csi-hostpathplugin
|
||||
app.kubernetes.io/component: provisioner-role
|
||||
name: csi-hostpathplugin-provisioner-role
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: external-provisioner-cfg
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-hostpathplugin-sa
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/instance: hostpath.csi.k8s.io
|
||||
app.kubernetes.io/part-of: csi-driver-host-path
|
||||
app.kubernetes.io/name: csi-hostpathplugin
|
||||
app.kubernetes.io/component: resizer-role
|
||||
name: csi-hostpathplugin-resizer-role
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: external-resizer-cfg
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-hostpathplugin-sa
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/instance: hostpath.csi.k8s.io
|
||||
app.kubernetes.io/part-of: csi-driver-host-path
|
||||
app.kubernetes.io/name: csi-hostpathplugin
|
||||
app.kubernetes.io/component: snapshotter-role
|
||||
name: csi-hostpathplugin-snapshotter-role
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: external-snapshotter-leaderelection
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-hostpathplugin-sa
|
||||
---
|
||||
kind: StatefulSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: csi-hostpathplugin
|
||||
namespace: default
|
||||
labels:
|
||||
app.kubernetes.io/instance: hostpath.csi.k8s.io
|
||||
app.kubernetes.io/part-of: csi-driver-host-path
|
||||
app.kubernetes.io/name: csi-hostpathplugin
|
||||
app.kubernetes.io/component: plugin
|
||||
spec:
|
||||
serviceName: "csi-hostpathplugin"
|
||||
# One replica only:
|
||||
# Host path driver only works when everything runs
|
||||
# on a single node.
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/instance: hostpath.csi.k8s.io
|
||||
app.kubernetes.io/part-of: csi-driver-host-path
|
||||
app.kubernetes.io/name: csi-hostpathplugin
|
||||
app.kubernetes.io/component: plugin
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/instance: hostpath.csi.k8s.io
|
||||
app.kubernetes.io/part-of: csi-driver-host-path
|
||||
app.kubernetes.io/name: csi-hostpathplugin
|
||||
app.kubernetes.io/component: plugin
|
||||
spec:
|
||||
serviceAccountName: csi-hostpathplugin-sa
|
||||
containers:
|
||||
- name: hostpath
|
||||
image: registry.k8s.io/sig-storage/hostpathplugin:v1.14.0
|
||||
args:
|
||||
- "--drivername=hostpath.csi.k8s.io"
|
||||
- "--v=5"
|
||||
- "--endpoint=$(CSI_ENDPOINT)"
|
||||
- "--nodeid=$(KUBE_NODE_NAME)"
|
||||
env:
|
||||
- name: CSI_ENDPOINT
|
||||
value: unix:///csi/csi.sock
|
||||
- name: KUBE_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: spec.nodeName
|
||||
securityContext:
|
||||
privileged: true
|
||||
ports:
|
||||
- containerPort: 9898
|
||||
name: healthz
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
failureThreshold: 5
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: healthz
|
||||
initialDelaySeconds: 10
|
||||
timeoutSeconds: 3
|
||||
periodSeconds: 2
|
||||
volumeMounts:
|
||||
- mountPath: /csi
|
||||
name: socket-dir
|
||||
- mountPath: /var/lib/kubelet/pods
|
||||
mountPropagation: Bidirectional
|
||||
name: mountpoint-dir
|
||||
- mountPath: /var/lib/kubelet/plugins
|
||||
mountPropagation: Bidirectional
|
||||
name: plugins-dir
|
||||
- mountPath: /csi-data-dir
|
||||
name: csi-data-dir
|
||||
- mountPath: /dev
|
||||
name: dev-dir

- name: csi-external-health-monitor-controller
image: registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.12.1
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
- "--leader-election"
env:
- name: ADDRESS
value: /csi/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi

- name: node-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1
args:
- --v=5
- --csi-address=/csi/csi.sock
- --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock
securityContext:
# This is necessary only for systems with SELinux, where
# non-privileged sidecar containers cannot access unix domain socket
# created by privileged CSI driver container.
privileged: true
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /registration
name: registration-dir
- mountPath: /csi-data-dir
name: csi-data-dir

- name: liveness-probe
volumeMounts:
- mountPath: /csi
name: socket-dir
image: registry.k8s.io/sig-storage/livenessprobe:v2.12.0
args:
- --csi-address=/csi/csi.sock
- --health-port=9898

- name: csi-attacher
image: registry.k8s.io/sig-storage/csi-attacher:v4.6.1
args:
- --v=5
- --csi-address=/csi/csi.sock
securityContext:
# This is necessary only for systems with SELinux, where
# non-privileged sidecar containers cannot access unix domain socket
# created by privileged CSI driver container.
privileged: true
volumeMounts:
- mountPath: /csi
name: socket-dir

- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v5.0.1
args:
- -v=5
- --csi-address=/csi/csi.sock
- --feature-gates=Topology=true
# end csi-provisioner args
securityContext:
# This is necessary only for systems with SELinux, where
# non-privileged sidecar containers cannot access unix domain socket
# created by privileged CSI driver container.
privileged: true
volumeMounts:
- mountPath: /csi
name: socket-dir

- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.11.1
args:
- -v=5
- -csi-address=/csi/csi.sock
securityContext:
# This is necessary only for systems with SELinux, where
# non-privileged sidecar containers cannot access unix domain socket
# created by privileged CSI driver container.
privileged: true
volumeMounts:
- mountPath: /csi
name: socket-dir

- name: csi-snapshotter
image: registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1
args:
- -v=5
- --csi-address=/csi/csi.sock
- --enable-volume-group-snapshots=true
securityContext:
# This is necessary only for systems with SELinux, where
# non-privileged sidecar containers cannot access unix domain socket
# created by privileged CSI driver container.
privileged: true
volumeMounts:
- mountPath: /csi
name: socket-dir

volumes:
- hostPath:
path: /var/lib/kubelet/plugins/csi-hostpath
type: DirectoryOrCreate
name: socket-dir
- hostPath:
path: /var/lib/kubelet/pods
type: DirectoryOrCreate
name: mountpoint-dir
- hostPath:
path: /var/lib/kubelet/plugins_registry
type: Directory
name: registration-dir
- hostPath:
path: /var/lib/kubelet/plugins
type: Directory
name: plugins-dir
- hostPath:
# 'path' is where PV data is persisted on host.
# using /tmp is also possible while the PVs will not available after plugin container recreation or host reboot
path: /var/lib/csi-hostpath-data/
type: DirectoryOrCreate
name: csi-data-dir
- hostPath:
path: /dev
type: Directory
name: dev-dir
329
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/run_group_snapshot_e2e.sh
generated
vendored
Normal file
@ -0,0 +1,329 @@
#!/bin/sh
# Copyright 2024 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# hack script for running a kind e2e
# must be run with a kubernetes checkout in $PWD (IE from the checkout)
# Usage: SKIP="ginkgo skip regex" FOCUS="ginkgo focus regex" kind-e2e.sh

set -o errexit -o nounset -o xtrace

# Settings:
# SKIP: ginkgo skip regex
# FOCUS: ginkgo focus regex
# LABEL_FILTER: ginkgo label query for selecting tests (see "Spec Labels" in https://onsi.github.io/ginkgo/#filtering-specs)
#
# The default is to focus on conformance tests. Serial tests get skipped when
# parallel testing is enabled. Using LABEL_FILTER instead of combining SKIP and
# FOCUS is recommended (more expressive, easier to read than regexp).
#
# GA_ONLY: true - limit to GA APIs/features as much as possible
# false - (default) APIs and features left at defaults
# FEATURE_GATES:
# JSON or YAML encoding of a string/bool map: {"FeatureGateA": true, "FeatureGateB": false}
# Enables or disables feature gates in the entire cluster.
# Cannot be used when GA_ONLY=true.
# RUNTIME_CONFIG:
# JSON or YAML encoding of a string/string (!) map: {"apia.example.com/v1alpha1": "true", "apib.example.com/v1beta1": "false"}
# Enables API groups in the apiserver via --runtime-config.
# Cannot be used when GA_ONLY=true.

# cleanup logic for cleanup on exit
CLEANED_UP=false
cleanup() {
if [ "$CLEANED_UP" = "true" ]; then
return
fi
# KIND_CREATE_ATTEMPTED is true once we: kind create
if [ "${KIND_CREATE_ATTEMPTED:-}" = true ]; then
kind "export" logs "${ARTIFACTS}" || true
kind delete cluster || true
fi
rm -f _output/bin/e2e.test || true
# remove our tempdir, this needs to be last, or it will prevent kind delete
if [ -n "${TMP_DIR:-}" ]; then
rm -rf "${TMP_DIR:?}"
fi
CLEANED_UP=true
}

# setup signal handlers
# shellcheck disable=SC2317 # this is not unreachable code
signal_handler() {
if [ -n "${GINKGO_PID:-}" ]; then
kill -TERM "$GINKGO_PID" || true
fi
cleanup
}
trap signal_handler INT TERM

# build kubernetes / node image, e2e binaries
build() {
# build the node image w/ kubernetes
kind build node-image -v 1
# Ginkgo v1 is used by Kubernetes 1.24 and earlier, fallback if v2 is not available.
GINKGO_SRC_DIR="vendor/github.com/onsi/ginkgo/v2/ginkgo"
if [ ! -d "$GINKGO_SRC_DIR" ]; then
GINKGO_SRC_DIR="vendor/github.com/onsi/ginkgo/ginkgo"
fi
# make sure we have e2e requirements
make all WHAT="cmd/kubectl test/e2e/e2e.test ${GINKGO_SRC_DIR}"

# Ensure the built kubectl is used instead of system
export PATH="${PWD}/_output/bin:$PATH"
}

check_structured_log_support() {
case "${KUBE_VERSION}" in
v1.1[0-8].*)
echo "$1 is only supported on versions >= v1.19, got ${KUBE_VERSION}"
exit 1
;;
esac
}

# up a cluster with kind
create_cluster() {
# Grab the version of the cluster we're about to start
KUBE_VERSION="$(docker run --rm --entrypoint=cat "kindest/node:latest" /kind/version)"

# Default Log level for all components in test clusters
KIND_CLUSTER_LOG_LEVEL=${KIND_CLUSTER_LOG_LEVEL:-4}

# potentially enable --logging-format
CLUSTER_LOG_FORMAT=${CLUSTER_LOG_FORMAT:-}
scheduler_extra_args=" \"v\": \"${KIND_CLUSTER_LOG_LEVEL}\""
controllerManager_extra_args=" \"v\": \"${KIND_CLUSTER_LOG_LEVEL}\""
apiServer_extra_args=" \"v\": \"${KIND_CLUSTER_LOG_LEVEL}\""
if [ -n "$CLUSTER_LOG_FORMAT" ]; then
check_structured_log_support "CLUSTER_LOG_FORMAT"
scheduler_extra_args="${scheduler_extra_args}
\"logging-format\": \"${CLUSTER_LOG_FORMAT}\""
controllerManager_extra_args="${controllerManager_extra_args}
\"logging-format\": \"${CLUSTER_LOG_FORMAT}\""
apiServer_extra_args="${apiServer_extra_args}
\"logging-format\": \"${CLUSTER_LOG_FORMAT}\""
fi
kubelet_extra_args=" \"v\": \"${KIND_CLUSTER_LOG_LEVEL}\""
KUBELET_LOG_FORMAT=${KUBELET_LOG_FORMAT:-$CLUSTER_LOG_FORMAT}
if [ -n "$KUBELET_LOG_FORMAT" ]; then
check_structured_log_support "KUBECTL_LOG_FORMAT"
kubelet_extra_args="${kubelet_extra_args}
\"logging-format\": \"${KUBELET_LOG_FORMAT}\""
fi

# JSON or YAML map injected into featureGates config
feature_gates="${FEATURE_GATES:-{\}}"
# --runtime-config argument value passed to the API server, again as a map
runtime_config="${RUNTIME_CONFIG:-{\}}"

case "${GA_ONLY:-false}" in
false)
:
;;
true)
if [ "${feature_gates}" != "{}" ]; then
echo "GA_ONLY=true and FEATURE_GATES=${feature_gates} are mutually exclusive."
exit 1
fi
if [ "${runtime_config}" != "{}" ]; then
echo "GA_ONLY=true and RUNTIME_CONFIG=${runtime_config} are mutually exclusive."
exit 1
fi

echo "Limiting to GA APIs and features for ${KUBE_VERSION}"
feature_gates='{"AllAlpha":false,"AllBeta":false}'
runtime_config='{"api/alpha":"false", "api/beta":"false"}'
;;
*)
echo "\$GA_ONLY set to '${GA_ONLY}'; supported values are true and false (default)"
exit 1
;;
esac

# create the config file
cat <<EOF > "${ARTIFACTS}/kind-config.yaml"
# config for 1 control plane node and 2 workers (necessary for conformance)
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
networking:
ipFamily: ${IP_FAMILY:-ipv4}
kubeProxyMode: ${KUBE_PROXY_MODE:-iptables}
# don't pass through host search paths
# TODO: possibly a reasonable default in the future for kind ...
dnsSearch: []
nodes:
- role: control-plane
- role: worker
- role: worker
featureGates: ${feature_gates}
runtimeConfig: ${runtime_config}
kubeadmConfigPatches:
- |
kind: ClusterConfiguration
metadata:
name: config
apiServer:
extraArgs:
${apiServer_extra_args}
controllerManager:
extraArgs:
${controllerManager_extra_args}
scheduler:
extraArgs:
${scheduler_extra_args}
---
kind: InitConfiguration
nodeRegistration:
kubeletExtraArgs:
${kubelet_extra_args}
---
kind: JoinConfiguration
nodeRegistration:
kubeletExtraArgs:
${kubelet_extra_args}
EOF
# NOTE: must match the number of workers above
NUM_NODES=2
# actually create the cluster
# TODO(BenTheElder): settle on verbosity for this script
KIND_CREATE_ATTEMPTED=true
kind create cluster \
--image=kindest/node:latest \
--retain \
--wait=1m \
-v=3 \
"--config=${ARTIFACTS}/kind-config.yaml"

# debug cluster version
kubectl version

# Patch kube-proxy to set the verbosity level
kubectl patch -n kube-system daemonset/kube-proxy \
--type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/command/-", "value": "--v='"${KIND_CLUSTER_LOG_LEVEL}"'" }]'
}

# run e2es with ginkgo-e2e.sh
run_tests() {
# IPv6 clusters need some CoreDNS changes in order to work in k8s CI:
# 1. k8s CI doesn't offer IPv6 connectivity, so CoreDNS should be configured
# to work in an offline environment:
# https://github.com/coredns/coredns/issues/2494#issuecomment-457215452
# 2. k8s CI adds following domains to resolv.conf search field:
# c.k8s-prow-builds.internal google.internal.
# CoreDNS should handle those domains and answer with NXDOMAIN instead of SERVFAIL
# otherwise pods stops trying to resolve the domain.
if [ "${IP_FAMILY:-ipv4}" = "ipv6" ]; then
# Get the current config
original_coredns=$(kubectl get -oyaml -n=kube-system configmap/coredns)
echo "Original CoreDNS config:"
echo "${original_coredns}"
# Patch it
fixed_coredns=$(
printf '%s' "${original_coredns}" | sed \
-e 's/^.*kubernetes cluster\.local/& internal/' \
-e '/^.*upstream$/d' \
-e '/^.*fallthrough.*$/d' \
-e '/^.*forward . \/etc\/resolv.conf$/d' \
-e '/^.*loop$/d' \
)
echo "Patched CoreDNS config:"
echo "${fixed_coredns}"
printf '%s' "${fixed_coredns}" | kubectl apply -f -
fi

# ginkgo regexes and label filter
SKIP="${SKIP:-}"
FOCUS="${FOCUS:-}"
LABEL_FILTER="${LABEL_FILTER:-}"
if [ -z "${FOCUS}" ] && [ -z "${LABEL_FILTER}" ]; then
FOCUS="\\[Conformance\\]"
fi
# if we set PARALLEL=true, skip serial tests set --ginkgo-parallel
if [ "${PARALLEL:-false}" = "true" ]; then
export GINKGO_PARALLEL=y
if [ -z "${SKIP}" ]; then
SKIP="\\[Serial\\]"
else
SKIP="\\[Serial\\]|${SKIP}"
fi
fi

# setting this env prevents ginkgo e2e from trying to run provider setup
export KUBERNETES_CONFORMANCE_TEST='y'
# setting these is required to make RuntimeClass tests work ... :/
export KUBE_CONTAINER_RUNTIME=remote
export KUBE_CONTAINER_RUNTIME_ENDPOINT=unix:///run/containerd/containerd.sock
export KUBE_CONTAINER_RUNTIME_NAME=containerd
# ginkgo can take forever to exit, so we run it in the background and save the
# PID, bash will not run traps while waiting on a process, but it will while
# running a builtin like `wait`, saving the PID also allows us to forward the
# interrupt

kubectl apply -f ./cluster/addons/volumesnapshots/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml || exit 1
kubectl apply -f ./cluster/addons/volumesnapshots/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml || exit 1
kubectl apply -f ./cluster/addons/volumesnapshots/crd/snapshot.storage.k8s.io_volumesnapshots.yaml || exit 1
kubectl apply -f test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml || exit 1
kubectl apply -f test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml || exit 1
kubectl apply -f test/e2e/testing-manifests/storage-csi/external-snapshotter/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml || exit 1

kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/v8.0.0/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml || exit 1
curl -s https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/release-8.1/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml | \
awk '/--leader-election=true/ {print; print " - \"--enable-volume-group-snapshots=true\""; next}1' | \
kubectl apply -f - || exit 1

./hack/ginkgo-e2e.sh \
'--provider=skeleton' "--num-nodes=${NUM_NODES}" \
"--ginkgo.focus=${FOCUS}" "--ginkgo.skip=${SKIP}" "--ginkgo.label-filter=${LABEL_FILTER}" \
"--report-dir=${ARTIFACTS}" '--disable-log-dump=true' &
GINKGO_PID=$!
wait "$GINKGO_PID"
}

main() {
# create temp dir and setup cleanup
TMP_DIR=$(mktemp -d)

# ensure artifacts (results) directory exists when not in CI
export ARTIFACTS="${ARTIFACTS:-${PWD}/_artifacts}"
mkdir -p "${ARTIFACTS}"

# export the KUBECONFIG to a unique path for testing
KUBECONFIG="${HOME}/.kube/kind-test-config"
export KUBECONFIG
echo "exported KUBECONFIG=${KUBECONFIG}"

# debug kind version
kind version
cp test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/csi-hostpath-plugin.yaml test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml || exit 1
# build kubernetes
build
# in CI attempt to release some memory after building
if [ -n "${KUBETEST_IN_DOCKER:-}" ]; then
sync || true
echo 1 > /proc/sys/vm/drop_caches || true
fi

# create the cluster and run tests
res=0
create_cluster || res=$?
run_tests || res=$?
cleanup || res=$?
exit $res
}

main
@ -21,7 +21,7 @@ spec:
serviceAccountName: csi-gce-pd-controller-sa
containers:
- name: csi-snapshotter
image: registry.k8s.io/sig-storage/csi-snapshotter:v5.0.1
image: registry.k8s.io/sig-storage/csi-snapshotter:v8.1.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
@ -39,7 +39,7 @@ spec:
- name: socket-dir
mountPath: /csi
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.4.0
image: registry.k8s.io/sig-storage/csi-provisioner:v5.1.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
@ -73,7 +73,7 @@ spec:
- name: socket-dir
mountPath: /csi
- name: csi-attacher
image: registry.k8s.io/sig-storage/csi-attacher:v4.0.0
image: registry.k8s.io/sig-storage/csi-attacher:v4.7.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
@ -102,7 +102,7 @@ spec:
- name: socket-dir
mountPath: /csi
- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.6.0
image: registry.k8s.io/sig-storage/csi-resizer:v1.12.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
@ -1,4 +1,4 @@
# All of the individual sidecar RBAC roles get bound
# All of the individual sidecar RBAC roles get bound
# to this account.
kind: ServiceAccount
apiVersion: v1
@ -219,7 +219,7 @@ spec:
serviceAccountName: csi-hostpathplugin-sa
containers:
- name: hostpath
image: registry.k8s.io/sig-storage/hostpathplugin:v1.14.0
image: registry.k8s.io/sig-storage/hostpathplugin:v1.15.0
args:
- "--drivername=hostpath.csi.k8s.io"
- "--v=5"
@ -276,7 +276,7 @@ spec:
mountPath: /csi

- name: node-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.12.0
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -304,13 +304,13 @@ spec:
volumeMounts:
- mountPath: /csi
name: socket-dir
image: registry.k8s.io/sig-storage/livenessprobe:v2.12.0
image: registry.k8s.io/sig-storage/livenessprobe:v2.14.0
args:
- --csi-address=/csi/csi.sock
- --health-port=9898

- name: csi-attacher
image: registry.k8s.io/sig-storage/csi-attacher:v4.6.1
image: registry.k8s.io/sig-storage/csi-attacher:v4.7.0
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -324,7 +324,7 @@ spec:
name: socket-dir

- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v5.0.1
image: registry.k8s.io/sig-storage/csi-provisioner:v5.1.0
args:
- -v=5
- --csi-address=/csi/csi.sock
@ -340,7 +340,7 @@ spec:
name: socket-dir

- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.11.1
image: registry.k8s.io/sig-storage/csi-resizer:v1.12.0
args:
- -v=5
- -csi-address=/csi/csi.sock
@ -354,7 +354,7 @@ spec:
name: socket-dir

- name: csi-snapshotter
image: registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1
image: registry.k8s.io/sig-storage/csi-snapshotter:v8.1.0
args:
- -v=5
- --csi-address=/csi/csi.sock
@ -66,7 +66,7 @@ spec:
topologyKey: kubernetes.io/hostname
containers:
- name: socat
image: registry.k8s.io/sig-storage/hostpathplugin:v1.14.0
image: registry.k8s.io/sig-storage/hostpathplugin:v1.15.0
command:
- socat
args:
@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-attacher
image: registry.k8s.io/sig-storage/csi-attacher:v4.6.1
image: registry.k8s.io/sig-storage/csi-attacher:v4.7.0
args:
- --v=5
- --csi-address=$(ADDRESS)
@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.11.1
image: registry.k8s.io/sig-storage/csi-resizer:v1.12.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-snapshotter
image: registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1
image: registry.k8s.io/sig-storage/csi-snapshotter:v8.1.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v5.0.1
image: registry.k8s.io/sig-storage/csi-provisioner:v5.1.0
args:
- "--csi-address=$(ADDRESS)"
# Topology support is needed for the pod rescheduling test
@ -34,7 +34,7 @@ spec:
- mountPath: /csi
name: socket-dir
- name: driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.12.0
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -53,7 +53,7 @@ spec:
- mountPath: /registration
name: registration-dir
- name: mock
image: registry.k8s.io/sig-storage/hostpathplugin:v1.14.0
image: registry.k8s.io/sig-storage/hostpathplugin:v1.15.0
args:
- "--drivername=mock.storage.k8s.io"
- "--nodeid=$(KUBE_NODE_NAME)"
@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v5.0.1
image: registry.k8s.io/sig-storage/csi-provisioner:v5.1.0
args:
- "--csi-address=$(ADDRESS)"
# Topology support is needed for the pod rescheduling test
@ -35,7 +35,7 @@ spec:
- mountPath: /csi
name: socket-dir
- name: driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.12.0
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -53,7 +53,7 @@ spec:
- mountPath: /registration
name: registration-dir
- name: mock
image: registry.k8s.io/sig-storage/hostpathplugin:v1.14.0
image: registry.k8s.io/sig-storage/hostpathplugin:v1.15.0
args:
- -v=5
- -nodeid=$(KUBE_NODE_NAME)
3
vendor/k8s.io/kubernetes/test/utils/density_utils.go
generated
vendored
@ -18,6 +18,7 @@ package utils

import (
"context"
"errors"
"fmt"
"strings"
"time"
@ -99,7 +100,7 @@ func VerifyLabelsRemoved(c clientset.Interface, nodeName string, labelKeys []str
}
for _, labelKey := range labelKeys {
if node.Labels != nil && len(node.Labels[labelKey]) != 0 {
return fmt.Errorf("Failed removing label " + labelKey + " of the node " + nodeName)
return errors.New("Failed removing label " + labelKey + " of the node " + nodeName)
}
}
return nil
3
vendor/k8s.io/kubernetes/test/utils/deployment.go
generated
vendored
@ -18,6 +18,7 @@ package utils

import (
"context"
"errors"
"fmt"
"time"

@ -226,7 +227,7 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
})
if wait.Interrupted(err) {
LogReplicaSetsOfDeployment(deployment, nil, newRS, logf)
err = fmt.Errorf(reason)
err = errors.New(reason)
}
if newRS == nil {
return fmt.Errorf("deployment %q failed to create new replica set", deploymentName)
14
vendor/k8s.io/kubernetes/test/utils/image/manifest.go
generated
vendored
@ -28,7 +28,7 @@ import (
"regexp"
"strings"

"gopkg.in/yaml.v2"
yaml "sigs.k8s.io/yaml/goyaml.v2"
)

// RegistryList holds public and private image registries
@ -163,10 +163,6 @@ const (
AuthenticatedWindowsNanoServer
// BusyBox image
BusyBox
// CudaVectorAdd image
CudaVectorAdd
// CudaVectorAdd2 image
CudaVectorAdd2
// DistrolessIptables Image
DistrolessIptables
// Etcd image
@ -220,17 +216,15 @@ const (

func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config) {
configs := map[ImageID]Config{}
configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.52"}
configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.53"}
configs[AgnhostPrivate] = Config{list.PrivateRegistry, "agnhost", "2.6"}
configs[AuthenticatedAlpine] = Config{list.GcAuthenticatedRegistry, "alpine", "3.7"}
configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"}
configs[APIServer] = Config{list.PromoterE2eRegistry, "sample-apiserver", "1.29.2"}
configs[AppArmorLoader] = Config{list.PromoterE2eRegistry, "apparmor-loader", "1.4"}
configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.36.1-1"}
configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"}
configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.3"}
configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.5.9"}
configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.15-0"}
configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.6.6"}
configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.16-0"}
configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"}
configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"}
configs[InvalidRegistryImage] = Config{list.InvalidRegistry, "alpine", "3.1"}
2
vendor/k8s.io/kubernetes/test/utils/node.go
generated
vendored
@ -16,7 +16,7 @@ limitations under the License.

package utils

import "k8s.io/api/core/v1"
import v1 "k8s.io/api/core/v1"

// GetNodeCondition extracts the provided condition from the given status and returns that.
// Returns nil and -1 if the condition is not present, and the index of the located condition.
10
vendor/k8s.io/kubernetes/test/utils/runners.go
generated
vendored
@ -664,7 +664,7 @@ func (config *RCConfig) start(ctx context.Context) error {
*config.CreatedPods = startupStatus.Created
}
if !config.Silent {
config.RCConfigLog(startupStatus.String(config.Name))
config.RCConfigLog("%s", startupStatus.String(config.Name))
}

if config.PodStatusFile != nil {
@ -688,8 +688,8 @@ func (config *RCConfig) start(ctx context.Context) error {
if podDeletionsCount > config.MaxAllowedPodDeletions {
// Number of pods which disappeared is over threshold
err := fmt.Errorf("%d pods disappeared for %s: %v", podDeletionsCount, config.Name, strings.Join(deletedPods, ", "))
config.RCConfigLog(err.Error())
config.RCConfigLog(diff.String(sets.NewString()))
config.RCConfigLog("%s", err.Error())
config.RCConfigLog("%s", diff.String(sets.NewString()))
return err
}

@ -1194,6 +1194,10 @@ func CreatePodWithPersistentVolume(ctx context.Context, client clientset.Interfa
// PVs are cluster-wide resources.
// Prepend a namespace to make the name globally unique.
pv.Name = fmt.Sprintf("%s-%s", namespace, pv.Name)
pvs := pv.Spec.PersistentVolumeSource
if pvs.CSI != nil {
pvs.CSI.VolumeHandle = pv.Name
}
if bindVolume {
// bind pv to "pvc-$i"
pv.Spec.ClaimRef = &v1.ObjectReference{
9
vendor/k8s.io/kubernetes/test/utils/update_resources.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
package utils

import (
"context"
"fmt"
"time"

@ -35,9 +36,9 @@ const (
waitRetryTimeout = 5 * time.Minute
)

func RetryErrorCondition(condition wait.ConditionFunc) wait.ConditionFunc {
return func() (bool, error) {
done, err := condition()
func RetryErrorCondition(condition wait.ConditionWithContextFunc) wait.ConditionWithContextFunc {
return func(ctx context.Context) (bool, error) {
done, err := condition(ctx)
return done, err
}
}
@ -50,7 +51,7 @@ func ScaleResourceWithRetries(scalesGetter scaleclient.ScalesGetter, namespace,
}
waitForReplicas := scale.NewRetryParams(waitRetryInterval, waitRetryTimeout)
cond := RetryErrorCondition(scale.ScaleCondition(scaler, preconditions, namespace, name, size, nil, gvr, false))
err := wait.PollImmediate(updateRetryInterval, updateRetryTimeout, cond)
err := wait.PollUntilContextTimeout(context.Background(), updateRetryInterval, updateRetryTimeout, true, cond)
if err == nil {
err = scale.WaitForScaleHasDesiredReplicas(scalesGetter, gvr.GroupResource(), name, namespace, size, waitForReplicas)
}