Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 10:53:34 +00:00

rebase: update kubernetes to latest

Updating the Kubernetes release to the latest in the main go.mod.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>

Committed by: mergify[bot]
parent 63c4c05b35
commit 5a66991bb3
2 vendor/k8s.io/kubernetes/test/e2e/framework/.import-restrictions (generated, vendored)
@@ -36,9 +36,9 @@ rules:
allowedPrefixes: [
"gopkg.in/inf.v0",
"gopkg.in/yaml.v2",
"gopkg.in/evanphx/json-patch.v4",
"github.com/blang/semver/",
"github.com/davecgh/go-spew/spew",
"github.com/evanphx/json-patch",
"github.com/go-logr/logr",
"github.com/gogo/protobuf/proto",
"github.com/gogo/protobuf/sortkeys",
4 vendor/k8s.io/kubernetes/test/e2e/framework/debug/dump.go (generated, vendored)
@@ -118,14 +118,14 @@ func DumpNodeDebugInfo(ctx context.Context, c clientset.Interface, nodeNames []s
logFunc("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v",
e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject)
}
logFunc("\nLogging pods the kubelet thinks is on node %v", n)
logFunc("\nLogging pods the kubelet thinks are on node %v", n)
podList, err := getKubeletPods(ctx, c, n)
if err != nil {
logFunc("Unable to retrieve kubelet pods for node %v: %v", n, err)
continue
}
for _, p := range podList.Items {
logFunc("%v started at %v (%d+%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.InitContainerStatuses), len(p.Status.ContainerStatuses))
logFunc("%s/%s started at %v (%d+%d container statuses recorded)", p.Namespace, p.Name, p.Status.StartTime, len(p.Status.InitContainerStatuses), len(p.Status.ContainerStatuses))
for _, c := range p.Status.InitContainerStatuses {
logFunc("\tInit container %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount)
9 vendor/k8s.io/kubernetes/test/e2e/framework/expect.go (generated, vendored)
@@ -293,15 +293,6 @@ func (f *FailureError) backtrace() {
// }
var ErrFailure error = FailureError{}

// ExpectError expects an error happens, otherwise an exception raises
//
// Deprecated: use gomega.Expect().To(gomega.HaveOccurred()) or (better!) check
// specifically for the error that is expected with
// gomega.Expect().To(gomega.MatchError(gomega.ContainSubstring()))
func ExpectError(err error, explain ...interface{}) {
gomega.ExpectWithOffset(1, err).To(gomega.HaveOccurred(), explain...)
}

// ExpectNoError checks if "err" is set, and if so, fails assertion while logging the error.
func ExpectNoError(err error, explain ...interface{}) {
ExpectNoErrorWithOffset(1, err, explain...)
2 vendor/k8s.io/kubernetes/test/e2e/framework/framework.go (generated, vendored)
@@ -720,7 +720,7 @@ func (cl *ClusterVerification) WaitFor(ctx context.Context, atLeast int, timeout
pods := []v1.Pod{}
var returnedErr error

err := wait.PollWithContext(ctx, 1*time.Second, timeout, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(ctx, 1*time.Second, timeout, false, func(ctx context.Context) (bool, error) {
pods, returnedErr = cl.podState.filter(ctx, cl.client, cl.namespace)

// Failure
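Most of the framework changes in this commit swap the deprecated wait.PollWithContext / wait.PollImmediateWithContext helpers for wait.PollUntilContextTimeout. A minimal sketch of the new call shape follows; it is not part of the diff, and the interval, timeout, and condition are placeholders.

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()
	// PollWithContext(ctx, interval, timeout, cond) maps to immediate=false;
	// PollImmediateWithContext(ctx, interval, timeout, cond) maps to immediate=true.
	err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 30*time.Second, false,
		func(ctx context.Context) (done bool, err error) {
			// Placeholder condition: return true once the polled state is ready.
			return true, nil
		})
	fmt.Println("poll finished:", err)
}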
28 vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper.go (generated, vendored)
@ -209,8 +209,9 @@ func registerInSuite(ginkgoCall func(string, ...interface{}) bool, args []interf
|
||||
case label:
|
||||
fullLabel := strings.Join(arg.parts, ":")
|
||||
addLabel(fullLabel)
|
||||
if arg.extra != "" {
|
||||
addLabel(arg.extra)
|
||||
if arg.extraFeature != "" {
|
||||
texts = append(texts, fmt.Sprintf("[%s]", arg.extraFeature))
|
||||
ginkgoArgs = append(ginkgoArgs, ginkgo.Label("Feature:"+arg.extraFeature))
|
||||
}
|
||||
if fullLabel == "Serial" {
|
||||
ginkgoArgs = append(ginkgoArgs, ginkgo.Serial)
|
||||
@ -309,6 +310,10 @@ func validateText(location types.CodeLocation, text string, labels []string) {
|
||||
recordTextBug(location, fmt.Sprintf("[%s] in plain text is deprecated and must be added through With%s instead", tag, tag))
|
||||
}
|
||||
if deprecatedStability.Has(tag) {
|
||||
if slices.Contains(labels, "Feature:"+tag) {
|
||||
// Okay, was also set as label.
|
||||
continue
|
||||
}
|
||||
recordTextBug(location, fmt.Sprintf("[%s] in plain text is deprecated and must be added by defining the feature gate through WithFeatureGate instead", tag))
|
||||
}
|
||||
if index := strings.Index(tag, ":"); index > 0 {
|
||||
@ -353,6 +358,16 @@ func withFeature(name Feature) interface{} {
|
||||
// [k8s.io/apiserver/pkg/util/feature.DefaultMutableFeatureGate]. Once a
|
||||
// feature gate gets removed from there, the WithFeatureGate calls using it
|
||||
// also need to be removed.
|
||||
//
|
||||
// [Alpha] resp. [Beta] get added to the test name automatically depending
|
||||
// on the current stability level of the feature. Feature:Alpha resp.
|
||||
// Feature:Beta get added to the Ginkgo labels because this is a special
|
||||
// requirement for how the cluster needs to be configured.
|
||||
//
|
||||
// If the test can run in any cluster that has alpha resp. beta features and
|
||||
// API groups enabled, then annotating it with just WithFeatureGate is
|
||||
// sufficient. Otherwise, WithFeature has to be used to define the additional
|
||||
// requirements.
|
||||
func WithFeatureGate(featureGate featuregate.Feature) interface{} {
|
||||
return withFeatureGate(featureGate)
|
||||
}
|
||||
@ -376,7 +391,7 @@ func withFeatureGate(featureGate featuregate.Feature) interface{} {
|
||||
}
|
||||
|
||||
l := newLabel("FeatureGate", string(featureGate))
|
||||
l.extra = level
|
||||
l.extraFeature = level
|
||||
return l
|
||||
}
|
||||
|
||||
@ -544,8 +559,9 @@ func withFlaky() interface{} {
|
||||
type label struct {
|
||||
// parts get concatenated with ":" to build the full label.
|
||||
parts []string
|
||||
// extra is an optional fully-formed extra label.
|
||||
extra string
|
||||
// extra is an optional feature name. It gets added as [<extraFeature>]
|
||||
// to the test name and as Feature:<extraFeature> to the labels.
|
||||
extraFeature string
|
||||
// explanation gets set for each label to help developers
|
||||
// who pass a label to a ginkgo function. They need to use
|
||||
// the corresponding framework function instead.
|
||||
@ -572,7 +588,7 @@ func TagsEqual(a, b interface{}) bool {
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if al.extra != bl.extra {
|
||||
if al.extraFeature != bl.extraFeature {
|
||||
return false
|
||||
}
|
||||
return slices.Equal(al.parts, bl.parts)
|
||||
|
47 vendor/k8s.io/kubernetes/test/e2e/framework/metrics/kube_proxy_metrics.go (generated, vendored, Normal file)
@ -0,0 +1,47 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/component-base/metrics/testutil"
|
||||
)
|
||||
|
||||
// KubeProxyMetrics is metrics for kube-proxy
|
||||
type KubeProxyMetrics testutil.Metrics
|
||||
|
||||
// GetCounterMetricValue returns value for metric type counter.
|
||||
func (m *KubeProxyMetrics) GetCounterMetricValue(metricName string) (float64, error) {
|
||||
if len(testutil.Metrics(*m)[metricName]) == 0 {
|
||||
return 0, fmt.Errorf("metric '%s' not found", metricName)
|
||||
}
|
||||
return float64(testutil.Metrics(*m)[metricName][0].Value), nil
|
||||
}
|
||||
|
||||
func newKubeProxyMetricsMetrics() KubeProxyMetrics {
|
||||
result := testutil.NewMetrics()
|
||||
return KubeProxyMetrics(result)
|
||||
}
|
||||
|
||||
func parseKubeProxyMetrics(data string) (KubeProxyMetrics, error) {
|
||||
result := newKubeProxyMetricsMetrics()
|
||||
if err := testutil.ParseMetrics(data, (*testutil.Metrics)(&result)); err != nil {
|
||||
return KubeProxyMetrics{}, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
45 vendor/k8s.io/kubernetes/test/e2e/framework/metrics/metrics_grabber.go (generated, vendored)
@ -22,6 +22,7 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@ -32,8 +33,9 @@ import (
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -43,6 +45,8 @@ const (
|
||||
kubeControllerManagerPort = 10257
|
||||
// snapshotControllerPort is the port for the snapshot controller
|
||||
snapshotControllerPort = 9102
|
||||
// kubeProxyPort is the default port for the kube-proxy status server.
|
||||
kubeProxyPort = 10249
|
||||
)
|
||||
|
||||
// MetricsGrabbingDisabledError is an error that is wrapped by the
|
||||
@ -233,6 +237,45 @@ func (g *Grabber) getMetricsFromNode(ctx context.Context, nodeName string, kubel
|
||||
}
|
||||
}
|
||||
|
||||
// GrabFromKubeProxy returns metrics from kube-proxy
|
||||
func (g *Grabber) GrabFromKubeProxy(ctx context.Context, nodeName string) (KubeProxyMetrics, error) {
|
||||
nodes, err := g.client.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": nodeName}.AsSelector().String()})
|
||||
if err != nil {
|
||||
return KubeProxyMetrics{}, err
|
||||
}
|
||||
|
||||
if len(nodes.Items) != 1 {
|
||||
return KubeProxyMetrics{}, fmt.Errorf("error listing nodes with name %v, got %v", nodeName, nodes.Items)
|
||||
}
|
||||
output, err := g.grabFromKubeProxy(ctx, nodeName)
|
||||
if err != nil {
|
||||
return KubeProxyMetrics{}, err
|
||||
}
|
||||
return parseKubeProxyMetrics(output)
|
||||
}
|
||||
|
||||
func (g *Grabber) grabFromKubeProxy(ctx context.Context, nodeName string) (string, error) {
|
||||
hostCmdPodName := fmt.Sprintf("grab-kube-proxy-metrics-%s", framework.RandomSuffix())
|
||||
hostCmdPod := e2epod.NewExecPodSpec(metav1.NamespaceSystem, hostCmdPodName, true)
|
||||
nodeSelection := e2epod.NodeSelection{Name: nodeName}
|
||||
e2epod.SetNodeSelection(&hostCmdPod.Spec, nodeSelection)
|
||||
if _, err := g.client.CoreV1().Pods(metav1.NamespaceSystem).Create(ctx, hostCmdPod, metav1.CreateOptions{}); err != nil {
|
||||
return "", fmt.Errorf("failed to create pod to fetch metrics: %w", err)
|
||||
}
|
||||
if err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, g.client, hostCmdPodName, metav1.NamespaceSystem, 5*time.Minute); err != nil {
|
||||
return "", fmt.Errorf("error waiting for pod to be up: %w", err)
|
||||
}
|
||||
|
||||
host := "127.0.0.1"
|
||||
if framework.TestContext.ClusterIsIPv6() {
|
||||
host = "::1"
|
||||
}
|
||||
|
||||
stdout, err := e2epodoutput.RunHostCmd(metav1.NamespaceSystem, hostCmdPodName, fmt.Sprintf("curl --silent %s/metrics", net.JoinHostPort(host, strconv.Itoa(kubeProxyPort))))
|
||||
_ = g.client.CoreV1().Pods(metav1.NamespaceSystem).Delete(ctx, hostCmdPodName, metav1.DeleteOptions{})
|
||||
return stdout, err
|
||||
}
|
||||
|
||||
// GrabFromScheduler returns metrics from scheduler
|
||||
func (g *Grabber) GrabFromScheduler(ctx context.Context) (SchedulerMetrics, error) {
|
||||
if !g.grabFromScheduler {
|
||||
|
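The new GrabFromKubeProxy path above scrapes kube-proxy's /metrics endpoint (port 10249) through a host-network exec pod and returns the KubeProxyMetrics type added in kube_proxy_metrics.go. A hypothetical helper showing how a test might consume it; the grabber construction and the metric name are assumptions, not part of the diff.

package example

import (
	"context"
	"fmt"

	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
)

// readKubeProxyCounter assumes g was built by the e2e framework elsewhere and
// that metricName (a placeholder) appears in kube-proxy's scrape output.
func readKubeProxyCounter(ctx context.Context, g *e2emetrics.Grabber, nodeName, metricName string) (float64, error) {
	kp, err := g.GrabFromKubeProxy(ctx, nodeName) // added in this diff
	if err != nil {
		return 0, fmt.Errorf("grabbing kube-proxy metrics from node %s: %w", nodeName, err)
	}
	return kp.GetCounterMetricValue(metricName) // defined in kube_proxy_metrics.go
}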
3 vendor/k8s.io/kubernetes/test/e2e/framework/node/helper.go (generated, vendored)
@@ -46,10 +46,11 @@ func WaitForAllNodesSchedulable(ctx context.Context, c clientset.Interface, time
}

framework.Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, framework.TestContext.AllowedNotReadyNodes)
return wait.PollImmediateWithContext(
return wait.PollUntilContextTimeout(
ctx,
30*time.Second,
timeout,
true,
CheckReadyForTests(ctx, c, framework.TestContext.NonblockingTaints, framework.TestContext.AllowedNotReadyNodes, largeClusterThreshold),
)
}
4 vendor/k8s.io/kubernetes/test/e2e/framework/pod/create.go (generated, vendored)
@@ -131,7 +131,7 @@ func CreateSecPodWithNodeSelection(ctx context.Context, client clientset.Interfa
// name. A slice of BASH commands can be supplied as args to be run by the pod
func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, securityLevel admissionapi.Level, command string) *v1.Pod {
if len(command) == 0 {
command = "trap exit TERM; while true; do sleep 1; done"
command = InfiniteSleepCommand
}
podSpec := &v1.Pod{
TypeMeta: metav1.TypeMeta{
@@ -172,7 +172,7 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
return nil, fmt.Errorf("Cannot create pod with empty namespace")
}
if len(podConfig.Command) == 0 {
podConfig.Command = "trap exit TERM; while true; do sleep 1; done"
podConfig.Command = InfiniteSleepCommand
}

podName := "pod-" + string(uuid.NewUUID())
11 vendor/k8s.io/kubernetes/test/e2e/framework/pod/exec_util.go (generated, vendored)
@ -51,6 +51,10 @@ type ExecOptions struct {
|
||||
// returning stdout, stderr and error. `options` allowed for
|
||||
// additional parameters to be passed.
|
||||
func ExecWithOptions(f *framework.Framework, options ExecOptions) (string, string, error) {
|
||||
return ExecWithOptionsContext(context.Background(), f, options)
|
||||
}
|
||||
|
||||
func ExecWithOptionsContext(ctx context.Context, f *framework.Framework, options ExecOptions) (string, string, error) {
|
||||
if !options.Quiet {
|
||||
framework.Logf("ExecWithOptions %+v", options)
|
||||
}
|
||||
@ -77,7 +81,8 @@ func ExecWithOptions(f *framework.Framework, options ExecOptions) (string, strin
|
||||
|
||||
var stdout, stderr bytes.Buffer
|
||||
framework.Logf("ExecWithOptions: execute(POST %s)", req.URL())
|
||||
err = execute("POST", req.URL(), config, options.Stdin, &stdout, &stderr, tty)
|
||||
err = execute(ctx, "POST", req.URL(), config, options.Stdin, &stdout, &stderr, tty)
|
||||
|
||||
if options.PreserveWhitespace {
|
||||
return stdout.String(), stderr.String(), err
|
||||
}
|
||||
@ -139,12 +144,12 @@ func ExecShellInPodWithFullOutput(ctx context.Context, f *framework.Framework, p
|
||||
return execCommandInPodWithFullOutput(ctx, f, podName, "/bin/sh", "-c", cmd)
|
||||
}
|
||||
|
||||
func execute(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error {
|
||||
func execute(ctx context.Context, method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error {
|
||||
exec, err := remotecommand.NewSPDYExecutor(config, method, url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return exec.StreamWithContext(context.Background(), remotecommand.StreamOptions{
|
||||
return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
|
||||
Stdin: stdin,
|
||||
Stdout: stdout,
|
||||
Stderr: stderr,
|
||||
|
63 vendor/k8s.io/kubernetes/test/e2e/framework/pod/pod_client.go (generated, vendored)
@ -36,6 +36,7 @@ import (
|
||||
"k8s.io/kubectl/pkg/util/podutils"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
ginkgotypes "github.com/onsi/ginkgo/v2/types"
|
||||
"github.com/onsi/gomega"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
@ -56,6 +57,19 @@ const (
|
||||
|
||||
// it is copied from k8s.io/kubernetes/pkg/kubelet/sysctl
|
||||
forbiddenReason = "SysctlForbidden"
|
||||
|
||||
// which test created this pod?
|
||||
AnnotationTestOwner = "owner.test"
|
||||
)
|
||||
|
||||
// global flags so we can enable features per-suite instead of per-client.
|
||||
var (
|
||||
// GlobalOwnerTracking controls if newly created PodClients should automatically annotate
|
||||
// the pod with the owner test. The owner test is identified by "sourcecodepath:linenumber".
|
||||
// Annotating the pods this way is useful to troubleshoot tests which do insufficient cleanup.
|
||||
// Default is false to maximize backward compatibility.
|
||||
// See also: WithOwnerTracking, AnnotationTestOwner
|
||||
GlobalOwnerTracking bool
|
||||
)
|
||||
|
||||
// ImagePrePullList is the images used in the current test suite. It should be initialized in test suite and
|
||||
@ -68,9 +82,10 @@ var ImagePrePullList sets.String
|
||||
// node e2e pod scheduling.
|
||||
func NewPodClient(f *framework.Framework) *PodClient {
|
||||
return &PodClient{
|
||||
f: f,
|
||||
PodInterface: f.ClientSet.CoreV1().Pods(f.Namespace.Name),
|
||||
namespace: f.Namespace.Name,
|
||||
f: f,
|
||||
PodInterface: f.ClientSet.CoreV1().Pods(f.Namespace.Name),
|
||||
namespace: f.Namespace.Name,
|
||||
ownerTracking: GlobalOwnerTracking,
|
||||
}
|
||||
}
|
||||
|
||||
@ -79,9 +94,10 @@ func NewPodClient(f *framework.Framework) *PodClient {
|
||||
// node e2e pod scheduling.
|
||||
func PodClientNS(f *framework.Framework, namespace string) *PodClient {
|
||||
return &PodClient{
|
||||
f: f,
|
||||
PodInterface: f.ClientSet.CoreV1().Pods(namespace),
|
||||
namespace: namespace,
|
||||
f: f,
|
||||
PodInterface: f.ClientSet.CoreV1().Pods(namespace),
|
||||
namespace: namespace,
|
||||
ownerTracking: GlobalOwnerTracking,
|
||||
}
|
||||
}
|
||||
|
||||
@ -89,19 +105,34 @@ func PodClientNS(f *framework.Framework, namespace string) *PodClient {
|
||||
type PodClient struct {
|
||||
f *framework.Framework
|
||||
v1core.PodInterface
|
||||
namespace string
|
||||
namespace string
|
||||
ownerTracking bool
|
||||
}
|
||||
|
||||
// WithOwnerTracking controls automatic add of annotations recording the code location
|
||||
// which created a pod. This is helpful when troubleshooting e2e tests (like e2e_node)
|
||||
// which leak pods because insufficient cleanup.
|
||||
// Note we want a shallow clone to avoid mutating the receiver.
|
||||
// The default is the value of GlobalOwnerTracking *when the client was created*.
|
||||
func (c PodClient) WithOwnerTracking(value bool) *PodClient {
|
||||
c.ownerTracking = value
|
||||
return &c
|
||||
}
|
||||
|
||||
// Create creates a new pod according to the framework specifications (don't wait for it to start).
|
||||
func (c *PodClient) Create(ctx context.Context, pod *v1.Pod) *v1.Pod {
|
||||
ginkgo.GinkgoHelper()
|
||||
c.mungeSpec(pod)
|
||||
c.setOwnerAnnotation(pod)
|
||||
p, err := c.PodInterface.Create(ctx, pod, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err, "Error creating Pod")
|
||||
return p
|
||||
|
||||
}
|
||||
|
||||
// CreateSync creates a new pod according to the framework specifications, and wait for it to start and be running and ready.
|
||||
func (c *PodClient) CreateSync(ctx context.Context, pod *v1.Pod) *v1.Pod {
|
||||
ginkgo.GinkgoHelper()
|
||||
p := c.Create(ctx, pod)
|
||||
framework.ExpectNoError(WaitTimeoutForPodReadyInNamespace(ctx, c.f.ClientSet, p.Name, c.namespace, framework.PodStartTimeout))
|
||||
// Get the newest pod after it becomes running and ready, some status may change after pod created, such as pod ip.
|
||||
@ -112,6 +143,7 @@ func (c *PodClient) CreateSync(ctx context.Context, pod *v1.Pod) *v1.Pod {
|
||||
|
||||
// CreateBatch create a batch of pods. All pods are created before waiting.
|
||||
func (c *PodClient) CreateBatch(ctx context.Context, pods []*v1.Pod) []*v1.Pod {
|
||||
ginkgo.GinkgoHelper()
|
||||
ps := make([]*v1.Pod, len(pods))
|
||||
var wg sync.WaitGroup
|
||||
for i, pod := range pods {
|
||||
@ -130,7 +162,7 @@ func (c *PodClient) CreateBatch(ctx context.Context, pods []*v1.Pod) []*v1.Pod {
|
||||
// there is any other apierrors. name is the pod name, updateFn is the function updating the
|
||||
// pod object.
|
||||
func (c *PodClient) Update(ctx context.Context, name string, updateFn func(pod *v1.Pod)) {
|
||||
framework.ExpectNoError(wait.PollWithContext(ctx, time.Millisecond*500, time.Second*30, func(ctx context.Context) (bool, error) {
|
||||
framework.ExpectNoError(wait.PollUntilContextTimeout(ctx, time.Millisecond*500, time.Second*30, false, func(ctx context.Context) (bool, error) {
|
||||
pod, err := c.PodInterface.Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to get pod %q: %w", name, err)
|
||||
@ -192,6 +224,19 @@ func (c *PodClient) DeleteSync(ctx context.Context, name string, options metav1.
|
||||
framework.ExpectNoError(WaitForPodNotFoundInNamespace(ctx, c.f.ClientSet, name, c.namespace, timeout), "wait for pod %q to disappear", name)
|
||||
}
|
||||
|
||||
// addTestOrigin adds annotations to help identifying tests which incorrectly leak pods because insufficient cleanup
|
||||
func (c *PodClient) setOwnerAnnotation(pod *v1.Pod) {
|
||||
if !c.ownerTracking {
|
||||
return
|
||||
}
|
||||
ginkgo.GinkgoHelper()
|
||||
location := ginkgotypes.NewCodeLocation(0)
|
||||
if pod.Annotations == nil {
|
||||
pod.Annotations = make(map[string]string)
|
||||
}
|
||||
pod.Annotations[AnnotationTestOwner] = fmt.Sprintf("%s:%d", location.FileName, location.LineNumber)
|
||||
}
|
||||
|
||||
// mungeSpec apply test-suite specific transformations to the pod spec.
|
||||
func (c *PodClient) mungeSpec(pod *v1.Pod) {
|
||||
if !framework.TestContext.NodeE2E {
|
||||
@ -264,7 +309,7 @@ func (c *PodClient) WaitForFinish(ctx context.Context, name string, timeout time
|
||||
// WaitForErrorEventOrSuccess waits for pod to succeed or an error event for that pod.
|
||||
func (c *PodClient) WaitForErrorEventOrSuccess(ctx context.Context, pod *v1.Pod) (*v1.Event, error) {
|
||||
var ev *v1.Event
|
||||
err := wait.PollWithContext(ctx, framework.Poll, framework.PodStartTimeout, func(ctx context.Context) (bool, error) {
|
||||
err := wait.PollUntilContextTimeout(ctx, framework.Poll, framework.PodStartTimeout, false, func(ctx context.Context) (bool, error) {
|
||||
evnts, err := c.f.ClientSet.CoreV1().Events(pod.Namespace).Search(scheme.Scheme, pod)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error in listing events: %w", err)
|
||||
|
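pod_client.go gains opt-in owner tracking: new PodClients copy GlobalOwnerTracking, and WithOwnerTracking toggles it per client so Create stamps the pod with the "owner.test" annotation (test file:line). A hypothetical usage sketch, assuming the framework and pod packages are imported as in the diff.

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// createTrackedPod enables owner tracking suite-wide and for this one client,
// so leaked pods can be traced back to the test that created them.
func createTrackedPod(ctx context.Context, f *framework.Framework, pod *v1.Pod) *v1.Pod {
	e2epod.GlobalOwnerTracking = true // suite-wide default for newly created PodClients
	return e2epod.NewPodClient(f).WithOwnerTracking(true).Create(ctx, pod)
}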
6 vendor/k8s.io/kubernetes/test/e2e/framework/pod/utils.go (generated, vendored)
@@ -43,11 +43,11 @@ func NodeOSDistroIs(distro string) bool {
return false
}

const InfiniteSleepCommand = "trap exit TERM; while true; do sleep 1; done"

// GenerateScriptCmd generates the corresponding command lines to execute a command.
func GenerateScriptCmd(command string) []string {
var commands []string
commands = []string{"/bin/sh", "-c", command}
return commands
return []string{"/bin/sh", "-c", command}
}

// GetDefaultTestImage returns the default test image based on OS.
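The literal sleep loop used by MakePod and MakeSecPod is now the exported InfiniteSleepCommand constant above, and GenerateScriptCmd simply wraps a command for /bin/sh. A tiny illustrative helper (not part of the diff):

package example

import (
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// idleCommand returns the shell invocation for a container that just idles
// until it receives TERM, using the constant introduced in this diff.
func idleCommand() []string {
	// Yields []string{"/bin/sh", "-c", "trap exit TERM; while true; do sleep 1; done"}.
	return e2epod.GenerateScriptCmd(e2epod.InfiniteSleepCommand)
}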
119 vendor/k8s.io/kubernetes/test/e2e/framework/pod/wait.go (generated, vendored)
@ -99,17 +99,22 @@ func BeInPhase(phase v1.PodPhase) types.GomegaMatcher {
|
||||
}).WithTemplate("Expected Pod {{.To}} be in {{format .Data}}\nGot instead:\n{{.FormattedActual}}").WithTemplateData(phase)
|
||||
}
|
||||
|
||||
// WaitForPodsRunningReady waits up to timeout to ensure that all pods in
|
||||
// namespace ns are either running and ready, or failed but controlled by a
|
||||
// controller. Also, it ensures that at least minPods are running and
|
||||
// ready. It has separate behavior from other 'wait for' pods functions in
|
||||
// that it requests the list of pods on every iteration. This is useful, for
|
||||
// example, in cluster startup, because the number of pods increases while
|
||||
// waiting. All pods that are in SUCCESS state are not counted.
|
||||
// WaitForAlmostAllReady waits up to timeout for the following conditions:
|
||||
// 1. At least minPods Pods in Namespace ns are Running and Ready
|
||||
// 2. All Pods in Namespace ns are either Ready or Succeeded
|
||||
// 3. All Pods part of a ReplicaSet or ReplicationController in Namespace ns are Ready
|
||||
//
|
||||
// After the timeout has elapsed, an error is returned if the number of Pods in a Pending Phase
|
||||
// is greater than allowedNotReadyPods.
|
||||
//
|
||||
// It is generally recommended to use WaitForPodsRunningReady instead of this function
|
||||
// whenever possible, because its behavior is more intuitive. Similar to WaitForPodsRunningReady,
|
||||
// this function requests the list of pods on every iteration, making it useful for situations
|
||||
// where the set of Pods is likely changing, such as during cluster startup.
|
||||
//
|
||||
// If minPods or allowedNotReadyPods are -1, this method returns immediately
|
||||
// without waiting.
|
||||
func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration) error {
|
||||
func WaitForAlmostAllPodsReady(ctx context.Context, c clientset.Interface, ns string, minPods, allowedNotReadyPods int, timeout time.Duration) error {
|
||||
if minPods == -1 || allowedNotReadyPods == -1 {
|
||||
return nil
|
||||
}
|
||||
@ -126,14 +131,12 @@ func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns stri
|
||||
Pods []v1.Pod
|
||||
}
|
||||
|
||||
// notReady is -1 for any failure other than a timeout.
|
||||
// Otherwise it is the number of pods that we were still
|
||||
// waiting for.
|
||||
notReady := int32(-1)
|
||||
nOk := 0
|
||||
badPods := []v1.Pod{}
|
||||
otherPods := []v1.Pod{}
|
||||
succeededPods := []string{}
|
||||
|
||||
err := framework.Gomega().Eventually(ctx, framework.HandleRetry(func(ctx context.Context) (*state, error) {
|
||||
// Reset notReady at the start of a poll attempt.
|
||||
notReady = -1
|
||||
|
||||
rcList, err := c.CoreV1().ReplicationControllers(ns).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
@ -163,11 +166,10 @@ func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns stri
|
||||
replicaOk += rs.Status.ReadyReplicas
|
||||
}
|
||||
|
||||
nOk := int32(0)
|
||||
notReady = int32(0)
|
||||
failedPods := []v1.Pod{}
|
||||
otherPods := []v1.Pod{}
|
||||
succeededPods := []string{}
|
||||
nOk = 0
|
||||
badPods = []v1.Pod{}
|
||||
otherPods = []v1.Pod{}
|
||||
succeededPods = []string{}
|
||||
for _, pod := range s.Pods {
|
||||
res, err := testutils.PodRunningReady(&pod)
|
||||
switch {
|
||||
@ -179,14 +181,13 @@ func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns stri
|
||||
case pod.Status.Phase == v1.PodFailed:
|
||||
// ignore failed pods that are controlled by some controller
|
||||
if metav1.GetControllerOf(&pod) == nil {
|
||||
failedPods = append(failedPods, pod)
|
||||
badPods = append(badPods, pod)
|
||||
}
|
||||
default:
|
||||
notReady++
|
||||
otherPods = append(otherPods, pod)
|
||||
}
|
||||
}
|
||||
done := replicaOk == replicas && nOk >= minPods && (len(failedPods)+len(otherPods)) == 0
|
||||
done := replicaOk == replicas && nOk >= minPods && (len(badPods)+len(otherPods)) == 0
|
||||
if done {
|
||||
return nil, nil
|
||||
}
|
||||
@ -200,8 +201,8 @@ func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns stri
|
||||
if len(succeededPods) > 0 {
|
||||
buffer.WriteString(fmt.Sprintf("Pods that completed successfully:\n%s", format.Object(succeededPods, 1)))
|
||||
}
|
||||
if len(failedPods) > 0 {
|
||||
buffer.WriteString(fmt.Sprintf("Pods that failed and were not controlled by some controller:\n%s", format.Object(failedPods, 1)))
|
||||
if len(badPods) > 0 {
|
||||
buffer.WriteString(fmt.Sprintf("Pods that failed and were not controlled by some controller:\n%s", format.Object(badPods, 1)))
|
||||
}
|
||||
if len(otherPods) > 0 {
|
||||
buffer.WriteString(fmt.Sprintf("Pods that were neither completed nor running:\n%s", format.Object(otherPods, 1)))
|
||||
@ -211,13 +212,79 @@ func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns stri
|
||||
}))
|
||||
|
||||
// An error might not be fatal.
|
||||
if err != nil && notReady >= 0 && notReady <= allowedNotReadyPods {
|
||||
framework.Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods)
|
||||
if len(otherPods) <= allowedNotReadyPods {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// WaitForPodsRunningReady waits up to timeout for the following conditions:
|
||||
// 1. At least minPods Pods in Namespace ns are Running and Ready
|
||||
// 2. No Pods in Namespace ns are Failed and not owned by a controller or Pending
|
||||
//
|
||||
// An error is returned if either of these conditions are not met within the timeout.
|
||||
//
|
||||
// It has separate behavior from other 'wait for' pods functions in
|
||||
// that it requests the list of pods on every iteration. This is useful, for
|
||||
// example, in cluster startup, because the number of pods increases while
|
||||
// waiting. All pods that are in SUCCESS state are not counted.
|
||||
func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns string, minPods int, timeout time.Duration) error {
|
||||
|
||||
return framework.Gomega().Eventually(ctx, framework.HandleRetry(func(ctx context.Context) ([]v1.Pod, error) {
|
||||
|
||||
podList, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("listing pods in namespace %s: %w", ns, err)
|
||||
}
|
||||
return podList.Items, nil
|
||||
})).WithTimeout(timeout).Should(framework.MakeMatcher(func(pods []v1.Pod) (func() string, error) {
|
||||
|
||||
nOk := 0
|
||||
badPods := []v1.Pod{}
|
||||
otherPods := []v1.Pod{}
|
||||
succeededPods := []string{}
|
||||
|
||||
for _, pod := range pods {
|
||||
res, err := testutils.PodRunningReady(&pod)
|
||||
switch {
|
||||
case res && err == nil:
|
||||
nOk++
|
||||
case pod.Status.Phase == v1.PodSucceeded:
|
||||
// ignore succeeded pods
|
||||
succeededPods = append(succeededPods, pod.Name)
|
||||
case pod.Status.Phase == v1.PodFailed:
|
||||
// ignore failed pods that are controlled by some controller
|
||||
if metav1.GetControllerOf(&pod) == nil {
|
||||
badPods = append(badPods, pod)
|
||||
}
|
||||
default:
|
||||
otherPods = append(otherPods, pod)
|
||||
}
|
||||
}
|
||||
if nOk >= minPods && len(badPods)+len(otherPods) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Delayed formatting of a failure message.
|
||||
return func() string {
|
||||
var buffer strings.Builder
|
||||
buffer.WriteString(fmt.Sprintf("Expected all pods (need at least %d) in namespace %q to be running and ready \n", minPods, ns))
|
||||
buffer.WriteString(fmt.Sprintf("%d / %d pods were running and ready.\n", nOk, len(pods)))
|
||||
if len(succeededPods) > 0 {
|
||||
buffer.WriteString(fmt.Sprintf("Pods that completed successfully:\n%s", format.Object(succeededPods, 1)))
|
||||
}
|
||||
if len(badPods) > 0 {
|
||||
buffer.WriteString(fmt.Sprintf("Pods that failed and were not controlled by some controller:\n%s", format.Object(badPods, 1)))
|
||||
}
|
||||
if len(otherPods) > 0 {
|
||||
buffer.WriteString(fmt.Sprintf("Pods that were neither completed nor running:\n%s", format.Object(otherPods, 1)))
|
||||
}
|
||||
return buffer.String()
|
||||
}, nil
|
||||
}))
|
||||
|
||||
}
|
||||
|
||||
// WaitForPodCondition waits a pods to be matched to the given condition.
|
||||
// The condition callback may use gomega.StopTrying to abort early.
|
||||
func WaitForPodCondition(ctx context.Context, c clientset.Interface, ns, podName, conditionDesc string, timeout time.Duration, condition podCondition) error {
|
||||
|
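The rework above splits the old tolerant helper in two: WaitForAlmostAllPodsReady keeps the allowedNotReadyPods tolerance, while the new WaitForPodsRunningReady takes only minPods (now an int) and fails on any bad pod. Hypothetical call sites, assuming a clientset c and namespace ns; the counts and timeout are placeholders.

package example

import (
	"context"
	"time"

	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitForSystemPods shows both helpers from the diff.
func waitForSystemPods(ctx context.Context, c clientset.Interface, ns string) error {
	// Strict variant: every pod must be running/ready (succeeded or controller-owned failed pods are ignored).
	if err := e2epod.WaitForPodsRunningReady(ctx, c, ns, 3, 5*time.Minute); err != nil {
		return err
	}
	// Tolerant variant: up to 1 pod may still be pending when the timeout expires.
	return e2epod.WaitForAlmostAllPodsReady(ctx, c, ns, 3, 1, 5*time.Minute)
}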
27 vendor/k8s.io/kubernetes/test/e2e/framework/provider_less.go (generated, vendored)
@ -1,27 +0,0 @@
|
||||
//go:build providerless
|
||||
// +build providerless
|
||||
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
func init() {
|
||||
// fake "gce"
|
||||
RegisterProvider("gce", func() (ProviderInterface, error) {
|
||||
return NullProvider{}, nil
|
||||
})
|
||||
}
|
28 vendor/k8s.io/kubernetes/test/e2e/framework/pv/pv.go (generated, vendored)
@ -93,6 +93,9 @@ type PersistentVolumeConfig struct {
|
||||
// [Optional] Labels contains information used to organize and categorize
|
||||
// objects
|
||||
Labels labels.Set
|
||||
// [Optional] Annotations contains information used to organize and categorize
|
||||
// objects
|
||||
Annotations map[string]string
|
||||
// PVSource contains the details of the underlying volume and must be set
|
||||
PVSource v1.PersistentVolumeSource
|
||||
// [Optional] Prebind lets you specify a PVC to bind this PV to before
|
||||
@ -124,10 +127,11 @@ type PersistentVolumeClaimConfig struct {
|
||||
// unspecified
|
||||
ClaimSize string
|
||||
// AccessModes defaults to RWO if unspecified
|
||||
AccessModes []v1.PersistentVolumeAccessMode
|
||||
Annotations map[string]string
|
||||
Selector *metav1.LabelSelector
|
||||
StorageClassName *string
|
||||
AccessModes []v1.PersistentVolumeAccessMode
|
||||
Annotations map[string]string
|
||||
Selector *metav1.LabelSelector
|
||||
StorageClassName *string
|
||||
VolumeAttributesClassName *string
|
||||
// VolumeMode defaults to nil if unspecified or specified as the empty
|
||||
// string
|
||||
VolumeMode *v1.PersistentVolumeMode
|
||||
@ -595,13 +599,18 @@ func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume
|
||||
}
|
||||
}
|
||||
|
||||
annotations := map[string]string{
|
||||
volumeGidAnnotationKey: "777",
|
||||
}
|
||||
for k, v := range pvConfig.Annotations {
|
||||
annotations[k] = v
|
||||
}
|
||||
|
||||
return &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: pvConfig.NamePrefix,
|
||||
Labels: pvConfig.Labels,
|
||||
Annotations: map[string]string{
|
||||
volumeGidAnnotationKey: "777",
|
||||
},
|
||||
Annotations: annotations,
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeReclaimPolicy: pvConfig.ReclaimPolicy,
|
||||
@ -653,8 +662,9 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P
|
||||
v1.ResourceStorage: resource.MustParse(cfg.ClaimSize),
|
||||
},
|
||||
},
|
||||
StorageClassName: cfg.StorageClassName,
|
||||
VolumeMode: cfg.VolumeMode,
|
||||
StorageClassName: cfg.StorageClassName,
|
||||
VolumeAttributesClassName: cfg.VolumeAttributesClassName,
|
||||
VolumeMode: cfg.VolumeMode,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
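PersistentVolumeConfig now accepts extra Annotations and PersistentVolumeClaimConfig gains VolumeAttributesClassName, both merged into the generated objects. A hypothetical claim config using the new field; the class names are placeholders, not part of the diff.

package example

import (
	v1 "k8s.io/api/core/v1"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
)

// makeClaimWithVAC builds a PVC spec that references a VolumeAttributesClass;
// "fast-class" and scName are placeholder names.
func makeClaimWithVAC(ns string, scName string) *v1.PersistentVolumeClaim {
	vac := "fast-class"
	cfg := e2epv.PersistentVolumeClaimConfig{
		ClaimSize:                 "1Gi",
		StorageClassName:          &scName,
		VolumeAttributesClassName: &vac, // new field in this diff
	}
	return e2epv.MakePersistentVolumeClaim(cfg, ns)
}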
13 vendor/k8s.io/kubernetes/test/e2e/framework/skipper/skipper.go (generated, vendored)
@@ -56,8 +56,7 @@ func SkipUnlessAtLeast(value int, minValue int, message string) {
var featureGate featuregate.FeatureGate

// InitFeatureGates must be called in test suites that have a --feature-gates parameter.
// If not called, SkipUnlessFeatureGateEnabled and SkipIfFeatureGateEnabled will
// record a test failure.
// If not called, SkipUnlessFeatureGateEnabled will record a test failure.
func InitFeatureGates(defaults featuregate.FeatureGate, overrides map[string]bool) error {
clone := defaults.DeepCopy()
if err := clone.SetFromMap(overrides); err != nil {
@@ -67,6 +66,16 @@ func InitFeatureGates(defaults featuregate.FeatureGate, overrides map[string]boo
return nil
}

// IsFeatureGateEnabled can be used during e2e tests to figure out if a certain feature gate is enabled.
// This function is dependent on InitFeatureGates under the hood. Therefore, the test must be called with a
// --feature-gates parameter.
func IsFeatureGateEnabled(feature featuregate.Feature) bool {
if featureGate == nil {
framework.Failf("feature gate interface is not initialized")
}
return featureGate.Enabled(feature)
}

// SkipUnlessFeatureGateEnabled skips if the feature is disabled.
//
// Beware that this only works in test suites that have a --feature-gate
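The new IsFeatureGateEnabled helper lets a test branch on a gate instead of skipping; it still requires InitFeatureGates to have been called with --feature-gates. A hypothetical check; the gate name is a placeholder, not a real Kubernetes feature gate.

package example

import (
	"k8s.io/component-base/featuregate"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
)

// gateEnabled wraps the helper added in this diff.
func gateEnabled() bool {
	return e2eskipper.IsFeatureGateEnabled(featuregate.Feature("SomeFeature"))
}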
4 vendor/k8s.io/kubernetes/test/e2e/framework/ssh/ssh.go (generated, vendored)
@@ -246,7 +246,7 @@ func runSSHCommand(ctx context.Context, cmd, user, host string, signer ssh.Signe
}
client, err := ssh.Dial("tcp", host, config)
if err != nil {
err = wait.PollWithContext(ctx, 5*time.Second, 20*time.Second, func(ctx context.Context) (bool, error) {
err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
fmt.Printf("error dialing %s@%s: '%v', retrying\n", user, host, err)
if client, err = ssh.Dial("tcp", host, config); err != nil {
return false, nil // retrying, error will be logged above
@@ -300,7 +300,7 @@ func runSSHCommandViaBastion(ctx context.Context, cmd, user, bastion, host strin
}
bastionClient, err := ssh.Dial("tcp", bastion, config)
if err != nil {
err = wait.PollWithContext(ctx, 5*time.Second, 20*time.Second, func(ctx context.Context) (bool, error) {
err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 20*time.Second, false, func(ctx context.Context) (bool, error) {
fmt.Printf("error dialing %s@%s: '%v', retrying\n", user, bastion, err)
if bastionClient, err = ssh.Dial("tcp", bastion, config); err != nil {
return false, err
2 vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go (generated, vendored)
@@ -399,7 +399,7 @@ func CreateGinkgoConfig() (types.SuiteConfig, types.ReporterConfig) {
// Randomize specs as well as suites
suiteConfig.RandomizeAllSpecs = true
// Disable skipped tests unless they are explicitly requested.
if len(suiteConfig.FocusStrings) == 0 && len(suiteConfig.SkipStrings) == 0 {
if len(suiteConfig.FocusStrings) == 0 && len(suiteConfig.SkipStrings) == 0 && suiteConfig.LabelFilter == "" {
suiteConfig.SkipStrings = []string{`\[Flaky\]|\[Feature:.+\]`}
}
return suiteConfig, reporterConfig
17 vendor/k8s.io/kubernetes/test/e2e/framework/util.go (generated, vendored)
@ -53,7 +53,6 @@ import (
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
watchtools "k8s.io/client-go/tools/watch"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
netutils "k8s.io/utils/net"
|
||||
)
|
||||
|
||||
@ -132,14 +131,8 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
// BusyBoxImage is the image URI of BusyBox.
|
||||
BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
|
||||
|
||||
// ProvidersWithSSH are those providers where each node is accessible with SSH
|
||||
ProvidersWithSSH = []string{"gce", "gke", "aws", "local", "azure"}
|
||||
|
||||
// ServeHostnameImage is a serve hostname image name.
|
||||
ServeHostnameImage = imageutils.GetE2EImage(imageutils.Agnhost)
|
||||
)
|
||||
|
||||
// RunID is a unique identifier of the e2e run.
|
||||
@ -245,7 +238,7 @@ func WaitForNamespacesDeleted(ctx context.Context, c clientset.Interface, namesp
|
||||
nsMap[ns] = true
|
||||
}
|
||||
//Now POLL until all namespaces have been eradicated.
|
||||
return wait.PollWithContext(ctx, 2*time.Second, timeout,
|
||||
return wait.PollUntilContextTimeout(ctx, 2*time.Second, timeout, false,
|
||||
func(ctx context.Context) (bool, error) {
|
||||
nsList, err := c.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
@ -423,7 +416,7 @@ func CheckTestingNSDeletedExcept(ctx context.Context, c clientset.Interface, ski
|
||||
// WaitForServiceEndpointsNum waits until the amount of endpoints that implement service to expectNum.
|
||||
// Some components use EndpointSlices other Endpoints, we must verify that both objects meet the requirements.
|
||||
func WaitForServiceEndpointsNum(ctx context.Context, c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
|
||||
return wait.PollWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
|
||||
return wait.PollUntilContextTimeout(ctx, interval, timeout, false, func(ctx context.Context) (bool, error) {
|
||||
Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum)
|
||||
endpoint, err := c.CoreV1().Endpoints(namespace).Get(ctx, serviceName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
@ -622,8 +615,10 @@ func CoreDump(dir string) {
|
||||
Logf("Dumping logs locally to: %s", dir)
|
||||
cmd = exec.Command(path.Join(TestContext.RepoRoot, "cluster", "log-dump", "log-dump.sh"), dir)
|
||||
}
|
||||
cmd.Env = append(os.Environ(), fmt.Sprintf("LOG_DUMP_SYSTEMD_SERVICES=%s", parseSystemdServices(TestContext.SystemdServices)))
|
||||
cmd.Env = append(os.Environ(), fmt.Sprintf("LOG_DUMP_SYSTEMD_JOURNAL=%v", TestContext.DumpSystemdJournal))
|
||||
env := os.Environ()
|
||||
env = append(env, fmt.Sprintf("LOG_DUMP_SYSTEMD_SERVICES=%s", parseSystemdServices(TestContext.SystemdServices)))
|
||||
env = append(env, fmt.Sprintf("LOG_DUMP_SYSTEMD_JOURNAL=%v", TestContext.DumpSystemdJournal))
|
||||
cmd.Env = env
|
||||
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
25 vendor/k8s.io/kubernetes/test/e2e/storage/utils/create.go (generated, vendored)
@ -29,6 +29,7 @@ import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
storagev1 "k8s.io/api/storage/v1"
|
||||
storagev1beta1 "k8s.io/api/storage/v1beta1"
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
@ -267,6 +268,7 @@ var factories = map[What]ItemFactory{
|
||||
{"StatefulSet"}: &statefulSetFactory{},
|
||||
{"Deployment"}: &deploymentFactory{},
|
||||
{"StorageClass"}: &storageClassFactory{},
|
||||
{"VolumeAttributesClass"}: &volumeAttributesClassFactory{},
|
||||
{"CustomResourceDefinition"}: &customResourceDefinitionFactory{},
|
||||
}
|
||||
|
||||
@ -314,6 +316,8 @@ func patchItemRecursively(f *framework.Framework, driverNamespace *v1.Namespace,
|
||||
PatchName(f, &item.Name)
|
||||
case *storagev1.StorageClass:
|
||||
PatchName(f, &item.Name)
|
||||
case *storagev1beta1.VolumeAttributesClass:
|
||||
PatchName(f, &item.Name)
|
||||
case *storagev1.CSIDriver:
|
||||
PatchName(f, &item.Name)
|
||||
case *v1.ServiceAccount:
|
||||
@ -618,6 +622,27 @@ func (*storageClassFactory) Create(ctx context.Context, f *framework.Framework,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type volumeAttributesClassFactory struct{}
|
||||
|
||||
func (f *volumeAttributesClassFactory) New() runtime.Object {
|
||||
return &storagev1beta1.VolumeAttributesClass{}
|
||||
}
|
||||
|
||||
func (*volumeAttributesClassFactory) Create(ctx context.Context, f *framework.Framework, ns *v1.Namespace, i interface{}) (func(ctx context.Context) error, error) {
|
||||
item, ok := i.(*storagev1beta1.VolumeAttributesClass)
|
||||
if !ok {
|
||||
return nil, errorItemNotSupported
|
||||
}
|
||||
|
||||
client := f.ClientSet.StorageV1beta1().VolumeAttributesClasses()
|
||||
if _, err := client.Create(ctx, item, metav1.CreateOptions{}); err != nil {
|
||||
return nil, fmt.Errorf("create VolumeAttributesClass: %w", err)
|
||||
}
|
||||
return func(ctx context.Context) error {
|
||||
return client.Delete(ctx, item.GetName(), metav1.DeleteOptions{})
|
||||
}, nil
|
||||
}
|
||||
|
||||
type csiDriverFactory struct{}
|
||||
|
||||
func (f *csiDriverFactory) New() runtime.Object {
|
||||
|
34 vendor/k8s.io/kubernetes/test/e2e/storage/utils/utils.go (generated, vendored)
@ -619,6 +619,40 @@ func WaitForGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.Gro
|
||||
return fmt.Errorf("%s %s is not deleted within %v", gvr.Resource, objectName, timeout)
|
||||
}
|
||||
|
||||
// EnsureGVRDeletion checks that no object as defined by the group/version/kind and name is ever found during the given time period
|
||||
func EnsureGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, objectName string, poll, timeout time.Duration, namespace string) error {
|
||||
var resourceClient dynamic.ResourceInterface
|
||||
if namespace != "" {
|
||||
resourceClient = c.Resource(gvr).Namespace(namespace)
|
||||
} else {
|
||||
resourceClient = c.Resource(gvr)
|
||||
}
|
||||
|
||||
err := framework.Gomega().Eventually(ctx, func(ctx context.Context) error {
|
||||
_, err := resourceClient.Get(ctx, objectName, metav1.GetOptions{})
|
||||
return err
|
||||
}).WithTimeout(timeout).WithPolling(poll).Should(gomega.MatchError(apierrors.IsNotFound, fmt.Sprintf("failed to delete %s %s", gvr, objectName)))
|
||||
return err
|
||||
}
|
||||
|
||||
// EnsureNoGVRDeletion checks that an object as defined by the group/version/kind and name has not been deleted during the given time period
|
||||
func EnsureNoGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, objectName string, poll, timeout time.Duration, namespace string) error {
|
||||
var resourceClient dynamic.ResourceInterface
|
||||
if namespace != "" {
|
||||
resourceClient = c.Resource(gvr).Namespace(namespace)
|
||||
} else {
|
||||
resourceClient = c.Resource(gvr)
|
||||
}
|
||||
err := framework.Gomega().Consistently(ctx, func(ctx context.Context) error {
|
||||
_, err := resourceClient.Get(ctx, objectName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get %s %s: %w", gvr.Resource, objectName, err)
|
||||
}
|
||||
return nil
|
||||
}).WithTimeout(timeout).WithPolling(poll).Should(gomega.Succeed())
|
||||
return err
|
||||
}
|
||||
|
||||
// WaitForNamespacedGVRDeletion waits until a namespaced object has been deleted
|
||||
func WaitForNamespacedGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, ns, objectName string, poll, timeout time.Duration) error {
|
||||
framework.Logf("Waiting up to %v for %s %s to be deleted", timeout, gvr.Resource, objectName)
|
||||
|
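EnsureGVRDeletion and EnsureNoGVRDeletion complement the existing WaitFor* helpers: the first asserts that an object (by GroupVersionResource and name) disappears within the window, the second that it stays. A hypothetical call, assuming a dynamic client and a PVC GVR; the poll and timeout values are placeholders.

package example

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
)

// checkPVCDeleted polls every 2s for up to 1 minute that the named PVC is
// gone from the namespace, using the helper added in this diff.
func checkPVCDeleted(ctx context.Context, dc dynamic.Interface, ns, name string) error {
	pvcGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumeclaims"}
	return storageutils.EnsureGVRDeletion(ctx, dc, pvcGVR, name, 2*time.Second, 1*time.Minute, ns)
}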
@ -54,7 +54,7 @@ spec:
|
||||
hostPath:
|
||||
path: /etc/nvidia
|
||||
initContainers:
|
||||
- image: "ubuntu"
|
||||
- image: "ubuntu@sha256:3f85b7caad41a95462cf5b787d8a04604c8262cdcdf9a472b8c52ef83375fe15"
|
||||
name: bind-mount-install-dir
|
||||
securityContext:
|
||||
privileged: true
|
||||
@ -142,6 +142,6 @@ spec:
|
||||
- name: nvidia-config
|
||||
mountPath: /etc/nvidia
|
||||
containers:
|
||||
- image: "registry.k8s.io/pause:3.9"
|
||||
- image: "registry.k8s.io/pause:3.10"
|
||||
name: pause
|
||||
|
||||
|
4 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/etcd/OWNERS (generated, vendored, Normal file)
@@ -0,0 +1,4 @@
# See the OWNERS docs at https://go.k8s.io/owners

labels:
- sig/etcd
@ -1,5 +1,5 @@
|
||||
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-attacher/raw/v4.5.0/deploy/kubernetes//rbac.yaml
|
||||
# for csi-driver-host-path release-1.13
|
||||
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-attacher/raw/v4.6.1/deploy/kubernetes//rbac.yaml
|
||||
# for csi-driver-host-path release-1.14
|
||||
# by ./update-hostpath.sh
|
||||
#
|
||||
# This YAML file contains all RBAC objects that are necessary to run external
|
||||
|
@ -1,5 +1,5 @@
|
||||
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-health-monitor/raw/v0.11.0/deploy/kubernetes/external-health-monitor-controller/rbac.yaml
|
||||
# for csi-driver-host-path release-1.13
|
||||
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-health-monitor/raw/v0.12.1/deploy/kubernetes/external-health-monitor-controller/rbac.yaml
|
||||
# for csi-driver-host-path release-1.14
|
||||
# by ./update-hostpath.sh
|
||||
#
|
||||
# This YAML file contains all RBAC objects that are necessary to run external
|
||||
|
@ -1,5 +1,5 @@
|
||||
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-provisioner/raw/v4.0.0/deploy/kubernetes//rbac.yaml
|
||||
# for csi-driver-host-path release-1.13
|
||||
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-provisioner/raw/v5.0.1/deploy/kubernetes//rbac.yaml
|
||||
# for csi-driver-host-path release-1.14
|
||||
# by ./update-hostpath.sh
|
||||
#
|
||||
# This YAML file contains all RBAC objects that are necessary to run external
|
||||
@ -32,7 +32,7 @@ rules:
|
||||
# verbs: ["get", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumes"]
|
||||
verbs: ["get", "list", "watch", "create", "delete"]
|
||||
verbs: ["get", "list", "watch", "create", "patch", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumeclaims"]
|
||||
verbs: ["get", "list", "watch", "update"]
|
||||
|
@ -1,5 +1,5 @@
|
||||
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-resizer/raw/v1.10.0/deploy/kubernetes//rbac.yaml
|
||||
# for csi-driver-host-path release-1.13
|
||||
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-resizer/raw/v1.11.1/deploy/kubernetes//rbac.yaml
|
||||
# for csi-driver-host-path release-1.14
|
||||
# by ./update-hostpath.sh
|
||||
#
|
||||
# This YAML file contains all RBAC objects that are necessary to run external
|
||||
|
@ -1,5 +1,5 @@
|
||||
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-snapshotter/raw/v7.0.1/deploy/kubernetes/csi-snapshotter/rbac-csi-snapshotter.yaml
|
||||
# for csi-driver-host-path release-1.13
|
||||
# Do not edit, downloaded from https://github.com/kubernetes-csi/external-snapshotter/raw/v8.0.1/deploy/kubernetes/csi-snapshotter/rbac-csi-snapshotter.yaml
|
||||
# for csi-driver-host-path release-1.14
|
||||
# by ./update-hostpath.sh
|
||||
#
|
||||
# Together with the RBAC file for external-provisioner, this YAML file
|
||||
|
2 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/README.md (generated, vendored)
@@ -1,4 +1,4 @@
The files in this directory are exact copies of "kubernetes-latest" in
https://github.com/kubernetes-csi/csi-driver-host-path/tree/release-1.13/deploy/
https://github.com/kubernetes-csi/csi-driver-host-path/tree/release-1.14/deploy/

Do not edit manually. Run ./update-hostpath.sh to refresh the content.
@ -219,7 +219,7 @@ spec:
|
||||
serviceAccountName: csi-hostpathplugin-sa
containers:
- name: hostpath
image: registry.k8s.io/sig-storage/hostpathplugin:v1.13.0
image: registry.k8s.io/sig-storage/hostpathplugin:v1.14.0
args:
- "--drivername=hostpath.csi.k8s.io"
- "--v=5"
@ -262,7 +262,7 @@ spec:
name: dev-dir

- name: csi-external-health-monitor-controller
image: registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.11.0
image: registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.12.1
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
@ -276,7 +276,7 @@ spec:
mountPath: /csi

- name: node-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -310,7 +310,7 @@ spec:
- --health-port=9898

- name: csi-attacher
image: registry.k8s.io/sig-storage/csi-attacher:v4.5.0
image: registry.k8s.io/sig-storage/csi-attacher:v4.6.1
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -324,7 +324,7 @@ spec:
name: socket-dir

- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.0
image: registry.k8s.io/sig-storage/csi-provisioner:v5.0.1
args:
- -v=5
- --csi-address=/csi/csi.sock
@ -340,7 +340,7 @@ spec:
name: socket-dir

- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.10.0
image: registry.k8s.io/sig-storage/csi-resizer:v1.11.1
args:
- -v=5
- -csi-address=/csi/csi.sock
@ -354,7 +354,7 @@ spec:
name: socket-dir

- name: csi-snapshotter
image: registry.k8s.io/sig-storage/csi-snapshotter:v7.0.1
image: registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1
args:
- -v=5
- --csi-address=/csi/csi.sock

@ -66,7 +66,7 @@ spec:
topologyKey: kubernetes.io/hostname
containers:
- name: socat
image: registry.k8s.io/sig-storage/hostpathplugin:v1.13.0
image: registry.k8s.io/sig-storage/hostpathplugin:v1.14.0
command:
- socat
args:

@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-attacher
image: registry.k8s.io/sig-storage/csi-attacher:v4.5.0
image: registry.k8s.io/sig-storage/csi-attacher:v4.6.1
args:
- --v=5
- --csi-address=$(ADDRESS)

@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.10.0
image: registry.k8s.io/sig-storage/csi-resizer:v1.11.1
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"

@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-snapshotter
image: registry.k8s.io/sig-storage/csi-snapshotter:v7.0.1
image: registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"

@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.0
image: registry.k8s.io/sig-storage/csi-provisioner:v5.0.1
args:
- "--csi-address=$(ADDRESS)"
# Topology support is needed for the pod rescheduling test
@ -34,7 +34,7 @@ spec:
- mountPath: /csi
name: socket-dir
- name: driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -53,7 +53,7 @@ spec:
- mountPath: /registration
name: registration-dir
- name: mock
image: registry.k8s.io/sig-storage/hostpathplugin:v1.13.0
image: registry.k8s.io/sig-storage/hostpathplugin:v1.14.0
args:
- "--drivername=mock.storage.k8s.io"
- "--nodeid=$(KUBE_NODE_NAME)"

@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.0
image: registry.k8s.io/sig-storage/csi-provisioner:v5.0.1
args:
- "--csi-address=$(ADDRESS)"
# Topology support is needed for the pod rescheduling test
@ -35,7 +35,7 @@ spec:
- mountPath: /csi
name: socket-dir
- name: driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -53,7 +53,7 @@ spec:
- mountPath: /registration
name: registration-dir
- name: mock
image: registry.k8s.io/sig-storage/hostpathplugin:v1.13.0
image: registry.k8s.io/sig-storage/hostpathplugin:v1.14.0
args:
- -v=5
- -nodeid=$(KUBE_NODE_NAME)
51
vendor/k8s.io/kubernetes/test/utils/audit.go
generated
vendored
51
vendor/k8s.io/kubernetes/test/utils/audit.go
generated
vendored
@ -66,6 +66,7 @@ type MissingEventsReport struct {
LastEventChecked *auditinternal.Event
NumEventsChecked int
MissingEvents []AuditEvent
AllEvents []AuditEvent
}

// String returns a human readable string representation of the report
@ -118,6 +119,7 @@ func CheckAuditLinesFiltered(stream io.Reader, expected []AuditEvent, version sc
}

expectations.Mark(event)
missingReport.AllEvents = append(missingReport.AllEvents, event)
}
if err := scanner.Err(); err != nil {
return missingReport, err
@ -128,55 +130,6 @@ func CheckAuditLinesFiltered(stream io.Reader, expected []AuditEvent, version sc
return missingReport, nil
}

// CheckAuditList searches an audit event list for the expected audit events.
func CheckAuditList(el auditinternal.EventList, expected []AuditEvent) (missing []AuditEvent, err error) {
expectations := newAuditEventTracker(expected)

for _, e := range el.Items {
event, err := testEventFromInternal(&e)
if err != nil {
return expected, err
}

expectations.Mark(event)
}

return expectations.Missing(), nil
}

// CheckForDuplicates checks a list for duplicate events
func CheckForDuplicates(el auditinternal.EventList) (auditinternal.EventList, error) {
// existingEvents holds a slice of audit events that have been seen
existingEvents := []AuditEvent{}
duplicates := auditinternal.EventList{}
for _, e := range el.Items {
event, err := testEventFromInternal(&e)
if err != nil {
return duplicates, err
}
event.ID = e.AuditID
for _, existing := range existingEvents {
if reflect.DeepEqual(existing, event) {
duplicates.Items = append(duplicates.Items, e)
continue
}
}
existingEvents = append(existingEvents, event)
}

var err error
if len(duplicates.Items) > 0 {
err = fmt.Errorf("failed duplicate check")
}

return duplicates, err
}

// testEventFromInternal takes an internal audit event and returns a test event
func testEventFromInternal(e *auditinternal.Event) (AuditEvent, error) {
return testEventFromInternalFiltered(e, nil)
}

// testEventFromInternalFiltered takes an internal audit event and returns a test event, customAnnotationsFilter
// controls which audit annotations are added to AuditEvent.CustomAuditAnnotations.
// If the customAnnotationsFilter is nil, AuditEvent.CustomAuditAnnotations will be empty.
10
vendor/k8s.io/kubernetes/test/utils/conditions.go
generated
vendored
10
vendor/k8s.io/kubernetes/test/utils/conditions.go
generated
vendored
@ -103,13 +103,3 @@ func TerminatedContainers(pod *v1.Pod) map[string]string {
}
return states
}

// PodNotReady checks whether pod p has a ready condition of status false.
func PodNotReady(p *v1.Pod) (bool, error) {
// Check the ready condition is false.
if podutil.IsPodReady(p) {
return false, fmt.Errorf("pod '%s' on '%s' didn't have condition {%v %v}; conditions: %v",
p.ObjectMeta.Name, p.Spec.NodeName, v1.PodReady, v1.ConditionFalse, p.Status.Conditions)
}
return true, nil
}
105
vendor/k8s.io/kubernetes/test/utils/create_resources.go
generated
vendored
105
vendor/k8s.io/kubernetes/test/utils/create_resources.go
generated
vendored
@ -24,9 +24,6 @@ import (
"time"

apps "k8s.io/api/apps/v1"
batch "k8s.io/api/batch/v1"
storage "k8s.io/api/storage/v1"

v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -121,74 +118,6 @@ func CreateDeploymentWithRetries(c clientset.Interface, namespace string, obj *a
return RetryWithExponentialBackOff(createFunc)
}

func CreateDaemonSetWithRetries(c clientset.Interface, namespace string, obj *apps.DaemonSet) error {
if obj == nil {
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.AppsV1().DaemonSets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
if isGenerateNameConflict(obj.ObjectMeta, err) {
return false, nil
}
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
}

func CreateJobWithRetries(c clientset.Interface, namespace string, obj *batch.Job) error {
if obj == nil {
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.BatchV1().Jobs(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
if isGenerateNameConflict(obj.ObjectMeta, err) {
return false, nil
}
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
}

func CreateSecretWithRetries(c clientset.Interface, namespace string, obj *v1.Secret) error {
if obj == nil {
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.CoreV1().Secrets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
if isGenerateNameConflict(obj.ObjectMeta, err) {
return false, nil
}
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
}

func CreateConfigMapWithRetries(c clientset.Interface, namespace string, obj *v1.ConfigMap) error {
if obj == nil {
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.CoreV1().ConfigMaps(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
if isGenerateNameConflict(obj.ObjectMeta, err) {
return false, nil
}
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
}

func CreateServiceWithRetries(c clientset.Interface, namespace string, obj *v1.Service) error {
if obj == nil {
return fmt.Errorf("object provided to create is empty")
@ -206,40 +135,6 @@ func CreateServiceWithRetries(c clientset.Interface, namespace string, obj *v1.S
return RetryWithExponentialBackOff(createFunc)
}

func CreateStorageClassWithRetries(c clientset.Interface, obj *storage.StorageClass) error {
if obj == nil {
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.StorageV1().StorageClasses().Create(context.TODO(), obj, metav1.CreateOptions{})
if isGenerateNameConflict(obj.ObjectMeta, err) {
return false, nil
}
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
}

func CreateResourceQuotaWithRetries(c clientset.Interface, namespace string, obj *v1.ResourceQuota) error {
if obj == nil {
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
if isGenerateNameConflict(obj.ObjectMeta, err) {
return false, nil
}
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
}

func CreatePersistentVolumeWithRetries(c clientset.Interface, obj *v1.PersistentVolume) error {
if obj == nil {
return fmt.Errorf("object provided to create is empty")
12
vendor/k8s.io/kubernetes/test/utils/delete_resources.go
generated
vendored
12
vendor/k8s.io/kubernetes/test/utils/delete_resources.go
generated
vendored
@ -22,7 +22,6 @@ import (
"context"
"fmt"

apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
clientset "k8s.io/client-go/kubernetes"
@ -56,14 +55,3 @@ func DeleteResource(c clientset.Interface, kind schema.GroupKind, namespace, nam
return fmt.Errorf("unsupported kind when deleting: %v", kind)
}
}

func DeleteResourceWithRetries(c clientset.Interface, kind schema.GroupKind, namespace, name string, options metav1.DeleteOptions) error {
deleteFunc := func() (bool, error) {
err := DeleteResource(c, kind, namespace, name, options)
if err == nil || apierrors.IsNotFound(err) {
return true, nil
}
return false, fmt.Errorf("failed to delete object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(deleteFunc)
}
19
vendor/k8s.io/kubernetes/test/utils/deployment.go
generated
vendored
19
vendor/k8s.io/kubernetes/test/utils/deployment.go
generated
vendored
@ -328,25 +328,6 @@ func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string,
}, desiredGeneration, 2*time.Second, 1*time.Minute)
}

// WaitForDeploymentRollbackCleared waits for given deployment either started rolling back or doesn't need to rollback.
func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string, pollInterval, pollTimeout time.Duration) error {
err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
// Rollback not set or is kicked off
if deployment.Annotations[apps.DeprecatedRollbackTo] == "" {
return true, nil
}
return false, nil
})
if err != nil {
return fmt.Errorf("error waiting for deployment %s rollbackTo to be cleared: %v", deploymentName, err)
}
return nil
}

// WaitForDeploymentUpdatedReplicasGTE waits for given deployment to be observed by the controller and has at least a number of updatedReplicas
func WaitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64, pollInterval, pollTimeout time.Duration) error {
var deployment *apps.Deployment
18
vendor/k8s.io/kubernetes/test/utils/image/manifest.go
generated
vendored
18
vendor/k8s.io/kubernetes/test/utils/image/manifest.go
generated
vendored
@ -206,29 +206,21 @@ const (
Pause
// Perl image
Perl
// PrometheusDummyExporter image
PrometheusDummyExporter
// PrometheusToSd image
PrometheusToSd
// Redis image
Redis
// RegressionIssue74839 image
RegressionIssue74839
// ResourceConsumer image
ResourceConsumer
// SdDummyExporter image
SdDummyExporter
// VolumeNFSServer image
VolumeNFSServer
// VolumeISCSIServer image
VolumeISCSIServer
// VolumeRBDServer image
VolumeRBDServer
)

func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config) {
configs := map[ImageID]Config{}
configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.47"}
configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.52"}
configs[AgnhostPrivate] = Config{list.PrivateRegistry, "agnhost", "2.6"}
configs[AuthenticatedAlpine] = Config{list.GcAuthenticatedRegistry, "alpine", "3.7"}
configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"}
@ -238,7 +230,7 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config
configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"}
configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.3"}
configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.5.6"}
configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.12-0"}
configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.15-0"}
configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"}
configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"}
configs[InvalidRegistryImage] = Config{list.InvalidRegistry, "alpine", "3.1"}
@ -255,17 +247,13 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config
configs[Nonewprivs] = Config{list.PromoterE2eRegistry, "nonewprivs", "1.3"}
configs[NonRoot] = Config{list.PromoterE2eRegistry, "nonroot", "1.4"}
// Pause - when these values are updated, also update cmd/kubelet/app/options/container_runtime.go
configs[Pause] = Config{list.GcRegistry, "pause", "3.9"}
configs[Pause] = Config{list.GcRegistry, "pause", "3.10"}
configs[Perl] = Config{list.PromoterE2eRegistry, "perl", "5.26"}
configs[PrometheusDummyExporter] = Config{list.GcRegistry, "prometheus-dummy-exporter", "v0.1.0"}
configs[PrometheusToSd] = Config{list.GcRegistry, "prometheus-to-sd", "v0.5.0"}
configs[Redis] = Config{list.PromoterE2eRegistry, "redis", "5.0.5-3"}
configs[RegressionIssue74839] = Config{list.PromoterE2eRegistry, "regression-issue-74839", "1.2"}
configs[ResourceConsumer] = Config{list.PromoterE2eRegistry, "resource-consumer", "1.13"}
configs[SdDummyExporter] = Config{list.GcRegistry, "sd-dummy-exporter", "v0.2.0"}
configs[VolumeNFSServer] = Config{list.PromoterE2eRegistry, "volume/nfs", "1.4"}
configs[VolumeISCSIServer] = Config{list.PromoterE2eRegistry, "volume/iscsi", "2.6"}
configs[VolumeRBDServer] = Config{list.PromoterE2eRegistry, "volume/rbd", "1.0.6"}

// This adds more config entries. Those have no pre-defined ImageID number,
// but will be used via ReplaceRegistryInImageURL when deploying
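The bumped image tags above are consumed indirectly: e2e code looks images up by ImageID through this package instead of hard-coding registry/tag strings (the runners.go change further down does exactly that for the pause image). A minimal sketch of that lookup, assuming only the exported imageutils.GetE2EImage helper referenced in this diff:

	package main

	import (
		"fmt"

		imageutils "k8s.io/kubernetes/test/utils/image"
	)

	func main() {
		// Resolves to the registry/name:tag configured in initImageConfigs,
		// e.g. the pause image tag updated by this commit.
		fmt.Println(imageutils.GetE2EImage(imageutils.Pause))
	}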
1
vendor/k8s.io/kubernetes/test/utils/kubeconfig/kubeconfig.go
generated
vendored
1
vendor/k8s.io/kubernetes/test/utils/kubeconfig/kubeconfig.go
generated
vendored
@ -45,6 +45,7 @@ func CreateKubeConfig(clientCfg *rest.Config) *clientcmdapi.Config {

cluster := clientcmdapi.NewCluster()
cluster.Server = clientCfg.Host
cluster.TLSServerName = clientCfg.ServerName
cluster.CertificateAuthority = clientCfg.CAFile
if len(cluster.CertificateAuthority) == 0 {
cluster.CertificateAuthorityData = clientCfg.CAData
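The added line carries rest.Config.ServerName through to the generated kubeconfig, so clusters reached through an IP or a proxy still verify the intended TLS server name. A minimal sketch of how the helper might be used to persist a kubeconfig for a test client, assuming only the CreateKubeConfig signature shown above (the output path is illustrative):

	import (
		"k8s.io/client-go/rest"
		"k8s.io/client-go/tools/clientcmd"
		"k8s.io/kubernetes/test/utils/kubeconfig"
	)

	func writeKubeconfig(restCfg *rest.Config) error {
		// CreateKubeConfig copies host, TLS settings (now including ServerName)
		// and credentials from the rest.Config into a clientcmdapi.Config.
		cfg := kubeconfig.CreateKubeConfig(restCfg)
		return clientcmd.WriteToFile(*cfg, "/tmp/test-kubeconfig") // hypothetical path
	}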
23
vendor/k8s.io/kubernetes/test/utils/replicaset.go
generated
vendored
23
vendor/k8s.io/kubernetes/test/utils/replicaset.go
generated
vendored
@ -67,26 +67,3 @@ func WaitRSStable(t *testing.T, clientSet clientset.Interface, rs *apps.ReplicaS
}
return nil
}

func UpdateReplicaSetStatusWithRetries(c clientset.Interface, namespace, name string, applyUpdate UpdateReplicaSetFunc, logf LogfFn, pollInterval, pollTimeout time.Duration) (*apps.ReplicaSet, error) {
var rs *apps.ReplicaSet
var updateErr error
pollErr := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
var err error
if rs, err = c.AppsV1().ReplicaSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(rs)
if rs, err = c.AppsV1().ReplicaSets(namespace).UpdateStatus(context.TODO(), rs, metav1.UpdateOptions{}); err == nil {
logf("Updating replica set %q", name)
return true, nil
}
updateErr = err
return false, nil
})
if wait.Interrupted(pollErr) {
pollErr = fmt.Errorf("couldn't apply the provided update to replicaset %q: %v", name, updateErr)
}
return rs, pollErr
}
551
vendor/k8s.io/kubernetes/test/utils/runners.go
generated
vendored
551
vendor/k8s.io/kubernetes/test/utils/runners.go
generated
vendored
@ -26,11 +26,9 @@ import (
"time"

apps "k8s.io/api/apps/v1"
batch "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -42,13 +40,10 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
"k8s.io/client-go/util/workqueue"
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
api "k8s.io/kubernetes/pkg/apis/core"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
imageutils "k8s.io/kubernetes/test/utils/image"
"k8s.io/utils/pointer"

"k8s.io/klog/v2"
@ -66,7 +61,7 @@ func removePtr(replicas *int32) int32 {
return *replicas
}

func WaitUntilPodIsScheduled(ctx context.Context, c clientset.Interface, name, namespace string, timeout time.Duration) (*v1.Pod, error) {
func waitUntilPodIsScheduled(ctx context.Context, c clientset.Interface, name, namespace string, timeout time.Duration) (*v1.Pod, error) {
// Wait until it's scheduled
p, err := c.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{ResourceVersion: "0"})
if err == nil && p.Spec.NodeName != "" {
@ -90,7 +85,7 @@ func RunPodAndGetNodeName(ctx context.Context, c clientset.Interface, pod *v1.Po
if err := CreatePodWithRetries(c, namespace, pod); err != nil {
return "", err
}
p, err := WaitUntilPodIsScheduled(ctx, c, name, namespace, timeout)
p, err := waitUntilPodIsScheduled(ctx, c, name, namespace, timeout)
if err != nil {
return "", err
}
@ -218,11 +213,11 @@ type podInfo struct {
phase string
}

// PodDiff is a map of pod name to podInfos
type PodDiff map[string]*podInfo
// podDiff is a map of pod name to podInfos
type podDiff map[string]*podInfo

// Print formats and prints the given PodDiff.
func (p PodDiff) String(ignorePhases sets.String) string {
// Print formats and prints the given podDiff.
func (p podDiff) String(ignorePhases sets.String) string {
ret := ""
for name, info := range p {
if ignorePhases.Has(info.phase) {
@ -259,7 +254,7 @@ func (p PodDiff) String(ignorePhases sets.String) string {

// DeletedPods returns a slice of pods that were present at the beginning
// and then disappeared.
func (p PodDiff) DeletedPods() []string {
func (p podDiff) DeletedPods() []string {
var deletedPods []string
for podName, podInfo := range p {
if podInfo.hostname == nonExist {
@ -269,9 +264,9 @@ func (p PodDiff) DeletedPods() []string {
return deletedPods
}

// Diff computes a PodDiff given 2 lists of pods.
func Diff(oldPods []*v1.Pod, curPods []*v1.Pod) PodDiff {
podInfoMap := PodDiff{}
// diff computes a podDiff given 2 lists of pods.
func diff(oldPods []*v1.Pod, curPods []*v1.Pod) podDiff {
podInfoMap := podDiff{}

// New pods will show up in the curPods list but not in oldPods. They have oldhostname/phase == nonexist.
for _, pod := range curPods {
@ -301,22 +296,6 @@ func RunDeployment(ctx context.Context, config DeploymentConfig) error {
return config.start(ctx)
}

func (config *DeploymentConfig) Run(ctx context.Context) error {
return RunDeployment(ctx, *config)
}

func (config *DeploymentConfig) GetKind() schema.GroupKind {
return extensionsinternal.Kind("Deployment")
}

func (config *DeploymentConfig) GetGroupResource() schema.GroupResource {
return extensionsinternal.Resource("deployments")
}

func (config *DeploymentConfig) GetGroupVersionResource() schema.GroupVersionResource {
return extensionsinternal.SchemeGroupVersion.WithResource("deployments")
}

func (config *DeploymentConfig) create() error {
deployment := &apps.Deployment{
ObjectMeta: metav1.ObjectMeta{
@ -388,22 +367,6 @@ func RunReplicaSet(ctx context.Context, config ReplicaSetConfig) error {
return config.start(ctx)
}

func (config *ReplicaSetConfig) Run(ctx context.Context) error {
return RunReplicaSet(ctx, *config)
}

func (config *ReplicaSetConfig) GetKind() schema.GroupKind {
return extensionsinternal.Kind("ReplicaSet")
}

func (config *ReplicaSetConfig) GetGroupResource() schema.GroupResource {
return extensionsinternal.Resource("replicasets")
}

func (config *ReplicaSetConfig) GetGroupVersionResource() schema.GroupVersionResource {
return extensionsinternal.SchemeGroupVersion.WithResource("replicasets")
}

func (config *ReplicaSetConfig) create() error {
rs := &apps.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
@ -459,81 +422,6 @@ func (config *ReplicaSetConfig) create() error {
return nil
}

// RunJob launches (and verifies correctness of) a Job
// and will wait for all pods it spawns to become "Running".
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunJob(ctx context.Context, config JobConfig) error {
err := config.create()
if err != nil {
return err
}
return config.start(ctx)
}

func (config *JobConfig) Run(ctx context.Context) error {
return RunJob(ctx, *config)
}

func (config *JobConfig) GetKind() schema.GroupKind {
return batchinternal.Kind("Job")
}

func (config *JobConfig) GetGroupResource() schema.GroupResource {
return batchinternal.Resource("jobs")
}

func (config *JobConfig) GetGroupVersionResource() schema.GroupVersionResource {
return batchinternal.SchemeGroupVersion.WithResource("jobs")
}

func (config *JobConfig) create() error {
job := &batch.Job{
ObjectMeta: metav1.ObjectMeta{
Name: config.Name,
},
Spec: batch.JobSpec{
Parallelism: pointer.Int32(int32(config.Replicas)),
Completions: pointer.Int32(int32(config.Replicas)),
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"name": config.Name},
Annotations: config.Annotations,
},
Spec: v1.PodSpec{
Affinity: config.Affinity,
TerminationGracePeriodSeconds: config.getTerminationGracePeriodSeconds(nil),
Containers: []v1.Container{
{
Name: config.Name,
Image: config.Image,
Command: config.Command,
Lifecycle: config.Lifecycle,
SecurityContext: config.SecurityContext,
},
},
RestartPolicy: v1.RestartPolicyOnFailure,
},
},
},
}

if len(config.SecretNames) > 0 {
attachSecrets(&job.Spec.Template, config.SecretNames)
}
if len(config.ConfigMapNames) > 0 {
attachConfigMaps(&job.Spec.Template, config.ConfigMapNames)
}

config.applyTo(&job.Spec.Template)

if err := CreateJobWithRetries(config.Client, config.Namespace, job); err != nil {
return fmt.Errorf("error creating job: %v", err)
}
config.RCConfigLog("Created job with name: %v, namespace: %v, parallelism/completions: %v", job.Name, config.Namespace, job.Spec.Parallelism)
return nil
}

// RunRC launches (and verifies correctness of) a Replication Controller
// and will wait for all pods it spawns to become "Running".
// It's the caller's responsibility to clean up externally (i.e. use the
@ -546,55 +434,6 @@ func RunRC(ctx context.Context, config RCConfig) error {
return config.start(ctx)
}

func (config *RCConfig) Run(ctx context.Context) error {
return RunRC(ctx, *config)
}

func (config *RCConfig) GetName() string {
return config.Name
}

func (config *RCConfig) GetNamespace() string {
return config.Namespace
}

func (config *RCConfig) GetKind() schema.GroupKind {
return api.Kind("ReplicationController")
}

func (config *RCConfig) GetGroupResource() schema.GroupResource {
return api.Resource("replicationcontrollers")
}

func (config *RCConfig) GetGroupVersionResource() schema.GroupVersionResource {
return api.SchemeGroupVersion.WithResource("replicationcontrollers")
}

func (config *RCConfig) GetClient() clientset.Interface {
return config.Client
}

func (config *RCConfig) GetScalesGetter() scaleclient.ScalesGetter {
return config.ScalesGetter
}

func (config *RCConfig) SetClient(c clientset.Interface) {
config.Client = c
}

func (config *RCConfig) SetScalesClient(getter scaleclient.ScalesGetter) {
config.ScalesGetter = getter
}

func (config *RCConfig) GetReplicas() int {
return config.Replicas
}

func (config *RCConfig) GetLabelValue(key string) (string, bool) {
value, found := config.Labels[key]
return value, found
}

func (config *RCConfig) create() error {
dnsDefault := v1.DNSDefault
if config.DNSPolicy == nil {
@ -737,7 +576,7 @@ func (s *RCStartupStatus) String(name string) string {
name, len(s.Created), s.Expected, s.Running, s.Pending, s.Waiting, s.Inactive, s.Terminating, s.Unknown, s.RunningButNotReady)
}

func ComputeRCStartupStatus(pods []*v1.Pod, expected int) RCStartupStatus {
func computeRCStartupStatus(pods []*v1.Pod, expected int) RCStartupStatus {
startupStatus := RCStartupStatus{
Expected: expected,
Created: make([]*v1.Pod, 0, expected),
@ -819,7 +658,7 @@ func (config *RCConfig) start(ctx context.Context) error {
time.Sleep(interval)

pods := ps.List()
startupStatus := ComputeRCStartupStatus(pods, config.Replicas)
startupStatus := computeRCStartupStatus(pods, config.Replicas)

if config.CreatedPods != nil {
*config.CreatedPods = startupStatus.Created
@ -843,7 +682,7 @@ func (config *RCConfig) start(ctx context.Context) error {
return fmt.Errorf("%d containers failed which is more than allowed %d", startupStatus.FailedContainers, maxContainerFailures)
}

diff := Diff(oldPods, pods)
diff := diff(oldPods, pods)
deletedPods := diff.DeletedPods()
podDeletionsCount += len(deletedPods)
if podDeletionsCount > config.MaxAllowedPodDeletions {
@ -950,16 +789,6 @@ func WaitForEnoughPodsWithLabelRunning(c clientset.Interface, ns string, label l
return nil
}

type CountToStrategy struct {
Count int
Strategy PrepareNodeStrategy
}

type TestNodePreparer interface {
PrepareNodes(ctx context.Context, nextNodeIndex int) error
CleanupNodes(ctx context.Context) error
}

type PrepareNodeStrategy interface {
// Modify pre-created Node objects before the test starts.
PreparePatch(node *v1.Node) []byte
@ -1242,46 +1071,6 @@ func DoPrepareNode(ctx context.Context, client clientset.Interface, node *v1.Nod
return nil
}

func DoCleanupNode(ctx context.Context, client clientset.Interface, nodeName string, strategy PrepareNodeStrategy) error {
var err error
for attempt := 0; attempt < retries; attempt++ {
var node *v1.Node
node, err = client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("skipping cleanup of Node: failed to get Node %v: %v", nodeName, err)
}
updatedNode := strategy.CleanupNode(ctx, node)
if apiequality.Semantic.DeepEqual(node, updatedNode) {
return nil
}
if _, err = client.CoreV1().Nodes().Update(ctx, updatedNode, metav1.UpdateOptions{}); err == nil {
break
}
if !apierrors.IsConflict(err) {
return fmt.Errorf("error when updating Node %v: %v", nodeName, err)
}
time.Sleep(100 * time.Millisecond)
}
if err != nil {
return fmt.Errorf("too many conflicts when trying to cleanup Node %v: %s", nodeName, err)
}

for attempt := 0; attempt < retries; attempt++ {
err = strategy.CleanupDependentObjects(ctx, nodeName, client)
if err == nil {
break
}
if !apierrors.IsConflict(err) {
return fmt.Errorf("error when cleaning up Node %v objects: %v", nodeName, err)
}
time.Sleep(100 * time.Millisecond)
}
if err != nil {
return fmt.Errorf("too many conflicts when trying to cleanup Node %v objects: %s", nodeName, err)
}
return nil
}

type TestPodCreateStrategy func(ctx context.Context, client clientset.Interface, namespace string, podCount int) error

type CountToPodStrategy struct {
@ -1296,6 +1085,16 @@ func NewTestPodCreatorConfig() *TestPodCreatorConfig {
return &config
}

type CountToStrategy struct {
Count int
Strategy PrepareNodeStrategy
}

type TestNodePreparer interface {
PrepareNodes(ctx context.Context, nextNodeIndex int) error
CleanupNodes(ctx context.Context) error
}

func (c *TestPodCreatorConfig) AddStrategy(
namespace string, podCount int, strategy TestPodCreateStrategy) {
(*c)[namespace] = append((*c)[namespace], CountToPodStrategy{Count: podCount, Strategy: strategy})
@ -1329,7 +1128,7 @@ func MakePodSpec() v1.PodSpec {
return v1.PodSpec{
Containers: []v1.Container{{
Name: "pause",
Image: "registry.k8s.io/pause:3.9",
Image: imageutils.GetE2EImage(imageutils.Pause),
Ports: []v1.ContainerPort{{ContainerPort: 80}},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
@ -1352,14 +1151,22 @@ func makeCreatePod(client clientset.Interface, namespace string, podTemplate *v1
return nil
}

func CreatePod(ctx context.Context, client clientset.Interface, namespace string, podCount int, podTemplate *v1.Pod) error {
func CreatePod(ctx context.Context, client clientset.Interface, namespace string, podCount int, podTemplate PodTemplate) error {
var createError error
lock := sync.Mutex{}
createPodFunc := func(i int) {
pod, err := podTemplate.GetPodTemplate(i, podCount)
if err != nil {
lock.Lock()
defer lock.Unlock()
createError = err
return
}
pod = pod.DeepCopy()
// client-go writes into the object that is passed to Create,
// causing a data race unless we create a new copy for each
// parallel call.
if err := makeCreatePod(client, namespace, podTemplate.DeepCopy()); err != nil {
if err := makeCreatePod(client, namespace, pod); err != nil {
lock.Lock()
defer lock.Unlock()
createError = err
@ -1374,7 +1181,7 @@ func CreatePod(ctx context.Context, client clientset.Interface, namespace string
return createError
}

func CreatePodWithPersistentVolume(ctx context.Context, client clientset.Interface, namespace string, claimTemplate *v1.PersistentVolumeClaim, factory volumeFactory, podTemplate *v1.Pod, count int, bindVolume bool) error {
func CreatePodWithPersistentVolume(ctx context.Context, client clientset.Interface, namespace string, claimTemplate *v1.PersistentVolumeClaim, factory volumeFactory, podTemplate PodTemplate, count int, bindVolume bool) error {
var createError error
lock := sync.Mutex{}
createPodFunc := func(i int) {
@ -1435,7 +1242,14 @@ func CreatePodWithPersistentVolume(ctx context.Context, client clientset.Interfa
}

// pod
pod := podTemplate.DeepCopy()
pod, err := podTemplate.GetPodTemplate(i, count)
if err != nil {
lock.Lock()
defer lock.Unlock()
createError = fmt.Errorf("error getting pod template: %s", err)
return
}
pod = pod.DeepCopy()
pod.Spec.Volumes = []v1.Volume{
{
Name: "vol",
@ -1462,29 +1276,7 @@ func CreatePodWithPersistentVolume(ctx context.Context, client clientset.Interfa
return createError
}

func createController(client clientset.Interface, controllerName, namespace string, podCount int, podTemplate *v1.Pod) error {
rc := &v1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: controllerName,
},
Spec: v1.ReplicationControllerSpec{
Replicas: pointer.Int32(int32(podCount)),
Selector: map[string]string{"name": controllerName},
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"name": controllerName},
},
Spec: podTemplate.Spec,
},
},
}
if err := CreateRCWithRetries(client, namespace, rc); err != nil {
return fmt.Errorf("error creating replication controller: %v", err)
}
return nil
}

func NewCustomCreatePodStrategy(podTemplate *v1.Pod) TestPodCreateStrategy {
func NewCustomCreatePodStrategy(podTemplate PodTemplate) TestPodCreateStrategy {
return func(ctx context.Context, client clientset.Interface, namespace string, podCount int) error {
return CreatePod(ctx, client, namespace, podCount, podTemplate)
}
@ -1493,113 +1285,37 @@ func NewCustomCreatePodStrategy(podTemplate *v1.Pod) TestPodCreateStrategy {
// volumeFactory creates a unique PersistentVolume for given integer.
type volumeFactory func(uniqueID int) *v1.PersistentVolume

func NewCreatePodWithPersistentVolumeStrategy(claimTemplate *v1.PersistentVolumeClaim, factory volumeFactory, podTemplate *v1.Pod) TestPodCreateStrategy {
// PodTemplate is responsible for creating a v1.Pod instance that is ready
// to be sent to the API server.
type PodTemplate interface {
// GetPodTemplate returns a pod template for one out of many different pods.
// Pods with numbers in the range [index, index+count-1] will be created
// based on what GetPodTemplate returns. It gets called multiple times
// with a fixed index and increasing count parameters. This number can,
// but doesn't have to be, used to modify parts of the pod spec like
// for example a named reference to some other object.
GetPodTemplate(index, count int) (*v1.Pod, error)
}

// StaticPodTemplate returns an implementation of PodTemplate for a fixed pod that is the same regardless of the index.
func StaticPodTemplate(pod *v1.Pod) PodTemplate {
return (*staticPodTemplate)(pod)
}

type staticPodTemplate v1.Pod

// GetPodTemplate implements [PodTemplate.GetPodTemplate] by returning the same pod
// for each call.
func (s *staticPodTemplate) GetPodTemplate(index, count int) (*v1.Pod, error) {
return (*v1.Pod)(s), nil
}

func NewCreatePodWithPersistentVolumeStrategy(claimTemplate *v1.PersistentVolumeClaim, factory volumeFactory, podTemplate PodTemplate) TestPodCreateStrategy {
return func(ctx context.Context, client clientset.Interface, namespace string, podCount int) error {
return CreatePodWithPersistentVolume(ctx, client, namespace, claimTemplate, factory, podTemplate, podCount, true /* bindVolume */)
}
}

func makeUnboundPersistentVolumeClaim(storageClass string) *v1.PersistentVolumeClaim {
return &v1.PersistentVolumeClaim{
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany},
StorageClassName: &storageClass,
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
},
},
},
}
}

func NewCreatePodWithPersistentVolumeWithFirstConsumerStrategy(factory volumeFactory, podTemplate *v1.Pod) TestPodCreateStrategy {
return func(ctx context.Context, client clientset.Interface, namespace string, podCount int) error {
volumeBindingMode := storagev1.VolumeBindingWaitForFirstConsumer
storageClass := &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: "storagev1-class-1",
},
Provisioner: "kubernetes.io/gce-pd",
VolumeBindingMode: &volumeBindingMode,
}
claimTemplate := makeUnboundPersistentVolumeClaim(storageClass.Name)

if err := CreateStorageClassWithRetries(client, storageClass); err != nil {
return fmt.Errorf("failed to create storagev1 class: %v", err)
}

factoryWithStorageClass := func(i int) *v1.PersistentVolume {
pv := factory(i)
pv.Spec.StorageClassName = storageClass.Name
return pv
}

return CreatePodWithPersistentVolume(ctx, client, namespace, claimTemplate, factoryWithStorageClass, podTemplate, podCount, false /* bindVolume */)
}
}

func NewSimpleCreatePodStrategy() TestPodCreateStrategy {
basePod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "simple-pod-",
},
Spec: MakePodSpec(),
}
return NewCustomCreatePodStrategy(basePod)
}

func NewSimpleWithControllerCreatePodStrategy(controllerName string) TestPodCreateStrategy {
return func(ctx context.Context, client clientset.Interface, namespace string, podCount int) error {
basePod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: controllerName + "-pod-",
Labels: map[string]string{"name": controllerName},
},
Spec: MakePodSpec(),
}
if err := createController(client, controllerName, namespace, podCount, basePod); err != nil {
return err
}
return CreatePod(ctx, client, namespace, podCount, basePod)
}
}

type SecretConfig struct {
Content map[string]string
Client clientset.Interface
Name string
Namespace string
// If set this function will be used to print log lines instead of klog.
LogFunc func(fmt string, args ...interface{})
}

func (config *SecretConfig) Run() error {
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: config.Name,
},
StringData: map[string]string{},
}
for k, v := range config.Content {
secret.StringData[k] = v
}

if err := CreateSecretWithRetries(config.Client, config.Namespace, secret); err != nil {
return fmt.Errorf("error creating secret: %v", err)
}
config.LogFunc("Created secret %v/%v", config.Namespace, config.Name)
return nil
}

func (config *SecretConfig) Stop() error {
if err := DeleteResourceWithRetries(config.Client, api.Kind("Secret"), config.Namespace, config.Name, metav1.DeleteOptions{}); err != nil {
return fmt.Errorf("error deleting secret: %v", err)
}
config.LogFunc("Deleted secret %v/%v", config.Namespace, config.Name)
return nil
}

// TODO: attach secrets using different possibilities: env vars, image pull secrets.
func attachSecrets(template *v1.PodTemplateSpec, secretNames []string) {
volumes := make([]v1.Volume, 0, len(secretNames))
@ -1623,41 +1339,6 @@ func attachSecrets(template *v1.PodTemplateSpec, secretNames []string) {
template.Spec.Containers[0].VolumeMounts = mounts
}

type ConfigMapConfig struct {
Content map[string]string
Client clientset.Interface
Name string
Namespace string
// If set this function will be used to print log lines instead of klog.
LogFunc func(fmt string, args ...interface{})
}

func (config *ConfigMapConfig) Run() error {
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: config.Name,
},
Data: map[string]string{},
}
for k, v := range config.Content {
configMap.Data[k] = v
}

if err := CreateConfigMapWithRetries(config.Client, config.Namespace, configMap); err != nil {
return fmt.Errorf("error creating configmap: %v", err)
}
config.LogFunc("Created configmap %v/%v", config.Namespace, config.Name)
return nil
}

func (config *ConfigMapConfig) Stop() error {
if err := DeleteResourceWithRetries(config.Client, api.Kind("ConfigMap"), config.Namespace, config.Name, metav1.DeleteOptions{}); err != nil {
return fmt.Errorf("error deleting configmap: %v", err)
}
config.LogFunc("Deleted configmap %v/%v", config.Namespace, config.Name)
return nil
}

// TODO: attach configmaps using different possibilities: env vars.
func attachConfigMaps(template *v1.PodTemplateSpec, configMapNames []string) {
volumes := make([]v1.Volume, 0, len(configMapNames))
@ -1740,93 +1421,3 @@ func attachServiceAccountTokenProjection(template *v1.PodTemplateSpec, name stri
},
})
}

type DaemonConfig struct {
Client clientset.Interface
Name string
Namespace string
Image string
// If set this function will be used to print log lines instead of klog.
LogFunc func(fmt string, args ...interface{})
// How long we wait for DaemonSet to become running.
Timeout time.Duration
}

func (config *DaemonConfig) Run(ctx context.Context) error {
if config.Image == "" {
config.Image = "registry.k8s.io/pause:3.9"
}
nameLabel := map[string]string{
"name": config.Name + "-daemon",
}
daemon := &apps.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: config.Name,
},
Spec: apps.DaemonSetSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: nameLabel,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: config.Name,
Image: config.Image,
},
},
},
},
},
}

if err := CreateDaemonSetWithRetries(config.Client, config.Namespace, daemon); err != nil {
return fmt.Errorf("error creating daemonset: %v", err)
}

var nodes *v1.NodeList
var err error
for i := 0; i < retries; i++ {
// Wait for all daemons to be running
nodes, err = config.Client.CoreV1().Nodes().List(ctx, metav1.ListOptions{ResourceVersion: "0"})
if err == nil {
break
} else if i+1 == retries {
return fmt.Errorf("error listing Nodes while waiting for DaemonSet %v: %v", config.Name, err)
}
}

timeout := config.Timeout
if timeout <= 0 {
timeout = 5 * time.Minute
}

ps, err := NewPodStore(config.Client, config.Namespace, labels.SelectorFromSet(nameLabel), fields.Everything())
if err != nil {
return err
}
defer ps.Stop()

err = wait.Poll(time.Second, timeout, func() (bool, error) {
pods := ps.List()

nodeHasDaemon := sets.NewString()
for _, pod := range pods {
podReady, _ := PodRunningReady(pod)
if pod.Spec.NodeName != "" && podReady {
nodeHasDaemon.Insert(pod.Spec.NodeName)
}
}

running := len(nodeHasDaemon)
config.LogFunc("Found %v/%v Daemons %v running", running, config.Name, len(nodes.Items))
return running == len(nodes.Items), nil
})
if err != nil {
config.LogFunc("Timed out while waiting for DaemonSet %v/%v to be running.", config.Namespace, config.Name)
} else {
config.LogFunc("Created Daemon %v/%v", config.Namespace, config.Name)
}

return err
}
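The PodTemplate/StaticPodTemplate additions above change CreatePod and the pod-creation strategies from taking a *v1.Pod to taking a PodTemplate, so callers that previously passed a fixed pod now wrap it. A minimal sketch of such a caller, assuming only the signatures visible in this diff and that the package is imported as testutils (client, namespace, and pod values are illustrative):

	import (
		"context"

		v1 "k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		clientset "k8s.io/client-go/kubernetes"
		testutils "k8s.io/kubernetes/test/utils"
	)

	func createFixedPods(ctx context.Context, c clientset.Interface, ns string, count int) error {
		basePod := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{GenerateName: "example-pod-"},
			Spec:       testutils.MakePodSpec(),
		}
		// StaticPodTemplate adapts a fixed *v1.Pod to the new PodTemplate interface;
		// CreatePod then creates `count` copies of it in the given namespace.
		return testutils.CreatePod(ctx, c, ns, count, testutils.StaticPodTemplate(basePod))
	}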