Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 10:33:35 +00:00

commit aa698bc3e1 (parent e077c1fdf5), committed by mergify[bot]

rebase: update kubernetes and libraries to v1.22.0 version

Kubernetes v1.22 has been released, and this commit updates the Ceph-CSI dependencies to the same version.

Signed-off-by: Humble Chirammal <hchiramm@redhat.com>

vendor/k8s.io/kubernetes/test/e2e/framework/auth/helpers.go (6 changes, generated, vendored)

@@ -18,10 +18,10 @@ package auth

 import (
 	"context"
+	"fmt"
 	"sync"
 	"time"

-	"github.com/pkg/errors"
 	authorizationv1 "k8s.io/api/authorization/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -99,7 +99,7 @@ func BindClusterRole(c bindingsGetter, clusterRole, ns string, subjects ...rbacv
 	}, metav1.CreateOptions{})

 	if err != nil {
-		return errors.Wrapf(err, "binding clusterrole/%s for %q for %v", clusterRole, ns, subjects)
+		return fmt.Errorf("binding clusterrole/%s for %q for %v: %w", clusterRole, ns, subjects, err)
 	}

 	return nil

@@ -136,7 +136,7 @@ func bindInNamespace(c bindingsGetter, roleType, role, ns string, subjects ...rb
 	}, metav1.CreateOptions{})

 	if err != nil {
-		return errors.Wrapf(err, "binding %s/%s into %q for %v", roleType, role, ns, subjects)
+		return fmt.Errorf("binding %s/%s into %q for %v: %w", roleType, role, ns, subjects, err)
 	}

 	return nil
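
The hunks above are part of a repo-wide migration in this rebase: error wrapping moves from github.com/pkg/errors to the standard library's %w verb (Go 1.13+). A minimal standalone sketch of the before/after, and of how callers keep matching the underlying error; the sentinel name is invented for illustration:

package main

import (
	"errors"
	"fmt"
)

var errUnderlying = errors.New("underlying failure") // hypothetical sentinel

func bind() error {
	// Before: pkgerrors.Wrapf(err, "binding clusterrole/%s", "admin")
	// After: %w wraps the error while keeping the chain inspectable.
	return fmt.Errorf("binding clusterrole/%s: %w", "admin", errUnderlying)
}

func main() {
	err := bind()
	fmt.Println(errors.Is(err, errUnderlying)) // true: errors.Is walks the %w chain
}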

vendor/k8s.io/kubernetes/test/e2e/framework/cleanup.go (4 changes, generated, vendored)

@@ -17,6 +17,9 @@ limitations under the License.
 package framework

 import (
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	"reflect"
+	"runtime"
 	"sync"
 )

@@ -70,6 +73,7 @@ func RunCleanupActions() {
 	}()
 	// Run unlocked.
 	for _, fn := range list {
+		e2elog.Logf("Running Cleanup Action: %v", runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name())
 		fn()
 	}
 }
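
The added log line resolves a function value back to its symbol name via reflection, so each cleanup action is identifiable in the test log. A self-contained sketch of that technique (function name invented for illustration):

package main

import (
	"fmt"
	"reflect"
	"runtime"
)

func cleanupTempDirs() {} // stand-in for a registered cleanup action

func main() {
	fn := cleanupTempDirs
	// reflect.ValueOf(fn).Pointer() yields the function's code pointer;
	// runtime.FuncForPC maps it back to the fully qualified symbol name.
	name := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()
	fmt.Println(name) // e.g. "main.cleanupTempDirs"
}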

vendor/k8s.io/kubernetes/test/e2e/framework/framework.go (4 changes, generated, vendored)

@@ -296,7 +296,7 @@ func (f *Framework) BeforeEach() {

 	gatherMetricsAfterTest := TestContext.GatherMetricsAfterTest == "true" || TestContext.GatherMetricsAfterTest == "master"
 	if gatherMetricsAfterTest && TestContext.IncludeClusterAutoscalerMetrics {
-		grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, !ProviderIs("kubemark"), false, false, false, TestContext.IncludeClusterAutoscalerMetrics)
+		grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), !ProviderIs("kubemark"), false, false, false, TestContext.IncludeClusterAutoscalerMetrics, false)
 		if err != nil {
 			Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
 		} else {

@@ -449,7 +449,7 @@ func (f *Framework) AfterEach() {
 		ginkgo.By("Gathering metrics")
 		// Grab apiserver, scheduler, controller-manager metrics and (optionally) nodes' kubelet metrics.
 		grabMetricsFromKubelets := TestContext.GatherMetricsAfterTest != "master" && !ProviderIs("kubemark")
-		grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, grabMetricsFromKubelets, true, true, true, TestContext.IncludeClusterAutoscalerMetrics)
+		grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), grabMetricsFromKubelets, true, true, true, TestContext.IncludeClusterAutoscalerMetrics, false)
 		if err != nil {
 			Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
 		} else {

vendor/k8s.io/kubernetes/test/e2e/framework/kubectl/kubectl_utils.go (4 changes, generated, vendored)

@@ -142,6 +142,10 @@ func (tk *TestKubeconfig) WriteFileViaContainer(podName, containerName string, p
 		}
 	}
 	command := fmt.Sprintf("echo '%s' > '%s'; sync", contents, path)
+	// TODO(mauriciopoppe): remove this statement once we add `sync` to the test image, ref #101172
+	if e2epod.NodeOSDistroIs("windows") {
+		command = fmt.Sprintf("echo '%s' > '%s';", contents, path)
+	}
 	stdout, stderr, err := tk.kubectlExecWithRetry(tk.Namespace, podName, containerName, "--", "/bin/sh", "-c", command)
 	if err != nil {
 		e2elog.Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))

vendor/k8s.io/kubernetes/test/e2e/framework/metrics/kubelet_metrics.go (2 changes, generated, vendored)

@@ -139,7 +139,7 @@ func getKubeletMetricsFromNode(c clientset.Interface, nodeName string) (KubeletM
 	if c == nil {
 		return GrabKubeletMetricsWithoutProxy(nodeName, "/metrics")
 	}
-	grabber, err := NewMetricsGrabber(c, nil, true, false, false, false, false)
+	grabber, err := NewMetricsGrabber(c, nil, nil, true, false, false, false, false, false)
 	if err != nil {
 		return KubeletMetrics{}, err
 	}

vendor/k8s.io/kubernetes/test/e2e/framework/metrics/metrics_grabber.go (292 changes, generated, vendored)

@@ -18,62 +18,89 @@ package metrics

 import (
 	"context"
+	"errors"
 	"fmt"
+	"net"
 	"regexp"
 	"sync"
 	"time"

+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
-	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

+	"k8s.io/client-go/rest"
 	"k8s.io/klog/v2"
+
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )

 const (
 	// insecureSchedulerPort is the default port for the scheduler status server.
 	// May be overridden by a flag at startup.
 	// Deprecated: use the secure KubeSchedulerPort instead.
 	insecureSchedulerPort = 10251
 	// insecureKubeControllerManagerPort is the default port for the controller manager status server.
 	// May be overridden by a flag at startup.
 	// Deprecated: use the secure KubeControllerManagerPort instead.
 	insecureKubeControllerManagerPort = 10252
+	// kubeSchedulerPort is the default port for the scheduler status server.
+	kubeSchedulerPort = 10259
+	// kubeControllerManagerPort is the default port for the controller manager status server.
+	kubeControllerManagerPort = 10257
+	// snapshotControllerPort is the port for the snapshot controller
+	snapshotControllerPort = 9102
 )

+// MetricsGrabbingDisabledError is an error that is wrapped by the
+// different MetricsGrabber.Wrap functions when metrics grabbing is
+// not supported. Tests that check metrics data should then skip
+// the check.
+var MetricsGrabbingDisabledError = errors.New("metrics grabbing disabled")
+
 // Collection is metrics collection of components
 type Collection struct {
-	APIServerMetrics         APIServerMetrics
-	ControllerManagerMetrics ControllerManagerMetrics
-	KubeletMetrics           map[string]KubeletMetrics
-	SchedulerMetrics         SchedulerMetrics
-	ClusterAutoscalerMetrics ClusterAutoscalerMetrics
+	APIServerMetrics          APIServerMetrics
+	ControllerManagerMetrics  ControllerManagerMetrics
+	SnapshotControllerMetrics SnapshotControllerMetrics
+	KubeletMetrics            map[string]KubeletMetrics
+	SchedulerMetrics          SchedulerMetrics
+	ClusterAutoscalerMetrics  ClusterAutoscalerMetrics
 }

 // Grabber provides functions which grab metrics from components
 type Grabber struct {
-	client                            clientset.Interface
-	externalClient                    clientset.Interface
-	grabFromAPIServer                 bool
-	grabFromControllerManager         bool
-	grabFromKubelets                  bool
-	grabFromScheduler                 bool
-	grabFromClusterAutoscaler         bool
-	kubeScheduler                     string
-	kubeControllerManager             string
-	waitForControllerManagerReadyOnce sync.Once
+	client                             clientset.Interface
+	externalClient                     clientset.Interface
+	config                             *rest.Config
+	grabFromAPIServer                  bool
+	grabFromControllerManager          bool
+	grabFromKubelets                   bool
+	grabFromScheduler                  bool
+	grabFromClusterAutoscaler          bool
+	grabFromSnapshotController         bool
+	kubeScheduler                      string
+	waitForSchedulerReadyOnce          sync.Once
+	kubeControllerManager              string
+	waitForControllerManagerReadyOnce  sync.Once
+	snapshotController                 string
+	waitForSnapshotControllerReadyOnce sync.Once
 }

-// NewMetricsGrabber returns new metrics which are initialized.
-func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets bool, scheduler bool, controllers bool, apiServer bool, clusterAutoscaler bool) (*Grabber, error) {
+// NewMetricsGrabber prepares for grabbing metrics data from several different
+// components. It should be called when those components are running because
+// it needs to communicate with them to determine for which components
+// metrics data can be retrieved.
+//
+// Collecting metrics data is an optional debug feature. Not all clusters will
+// support it. If disabled for a component, the corresponding Grab function
+// will immediately return an error derived from MetricsGrabbingDisabledError.
+func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, config *rest.Config, kubelets bool, scheduler bool, controllers bool, apiServer bool, clusterAutoscaler bool, snapshotController bool) (*Grabber, error) {

 	kubeScheduler := ""
 	kubeControllerManager := ""
+	snapshotControllerManager := ""

 	regKubeScheduler := regexp.MustCompile("kube-scheduler-.*")
 	regKubeControllerManager := regexp.MustCompile("kube-controller-manager-.*")
+	regSnapshotController := regexp.MustCompile("volume-snapshot-controller.*")
+
+	if (scheduler || controllers) && config == nil {
+		return nil, errors.New("a rest config is required for grabbing kube-controller and kube-controller-manager metrics")
+	}

 	podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), metav1.ListOptions{})
 	if err != nil {

@@ -89,35 +116,55 @@ func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets b
 		if regKubeControllerManager.MatchString(pod.Name) {
 			kubeControllerManager = pod.Name
 		}
-		if kubeScheduler != "" && kubeControllerManager != "" {
+		if regSnapshotController.MatchString(pod.Name) {
+			snapshotControllerManager = pod.Name
+		}
+		if kubeScheduler != "" && kubeControllerManager != "" && snapshotControllerManager != "" {
 			break
 		}
 	}
-	if kubeScheduler == "" {
-		scheduler = false
-		klog.Warningf("Can't find kube-scheduler pod. Grabbing metrics from kube-scheduler is disabled.")
-	}
-	if kubeControllerManager == "" {
-		controllers = false
-		klog.Warningf("Can't find kube-controller-manager pod. Grabbing metrics from kube-controller-manager is disabled.")
-	}
-	if ec == nil {
+	if clusterAutoscaler && ec == nil {
 		klog.Warningf("Did not receive an external client interface. Grabbing metrics from ClusterAutoscaler is disabled.")
 	}

 	return &Grabber{
-		client:                    c,
-		externalClient:            ec,
-		grabFromAPIServer:         apiServer,
-		grabFromControllerManager: controllers,
-		grabFromKubelets:          kubelets,
-		grabFromScheduler:         scheduler,
-		grabFromClusterAutoscaler: clusterAutoscaler,
-		kubeScheduler:             kubeScheduler,
-		kubeControllerManager:     kubeControllerManager,
+		client:                     c,
+		externalClient:             ec,
+		config:                     config,
+		grabFromAPIServer:          apiServer,
+		grabFromControllerManager:  checkPodDebugHandlers(c, controllers, "kube-controller-manager", kubeControllerManager),
+		grabFromKubelets:           kubelets,
+		grabFromScheduler:          checkPodDebugHandlers(c, scheduler, "kube-scheduler", kubeScheduler),
+		grabFromClusterAutoscaler:  clusterAutoscaler,
+		grabFromSnapshotController: checkPodDebugHandlers(c, snapshotController, "snapshot-controller", snapshotControllerManager),
+		kubeScheduler:              kubeScheduler,
+		kubeControllerManager:      kubeControllerManager,
+		snapshotController:         snapshotControllerManager,
 	}, nil
 }

+func checkPodDebugHandlers(c clientset.Interface, requested bool, component, podName string) bool {
+	if !requested {
+		return false
+	}
+	if podName == "" {
+		klog.Warningf("Can't find %s pod. Grabbing metrics from %s is disabled.", component, component)
+		return false
+	}
+
+	// The debug handlers on the host where the pod runs might be disabled.
+	// We can check that indirectly by trying to retrieve log output.
+	limit := int64(1)
+	if _, err := c.CoreV1().Pods(metav1.NamespaceSystem).GetLogs(podName, &v1.PodLogOptions{LimitBytes: &limit}).DoRaw(context.TODO()); err != nil {
+		klog.Warningf("Can't retrieve log output of %s (%q). Debug handlers might be disabled in kubelet. Grabbing metrics from %s is disabled.",
+			podName, err, component)
+		return false
+	}
+
+	// Metrics gathering enabled.
+	return true
+}
+
 // HasControlPlanePods returns true if metrics grabber was able to find control-plane pods
 func (g *Grabber) HasControlPlanePods() bool {
 	return g.kubeScheduler != "" && g.kubeControllerManager != ""

@@ -149,20 +196,38 @@ func (g *Grabber) grabFromKubeletInternal(nodeName string, kubeletPort int) (Kub

 // GrabFromScheduler returns metrics from scheduler
 func (g *Grabber) GrabFromScheduler() (SchedulerMetrics, error) {
-	if g.kubeScheduler == "" {
-		return SchedulerMetrics{}, fmt.Errorf("kube-scheduler pod is not registered. Skipping Scheduler's metrics gathering")
+	if !g.grabFromScheduler {
+		return SchedulerMetrics{}, fmt.Errorf("kube-scheduler: %w", MetricsGrabbingDisabledError)
 	}
-	output, err := g.getMetricsFromPod(g.client, g.kubeScheduler, metav1.NamespaceSystem, insecureSchedulerPort)
+
+	var err error
+
+	g.waitForSchedulerReadyOnce.Do(func() {
+		if readyErr := e2epod.WaitForPodsReady(g.client, metav1.NamespaceSystem, g.kubeScheduler, 0); readyErr != nil {
+			err = fmt.Errorf("error waiting for kube-scheduler pod to be ready: %w", readyErr)
+		}
+	})
+	if err != nil {
+		return SchedulerMetrics{}, err
+	}
+
+	var lastMetricsFetchErr error
+	var output string
+	if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
+		output, lastMetricsFetchErr = g.getSecureMetricsFromPod(g.kubeScheduler, metav1.NamespaceSystem, kubeSchedulerPort)
+		return lastMetricsFetchErr == nil, nil
+	}); metricsWaitErr != nil {
+		err := fmt.Errorf("error waiting for kube-scheduler pod to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr)
+		return SchedulerMetrics{}, err
+	}
+
 	return parseSchedulerMetrics(output)
 }

 // GrabFromClusterAutoscaler returns metrics from cluster autoscaler
 func (g *Grabber) GrabFromClusterAutoscaler() (ClusterAutoscalerMetrics, error) {
 	if !g.HasControlPlanePods() && g.externalClient == nil {
-		return ClusterAutoscalerMetrics{}, fmt.Errorf("Did not find control-plane pods. Skipping ClusterAutoscaler's metrics gathering")
+		return ClusterAutoscalerMetrics{}, fmt.Errorf("ClusterAutoscaler: %w", MetricsGrabbingDisabledError)
 	}
 	var client clientset.Interface
 	var namespace string

@@ -182,38 +247,73 @@ func (g *Grabber) GrabFromClusterAutoscaler() (ClusterAutoscalerMetrics, error)

 // GrabFromControllerManager returns metrics from controller manager
 func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error) {
-	if g.kubeControllerManager == "" {
-		return ControllerManagerMetrics{}, fmt.Errorf("kube-controller-manager pod is not registered. Skipping ControllerManager's metrics gathering")
+	if !g.grabFromControllerManager {
+		return ControllerManagerMetrics{}, fmt.Errorf("kube-controller-manager: %w", MetricsGrabbingDisabledError)
 	}

 	var err error
-	podName := g.kubeControllerManager
-	g.waitForControllerManagerReadyOnce.Do(func() {
-		if readyErr := e2epod.WaitForPodsReady(g.client, metav1.NamespaceSystem, podName, 0); readyErr != nil {
-			err = fmt.Errorf("error waiting for controller manager pod to be ready: %w", readyErr)
-			return
-		}
-
-		var lastMetricsFetchErr error
-		if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
-			_, lastMetricsFetchErr = g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, insecureKubeControllerManagerPort)
-			return lastMetricsFetchErr == nil, nil
-		}); metricsWaitErr != nil {
-			err = fmt.Errorf("error waiting for controller manager pod to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr)
-			return
+
+	g.waitForControllerManagerReadyOnce.Do(func() {
+		if readyErr := e2epod.WaitForPodsReady(g.client, metav1.NamespaceSystem, g.kubeControllerManager, 0); readyErr != nil {
+			err = fmt.Errorf("error waiting for kube-controller-manager pod to be ready: %w", readyErr)
 		}
 	})
 	if err != nil {
 		return ControllerManagerMetrics{}, err
 	}

-	output, err := g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, insecureKubeControllerManagerPort)
-	if err != nil {
+	var output string
+	var lastMetricsFetchErr error
+	if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
+		output, lastMetricsFetchErr = g.getSecureMetricsFromPod(g.kubeControllerManager, metav1.NamespaceSystem, kubeControllerManagerPort)
+		return lastMetricsFetchErr == nil, nil
+	}); metricsWaitErr != nil {
+		err := fmt.Errorf("error waiting for kube-controller-manager to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr)
 		return ControllerManagerMetrics{}, err
 	}

 	return parseControllerManagerMetrics(output)
 }

+// GrabFromSnapshotController returns metrics from controller manager
+func (g *Grabber) GrabFromSnapshotController(podName string, port int) (SnapshotControllerMetrics, error) {
+	if !g.grabFromSnapshotController {
+		return SnapshotControllerMetrics{}, fmt.Errorf("volume-snapshot-controller: %w", MetricsGrabbingDisabledError)
+	}
+
+	// Use overrides if provided via test config flags.
+	// Otherwise, use the default volume-snapshot-controller pod name and port.
+	if podName == "" {
+		podName = g.snapshotController
+	}
+	if port == 0 {
+		port = snapshotControllerPort
+	}
+
+	var err error
+
+	g.waitForSnapshotControllerReadyOnce.Do(func() {
+		if readyErr := e2epod.WaitForPodsReady(g.client, metav1.NamespaceSystem, podName, 0); readyErr != nil {
+			err = fmt.Errorf("error waiting for volume-snapshot-controller pod to be ready: %w", readyErr)
+		}
+	})
+	if err != nil {
+		return SnapshotControllerMetrics{}, err
+	}
+
+	var output string
+	var lastMetricsFetchErr error
+	if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
+		output, lastMetricsFetchErr = g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, port)
+		return lastMetricsFetchErr == nil, nil
+	}); metricsWaitErr != nil {
+		err = fmt.Errorf("error waiting for volume-snapshot-controller pod to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr)
+		return SnapshotControllerMetrics{}, err
+	}
+
+	return parseSnapshotControllerMetrics(output)
+}
+
 // GrabFromAPIServer returns metrics from API server
 func (g *Grabber) GrabFromAPIServer() (APIServerMetrics, error) {
 	output, err := g.getMetricsFromAPIServer()

@@ -251,6 +351,14 @@ func (g *Grabber) Grab() (Collection, error) {
 			result.ControllerManagerMetrics = metrics
 		}
 	}
+	if g.grabFromSnapshotController {
+		metrics, err := g.GrabFromSnapshotController(g.snapshotController, snapshotControllerPort)
+		if err != nil {
+			errs = append(errs, err)
+		} else {
+			result.SnapshotControllerMetrics = metrics
+		}
+	}
 	if g.grabFromClusterAutoscaler {
 		metrics, err := g.GrabFromClusterAutoscaler()
 		if err != nil {

@@ -281,12 +389,13 @@ func (g *Grabber) Grab() (Collection, error) {
 	return result, nil
 }

+// getMetricsFromPod retrieves metrics data from an insecure port.
 func (g *Grabber) getMetricsFromPod(client clientset.Interface, podName string, namespace string, port int) (string, error) {
 	rawOutput, err := client.CoreV1().RESTClient().Get().
 		Namespace(namespace).
 		Resource("pods").
 		SubResource("proxy").
-		Name(fmt.Sprintf("%v:%v", podName, port)).
+		Name(fmt.Sprintf("%s:%d", podName, port)).
 		Suffix("metrics").
 		Do(context.TODO()).Raw()
 	if err != nil {

@@ -294,3 +403,50 @@ func (g *Grabber) getMetricsFromPod(client clientset.Interface, podName string,
 	}
 	return string(rawOutput), nil
 }
+
+// getSecureMetricsFromPod retrieves metrics from a pod that uses TLS
+// and checks client credentials. Conceptually this function is
+// similar to "kubectl port-forward" + "kubectl get --raw
+// https://localhost:<port>/metrics". It uses the same credentials
+// as kubelet.
+func (g *Grabber) getSecureMetricsFromPod(podName string, namespace string, port int) (string, error) {
+	dialer := e2epod.NewDialer(g.client, g.config)
+	metricConfig := rest.CopyConfig(g.config)
+	addr := e2epod.Addr{
+		Namespace: namespace,
+		PodName:   podName,
+		Port:      port,
+	}
+	metricConfig.Dial = func(ctx context.Context, network, address string) (net.Conn, error) {
+		return dialer.DialContainerPort(ctx, addr)
+	}
+	// This should make it possible verify the server, but while it
+	// got past the server name check, certificate validation
+	// still failed.
+	metricConfig.Host = addr.String()
+	metricConfig.ServerName = "localhost"
+	// Verifying the pod certificate with the same root CA
+	// as for the API server led to an error about "unknown root
+	// certificate". Disabling certificate checking on the client
+	// side gets around that and should be good enough for
+	// E2E testing.
+	metricConfig.Insecure = true
+	metricConfig.CAFile = ""
+	metricConfig.CAData = nil
+
+	// clientset.NewForConfig is used because
+	// metricClient.RESTClient() is directly usable, in contrast
+	// to the client constructed by rest.RESTClientFor().
+	metricClient, err := clientset.NewForConfig(metricConfig)
+	if err != nil {
+		return "", err
+	}
+
+	rawOutput, err := metricClient.RESTClient().Get().
+		AbsPath("metrics").
+		Do(context.TODO()).Raw()
+	if err != nil {
+		return "", err
+	}
+	return string(rawOutput), nil
+}
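
Because every disabled component now fails with an error wrapping MetricsGrabbingDisabledError, callers can distinguish "metrics not supported on this cluster" from a real failure. A sketch of how a test might consume the sentinel, under the assumption that a *Grabber was built as in the hunks above (the skip-vs-fail policy is illustrative):

package example

import (
	"errors"

	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
)

// grabSchedulerOrSkip turns "grabbing disabled" into a test skip and
// leaves every other error for the caller to handle.
func grabSchedulerOrSkip(grabber *e2emetrics.Grabber) (e2emetrics.SchedulerMetrics, error) {
	m, err := grabber.GrabFromScheduler()
	if errors.Is(err, e2emetrics.MetricsGrabbingDisabledError) {
		e2eskipper.Skipf("scheduler metrics not available: %v", err)
	}
	return m, err
}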

vendor/k8s.io/kubernetes/test/e2e/framework/metrics/snapshot_controller_metrics.go (40 changes, generated, vendored, new file)

@@ -0,0 +1,40 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import "k8s.io/component-base/metrics/testutil"
+
+// SnapshotControllerMetrics is metrics for controller manager
+type SnapshotControllerMetrics testutil.Metrics
+
+// Equal returns true if all metrics are the same as the arguments.
+func (m *SnapshotControllerMetrics) Equal(o SnapshotControllerMetrics) bool {
+	return (*testutil.Metrics)(m).Equal(testutil.Metrics(o))
+}
+
+func newSnapshotControllerMetrics() SnapshotControllerMetrics {
+	result := testutil.NewMetrics()
+	return SnapshotControllerMetrics(result)
+}
+
+func parseSnapshotControllerMetrics(data string) (SnapshotControllerMetrics, error) {
+	result := newSnapshotControllerMetrics()
+	if err := testutil.ParseMetrics(data, (*testutil.Metrics)(&result)); err != nil {
+		return SnapshotControllerMetrics{}, err
+	}
+	return result, nil
+}
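
The new type is a thin wrapper over testutil.Metrics, which parses Prometheus text-format exposition into a map of metric name to samples. A small standalone sketch of that parsing path (the metric name and value are invented for illustration):

package main

import (
	"fmt"

	"k8s.io/component-base/metrics/testutil"
)

func main() {
	// A tiny Prometheus text-format payload; the metric name is made up.
	data := `# TYPE example_snapshot_operations_total counter
example_snapshot_operations_total{operation="CreateSnapshot"} 3
`
	m := testutil.NewMetrics()
	if err := testutil.ParseMetrics(data, &m); err != nil {
		panic(err)
	}
	// testutil.Metrics maps metric name -> samples.
	for name, samples := range m {
		fmt.Println(name, len(samples))
	}
}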

vendor/k8s.io/kubernetes/test/e2e/framework/node/resource.go (25 changes, generated, vendored)

@@ -561,6 +561,31 @@ func GetClusterZones(c clientset.Interface) (sets.String, error) {
 	return zones, nil
 }

+// GetSchedulableClusterZones returns the values of zone label collected from all nodes which are schedulable.
+func GetSchedulableClusterZones(c clientset.Interface) (sets.String, error) {
+	nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err)
+	}
+
+	// collect values of zone label from all nodes
+	zones := sets.NewString()
+	for _, node := range nodes.Items {
+		// We should have at least 1 node in the zone which is schedulable.
+		if !IsNodeSchedulable(&node) {
+			continue
+		}
+		if zone, found := node.Labels[v1.LabelFailureDomainBetaZone]; found {
+			zones.Insert(zone)
+		}
+
+		if zone, found := node.Labels[v1.LabelTopologyZone]; found {
+			zones.Insert(zone)
+		}
+	}
+	return zones, nil
+}
+
 // CreatePodsPerNodeForSimpleApp creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
 func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
 	nodes, err := GetBoundedReadySchedulableNodes(c, maxCount)
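
Note that the helper reads both the deprecated failure-domain.beta.kubernetes.io/zone label and its GA replacement topology.kubernetes.io/zone, so clusters mid-migration still report zones. A sketch of driving it from a test (the multi-zone policy shown is invented for illustration):

package example

import (
	clientset "k8s.io/client-go/kubernetes"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// multiZone reports whether schedulable nodes span more than one zone,
// regardless of which of the two zone label keys each node carries.
func multiZone(c clientset.Interface) (bool, error) {
	zones, err := e2enode.GetSchedulableClusterZones(c)
	if err != nil {
		return false, err
	}
	return zones.Len() > 1, nil
}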

vendor/k8s.io/kubernetes/test/e2e/framework/node/wait.go (6 changes, generated, vendored)

@@ -214,8 +214,12 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
 		}
 		allNodes, err := c.CoreV1().Nodes().List(context.TODO(), opts)
 		if err != nil {
+			var terminalListNodesErr error
 			e2elog.Logf("Unexpected error listing nodes: %v", err)
-			return false, err
+			if attempt >= 3 {
+				terminalListNodesErr = err
+			}
+			return false, terminalListNodesErr
 		}
 		for _, node := range allNodes.Items {
 			if !readyForTests(&node, nonblockingTaints) {
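
The rewritten condition tolerates transient list failures: returning a nil error from the poll callback keeps wait.Poll* retrying, and the real error is only surfaced (aborting the poll) from the third attempt onward. A self-contained sketch of the pattern with a simulated flaky call:

package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// simulateList fails twice, then succeeds (stand-in for a flaky API call).
func simulateList(n int) error {
	if n < 3 {
		return errors.New("transient")
	}
	return nil
}

func main() {
	attempt := 0
	err := wait.PollImmediate(10*time.Millisecond, time.Second, func() (bool, error) {
		attempt++
		if listErr := simulateList(attempt); listErr != nil {
			if attempt >= 3 {
				return false, listErr // give up: abort the poll
			}
			return false, nil // nil error => keep polling
		}
		return true, nil
	})
	fmt.Println(attempt, err) // 3 <nil>
}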

vendor/k8s.io/kubernetes/test/e2e/framework/nodes_util.go (2 changes, generated, vendored)

@@ -34,7 +34,7 @@ import (
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 )

-const etcdImage = "3.4.13-0"
+const etcdImage = "3.5.0-0"

 // EtcdUpgrade upgrades etcd on GCE.
 func EtcdUpgrade(targetStorage, targetVersion string) error {

vendor/k8s.io/kubernetes/test/e2e/framework/pod/create.go (43 changes, generated, vendored)

@@ -19,7 +19,6 @@ package pod
 import (
 	"context"
 	"fmt"
-	"runtime"
 	"time"

 	v1 "k8s.io/api/core/v1"

@@ -154,15 +153,7 @@ func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.Persisten
 			RestartPolicy: v1.RestartPolicyOnFailure,
 		},
 	}
-	var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
-	var volumes = make([]v1.Volume, len(pvclaims))
-	for index, pvclaim := range pvclaims {
-		volumename := fmt.Sprintf("volume%v", index+1)
-		volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
-		volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
-	}
-	podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
-	podSpec.Spec.Volumes = volumes
+	setVolumes(&podSpec.Spec, pvclaims, nil /*inline volume sources*/, false /*PVCs readonly*/)
 	if nodeSelector != nil {
 		podSpec.Spec.NodeSelector = nodeSelector
 	}

@@ -175,11 +166,12 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
 	if podConfig.NS == "" {
 		return nil, fmt.Errorf("Cannot create pod with empty namespace")
 	}
-	if len(podConfig.Command) == 0 {
+	if len(podConfig.Command) == 0 && !NodeOSDistroIs("windows") {
 		podConfig.Command = "trap exit TERM; while true; do sleep 1; done"
 	}

 	podName := "pod-" + string(uuid.NewUUID())
-	if podConfig.FsGroup == nil && runtime.GOOS != "windows" {
+	if podConfig.FsGroup == nil && !NodeOSDistroIs("windows") {
 		podConfig.FsGroup = func(i int64) *int64 {
 			return &i
 		}(1000)

@@ -223,33 +215,42 @@ func MakePodSpec(podConfig *Config) *v1.PodSpec {
 		podSpec.SecurityContext.FSGroupChangePolicy = podConfig.PodFSGroupChangePolicy
 	}

+	setVolumes(podSpec, podConfig.PVCs, podConfig.InlineVolumeSources, podConfig.PVCsReadOnly)
+	SetNodeSelection(podSpec, podConfig.NodeSelection)
+	return podSpec
+}
+
+func setVolumes(podSpec *v1.PodSpec, pvcs []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, pvcsReadOnly bool) {
 	var volumeMounts = make([]v1.VolumeMount, 0)
 	var volumeDevices = make([]v1.VolumeDevice, 0)
-	var volumes = make([]v1.Volume, len(podConfig.PVCs)+len(podConfig.InlineVolumeSources))
+	var volumes = make([]v1.Volume, len(pvcs)+len(inlineVolumeSources))
 	volumeIndex := 0
-	for _, pvclaim := range podConfig.PVCs {
+	for _, pvclaim := range pvcs {
 		volumename := fmt.Sprintf("volume%v", volumeIndex+1)
 		if pvclaim.Spec.VolumeMode != nil && *pvclaim.Spec.VolumeMode == v1.PersistentVolumeBlock {
 			volumeDevices = append(volumeDevices, v1.VolumeDevice{Name: volumename, DevicePath: "/mnt/" + volumename})
 		} else {
 			volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
 		}

-		volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: podConfig.PVCsReadOnly}}}
+		volumes[volumeIndex] = v1.Volume{
+			Name: volumename,
+			VolumeSource: v1.VolumeSource{
+				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+					ClaimName: pvclaim.Name,
+					ReadOnly:  pvcsReadOnly,
+				},
+			},
+		}
 		volumeIndex++
 	}
-	for _, src := range podConfig.InlineVolumeSources {
+	for _, src := range inlineVolumeSources {
 		volumename := fmt.Sprintf("volume%v", volumeIndex+1)
 		// In-line volumes can be only filesystem, not block.
 		volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
 		volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: *src}
 		volumeIndex++
 	}

 	podSpec.Containers[0].VolumeMounts = volumeMounts
 	podSpec.Containers[0].VolumeDevices = volumeDevices
 	podSpec.Volumes = volumes
-
-	SetNodeSelection(podSpec, podConfig.NodeSelection)
-	return podSpec
 }
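
The refactor routes both MakePod and MakePodSpec through the single setVolumes helper, which sends block-mode PVCs to VolumeDevices and filesystem PVCs to VolumeMounts. A sketch of exercising it through the Config struct (namespace and claim name are invented for illustration):

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// makeTestPod builds a pod spec from one block-mode claim; the claim ends
// up under VolumeDevices rather than VolumeMounts (see setVolumes above).
func makeTestPod() (*v1.Pod, error) {
	block := v1.PersistentVolumeBlock
	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "test-claim"},
		Spec:       v1.PersistentVolumeClaimSpec{VolumeMode: &block},
	}
	return e2epod.MakeSecPod(&e2epod.Config{
		NS:   "e2e-ns",
		PVCs: []*v1.PersistentVolumeClaim{pvc},
	})
}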

vendor/k8s.io/kubernetes/test/e2e/framework/pod/dial.go (215 changes, generated, vendored, new file)

@@ -0,0 +1,215 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pod
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"regexp"
+	"strconv"
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/httpstream"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/portforward"
+	"k8s.io/client-go/transport/spdy"
+	"k8s.io/klog/v2"
+)
+
+// NewTransport creates a transport which uses the port forward dialer.
+// URLs must use <namespace>.<pod>:<port> as host.
+func NewTransport(client kubernetes.Interface, restConfig *rest.Config) *http.Transport {
+	return &http.Transport{
+		DialContext: func(ctx context.Context, _, addr string) (net.Conn, error) {
+			dialer := NewDialer(client, restConfig)
+			a, err := ParseAddr(addr)
+			if err != nil {
+				return nil, err
+			}
+			return dialer.DialContainerPort(ctx, *a)
+		},
+	}
+}
+
+// NewDialer creates a dialer that supports connecting to container ports.
+func NewDialer(client kubernetes.Interface, restConfig *rest.Config) *Dialer {
+	return &Dialer{
+		client:     client,
+		restConfig: restConfig,
+	}
+}
+
+// Dialer holds the relevant parameters that are independent of a particular connection.
+type Dialer struct {
+	client     kubernetes.Interface
+	restConfig *rest.Config
+}
+
+// DialContainerPort connects to a certain container port in a pod.
+func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Conn, finalErr error) {
+	restClient := d.client.CoreV1().RESTClient()
+	restConfig := d.restConfig
+	if restConfig.GroupVersion == nil {
+		restConfig.GroupVersion = &schema.GroupVersion{}
+	}
+	if restConfig.NegotiatedSerializer == nil {
+		restConfig.NegotiatedSerializer = scheme.Codecs
+	}
+
+	// The setup code around the actual portforward is from
+	// https://github.com/kubernetes/kubernetes/blob/c652ffbe4a29143623a1aaec39f745575f7e43ad/staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go
+	req := restClient.Post().
+		Resource("pods").
+		Namespace(addr.Namespace).
+		Name(addr.PodName).
+		SubResource("portforward")
+	transport, upgrader, err := spdy.RoundTripperFor(restConfig)
+	if err != nil {
+		return nil, fmt.Errorf("create round tripper: %v", err)
+	}
+	dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL())
+
+	streamConn, _, err := dialer.Dial(portforward.PortForwardProtocolV1Name)
+	if err != nil {
+		return nil, fmt.Errorf("dialer failed: %v", err)
+	}
+	requestID := "1"
+	defer func() {
+		if finalErr != nil {
+			streamConn.Close()
+		}
+	}()
+
+	// create error stream
+	headers := http.Header{}
+	headers.Set(v1.StreamType, v1.StreamTypeError)
+	headers.Set(v1.PortHeader, fmt.Sprintf("%d", addr.Port))
+	headers.Set(v1.PortForwardRequestIDHeader, requestID)
+
+	// We're not writing to this stream, just reading an error message from it.
+	// This happens asynchronously.
+	errorStream, err := streamConn.CreateStream(headers)
+	if err != nil {
+		return nil, fmt.Errorf("error creating error stream: %v", err)
+	}
+	errorStream.Close()
+	go func() {
+		message, err := ioutil.ReadAll(errorStream)
+		switch {
+		case err != nil:
+			klog.ErrorS(err, "error reading from error stream")
+		case len(message) > 0:
+			klog.ErrorS(errors.New(string(message)), "an error occurred connecting to the remote port")
+		}
+	}()
+
+	// create data stream
+	headers.Set(v1.StreamType, v1.StreamTypeData)
+	dataStream, err := streamConn.CreateStream(headers)
+	if err != nil {
+		return nil, fmt.Errorf("error creating data stream: %v", err)
+	}
+
+	return &stream{
+		Stream:     dataStream,
+		streamConn: streamConn,
+	}, nil
+}
+
+// Addr contains all relevant parameters for a certain port in a pod.
+// The container should be running before connections are attempted,
+// otherwise the connection will fail.
+type Addr struct {
+	Namespace, PodName string
+	Port               int
+}
+
+var _ net.Addr = Addr{}
+
+func (a Addr) Network() string {
+	return "port-forwarding"
+}
+
+func (a Addr) String() string {
+	return fmt.Sprintf("%s.%s:%d", a.Namespace, a.PodName, a.Port)
+}
+
+// ParseAddr expects a <namespace>.<pod>:<port number> as produced
+// by Addr.String.
+func ParseAddr(addr string) (*Addr, error) {
+	parts := addrRegex.FindStringSubmatch(addr)
+	if parts == nil {
+		return nil, fmt.Errorf("%q: must match the format <namespace>.<pod>:<port number>", addr)
+	}
+	port, _ := strconv.Atoi(parts[3])
+	return &Addr{
+		Namespace: parts[1],
+		PodName:   parts[2],
+		Port:      port,
+	}, nil
+}
+
+var addrRegex = regexp.MustCompile(`^([^\.]+)\.([^:]+):(\d+)$`)
+
+type stream struct {
+	addr Addr
+	httpstream.Stream
+	streamConn httpstream.Connection
+}
+
+var _ net.Conn = &stream{}
+
+func (s *stream) Close() error {
+	s.Stream.Close()
+	s.streamConn.Close()
+	return nil
+}
+
+func (s *stream) LocalAddr() net.Addr {
+	return LocalAddr{}
+}
+
+func (s *stream) RemoteAddr() net.Addr {
+	return s.addr
+}
+
+func (s *stream) SetDeadline(t time.Time) error {
+	return nil
+}
+
+func (s *stream) SetReadDeadline(t time.Time) error {
+	return nil
+}
+
+func (s *stream) SetWriteDeadline(t time.Time) error {
+	return nil
+}
+
+type LocalAddr struct{}
+
+var _ net.Addr = LocalAddr{}
+
+func (l LocalAddr) Network() string { return "port-forwarding" }
+func (l LocalAddr) String() string  { return "apiserver" }
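
This new file is what makes the secure metrics grabbing above work: it turns the API server's port-forward subresource into an ordinary net.Conn. A sketch of consuming it through plain HTTP, using the <namespace>.<pod>:<port> host convention that ParseAddr expects (namespace, pod name, and port here are invented for illustration):

package example

import (
	"fmt"
	"io/ioutil"
	"net/http"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// fetchPodMetrics GETs /metrics from a pod without any node-level network
// access: the transport tunnels the TCP connection over port-forward.
func fetchPodMetrics(client kubernetes.Interface, config *rest.Config) (string, error) {
	httpClient := &http.Client{Transport: e2epod.NewTransport(client, config)}
	resp, err := httpClient.Get("http://kube-system.volume-snapshot-controller-0:9102/metrics")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	fmt.Printf("got %d bytes of metrics\n", len(body))
	return string(body), nil
}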

vendor/k8s.io/kubernetes/test/e2e/framework/pod/resource.go (2 changes, generated, vendored)

@@ -25,7 +25,6 @@ import (

 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
-
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"

@@ -34,6 +33,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"
 	"k8s.io/kubectl/pkg/util/podutils"
+
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"

vendor/k8s.io/kubernetes/test/e2e/framework/pod/utils.go (7 changes, generated, vendored)

@@ -37,14 +37,9 @@ func NodeOSDistroIs(distro string) bool {
 }

 // GenerateScriptCmd generates the corresponding command lines to execute a command.
-// Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh
 func GenerateScriptCmd(command string) []string {
 	var commands []string
-	if !NodeOSDistroIs("windows") {
-		commands = []string{"/bin/sh", "-c", command}
-	} else {
-		commands = []string{"powershell", "/c", command}
-	}
+	commands = []string{"/bin/sh", "-c", command}
 	return commands
 }


vendor/k8s.io/kubernetes/test/e2e/framework/pod/wait.go (15 changes, generated, vendored)

@@ -435,6 +435,21 @@ func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods
 	return wait.PollImmediate(poll, podRespondingTimeout, NewProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
 }

+// WaitForNumberOfPods waits up to timeout to ensure there are exact
+// `num` pods in namespace `ns`.
+// It returns the matching Pods or a timeout error.
+func WaitForNumberOfPods(c clientset.Interface, ns string, num int, timeout time.Duration) (pods *v1.PodList, err error) {
+	err = wait.PollImmediate(poll, timeout, func() (bool, error) {
+		pods, err = c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
+		// ignore intermittent network error
+		if err != nil {
+			return false, nil
+		}
+		return len(pods.Items) == num, nil
+	})
+	return
+}
+
 // WaitForPodsWithLabelScheduled waits for all matching pods to become scheduled and at least one
 // matching pod exists. Return the list of matching pods.
 func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
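
Callers of the new helper simply block until the namespace holds the exact pod count, with intermittent list errors absorbed by the poll loop. A sketch (namespace, count, and timeout are invented for illustration):

package example

import (
	"time"

	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitForTwoReplicas waits until namespace "e2e-ns" contains exactly two pods.
func waitForTwoReplicas(c clientset.Interface) error {
	_, err := e2epod.WaitForNumberOfPods(c, "e2e-ns", 2, 2*time.Minute)
	return err
}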

vendor/k8s.io/kubernetes/test/e2e/framework/pods.go (2 changes, generated, vendored)

@@ -181,7 +181,7 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
 		c := &pod.Spec.Containers[i]
 		if c.ImagePullPolicy == v1.PullAlways {
 			// If the image pull policy is PullAlways, the image doesn't need to be in
-			// the white list or pre-pulled, because the image is expected to be pulled
+			// the allow list or pre-pulled, because the image is expected to be pulled
 			// in the test anyway.
 			continue
 		}

vendor/k8s.io/kubernetes/test/e2e/framework/ports.go (4 changes, generated, vendored)

@@ -22,10 +22,6 @@ const (
 	// KubeletPort is the default port for the kubelet server on each host machine.
 	// May be overridden by a flag at startup.
 	KubeletPort = 10250
-	// InsecureKubeControllerManagerPort is the default port for the controller manager status server.
-	// May be overridden by a flag at startup.
-	// Deprecated: use the secure KubeControllerManagerPort instead.
-	InsecureKubeControllerManagerPort = 10252
 	// KubeControllerManagerPort is the default port for the controller manager status server.
 	// May be overridden by a flag at startup.
 	KubeControllerManagerPort = 10257

vendor/k8s.io/kubernetes/test/e2e/framework/provider.go (4 changes, generated, vendored)

@@ -21,8 +21,6 @@ import (
 	"os"
 	"sync"

-	"github.com/pkg/errors"
-
 	"k8s.io/api/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
 )

@@ -78,7 +76,7 @@ func SetupProviderConfig(providerName string) (ProviderInterface, error) {
 	defer mutex.Unlock()
 	factory, ok := providers[providerName]
 	if !ok {
-		return nil, errors.Wrapf(os.ErrNotExist, "The provider %s is unknown.", providerName)
+		return nil, fmt.Errorf("The provider %s is unknown: %w", providerName, os.ErrNotExist)
 	}
 	provider, err := factory()


vendor/k8s.io/kubernetes/test/e2e/framework/psp.go (8 changes, generated, vendored)

@@ -177,7 +177,13 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
 			Kind:      rbacv1.ServiceAccountKind,
 			Namespace: namespace,
 			Name:      "default",
-		})
+		},
+		rbacv1.Subject{
+			Kind:     rbacv1.GroupKind,
+			APIGroup: rbacv1.GroupName,
+			Name:     "system:serviceaccounts:" + namespace,
+		},
+	)
 	ExpectNoError(err)
 	ExpectNoError(e2eauth.WaitForNamedAuthorizationUpdate(kubeClient.AuthorizationV1(),
 		serviceaccount.MakeUsername(namespace, "default"), namespace, "use", podSecurityPolicyPrivileged,

vendor/k8s.io/kubernetes/test/e2e/framework/skipper/skipper.go (7 changes, generated, vendored)

@@ -49,6 +49,7 @@ var localStorageCapacityIsolation featuregate.Feature = "LocalStorageCapacityIso
 var (
 	downwardAPIHugePages featuregate.Feature = "DownwardAPIHugePages"
 	execProbeTimeout     featuregate.Feature = "ExecProbeTimeout"
+	csiMigration         featuregate.Feature = "CSIMigration"
 )

 func skipInternalf(caller int, format string, args ...interface{}) {

@@ -154,6 +155,12 @@ func SkipUnlessExecProbeTimeoutEnabled() {
 	}
 }

+func SkipIfCSIMigrationEnabled() {
+	if utilfeature.DefaultFeatureGate.Enabled(csiMigration) {
+		skipInternalf(1, "Only supported when %v feature is disabled", csiMigration)
+	}
+}
+
 // SkipIfMissingResource skips if the gvr resource is missing.
 func SkipIfMissingResource(dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, namespace string) {
 	resourceClient := dynamicClient.Resource(gvr).Namespace(namespace)

vendor/k8s.io/kubernetes/test/e2e/framework/ssh/ssh.go (44 changes, generated, vendored)

@@ -24,6 +24,7 @@ import (
 	"net"
 	"os"
 	"path/filepath"
+	"sync"
 	"time"

 	"github.com/onsi/gomega"

@@ -48,6 +49,9 @@ const (
 	// singleCallTimeout is how long to try single API calls (like 'get' or 'list'). Used to prevent
 	// transient failures from failing tests.
 	singleCallTimeout = 5 * time.Minute
+
+	// sshBastionEnvKey is the environment variable key for running SSH commands via bastion.
+	sshBastionEnvKey = "KUBE_SSH_BASTION"
 )

 // GetSigner returns an ssh.Signer for the provider ("gce", etc.) that can be

@@ -133,13 +137,45 @@ func NodeSSHHosts(c clientset.Interface) ([]string, error) {
 			len(hosts), len(nodelist.Items), nodelist)
 	}

-	sshHosts := make([]string, 0, len(hosts))
-	for _, h := range hosts {
-		sshHosts = append(sshHosts, net.JoinHostPort(h, SSHPort))
+	lenHosts := len(hosts)
+	wg := &sync.WaitGroup{}
+	wg.Add(lenHosts)
+	sshHosts := make([]string, 0, lenHosts)
+	var sshHostsLock sync.Mutex
+
+	for _, host := range hosts {
+		go func(host string) {
+			defer wg.Done()
+			if canConnect(host) {
+				e2elog.Logf("Assuming SSH on host %s", host)
+				sshHostsLock.Lock()
+				sshHosts = append(sshHosts, net.JoinHostPort(host, SSHPort))
+				sshHostsLock.Unlock()
+			} else {
+				e2elog.Logf("Skipping host %s because it does not run anything on port %s", host, SSHPort)
+			}
+		}(host)
 	}
+	wg.Wait()
+
 	return sshHosts, nil
 }

+// canConnect returns true if a network connection is possible to the SSHPort.
+func canConnect(host string) bool {
+	if _, ok := os.LookupEnv(sshBastionEnvKey); ok {
+		return true
+	}
+	hostPort := net.JoinHostPort(host, SSHPort)
+	conn, err := net.DialTimeout("tcp", hostPort, 3*time.Second)
+	if err != nil {
+		e2elog.Logf("cannot dial %s: %v", hostPort, err)
+		return false
+	}
+	conn.Close()
+	return true
+}
+
 // Result holds the execution result of SSH command
 type Result struct {
 	User string

@@ -176,7 +212,7 @@ func SSH(cmd, host, provider string) (Result, error) {
 		result.User = os.Getenv("USER")
 	}

-	if bastion := os.Getenv("KUBE_SSH_BASTION"); len(bastion) > 0 {
+	if bastion := os.Getenv(sshBastionEnvKey); len(bastion) > 0 {
 		stdout, stderr, code, err := runSSHCommandViaBastion(cmd, result.User, bastion, host, signer)
 		result.Stdout = stdout
 		result.Stderr = stderr

vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go (15 changes, generated, vendored)

@@ -19,6 +19,7 @@ package framework
 import (
 	"crypto/rand"
 	"encoding/base64"
+	"errors"
 	"flag"
 	"fmt"
 	"io/ioutil"

@@ -29,7 +30,6 @@ import (
 	"time"

 	"github.com/onsi/ginkgo/config"
-	"github.com/pkg/errors"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"

@@ -182,6 +182,12 @@ type TestContextType struct {

 	// DockerConfigFile is a file that contains credentials which can be used to pull images from certain private registries, needed for a test.
 	DockerConfigFile string
+
+	// SnapshotControllerPodName is the name used for identifying the snapshot controller pod.
+	SnapshotControllerPodName string
+
+	// SnapshotControllerHTTPPort the port used for communicating with the snapshot controller HTTP endpoint.
+	SnapshotControllerHTTPPort int
 }

 // NodeKillerConfig describes configuration of NodeKiller -- a utility to

@@ -223,6 +229,8 @@ type NodeTestContextType struct {
 	// the node e2e test. If empty, the default one (system.DefaultSpec) is
 	// used. The system specs are in test/e2e_node/system/specs/.
 	SystemSpecName string
+	// RestartKubelet restarts Kubelet unit when the process is killed.
+	RestartKubelet bool
 	// ExtraEnvs is a map of environment names to values.
 	ExtraEnvs map[string]string
 }

@@ -315,6 +323,9 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
 	flags.StringVar(&TestContext.ProgressReportURL, "progress-report-url", "", "The URL to POST progress updates to as the suite runs to assist in aiding integrations. If empty, no messages sent.")
 	flags.StringVar(&TestContext.SpecSummaryOutput, "spec-dump", "", "The file to dump all ginkgo.SpecSummary to after tests run. If empty, no objects are saved/printed.")
 	flags.StringVar(&TestContext.DockerConfigFile, "docker-config-file", "", "A file that contains credentials which can be used to pull images from certain private registries, needed for a test.")
+
+	flags.StringVar(&TestContext.SnapshotControllerPodName, "snapshot-controller-pod-name", "", "The pod name to use for identifying the snapshot controller in the kube-system namespace.")
+	flags.IntVar(&TestContext.SnapshotControllerHTTPPort, "snapshot-controller-http-port", 0, "The port to use for snapshot controller HTTP communication.")
 }

 // RegisterClusterFlags registers flags specific to the cluster e2e test suite.

@@ -473,7 +484,7 @@ func AfterReadingAllFlags(t *TestContextType) {
 		var err error
 		TestContext.CloudConfig.Provider, err = SetupProviderConfig(TestContext.Provider)
 		if err != nil {
-			if os.IsNotExist(errors.Cause(err)) {
+			if os.IsNotExist(errors.Unwrap(err)) {
 				// Provide a more helpful error message when the provider is unknown.
 				var providers []string
 				for _, name := range GetProviders() {

vendor/k8s.io/kubernetes/test/e2e/framework/testfiles/testdata/a/foo.txt (1 change, generated, vendored, new file)

@@ -0,0 +1 @@
+Hello World
56
vendor/k8s.io/kubernetes/test/e2e/framework/testfiles/testfiles.go
generated
vendored
@ -25,13 +25,14 @@ limitations under the License.
package testfiles

import (
"embed"
"errors"
"fmt"
"io/fs"
"io/ioutil"
"os"
"path"
"path/filepath"
"sort"
"strings"
)

@ -147,32 +148,47 @@ func (r RootFileSource) DescribeFiles() string {
return description
}

// BindataFileSource handles files stored in a package generated with bindata.
type BindataFileSource struct {
Asset func(string) ([]byte, error)
AssetNames func() []string
// EmbeddedFileSource handles files stored in a package generated with bindata.
type EmbeddedFileSource struct {
EmbeddedFS embed.FS
Root string
fileList []string
}

// ReadTestFile looks for an asset with the given path.
func (b BindataFileSource) ReadTestFile(filePath string) ([]byte, error) {
fileBytes, err := b.Asset(filePath)
// ReadTestFile looks for an embedded file with the given path.
func (e EmbeddedFileSource) ReadTestFile(filepath string) ([]byte, error) {
relativePath := strings.TrimPrefix(filepath, fmt.Sprintf("%s/", e.Root))

b, err := e.EmbeddedFS.ReadFile(relativePath)
if err != nil {
// It would be nice to have a better way to detect
// "not found" errors :-/
if strings.HasSuffix(err.Error(), "not found") {
if errors.Is(err, fs.ErrNotExist) {
return nil, nil
}
return nil, err
}
return fileBytes, nil

return b, nil
}

// DescribeFiles explains about gobindata and then lists all available files.
func (b BindataFileSource) DescribeFiles() string {
// DescribeFiles explains that it is looking inside an embedded filesystem
func (e EmbeddedFileSource) DescribeFiles() string {
var lines []string
lines = append(lines, "The following files are built into the test executable via gobindata. For questions on maintaining gobindata, contact the sig-testing group.")
assets := b.AssetNames()
sort.Strings(assets)
lines = append(lines, assets...)
description := strings.Join(lines, "\n ")
return description
lines = append(lines, "The following files are embedded into the test executable:")

if len(e.fileList) == 0 {
e.populateFileList()
}
lines = append(lines, e.fileList...)

return strings.Join(lines, "\n\t")
}

func (e *EmbeddedFileSource) populateFileList() {
fs.WalkDir(e.EmbeddedFS, ".", func(path string, d fs.DirEntry, err error) error {
if !d.IsDir() {
e.fileList = append(e.fileList, filepath.Join(e.Root, path))
}

return nil
})
}
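The BindataFileSource-to-EmbeddedFileSource change replaces generated bindata with Go 1.16 embedded files. A self-contained sketch of the same pattern (hypothetical testdata directory, stdlib only):

package main

import (
	"embed"
	"errors"
	"fmt"
	"io/fs"
)

// The compiler embeds everything under testdata/ into the binary.
// (Assumes a testdata/ directory exists next to this file.)
//go:embed testdata
var testFiles embed.FS

func readTestFile(path string) ([]byte, error) {
	b, err := testFiles.ReadFile(path)
	if err != nil {
		// fs.ErrNotExist replaces the old string-suffix check on err.Error().
		if errors.Is(err, fs.ErrNotExist) {
			return nil, nil
		}
		return nil, err
	}
	return b, nil
}

func main() {
	// List the embedded files, mirroring populateFileList above.
	fs.WalkDir(testFiles, ".", func(path string, d fs.DirEntry, err error) error {
		if err == nil && !d.IsDir() {
			fmt.Println(path)
		}
		return nil
	})
}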
57
vendor/k8s.io/kubernetes/test/e2e/framework/timeouts.go
generated
vendored
@ -20,19 +20,20 @@ import "time"

const (
// Default timeouts to be used in TimeoutContext
podStartTimeout = 5 * time.Minute
podStartShortTimeout = 2 * time.Minute
podStartSlowTimeout = 15 * time.Minute
podDeleteTimeout = 5 * time.Minute
claimProvisionTimeout = 5 * time.Minute
claimProvisionShortTimeout = 1 * time.Minute
claimBoundTimeout = 3 * time.Minute
pvReclaimTimeout = 3 * time.Minute
pvBoundTimeout = 3 * time.Minute
pvDeleteTimeout = 3 * time.Minute
pvDeleteSlowTimeout = 20 * time.Minute
snapshotCreateTimeout = 5 * time.Minute
snapshotDeleteTimeout = 5 * time.Minute
podStartTimeout = 5 * time.Minute
podStartShortTimeout = 2 * time.Minute
podStartSlowTimeout = 15 * time.Minute
podDeleteTimeout = 5 * time.Minute
claimProvisionTimeout = 5 * time.Minute
claimProvisionShortTimeout = 1 * time.Minute
claimBoundTimeout = 3 * time.Minute
pvReclaimTimeout = 3 * time.Minute
pvBoundTimeout = 3 * time.Minute
pvDeleteTimeout = 3 * time.Minute
pvDeleteSlowTimeout = 20 * time.Minute
snapshotCreateTimeout = 5 * time.Minute
snapshotDeleteTimeout = 5 * time.Minute
snapshotControllerMetricsTimeout = 5 * time.Minute
)

// TimeoutContext contains timeout settings for several actions.
@ -77,23 +78,27 @@ type TimeoutContext struct {

// SnapshotDelete is how long for snapshot to delete snapshotContent.
SnapshotDelete time.Duration

// SnapshotControllerMetrics is how long to wait for snapshot controller metrics.
SnapshotControllerMetrics time.Duration
}

// NewTimeoutContextWithDefaults returns a TimeoutContext with default values.
func NewTimeoutContextWithDefaults() *TimeoutContext {
return &TimeoutContext{
PodStart: podStartTimeout,
PodStartShort: podStartShortTimeout,
PodStartSlow: podStartSlowTimeout,
PodDelete: podDeleteTimeout,
ClaimProvision: claimProvisionTimeout,
ClaimProvisionShort: claimProvisionShortTimeout,
ClaimBound: claimBoundTimeout,
PVReclaim: pvReclaimTimeout,
PVBound: pvBoundTimeout,
PVDelete: pvDeleteTimeout,
PVDeleteSlow: pvDeleteSlowTimeout,
SnapshotCreate: snapshotCreateTimeout,
SnapshotDelete: snapshotDeleteTimeout,
PodStart: podStartTimeout,
PodStartShort: podStartShortTimeout,
PodStartSlow: podStartSlowTimeout,
PodDelete: podDeleteTimeout,
ClaimProvision: claimProvisionTimeout,
ClaimProvisionShort: claimProvisionShortTimeout,
ClaimBound: claimBoundTimeout,
PVReclaim: pvReclaimTimeout,
PVBound: pvBoundTimeout,
PVDelete: pvDeleteTimeout,
PVDeleteSlow: pvDeleteSlowTimeout,
SnapshotCreate: snapshotCreateTimeout,
SnapshotDelete: snapshotDeleteTimeout,
SnapshotControllerMetrics: snapshotControllerMetricsTimeout,
}
}
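The timeouts hunk above only adds snapshotControllerMetricsTimeout; the rest of the old and new blocks differ solely by gofmt re-alignment, which this plain-text rendering cannot show. A standalone sketch of how callers consume these defaults (struct trimmed to two fields for illustration):

package main

import (
	"fmt"
	"time"
)

// TimeoutContext mirrors a subset of the framework struct above.
type TimeoutContext struct {
	PodStart       time.Duration
	SnapshotCreate time.Duration
}

// NewTimeoutContextWithDefaults mirrors the framework constructor.
func NewTimeoutContextWithDefaults() *TimeoutContext {
	return &TimeoutContext{
		PodStart:       5 * time.Minute,
		SnapshotCreate: 5 * time.Minute,
	}
}

func main() {
	// A slow driver can stretch a single timeout without touching the rest.
	timeouts := NewTimeoutContextWithDefaults()
	timeouts.SnapshotCreate = 10 * time.Minute
	fmt.Println(timeouts.PodStart, timeouts.SnapshotCreate)
}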
13
vendor/k8s.io/kubernetes/test/e2e/framework/volume/OWNERS
generated
vendored
@ -1,20 +1,11 @@
# See the OWNERS docs at https://go.k8s.io/owners

approvers:
- saad-ali
- sig-storage-approvers
- rootfs
- gnufied
- jingxu97
- jsafrane
- msau42
reviewers:
- saad-ali
- rootfs
- gnufied
- jingxu97
- jsafrane
- msau42
- sig-storage-reviewers
- jeffvance
- copejon
- verult
- davidz627
32
vendor/k8s.io/kubernetes/test/e2e/framework/volume/fixtures.go
generated
vendored
@ -368,11 +368,7 @@ func runVolumeTesterPod(client clientset.Interface, timeouts *framework.TimeoutC
var gracePeriod int64 = 1
var command string

if !framework.NodeOSDistroIs("windows") {
command = "while true ; do sleep 2; done "
} else {
command = "while(1) {sleep 2}"
}
command = "while true ; do sleep 2; done "
seLinuxOptions := &v1.SELinuxOptions{Level: "s0:c0,c1"}
clientPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
@ -572,47 +568,30 @@ func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fs
// generateWriteCmd is used by generateWriteBlockCmd and generateWriteFileCmd
func generateWriteCmd(content, path string) []string {
var commands []string
if !framework.NodeOSDistroIs("windows") {
commands = []string{"/bin/sh", "-c", "echo '" + content + "' > " + path}
} else {
commands = []string{"powershell", "/c", "echo '" + content + "' > " + path}
}
commands = []string{"/bin/sh", "-c", "echo '" + content + "' > " + path}
return commands
}

// generateReadBlockCmd generates the corresponding command lines to read from a block device with the given file path.
// Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh
func generateReadBlockCmd(fullPath string, numberOfCharacters int) []string {
var commands []string
if !framework.NodeOSDistroIs("windows") {
commands = []string{"head", "-c", strconv.Itoa(numberOfCharacters), fullPath}
} else {
// TODO: is there a way on windows to get the first X bytes from a device?
commands = []string{"powershell", "/c", "type " + fullPath}
}
commands = []string{"head", "-c", strconv.Itoa(numberOfCharacters), fullPath}
return commands
}

// generateWriteBlockCmd generates the corresponding command lines to write to a block device the given content.
// Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh
func generateWriteBlockCmd(content, fullPath string) []string {
return generateWriteCmd(content, fullPath)
}

// GenerateReadFileCmd generates the corresponding command lines to read from a file with the given file path.
// Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh
func GenerateReadFileCmd(fullPath string) []string {
var commands []string
if !framework.NodeOSDistroIs("windows") {
commands = []string{"cat", fullPath}
} else {
commands = []string{"powershell", "/c", "type " + fullPath}
}
commands = []string{"cat", fullPath}
return commands
}

// generateWriteFileCmd generates the corresponding command lines to write a file with the given content and file path.
// Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh
func generateWriteFileCmd(content, fullPath string) []string {
return generateWriteCmd(content, fullPath)
}
@ -638,9 +617,6 @@ func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.Persi
// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
// is resolved. Otherwise there will be dependency issue.
func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string, error) {
if framework.NodeOSDistroIs("windows") {
return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "powershell", "/c", shExec)
}
return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
}

88
vendor/k8s.io/kubernetes/test/e2e/storage/podlogs/podlogs.go
generated
vendored
@ -33,8 +33,7 @@ import (
"regexp"
"strings"
"sync"

"github.com/pkg/errors"
"time"

v1 "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -57,11 +56,18 @@ type LogOutput struct {
// Matches harmless errors from pkg/kubelet/kubelet_pods.go.
var expectedErrors = regexp.MustCompile(`container .* in pod .* is (terminated|waiting to start|not available)|the server could not find the requested resource`)

// CopyAllLogs follows the logs of all containers in all pods,
// CopyAllLogs is basically CopyPodLogs for all current or future pods in the given namespace ns
func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogOutput) error {
return CopyPodLogs(ctx, cs, ns, "", to)
}

// CopyPodLogs follows the logs of all containers in pod with the given podName,
// including those that get created in the future, and writes each log
// line as configured in the output options. It does that until the
// context is done or until an error occurs.
//
// If podName is empty, it will follow all pods in the given namespace ns.
//
// Beware that there is currently no way to force log collection
// before removing pods, which means that there is a known race
// between "stop pod" and "collecting log entries". The alternative
@ -79,10 +85,17 @@ var expectedErrors = regexp.MustCompile(`container .* in pod .* is (terminated|w
// But it turned out to be rather confusing, so now a heuristic is used: if
// log output of a container was already captured, then capturing does not
// resume if the pod is marked for deletion.
func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogOutput) error {
watcher, err := cs.CoreV1().Pods(ns).Watch(context.TODO(), meta.ListOptions{})
func CopyPodLogs(ctx context.Context, cs clientset.Interface, ns, podName string, to LogOutput) error {
options := meta.ListOptions{}
if podName != "" {
options = meta.ListOptions{
FieldSelector: fmt.Sprintf("metadata.name=%s", podName),
}
}
watcher, err := cs.CoreV1().Pods(ns).Watch(context.TODO(), options)

if err != nil {
return errors.Wrap(err, "cannot create Pod event watcher")
return fmt.Errorf("cannot create Pod event watcher: %w", err)
}

go func() {
@ -96,7 +109,7 @@ func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogO
m.Lock()
defer m.Unlock()

pods, err := cs.CoreV1().Pods(ns).List(context.TODO(), meta.ListOptions{})
pods, err := cs.CoreV1().Pods(ns).List(context.TODO(), options)
if err != nil {
if to.StatusWriter != nil {
fmt.Fprintf(to.StatusWriter, "ERROR: get pod list in %s: %s\n", ns, err)
@ -252,18 +265,42 @@ func logsForPod(ctx context.Context, cs clientset.Interface, ns, pod string, opt
}

// WatchPods prints pod status events for a certain namespace or all namespaces
// when namespace name is empty.
func WatchPods(ctx context.Context, cs clientset.Interface, ns string, to io.Writer) error {
watcher, err := cs.CoreV1().Pods(ns).Watch(context.TODO(), meta.ListOptions{})
// when namespace name is empty. The closer can be nil if the caller doesn't want
// the file to be closed when watching stops.
func WatchPods(ctx context.Context, cs clientset.Interface, ns string, to io.Writer, toCloser io.Closer) (finalErr error) {
defer func() {
if finalErr != nil && toCloser != nil {
toCloser.Close()
}
}()

pods, err := cs.CoreV1().Pods(ns).Watch(context.Background(), meta.ListOptions{})
if err != nil {
return errors.Wrap(err, "cannot create Pod event watcher")
return fmt.Errorf("cannot create Pod watcher: %w", err)
}
defer func() {
if finalErr != nil {
pods.Stop()
}
}()

events, err := cs.CoreV1().Events(ns).Watch(context.Background(), meta.ListOptions{})
if err != nil {
return fmt.Errorf("cannot create Event watcher: %w", err)
}

go func() {
defer watcher.Stop()
defer func() {
pods.Stop()
events.Stop()
if toCloser != nil {
toCloser.Close()
}
}()
timeFormat := "15:04:05.000"
for {
select {
case e := <-watcher.ResultChan():
case e := <-pods.ResultChan():
if e.Object == nil {
continue
}
@ -274,7 +311,8 @@ func WatchPods(ctx context.Context, cs clientset.Interface, ns string, to io.Wri
}
buffer := new(bytes.Buffer)
fmt.Fprintf(buffer,
"pod event: %s: %s/%s %s: %s %s\n",
"%s pod: %s: %s/%s %s: %s %s\n",
time.Now().Format(timeFormat),
e.Type,
pod.Namespace,
pod.Name,
@ -300,7 +338,29 @@ func WatchPods(ctx context.Context, cs clientset.Interface, ns string, to io.Wri
fmt.Fprintf(buffer, "\n")
}
to.Write(buffer.Bytes())
case e := <-events.ResultChan():
if e.Object == nil {
continue
}

event, ok := e.Object.(*v1.Event)
if !ok {
continue
}
to.Write([]byte(fmt.Sprintf("%s event: %s/%s %s: %s %s: %s (%v - %v)\n",
time.Now().Format(timeFormat),
event.InvolvedObject.APIVersion,
event.InvolvedObject.Kind,
event.InvolvedObject.Name,
event.Source.Component,
event.Type,
event.Message,
event.FirstTimestamp,
event.LastTimestamp,
)))
case <-ctx.Done():
to.Write([]byte(fmt.Sprintf("%s ==== stopping pod watch ====\n",
time.Now().Format(timeFormat))))
return
}
}

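CopyPodLogs narrows the watch to one pod via a metadata.name field selector instead of filtering client-side. A minimal standalone sketch of that server-side filtering (assumes a reachable cluster via the default kubeconfig; "my-pod" and "default" are placeholder names):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Standard out-of-cluster client setup (assumes ~/.kube/config).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(config)

	// An empty selector watches every pod; naming one pod filters server-side.
	options := metav1.ListOptions{
		FieldSelector: fmt.Sprintf("metadata.name=%s", "my-pod"),
	}
	watcher, err := cs.CoreV1().Pods("default").Watch(context.TODO(), options)
	if err != nil {
		panic(err)
	}
	defer watcher.Stop()

	for e := range watcher.ResultChan() {
		fmt.Printf("%s: %v\n", e.Type, e.Object.GetObjectKind().GroupVersionKind())
	}
}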
49
vendor/k8s.io/kubernetes/test/e2e/storage/utils/create.go
generated
vendored
@ -20,10 +20,9 @@ import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"

"github.com/pkg/errors"

appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
@ -58,17 +57,17 @@ func LoadFromManifests(files ...string) ([]interface{}, error) {
// Ignore any additional fields for now, just determine what we have.
var what What
if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), data, &what); err != nil {
return errors.Wrap(err, "decode TypeMeta")
return fmt.Errorf("decode TypeMeta: %w", err)
}

factory := factories[what]
if factory == nil {
return errors.Errorf("item of type %+v not supported", what)
return fmt.Errorf("item of type %+v not supported", what)
}

object := factory.New()
if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), data, object); err != nil {
return errors.Wrapf(err, "decode %+v", what)
return fmt.Errorf("decode %+v: %w", what, err)
}
items = append(items, object)
return nil
@ -96,7 +95,7 @@ func visitManifests(cb func([]byte) error, files ...string) error {

for _, item := range items {
if err := cb(item); err != nil {
return errors.Wrap(err, fileName)
return fmt.Errorf("%s: %w", fileName, err)
}
}
}
@ -173,13 +172,13 @@ func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{})
if err == nil {
done = true
break
} else if errors.Cause(err) != errorItemNotSupported {
} else if !errors.Is(err, errorItemNotSupported) {
result = err
break
}
}
if result == nil && !done {
result = errors.Errorf("item of type %T not supported", item)
result = fmt.Errorf("item of type %T not supported", item)
break
}
}
@ -198,7 +197,7 @@ func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{})
func CreateFromManifests(f *framework.Framework, driverNamespace *v1.Namespace, patch func(item interface{}) error, files ...string) (func(), error) {
items, err := LoadFromManifests(files...)
if err != nil {
return nil, errors.Wrap(err, "CreateFromManifests")
return nil, fmt.Errorf("CreateFromManifests: %w", err)
}
if err := PatchItems(f, driverNamespace, items...); err != nil {
return nil, err
@ -337,21 +336,21 @@ func patchItemRecursively(f *framework.Framework, driverNamespace *v1.Namespace,
PatchName(f, &item.Name)
for i := range item.Subjects {
if err := patchItemRecursively(f, driverNamespace, &item.Subjects[i]); err != nil {
return errors.Wrapf(err, "%T", f)
return fmt.Errorf("%T: %w", f, err)
}
}
if err := patchItemRecursively(f, driverNamespace, &item.RoleRef); err != nil {
return errors.Wrapf(err, "%T", f)
return fmt.Errorf("%T: %w", f, err)
}
case *rbacv1.RoleBinding:
PatchNamespace(f, driverNamespace, &item.Namespace)
for i := range item.Subjects {
if err := patchItemRecursively(f, driverNamespace, &item.Subjects[i]); err != nil {
return errors.Wrapf(err, "%T", f)
return fmt.Errorf("%T: %w", f, err)
}
}
if err := patchItemRecursively(f, driverNamespace, &item.RoleRef); err != nil {
return errors.Wrapf(err, "%T", f)
return fmt.Errorf("%T: %w", f, err)
}
case *v1.Service:
PatchNamespace(f, driverNamespace, &item.ObjectMeta.Namespace)
@ -372,7 +371,7 @@ func patchItemRecursively(f *framework.Framework, driverNamespace *v1.Namespace,
return err
}
default:
return errors.Errorf("missing support for patching item of type %T", item)
return fmt.Errorf("missing support for patching item of type %T", item)
}
return nil
}
@ -395,7 +394,7 @@ func (*serviceAccountFactory) Create(f *framework.Framework, ns *v1.Namespace, i
}
client := f.ClientSet.CoreV1().ServiceAccounts(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create ServiceAccount")
return nil, fmt.Errorf("create ServiceAccount: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
@ -417,7 +416,7 @@ func (*clusterRoleFactory) Create(f *framework.Framework, ns *v1.Namespace, i in
framework.Logf("Define cluster role %v", item.GetName())
client := f.ClientSet.RbacV1().ClusterRoles()
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create ClusterRole")
return nil, fmt.Errorf("create ClusterRole: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
@ -438,7 +437,7 @@ func (*clusterRoleBindingFactory) Create(f *framework.Framework, ns *v1.Namespac

client := f.ClientSet.RbacV1().ClusterRoleBindings()
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create ClusterRoleBinding")
return nil, fmt.Errorf("create ClusterRoleBinding: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
@ -459,7 +458,7 @@ func (*roleFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface

client := f.ClientSet.RbacV1().Roles(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create Role")
return nil, fmt.Errorf("create Role: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
@ -480,7 +479,7 @@ func (*roleBindingFactory) Create(f *framework.Framework, ns *v1.Namespace, i in

client := f.ClientSet.RbacV1().RoleBindings(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create RoleBinding")
return nil, fmt.Errorf("create RoleBinding: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
@ -501,7 +500,7 @@ func (*serviceFactory) Create(f *framework.Framework, ns *v1.Namespace, i interf

client := f.ClientSet.CoreV1().Services(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create Service")
return nil, fmt.Errorf("create Service: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
@ -522,7 +521,7 @@ func (*statefulSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i in

client := f.ClientSet.AppsV1().StatefulSets(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create StatefulSet")
return nil, fmt.Errorf("create StatefulSet: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
@ -543,7 +542,7 @@ func (*daemonSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i inte

client := f.ClientSet.AppsV1().DaemonSets(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create DaemonSet")
return nil, fmt.Errorf("create DaemonSet: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
@ -564,7 +563,7 @@ func (*storageClassFactory) Create(f *framework.Framework, ns *v1.Namespace, i i

client := f.ClientSet.StorageV1().StorageClasses()
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create StorageClass")
return nil, fmt.Errorf("create StorageClass: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
@ -585,7 +584,7 @@ func (*csiDriverFactory) Create(f *framework.Framework, ns *v1.Namespace, i inte

client := f.ClientSet.StorageV1().CSIDrivers()
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create CSIDriver")
return nil, fmt.Errorf("create CSIDriver: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
@ -606,7 +605,7 @@ func (*secretFactory) Create(f *framework.Framework, ns *v1.Namespace, i interfa

client := f.ClientSet.CoreV1().Secrets(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, errors.Wrap(err, "create Secret")
return nil, fmt.Errorf("create Secret: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
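Each factory above returns a deletion closure alongside the error, so callers can defer teardown of exactly what was created. A compact sketch of the pattern with a hypothetical resource:

package main

import "fmt"

// create provisions a named resource and returns a closure that undoes it.
// The closure captures the name, so teardown needs no extra bookkeeping.
func create(name string) (func() error, error) {
	fmt.Println("created", name)
	return func() error {
		fmt.Println("deleted", name)
		return nil
	}, nil
}

func main() {
	cleanup, err := create("pvc-1")
	if err != nil {
		panic(err)
	}
	// Deferred cleanups run in reverse order of creation.
	defer cleanup()

	fmt.Println("test body runs here")
}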
26
vendor/k8s.io/kubernetes/test/e2e/storage/utils/pod.go
generated
vendored
@ -19,6 +19,9 @@ package utils
import (
"context"
"fmt"
"io"
"os"
"path"
"regexp"
"strings"
"time"
@ -46,6 +49,8 @@ func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func()

ns := driverNamespace.Name

podEventLog := ginkgo.GinkgoWriter
var podEventLogCloser io.Closer
to := podlogs.LogOutput{
StatusWriter: ginkgo.GinkgoWriter,
}
@ -69,17 +74,22 @@ func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func()
// keeps each directory name smaller (the full test
// name at one point exceeded 256 characters, which was
// too much for some filesystems).
to.LogPathPrefix = framework.TestContext.ReportDir + "/" +
strings.Join(components, "/") + "/"
logDir := framework.TestContext.ReportDir + "/" + strings.Join(components, "/")
to.LogPathPrefix = logDir + "/"

err := os.MkdirAll(logDir, 0755)
framework.ExpectNoError(err, "create pod log directory")
f, err := os.Create(path.Join(logDir, "pod-event.log"))
framework.ExpectNoError(err, "create pod events log file")
podEventLog = f
podEventLogCloser = f
}
podlogs.CopyAllLogs(ctx, cs, ns, to)

// pod events are something that the framework already collects itself
// after a failed test. Logging them live is only useful for interactive
// debugging, not when we collect reports.
if framework.TestContext.ReportDir == "" {
podlogs.WatchPods(ctx, cs, ns, ginkgo.GinkgoWriter)
}
// The framework doesn't know about the driver pods because of
// the separate namespace. Therefore we always capture the
// events ourselves.
podlogs.WatchPods(ctx, cs, ns, podEventLog, podEventLogCloser)

return cancel
}
4
vendor/k8s.io/kubernetes/test/e2e/storage/utils/snapshot.go
generated
vendored
@ -130,7 +130,6 @@ func GenerateSnapshotClassSpec(
snapshotter string,
parameters map[string]string,
ns string,
suffix string,
) *unstructured.Unstructured {
snapshotClass := &unstructured.Unstructured{
Object: map[string]interface{}{
@ -138,8 +137,7 @@ func GenerateSnapshotClassSpec(
"apiVersion": SnapshotAPIVersion,
"metadata": map[string]interface{}{
// Name must be unique, so let's base it on namespace name and use GenerateName
// TODO(#96234): Remove unnecessary suffix.
"name": names.SimpleNameGenerator.GenerateName(ns + "-" + suffix),
"name": names.SimpleNameGenerator.GenerateName(ns),
},
"driver": snapshotter,
"parameters": parameters,
11
vendor/k8s.io/kubernetes/test/e2e/storage/utils/utils.go
generated
vendored
@ -24,6 +24,7 @@ import (
"math"
"math/rand"
"path/filepath"
"strconv"
"strings"
"time"

@ -571,6 +572,16 @@ func CheckWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.Persistent
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s %s bs=%d count=1", encoded, pathForVolMode, oflag, len))
}

// GetSectorSize returns the sector size of the device.
func GetSectorSize(f *framework.Framework, pod *v1.Pod, device string) int {
stdout, _, err := e2evolume.PodExec(f, pod, fmt.Sprintf("blockdev --getss %s", device))
framework.ExpectNoError(err, "Failed to get sector size of %s", device)
ss, err := strconv.Atoi(stdout)
framework.ExpectNoError(err, "Sector size returned by blockdev command isn't integer value.")

return ss
}

// findMountPoints returns all mount points on given node under specified directory.
func findMountPoints(hostExec HostExec, node *v1.Node, dir string) []string {
result, err := hostExec.IssueCommandWithResult(fmt.Sprintf(`find %s -type d -exec mountpoint {} \; | grep 'is a mountpoint$' || true`, dir), node)
52
vendor/k8s.io/kubernetes/test/utils/create_resources.go
generated
vendored
@ -55,182 +55,182 @@ func RetryWithExponentialBackOff(fn wait.ConditionFunc) error {

func CreatePodWithRetries(c clientset.Interface, namespace string, obj *v1.Pod) error {
if obj == nil {
return fmt.Errorf("Object provided to create is empty")
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.CoreV1().Pods(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
return false, fmt.Errorf("failed to create object with non-retriable error: %v ", err)
}
return RetryWithExponentialBackOff(createFunc)
}

func CreateRCWithRetries(c clientset.Interface, namespace string, obj *v1.ReplicationController) error {
if obj == nil {
return fmt.Errorf("Object provided to create is empty")
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.CoreV1().ReplicationControllers(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
}

func CreateReplicaSetWithRetries(c clientset.Interface, namespace string, obj *apps.ReplicaSet) error {
if obj == nil {
return fmt.Errorf("Object provided to create is empty")
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.AppsV1().ReplicaSets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
}

func CreateDeploymentWithRetries(c clientset.Interface, namespace string, obj *apps.Deployment) error {
if obj == nil {
return fmt.Errorf("Object provided to create is empty")
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.AppsV1().Deployments(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
}

func CreateDaemonSetWithRetries(c clientset.Interface, namespace string, obj *apps.DaemonSet) error {
if obj == nil {
return fmt.Errorf("Object provided to create is empty")
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.AppsV1().DaemonSets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
}

func CreateJobWithRetries(c clientset.Interface, namespace string, obj *batch.Job) error {
if obj == nil {
return fmt.Errorf("Object provided to create is empty")
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.BatchV1().Jobs(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
}

func CreateSecretWithRetries(c clientset.Interface, namespace string, obj *v1.Secret) error {
if obj == nil {
return fmt.Errorf("Object provided to create is empty")
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.CoreV1().Secrets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
}

func CreateConfigMapWithRetries(c clientset.Interface, namespace string, obj *v1.ConfigMap) error {
if obj == nil {
return fmt.Errorf("Object provided to create is empty")
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.CoreV1().ConfigMaps(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
}

func CreateServiceWithRetries(c clientset.Interface, namespace string, obj *v1.Service) error {
if obj == nil {
return fmt.Errorf("Object provided to create is empty")
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.CoreV1().Services(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
}

func CreateStorageClassWithRetries(c clientset.Interface, obj *storage.StorageClass) error {
if obj == nil {
return fmt.Errorf("Object provided to create is empty")
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.StorageV1().StorageClasses().Create(context.TODO(), obj, metav1.CreateOptions{})
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
}

func CreateResourceQuotaWithRetries(c clientset.Interface, namespace string, obj *v1.ResourceQuota) error {
if obj == nil {
return fmt.Errorf("Object provided to create is empty")
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
}

func CreatePersistentVolumeWithRetries(c clientset.Interface, obj *v1.PersistentVolume) error {
if obj == nil {
return fmt.Errorf("Object provided to create is empty")
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), obj, metav1.CreateOptions{})
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
}

func CreatePersistentVolumeClaimWithRetries(c clientset.Interface, namespace string, obj *v1.PersistentVolumeClaim) error {
if obj == nil {
return fmt.Errorf("Object provided to create is empty")
return fmt.Errorf("object provided to create is empty")
}
createFunc := func() (bool, error) {
_, err := c.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), obj, metav1.CreateOptions{})
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v", err)
return false, fmt.Errorf("failed to create object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(createFunc)
}
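All of the Create*WithRetries helpers share one shape: treat IsAlreadyExists as success and let RetryWithExponentialBackOff drive the retries, where returning a non-nil error aborts immediately and returning (false, nil) retries. A sketch of that shape using k8s.io/apimachinery's wait package (backoff parameters are illustrative):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	backoff := wait.Backoff{
		Duration: 100 * time.Millisecond, // first delay
		Factor:   2.0,                    // double each attempt
		Steps:    5,                      // give up after five tries
	}

	attempts := 0
	createFunc := func() (bool, error) {
		attempts++
		if attempts < 3 {
			return false, nil // not done yet, retry with backoff
		}
		return true, nil // created (or already exists)
	}

	if err := wait.ExponentialBackoff(backoff, createFunc); err != nil {
		fmt.Println("gave up:", err)
		return
	}
	fmt.Printf("succeeded after %d attempts\n", attempts)
}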
4
vendor/k8s.io/kubernetes/test/utils/delete_resources.go
generated
vendored
@ -53,7 +53,7 @@ func deleteResource(c clientset.Interface, kind schema.GroupKind, namespace, nam
case api.Kind("Service"):
return c.CoreV1().Services(namespace).Delete(context.TODO(), name, options)
default:
return fmt.Errorf("Unsupported kind when deleting: %v", kind)
return fmt.Errorf("unsupported kind when deleting: %v", kind)
}
}

@ -63,7 +63,7 @@ func DeleteResourceWithRetries(c clientset.Interface, kind schema.GroupKind, nam
if err == nil || apierrors.IsNotFound(err) {
return true, nil
}
return false, fmt.Errorf("Failed to delete object with non-retriable error: %v", err)
return false, fmt.Errorf("failed to delete object with non-retriable error: %v", err)
}
return RetryWithExponentialBackOff(deleteFunc)
}
3
vendor/k8s.io/kubernetes/test/utils/deployment.go
generated
vendored
@ -22,7 +22,6 @@ import (
"time"

"github.com/davecgh/go-spew/spew"
"github.com/pkg/errors"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
@ -201,7 +200,7 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
}
if err != nil {
if deployment == nil {
return errors.Wrapf(err, "error creating new replica set for deployment %q ", deploymentName)
return fmt.Errorf("error creating new replica set for deployment %q: %w", deploymentName, err)
}
deploymentImage := ""
if len(deployment.Spec.Template.Spec.Containers) > 0 {
221
vendor/k8s.io/kubernetes/test/utils/image/manifest.go
generated
vendored
221
vendor/k8s.io/kubernetes/test/utils/image/manifest.go
generated
vendored
@ -25,23 +25,25 @@ import (
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// RegistryList holds public and private image registries
|
||||
type RegistryList struct {
|
||||
GcAuthenticatedRegistry string `yaml:"gcAuthenticatedRegistry"`
|
||||
E2eRegistry string `yaml:"e2eRegistry"`
|
||||
PromoterE2eRegistry string `yaml:"promoterE2eRegistry"`
|
||||
BuildImageRegistry string `yaml:"buildImageRegistry"`
|
||||
InvalidRegistry string `yaml:"invalidRegistry"`
|
||||
GcEtcdRegistry string `yaml:"gcEtcdRegistry"`
|
||||
GcRegistry string `yaml:"gcRegistry"`
|
||||
SigStorageRegistry string `yaml:"sigStorageRegistry"`
|
||||
GcrReleaseRegistry string `yaml:"gcrReleaseRegistry"`
|
||||
PrivateRegistry string `yaml:"privateRegistry"`
|
||||
SampleRegistry string `yaml:"sampleRegistry"`
|
||||
MicrosoftRegistry string `yaml:"microsoftRegistry"`
|
||||
GcAuthenticatedRegistry string `yaml:"gcAuthenticatedRegistry"`
|
||||
E2eRegistry string `yaml:"e2eRegistry"`
|
||||
PromoterE2eRegistry string `yaml:"promoterE2eRegistry"`
|
||||
BuildImageRegistry string `yaml:"buildImageRegistry"`
|
||||
InvalidRegistry string `yaml:"invalidRegistry"`
|
||||
GcEtcdRegistry string `yaml:"gcEtcdRegistry"`
|
||||
GcRegistry string `yaml:"gcRegistry"`
|
||||
SigStorageRegistry string `yaml:"sigStorageRegistry"`
|
||||
GcrReleaseRegistry string `yaml:"gcrReleaseRegistry"`
|
||||
PrivateRegistry string `yaml:"privateRegistry"`
|
||||
SampleRegistry string `yaml:"sampleRegistry"`
|
||||
MicrosoftRegistry string `yaml:"microsoftRegistry"`
|
||||
DockerLibraryRegistry string `yaml:"dockerLibraryRegistry"`
|
||||
CloudProviderGcpRegistry string `yaml:"cloudProviderGcpRegistry"`
|
||||
}
|
||||
|
||||
// Config holds an images registry, name, and version
|
||||
@ -67,20 +69,8 @@ func (i *Config) SetVersion(version string) {
|
||||
}
|
||||
|
||||
func initReg() RegistryList {
|
||||
registry := RegistryList{
|
||||
GcAuthenticatedRegistry: "gcr.io/authenticated-image-pulling",
|
||||
E2eRegistry: "gcr.io/kubernetes-e2e-test-images",
|
||||
PromoterE2eRegistry: "k8s.gcr.io/e2e-test-images",
|
||||
BuildImageRegistry: "k8s.gcr.io/build-image",
|
||||
InvalidRegistry: "invalid.com/invalid",
|
||||
GcEtcdRegistry: "k8s.gcr.io",
|
||||
GcRegistry: "k8s.gcr.io",
|
||||
SigStorageRegistry: "k8s.gcr.io/sig-storage",
|
||||
PrivateRegistry: "gcr.io/k8s-authenticated-test",
|
||||
SampleRegistry: "gcr.io/google-samples",
|
||||
GcrReleaseRegistry: "gcr.io/gke-release",
|
||||
MicrosoftRegistry: "mcr.microsoft.com",
|
||||
}
|
||||
registry := initRegistry
|
||||
|
||||
repoList := os.Getenv("KUBE_TEST_REPO_LIST")
|
||||
if repoList == "" {
|
||||
return registry
|
||||
@ -99,26 +89,27 @@ func initReg() RegistryList {
|
||||
}
|
||||
|
||||
var (
|
||||
initRegistry = RegistryList{
|
||||
GcAuthenticatedRegistry: "gcr.io/authenticated-image-pulling",
|
||||
E2eRegistry: "gcr.io/kubernetes-e2e-test-images",
|
||||
PromoterE2eRegistry: "k8s.gcr.io/e2e-test-images",
|
||||
BuildImageRegistry: "k8s.gcr.io/build-image",
|
||||
InvalidRegistry: "invalid.com/invalid",
|
||||
GcEtcdRegistry: "k8s.gcr.io",
|
||||
GcRegistry: "k8s.gcr.io",
|
||||
SigStorageRegistry: "k8s.gcr.io/sig-storage",
|
||||
PrivateRegistry: "gcr.io/k8s-authenticated-test",
|
||||
SampleRegistry: "gcr.io/google-samples",
|
||||
GcrReleaseRegistry: "gcr.io/gke-release",
|
||||
MicrosoftRegistry: "mcr.microsoft.com",
|
||||
DockerLibraryRegistry: "docker.io/library",
|
||||
CloudProviderGcpRegistry: "k8s.gcr.io/cloud-provider-gcp",
|
||||
}
|
||||
|
||||
registry = initReg()
|
||||
|
||||
// PrivateRegistry is an image repository that requires authentication
|
||||
PrivateRegistry = registry.PrivateRegistry
|
||||
|
||||
// Preconfigured image configs
|
||||
dockerLibraryRegistry = "docker.io/library"
|
||||
e2eRegistry = registry.E2eRegistry
|
||||
promoterE2eRegistry = registry.PromoterE2eRegistry
|
||||
buildImageRegistry = registry.BuildImageRegistry
|
||||
gcAuthenticatedRegistry = registry.GcAuthenticatedRegistry
|
||||
gcEtcdRegistry = registry.GcEtcdRegistry
|
||||
gcRegistry = registry.GcRegistry
|
||||
sigStorageRegistry = registry.SigStorageRegistry
|
||||
gcrReleaseRegistry = registry.GcrReleaseRegistry
|
||||
invalidRegistry = registry.InvalidRegistry
|
||||
sampleRegistry = registry.SampleRegistry
|
||||
microsoftRegistry = registry.MicrosoftRegistry
|
||||
|
||||
imageConfigs, originalImageConfigs = initImageConfigs()
|
||||
imageConfigs, originalImageConfigs = initImageConfigs(registry)
|
||||
)
|
||||
|
||||
const (
|
||||
@ -211,51 +202,51 @@ const (
|
||||
WindowsServer
|
||||
)
|
||||
|
||||
func initImageConfigs() (map[int]Config, map[int]Config) {
|
||||
func initImageConfigs(list RegistryList) (map[int]Config, map[int]Config) {
|
||||
configs := map[int]Config{}
|
||||
configs[Agnhost] = Config{promoterE2eRegistry, "agnhost", "2.32"}
|
||||
configs[AgnhostPrivate] = Config{PrivateRegistry, "agnhost", "2.6"}
|
||||
configs[AuthenticatedAlpine] = Config{gcAuthenticatedRegistry, "alpine", "3.7"}
|
||||
configs[AuthenticatedWindowsNanoServer] = Config{gcAuthenticatedRegistry, "windows-nanoserver", "v1"}
|
||||
configs[APIServer] = Config{promoterE2eRegistry, "sample-apiserver", "1.17.4"}
|
||||
configs[AppArmorLoader] = Config{promoterE2eRegistry, "apparmor-loader", "1.3"}
|
||||
configs[BusyBox] = Config{promoterE2eRegistry, "busybox", "1.29-1"}
|
||||
configs[CheckMetadataConcealment] = Config{promoterE2eRegistry, "metadata-concealment", "1.6"}
|
||||
configs[CudaVectorAdd] = Config{e2eRegistry, "cuda-vector-add", "1.0"}
|
||||
configs[CudaVectorAdd2] = Config{promoterE2eRegistry, "cuda-vector-add", "2.2"}
|
||||
configs[DebianIptables] = Config{buildImageRegistry, "debian-iptables", "buster-v1.6.2"}
|
||||
configs[EchoServer] = Config{promoterE2eRegistry, "echoserver", "2.3"}
|
||||
configs[Etcd] = Config{gcEtcdRegistry, "etcd", "3.4.13-0"}
|
||||
configs[GlusterDynamicProvisioner] = Config{promoterE2eRegistry, "glusterdynamic-provisioner", "v1.0"}
|
||||
configs[Httpd] = Config{promoterE2eRegistry, "httpd", "2.4.38-1"}
|
||||
configs[HttpdNew] = Config{promoterE2eRegistry, "httpd", "2.4.39-1"}
|
||||
configs[InvalidRegistryImage] = Config{invalidRegistry, "alpine", "3.1"}
|
||||
configs[IpcUtils] = Config{promoterE2eRegistry, "ipc-utils", "1.2"}
|
||||
configs[JessieDnsutils] = Config{promoterE2eRegistry, "jessie-dnsutils", "1.4"}
|
||||
configs[Kitten] = Config{promoterE2eRegistry, "kitten", "1.4"}
|
||||
configs[Nautilus] = Config{promoterE2eRegistry, "nautilus", "1.4"}
|
||||
configs[NFSProvisioner] = Config{sigStorageRegistry, "nfs-provisioner", "v2.2.2"}
|
||||
configs[Nginx] = Config{promoterE2eRegistry, "nginx", "1.14-1"}
|
||||
configs[NginxNew] = Config{promoterE2eRegistry, "nginx", "1.15-1"}
|
||||
configs[NodePerfNpbEp] = Config{promoterE2eRegistry, "node-perf/npb-ep", "1.1"}
|
||||
configs[NodePerfNpbIs] = Config{promoterE2eRegistry, "node-perf/npb-is", "1.1"}
|
||||
configs[NodePerfTfWideDeep] = Config{promoterE2eRegistry, "node-perf/tf-wide-deep", "1.1"}
|
||||
configs[Nonewprivs] = Config{promoterE2eRegistry, "nonewprivs", "1.3"}
|
||||
configs[NonRoot] = Config{promoterE2eRegistry, "nonroot", "1.1"}
|
||||
configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.32"}
|
||||
configs[AgnhostPrivate] = Config{list.PrivateRegistry, "agnhost", "2.6"}
|
||||
configs[AuthenticatedAlpine] = Config{list.GcAuthenticatedRegistry, "alpine", "3.7"}
|
||||
configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"}
|
||||
configs[APIServer] = Config{list.PromoterE2eRegistry, "sample-apiserver", "1.17.4"}
|
||||
configs[AppArmorLoader] = Config{list.PromoterE2eRegistry, "apparmor-loader", "1.3"}
|
||||
configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.29-1"}
|
||||
configs[CheckMetadataConcealment] = Config{list.PromoterE2eRegistry, "metadata-concealment", "1.6"}
|
||||
configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"}
|
||||
configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.2"}
|
||||
configs[DebianIptables] = Config{list.BuildImageRegistry, "debian-iptables", "buster-v1.6.5"}
|
||||
configs[EchoServer] = Config{list.PromoterE2eRegistry, "echoserver", "2.3"}
|
||||
configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.4.13-0"}
|
||||
configs[GlusterDynamicProvisioner] = Config{list.PromoterE2eRegistry, "glusterdynamic-provisioner", "v1.0"}
|
||||
configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-1"}
|
||||
configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-1"}
|
||||
configs[InvalidRegistryImage] = Config{list.InvalidRegistry, "alpine", "3.1"}
|
||||
configs[IpcUtils] = Config{list.PromoterE2eRegistry, "ipc-utils", "1.2"}
|
||||
configs[JessieDnsutils] = Config{list.PromoterE2eRegistry, "jessie-dnsutils", "1.4"}
|
||||
configs[Kitten] = Config{list.PromoterE2eRegistry, "kitten", "1.4"}
|
||||
configs[Nautilus] = Config{list.PromoterE2eRegistry, "nautilus", "1.4"}
|
||||
configs[NFSProvisioner] = Config{list.SigStorageRegistry, "nfs-provisioner", "v2.2.2"}
|
||||
configs[Nginx] = Config{list.PromoterE2eRegistry, "nginx", "1.14-1"}
|
||||
configs[NginxNew] = Config{list.PromoterE2eRegistry, "nginx", "1.15-1"}
|
||||
configs[NodePerfNpbEp] = Config{list.PromoterE2eRegistry, "node-perf/npb-ep", "1.1"}
|
||||
configs[NodePerfNpbIs] = Config{list.PromoterE2eRegistry, "node-perf/npb-is", "1.1"}
|
||||
configs[NodePerfTfWideDeep] = Config{list.PromoterE2eRegistry, "node-perf/tf-wide-deep", "1.1"}
|
||||
configs[Nonewprivs] = Config{list.PromoterE2eRegistry, "nonewprivs", "1.3"}
|
||||
configs[NonRoot] = Config{list.PromoterE2eRegistry, "nonroot", "1.1"}
|
||||
// Pause - when these values are updated, also update cmd/kubelet/app/options/container_runtime.go
|
||||
configs[Pause] = Config{gcRegistry, "pause", "3.4.1"}
|
||||
configs[Perl] = Config{promoterE2eRegistry, "perl", "5.26"}
|
||||
configs[PrometheusDummyExporter] = Config{gcRegistry, "prometheus-dummy-exporter", "v0.1.0"}
|
||||
configs[PrometheusToSd] = Config{gcRegistry, "prometheus-to-sd", "v0.5.0"}
|
||||
configs[Redis] = Config{promoterE2eRegistry, "redis", "5.0.5-alpine"}
|
||||
configs[RegressionIssue74839] = Config{promoterE2eRegistry, "regression-issue-74839", "1.2"}
|
||||
configs[ResourceConsumer] = Config{promoterE2eRegistry, "resource-consumer", "1.9"}
|
||||
configs[SdDummyExporter] = Config{gcRegistry, "sd-dummy-exporter", "v0.2.0"}
|
||||
configs[VolumeNFSServer] = Config{promoterE2eRegistry, "volume/nfs", "1.2"}
|
||||
configs[VolumeISCSIServer] = Config{promoterE2eRegistry, "volume/iscsi", "2.2"}
|
||||
configs[VolumeGlusterServer] = Config{promoterE2eRegistry, "volume/gluster", "1.2"}
|
||||
configs[VolumeRBDServer] = Config{promoterE2eRegistry, "volume/rbd", "1.0.3"}
|
||||
configs[WindowsServer] = Config{microsoftRegistry, "windows", "1809"}
|
||||
configs[Pause] = Config{list.GcRegistry, "pause", "3.5"}
|
||||
configs[Perl] = Config{list.PromoterE2eRegistry, "perl", "5.26"}
|
||||
configs[PrometheusDummyExporter] = Config{list.GcRegistry, "prometheus-dummy-exporter", "v0.1.0"}
|
||||
configs[PrometheusToSd] = Config{list.GcRegistry, "prometheus-to-sd", "v0.5.0"}
|
||||
configs[Redis] = Config{list.PromoterE2eRegistry, "redis", "5.0.5-alpine"}
|
||||
configs[RegressionIssue74839] = Config{list.PromoterE2eRegistry, "regression-issue-74839", "1.2"}
|
||||
configs[ResourceConsumer] = Config{list.PromoterE2eRegistry, "resource-consumer", "1.9"}
|
||||
configs[SdDummyExporter] = Config{list.GcRegistry, "sd-dummy-exporter", "v0.2.0"}
|
||||
configs[VolumeNFSServer] = Config{list.PromoterE2eRegistry, "volume/nfs", "1.2"}
|
||||
configs[VolumeISCSIServer] = Config{list.PromoterE2eRegistry, "volume/iscsi", "2.2"}
|
||||
configs[VolumeGlusterServer] = Config{list.PromoterE2eRegistry, "volume/gluster", "1.2"}
|
||||
configs[VolumeRBDServer] = Config{list.PromoterE2eRegistry, "volume/rbd", "1.0.3"}
|
||||
configs[WindowsServer] = Config{list.MicrosoftRegistry, "windows", "1809"}
|
||||
|
||||
// if requested, map all the SHAs into a known format based on the input
|
||||
originalImageConfigs := configs
|
||||
@ -306,7 +297,7 @@ func getRepositoryMappedConfig(index int, config Config, repo string) Config {
|
||||
|
||||
h := sha256.New()
|
||||
h.Write([]byte(pullSpec))
|
||||
hash := base64.RawURLEncoding.EncodeToString(h.Sum(nil)[:16])
|
||||
hash := base64.RawURLEncoding.EncodeToString(h.Sum(nil))[:16]
|
||||
|
||||
	shortName := reCharSafe.ReplaceAllLiteralString(pullSpec, "-")
	shortName = reDashes.ReplaceAllLiteralString(shortName, "-")
@ -358,8 +349,15 @@ func GetPauseImageName() string {
	return GetE2EImage(Pause)
}

// ReplaceRegistryInImageURL replaces the registry in the image URL with a custom one
// ReplaceRegistryInImageURL replaces the registry in the image URL with a custom one based
// on the configured registries.
func ReplaceRegistryInImageURL(imageURL string) (string, error) {
	return replaceRegistryInImageURLWithList(imageURL, registry)
}

// replaceRegistryInImageURLWithList replaces the registry in the image URL with a custom one based
// on the given registry list.
func replaceRegistryInImageURLWithList(imageURL string, reg RegistryList) (string, error) {
	parts := strings.Split(imageURL, "/")
	countParts := len(parts)
	registryAndUser := strings.Join(parts[:countParts-1], "/")
@ -373,6 +371,9 @@ func ReplaceRegistryInImageURL(imageURL string) (string, error) {
		}
	}
	last := strings.SplitN(parts[countParts-1], ":", 2)
	if len(last) == 1 {
		return "", fmt.Errorf("image %q is required to be in an image:tag format", imageURL)
	}
	config := getRepositoryMappedConfig(index, Config{
		registry: parts[0],
		name:     strings.Join([]string{strings.Join(parts[1:countParts-1], "/"), last[0]}, "/"),
@ -382,25 +383,37 @@ func ReplaceRegistryInImageURL(imageURL string) (string, error) {
	}

	switch registryAndUser {
	case "gcr.io/kubernetes-e2e-test-images":
		registryAndUser = e2eRegistry
	case "k8s.gcr.io":
		registryAndUser = gcRegistry
	case "k8s.gcr.io/sig-storage":
		registryAndUser = sigStorageRegistry
	case "gcr.io/k8s-authenticated-test":
		registryAndUser = PrivateRegistry
	case "gcr.io/google-samples":
		registryAndUser = sampleRegistry
	case "gcr.io/gke-release":
		registryAndUser = gcrReleaseRegistry
	case "docker.io/library":
		registryAndUser = dockerLibraryRegistry
	case initRegistry.E2eRegistry:
		registryAndUser = reg.E2eRegistry
	case initRegistry.GcRegistry:
		registryAndUser = reg.GcRegistry
	case initRegistry.SigStorageRegistry:
		registryAndUser = reg.SigStorageRegistry
	case initRegistry.PrivateRegistry:
		registryAndUser = reg.PrivateRegistry
	case initRegistry.SampleRegistry:
		registryAndUser = reg.SampleRegistry
	case initRegistry.GcrReleaseRegistry:
		registryAndUser = reg.GcrReleaseRegistry
	case initRegistry.InvalidRegistry:
		registryAndUser = reg.InvalidRegistry
	case initRegistry.MicrosoftRegistry:
		registryAndUser = reg.MicrosoftRegistry
	case initRegistry.PromoterE2eRegistry:
		registryAndUser = reg.PromoterE2eRegistry
	case initRegistry.BuildImageRegistry:
		registryAndUser = reg.BuildImageRegistry
	case initRegistry.GcAuthenticatedRegistry:
		registryAndUser = reg.GcAuthenticatedRegistry
	case initRegistry.DockerLibraryRegistry:
		registryAndUser = reg.DockerLibraryRegistry
	case initRegistry.CloudProviderGcpRegistry:
		registryAndUser = reg.CloudProviderGcpRegistry
	default:
		if countParts == 1 {
			// We assume we found an image from docker hub library
			// e.g. openjdk -> docker.io/library/openjdk
			registryAndUser = dockerLibraryRegistry
			registryAndUser = reg.DockerLibraryRegistry
			break
		}
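Note: the hunk above splits ReplaceRegistryInImageURL into a thin wrapper plus replaceRegistryInImageURLWithList, which takes the registry list as a parameter instead of reading the package-level default. A minimal sketch of the same pattern, using an illustrative local type (not the vendored RegistryList) so the mapping can be exercised with a fake list:

package main

import (
	"fmt"
	"strings"
)

// registryList stands in for the framework's registry table; the field
// names here are illustrative, not the vendored type.
type registryList struct {
	GcRegistry            string
	DockerLibraryRegistry string
}

// replaceRegistry mirrors the refactor above: the lookup table is an
// explicit parameter, so tests can inject a fake instead of a global.
func replaceRegistry(imageURL string, reg registryList) (string, error) {
	parts := strings.Split(imageURL, "/")
	last := parts[len(parts)-1]
	if !strings.Contains(last, ":") {
		return "", fmt.Errorf("image %q is required to be in an image:tag format", imageURL)
	}
	switch strings.Join(parts[:len(parts)-1], "/") {
	case "k8s.gcr.io":
		return reg.GcRegistry + "/" + last, nil
	case "docker.io/library":
		return reg.DockerLibraryRegistry + "/" + last, nil
	}
	return imageURL, nil
}

func main() {
	fake := registryList{
		GcRegistry:            "mirror.example.com/k8s",
		DockerLibraryRegistry: "mirror.example.com/library",
	}
	out, _ := replaceRegistry("k8s.gcr.io/pause:3.5", fake)
	fmt.Println(out) // mirror.example.com/k8s/pause:3.5
}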
7
vendor/k8s.io/kubernetes/test/utils/pki_helpers.go
generated
vendored
7
vendor/k8s.io/kubernetes/test/utils/pki_helpers.go
generated
vendored
@ -23,12 +23,11 @@ import (
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"math"
	"math/big"
	"time"

	"github.com/pkg/errors"

	certutil "k8s.io/client-go/util/cert"
)

@ -59,10 +58,10 @@ func NewSignedCert(cfg *certutil.Config, key crypto.Signer, caCert *x509.Certifi
		return nil, err
	}
	if len(cfg.CommonName) == 0 {
		return nil, errors.New("must specify a CommonName")
		return nil, fmt.Errorf("must specify a CommonName")
	}
	if len(cfg.Usages) == 0 {
		return nil, errors.New("must specify at least one ExtKeyUsage")
		return nil, fmt.Errorf("must specify at least one ExtKeyUsage")
	}

	certTmpl := x509.Certificate{
57
vendor/k8s.io/kubernetes/test/utils/runners.go
generated
vendored
57
vendor/k8s.io/kubernetes/test/utils/runners.go
generated
vendored
@ -80,7 +80,7 @@ func WaitUntilPodIsScheduled(c clientset.Interface, name, namespace string, time
			return p, nil
		}
	}
	return nil, fmt.Errorf("Timed out after %v when waiting for pod %v/%v to start.", timeout, namespace, name)
	return nil, fmt.Errorf("timed out after %v when waiting for pod %v/%v to start", timeout, namespace, name)
}

func RunPodAndGetNodeName(c clientset.Interface, pod *v1.Pod, timeout time.Duration) (string, error) {
@ -357,7 +357,7 @@ func (config *DeploymentConfig) create() error {
	config.applyTo(&deployment.Spec.Template)

	if err := CreateDeploymentWithRetries(config.Client, config.Namespace, deployment); err != nil {
		return fmt.Errorf("Error creating deployment: %v", err)
		return fmt.Errorf("error creating deployment: %v", err)
	}
	config.RCConfigLog("Created deployment with name: %v, namespace: %v, replica count: %v", deployment.Name, config.Namespace, removePtr(deployment.Spec.Replicas))
	return nil
@ -435,7 +435,7 @@ func (config *ReplicaSetConfig) create() error {
	config.applyTo(&rs.Spec.Template)

	if err := CreateReplicaSetWithRetries(config.Client, config.Namespace, rs); err != nil {
		return fmt.Errorf("Error creating replica set: %v", err)
		return fmt.Errorf("error creating replica set: %v", err)
	}
	config.RCConfigLog("Created replica set with name: %v, namespace: %v, replica count: %v", rs.Name, config.Namespace, removePtr(rs.Spec.Replicas))
	return nil
@ -509,7 +509,7 @@ func (config *JobConfig) create() error {
	config.applyTo(&job.Spec.Template)

	if err := CreateJobWithRetries(config.Client, config.Namespace, job); err != nil {
		return fmt.Errorf("Error creating job: %v", err)
		return fmt.Errorf("error creating job: %v", err)
	}
	config.RCConfigLog("Created job with name: %v, namespace: %v, parallelism/completions: %v", job.Name, config.Namespace, job.Spec.Parallelism)
	return nil
@ -628,7 +628,7 @@ func (config *RCConfig) create() error {
	config.applyTo(rc.Spec.Template)

	if err := CreateRCWithRetries(config.Client, config.Namespace, rc); err != nil {
		return fmt.Errorf("Error creating replication controller: %v", err)
		return fmt.Errorf("error creating replication controller: %v", err)
	}
	config.RCConfigLog("Created replication controller with name: %v, namespace: %v, replica count: %v", rc.Name, config.Namespace, removePtr(rc.Spec.Replicas))
	return nil
@ -850,7 +850,7 @@ func (config *RCConfig) start() error {
		} else {
			config.RCConfigLog("Can't list pod debug info: %v", err)
		}
		return fmt.Errorf("Only %d pods started out of %d", oldRunning, config.Replicas)
		return fmt.Errorf("only %d pods started out of %d", oldRunning, config.Replicas)
	}
	return nil
}
@ -880,7 +880,7 @@ func StartPods(c clientset.Interface, replicas int, namespace string, podNamePre
		label := labels.SelectorFromSet(labels.Set(map[string]string{"startPodsID": startPodsID}))
		err := WaitForPodsWithLabelRunning(c, namespace, label)
		if err != nil {
			return fmt.Errorf("Error waiting for %d pods to be running - probably a timeout: %v", replicas, err)
			return fmt.Errorf("error waiting for %d pods to be running - probably a timeout: %v", replicas, err)
		}
	}
	return nil
@ -920,7 +920,7 @@ func WaitForEnoughPodsWithLabelRunning(c clientset.Interface, ns string, label l
		break
	}
	if !running {
		return fmt.Errorf("Timeout while waiting for pods with labels %q to be running", label.String())
		return fmt.Errorf("timeout while waiting for pods with labels %q to be running", label.String())
	}
	return nil
}
@ -1194,12 +1194,12 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo
			break
		}
		if !apierrors.IsConflict(err) {
			return fmt.Errorf("Error while applying patch %v to Node %v: %v", string(patch), node.Name, err)
			return fmt.Errorf("error while applying patch %v to Node %v: %v", string(patch), node.Name, err)
		}
		time.Sleep(100 * time.Millisecond)
	}
	if err != nil {
		return fmt.Errorf("Too many conflicts when applying patch %v to Node %v: %s", string(patch), node.Name, err)
		return fmt.Errorf("too many conflicts when applying patch %v to Node %v: %s", string(patch), node.Name, err)
	}

	for attempt := 0; attempt < retries; attempt++ {
@ -1207,12 +1207,12 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo
			break
		}
		if !apierrors.IsConflict(err) {
			return fmt.Errorf("Error while preparing objects for node %s: %s", node.Name, err)
			return fmt.Errorf("error while preparing objects for node %s: %s", node.Name, err)
		}
		time.Sleep(100 * time.Millisecond)
	}
	if err != nil {
		return fmt.Errorf("Too many conflicts when creating objects for node %s: %s", node.Name, err)
		return fmt.Errorf("too many conflicts when creating objects for node %s: %s", node.Name, err)
	}
	return nil
}
@ -1220,9 +1220,10 @@ func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNo
func DoCleanupNode(client clientset.Interface, nodeName string, strategy PrepareNodeStrategy) error {
	var err error
	for attempt := 0; attempt < retries; attempt++ {
		node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
		var node *v1.Node
		node, err = client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
		if err != nil {
			return fmt.Errorf("Skipping cleanup of Node: failed to get Node %v: %v", nodeName, err)
			return fmt.Errorf("skipping cleanup of Node: failed to get Node %v: %v", nodeName, err)
		}
		updatedNode := strategy.CleanupNode(node)
		if apiequality.Semantic.DeepEqual(node, updatedNode) {
@ -1232,12 +1233,12 @@ func DoCleanupNode(client clientset.Interface, nodeName string, strategy Prepare
			break
		}
		if !apierrors.IsConflict(err) {
			return fmt.Errorf("Error when updating Node %v: %v", nodeName, err)
			return fmt.Errorf("error when updating Node %v: %v", nodeName, err)
		}
		time.Sleep(100 * time.Millisecond)
	}
	if err != nil {
		return fmt.Errorf("Too many conflicts when trying to cleanup Node %v: %s", nodeName, err)
		return fmt.Errorf("too many conflicts when trying to cleanup Node %v: %s", nodeName, err)
	}

	for attempt := 0; attempt < retries; attempt++ {
@ -1246,12 +1247,12 @@ func DoCleanupNode(client clientset.Interface, nodeName string, strategy Prepare
			break
		}
		if !apierrors.IsConflict(err) {
			return fmt.Errorf("Error when cleaning up Node %v objects: %v", nodeName, err)
			return fmt.Errorf("error when cleaning up Node %v objects: %v", nodeName, err)
		}
		time.Sleep(100 * time.Millisecond)
	}
	if err != nil {
		return fmt.Errorf("Too many conflicts when trying to cleanup Node %v objects: %s", nodeName, err)
		return fmt.Errorf("too many conflicts when trying to cleanup Node %v objects: %s", nodeName, err)
	}
	return nil
}
@ -1303,7 +1304,7 @@ func MakePodSpec() v1.PodSpec {
	return v1.PodSpec{
		Containers: []v1.Container{{
			Name:  "pause",
			Image: "k8s.gcr.io/pause:3.4.1",
			Image: "k8s.gcr.io/pause:3.5",
			Ports: []v1.ContainerPort{{ContainerPort: 80}},
			Resources: v1.ResourceRequirements{
				Limits: v1.ResourceList{
@ -1321,7 +1322,7 @@ func MakePodSpec() v1.PodSpec {

func makeCreatePod(client clientset.Interface, namespace string, podTemplate *v1.Pod) error {
	if err := CreatePodWithRetries(client, namespace, podTemplate); err != nil {
		return fmt.Errorf("Error creating pod: %v", err)
		return fmt.Errorf("error creating pod: %v", err)
	}
	return nil
}
@ -1450,7 +1451,7 @@ func createController(client clientset.Interface, controllerName, namespace stri
		},
	}
	if err := CreateRCWithRetries(client, namespace, rc); err != nil {
		return fmt.Errorf("Error creating replication controller: %v", err)
		return fmt.Errorf("error creating replication controller: %v", err)
	}
	return nil
}
@ -1557,7 +1558,7 @@ func (config *SecretConfig) Run() error {
	}

	if err := CreateSecretWithRetries(config.Client, config.Namespace, secret); err != nil {
		return fmt.Errorf("Error creating secret: %v", err)
		return fmt.Errorf("error creating secret: %v", err)
	}
	config.LogFunc("Created secret %v/%v", config.Namespace, config.Name)
	return nil
@ -1565,7 +1566,7 @@ func (config *SecretConfig) Run() error {

func (config *SecretConfig) Stop() error {
	if err := DeleteResourceWithRetries(config.Client, api.Kind("Secret"), config.Namespace, config.Name, metav1.DeleteOptions{}); err != nil {
		return fmt.Errorf("Error deleting secret: %v", err)
		return fmt.Errorf("error deleting secret: %v", err)
	}
	config.LogFunc("Deleted secret %v/%v", config.Namespace, config.Name)
	return nil
@ -1615,7 +1616,7 @@ func (config *ConfigMapConfig) Run() error {
	}

	if err := CreateConfigMapWithRetries(config.Client, config.Namespace, configMap); err != nil {
		return fmt.Errorf("Error creating configmap: %v", err)
		return fmt.Errorf("error creating configmap: %v", err)
	}
	config.LogFunc("Created configmap %v/%v", config.Namespace, config.Name)
	return nil
@ -1623,7 +1624,7 @@ func (config *ConfigMapConfig) Run() error {

func (config *ConfigMapConfig) Stop() error {
	if err := DeleteResourceWithRetries(config.Client, api.Kind("ConfigMap"), config.Namespace, config.Name, metav1.DeleteOptions{}); err != nil {
		return fmt.Errorf("Error deleting configmap: %v", err)
		return fmt.Errorf("error deleting configmap: %v", err)
	}
	config.LogFunc("Deleted configmap %v/%v", config.Namespace, config.Name)
	return nil
@ -1725,7 +1726,7 @@ type DaemonConfig struct {

func (config *DaemonConfig) Run() error {
	if config.Image == "" {
		config.Image = "k8s.gcr.io/pause:3.4.1"
		config.Image = "k8s.gcr.io/pause:3.5"
	}
	nameLabel := map[string]string{
		"name": config.Name + "-daemon",
@ -1752,7 +1753,7 @@ func (config *DaemonConfig) Run() error {
	}

	if err := CreateDaemonSetWithRetries(config.Client, config.Namespace, daemon); err != nil {
		return fmt.Errorf("Error creating daemonset: %v", err)
		return fmt.Errorf("error creating daemonset: %v", err)
	}

	var nodes *v1.NodeList
@ -1763,7 +1764,7 @@ func (config *DaemonConfig) Run() error {
		if err == nil {
			break
		} else if i+1 == retries {
			return fmt.Errorf("Error listing Nodes while waiting for DaemonSet %v: %v", config.Name, err)
			return fmt.Errorf("error listing Nodes while waiting for DaemonSet %v: %v", config.Name, err)
		}
	}
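Note: besides the message casing, DoCleanupNode above gets a genuine bug fix. The old `node, err := ...` inside the retry loop declared a new err scoped to the loop body, shadowing the outer err, so the `if err != nil` check after the loop could never fire; declaring `var node *v1.Node` and assigning with `=` lets the final conflict error surface. A minimal reproduction of the pitfall (the `get` helper is hypothetical):

package main

import "fmt"

// get stands in for the Nodes().Get call; it always reports a conflict.
func get() (string, error) { return "", fmt.Errorf("conflict") }

func main() {
	var err error
	for attempt := 0; attempt < 3; attempt++ {
		// BUG: ':=' declares a new err scoped to the loop body,
		// shadowing the outer err that is checked after the loop.
		node, err := get()
		_, _ = node, err
	}
	fmt.Println(err) // <nil> -- the outer err was never assigned

	for attempt := 0; attempt < 3; attempt++ {
		var node string
		node, err = get() // assigns the outer err, as in the fixed code
		_ = node
	}
	fmt.Println(err) // conflict
}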
2
vendor/k8s.io/kubernetes/test/utils/update_resources.go
generated
vendored
2
vendor/k8s.io/kubernetes/test/utils/update_resources.go
generated
vendored
@ -55,7 +55,7 @@ func ScaleResourceWithRetries(scalesGetter scaleclient.ScalesGetter, namespace,
		err = scale.WaitForScaleHasDesiredReplicas(scalesGetter, gvr.GroupResource(), name, namespace, size, waitForReplicas)
	}
	if err != nil {
		return fmt.Errorf("Error while scaling %s to %d replicas: %v", name, size, err)
		return fmt.Errorf("error while scaling %s to %d replicas: %v", name, size, err)
	}
	return nil
}
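Note: every message change in runners.go and update_resources.go is the same mechanical fix: error strings start lowercase and drop trailing punctuation, per Go convention (linters such as staticcheck flag capitalized error strings), because errors are routinely wrapped into longer chains where a mid-sentence capital or period reads badly. A short sketch of why the convention matters:

package main

import "fmt"

func startPods() error {
	// Lowercase, no trailing punctuation: this string usually ends up
	// embedded mid-sentence in a longer wrapped message.
	return fmt.Errorf("only %d pods started out of %d", 2, 5)
}

func main() {
	if err := startPods(); err != nil {
		// Reads as one sentence; "...: Only 2 pods started out of 5."
		// would capitalize and punctuate mid-chain.
		fmt.Println(fmt.Errorf("test setup failed: %v", err))
	}
}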