mirror of https://github.com/ceph/ceph-csi.git
rebase: update kubernetes and libraries to v1.22.0 version
Kubernetes v1.22 has been released, and this change updates the Ceph CSI dependencies to the same version.

Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
committed by mergify[bot]
parent e077c1fdf5
commit aa698bc3e1
vendor/k8s.io/kubernetes/test/e2e/framework/auth/helpers.go (6 changes, generated, vendored)

@@ -18,10 +18,10 @@ package auth
 import (
 	"context"
+	"fmt"
 	"sync"
 	"time"

-	"github.com/pkg/errors"
 	authorizationv1 "k8s.io/api/authorization/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -99,7 +99,7 @@ func BindClusterRole(c bindingsGetter, clusterRole, ns string, subjects ...rbacv
 	}, metav1.CreateOptions{})

 	if err != nil {
-		return errors.Wrapf(err, "binding clusterrole/%s for %q for %v", clusterRole, ns, subjects)
+		return fmt.Errorf("binding clusterrole/%s for %q for %v: %w", clusterRole, ns, subjects, err)
 	}

 	return nil
@@ -136,7 +136,7 @@ func bindInNamespace(c bindingsGetter, roleType, role, ns string, subjects ...rb
 	}, metav1.CreateOptions{})

 	if err != nil {
-		return errors.Wrapf(err, "binding %s/%s into %q for %v", roleType, role, ns, subjects)
+		return fmt.Errorf("binding %s/%s into %q for %v: %w", roleType, role, ns, subjects, err)
 	}

 	return nil
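The replacement of github.com/pkg/errors with the standard library's %w verb is the recurring theme of this diff: wrapping with fmt.Errorf keeps the underlying cause inspectable via errors.Is and errors.As. A minimal, self-contained sketch of the pattern (the sentinel and function names are illustrative, not taken from the diff):

	package main

	import (
		"errors"
		"fmt"
	)

	// errNotFound stands in for any sentinel error a caller might test for.
	var errNotFound = errors.New("not found")

	func bind(role string) error {
		// Pretend the underlying call failed with the sentinel.
		err := errNotFound
		// %w wraps err so callers can still unwrap it.
		return fmt.Errorf("binding clusterrole/%s: %w", role, err)
	}

	func main() {
		err := bind("admin")
		fmt.Println(err)                         // binding clusterrole/admin: not found
		fmt.Println(errors.Is(err, errNotFound)) // true: the chain is preserved
	}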
vendor/k8s.io/kubernetes/test/e2e/framework/cleanup.go (4 changes, generated, vendored)

@@ -17,6 +17,9 @@ limitations under the License.
 package framework

 import (
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	"reflect"
+	"runtime"
 	"sync"
 )

@@ -70,6 +73,7 @@ func RunCleanupActions() {
 	}()
 	// Run unlocked.
 	for _, fn := range list {
+		e2elog.Logf("Running Cleanup Action: %v", runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name())
 		fn()
 	}
 }
vendor/k8s.io/kubernetes/test/e2e/framework/framework.go (4 changes, generated, vendored)

@@ -296,7 +296,7 @@ func (f *Framework) BeforeEach() {

 	gatherMetricsAfterTest := TestContext.GatherMetricsAfterTest == "true" || TestContext.GatherMetricsAfterTest == "master"
 	if gatherMetricsAfterTest && TestContext.IncludeClusterAutoscalerMetrics {
-		grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, !ProviderIs("kubemark"), false, false, false, TestContext.IncludeClusterAutoscalerMetrics)
+		grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), !ProviderIs("kubemark"), false, false, false, TestContext.IncludeClusterAutoscalerMetrics, false)
 		if err != nil {
 			Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
 		} else {
@@ -449,7 +449,7 @@ func (f *Framework) AfterEach() {
 		ginkgo.By("Gathering metrics")
 		// Grab apiserver, scheduler, controller-manager metrics and (optionally) nodes' kubelet metrics.
 		grabMetricsFromKubelets := TestContext.GatherMetricsAfterTest != "master" && !ProviderIs("kubemark")
-		grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, grabMetricsFromKubelets, true, true, true, TestContext.IncludeClusterAutoscalerMetrics)
+		grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), grabMetricsFromKubelets, true, true, true, TestContext.IncludeClusterAutoscalerMetrics, false)
 		if err != nil {
 			Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
 		} else {
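As the two call sites above show, the grabber constructor now takes a *rest.Config third argument (needed for the secure scraping introduced in metrics_grabber.go below) and a trailing bool that enables snapshot-controller metrics. A hedged sketch of an updated call site, assuming f is a *framework.Framework:

	// Sketch only: mirrors the call sites in this hunk.
	grabber, err := e2emetrics.NewMetricsGrabber(
		f.ClientSet,                        // client for the cluster under test
		f.KubemarkExternalClusterClientSet, // external client, may be nil
		f.ClientConfig(),                   // *rest.Config, required when scraping scheduler/controller-manager
		true,  // kubelets
		true,  // scheduler
		true,  // controller manager
		true,  // API server
		false, // cluster autoscaler
		false, // snapshot controller (the new parameter)
	)
	if err != nil {
		framework.Logf("metrics grabbing unavailable: %v", err)
	}
	_ = grabber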
vendor/k8s.io/kubernetes/test/e2e/framework/kubectl/kubectl_utils.go (4 changes, generated, vendored)

@@ -142,6 +142,10 @@ func (tk *TestKubeconfig) WriteFileViaContainer(podName, containerName string, p
 		}
 	}
 	command := fmt.Sprintf("echo '%s' > '%s'; sync", contents, path)
+	// TODO(mauriciopoppe): remove this statement once we add `sync` to the test image, ref #101172
+	if e2epod.NodeOSDistroIs("windows") {
+		command = fmt.Sprintf("echo '%s' > '%s';", contents, path)
+	}
 	stdout, stderr, err := tk.kubectlExecWithRetry(tk.Namespace, podName, containerName, "--", "/bin/sh", "-c", command)
 	if err != nil {
 		e2elog.Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/kubelet_metrics.go (2 changes, generated, vendored)

@@ -139,7 +139,7 @@ func getKubeletMetricsFromNode(c clientset.Interface, nodeName string) (KubeletM
 	if c == nil {
 		return GrabKubeletMetricsWithoutProxy(nodeName, "/metrics")
 	}
-	grabber, err := NewMetricsGrabber(c, nil, true, false, false, false, false)
+	grabber, err := NewMetricsGrabber(c, nil, nil, true, false, false, false, false, false)
 	if err != nil {
 		return KubeletMetrics{}, err
 	}
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/metrics_grabber.go (292 changes, generated, vendored)

@@ -18,62 +18,89 @@ package metrics

 import (
 	"context"
+	"errors"
 	"fmt"
+	"net"
 	"regexp"
 	"sync"
 	"time"

 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
-	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

+	"k8s.io/client-go/rest"
 	"k8s.io/klog/v2"
+
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )

 const (
 	// insecureSchedulerPort is the default port for the scheduler status server.
 	// May be overridden by a flag at startup.
 	// Deprecated: use the secure KubeSchedulerPort instead.
 	insecureSchedulerPort = 10251
 	// insecureKubeControllerManagerPort is the default port for the controller manager status server.
 	// May be overridden by a flag at startup.
 	// Deprecated: use the secure KubeControllerManagerPort instead.
 	insecureKubeControllerManagerPort = 10252
+	// kubeSchedulerPort is the default port for the scheduler status server.
+	kubeSchedulerPort = 10259
+	// kubeControllerManagerPort is the default port for the controller manager status server.
+	kubeControllerManagerPort = 10257
+	// snapshotControllerPort is the port for the snapshot controller
+	snapshotControllerPort = 9102
 )

+// MetricsGrabbingDisabledError is an error that is wrapped by the
+// different MetricsGrabber.Wrap functions when metrics grabbing is
+// not supported. Tests that check metrics data should then skip
+// the check.
+var MetricsGrabbingDisabledError = errors.New("metrics grabbing disabled")
+
 // Collection is metrics collection of components
 type Collection struct {
-	APIServerMetrics         APIServerMetrics
-	ControllerManagerMetrics ControllerManagerMetrics
-	KubeletMetrics           map[string]KubeletMetrics
-	SchedulerMetrics         SchedulerMetrics
-	ClusterAutoscalerMetrics ClusterAutoscalerMetrics
+	APIServerMetrics          APIServerMetrics
+	ControllerManagerMetrics  ControllerManagerMetrics
+	SnapshotControllerMetrics SnapshotControllerMetrics
+	KubeletMetrics            map[string]KubeletMetrics
+	SchedulerMetrics          SchedulerMetrics
+	ClusterAutoscalerMetrics  ClusterAutoscalerMetrics
 }

 // Grabber provides functions which grab metrics from components
 type Grabber struct {
-	client                            clientset.Interface
-	externalClient                    clientset.Interface
-	grabFromAPIServer                 bool
-	grabFromControllerManager         bool
-	grabFromKubelets                  bool
-	grabFromScheduler                 bool
-	grabFromClusterAutoscaler         bool
-	kubeScheduler                     string
-	kubeControllerManager             string
-	waitForControllerManagerReadyOnce sync.Once
+	client                             clientset.Interface
+	externalClient                     clientset.Interface
+	config                             *rest.Config
+	grabFromAPIServer                  bool
+	grabFromControllerManager         bool
+	grabFromKubelets                   bool
+	grabFromScheduler                  bool
+	grabFromClusterAutoscaler          bool
+	grabFromSnapshotController         bool
+	kubeScheduler                      string
+	waitForSchedulerReadyOnce          sync.Once
+	kubeControllerManager              string
+	waitForControllerManagerReadyOnce  sync.Once
+	snapshotController                 string
+	waitForSnapshotControllerReadyOnce sync.Once
 }

-// NewMetricsGrabber returns new metrics which are initialized.
-func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets bool, scheduler bool, controllers bool, apiServer bool, clusterAutoscaler bool) (*Grabber, error) {
+// NewMetricsGrabber prepares for grabbing metrics data from several different
+// components. It should be called when those components are running because
+// it needs to communicate with them to determine for which components
+// metrics data can be retrieved.
+//
+// Collecting metrics data is an optional debug feature. Not all clusters will
+// support it. If disabled for a component, the corresponding Grab function
+// will immediately return an error derived from MetricsGrabbingDisabledError.
+func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, config *rest.Config, kubelets bool, scheduler bool, controllers bool, apiServer bool, clusterAutoscaler bool, snapshotController bool) (*Grabber, error) {

 	kubeScheduler := ""
 	kubeControllerManager := ""
+	snapshotControllerManager := ""

 	regKubeScheduler := regexp.MustCompile("kube-scheduler-.*")
 	regKubeControllerManager := regexp.MustCompile("kube-controller-manager-.*")
+	regSnapshotController := regexp.MustCompile("volume-snapshot-controller.*")
+
+	if (scheduler || controllers) && config == nil {
+		return nil, errors.New("a rest config is required for grabbing kube-controller and kube-controller-manager metrics")
+	}

 	podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), metav1.ListOptions{})
 	if err != nil {
@@ -89,35 +116,55 @@ func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets b
 		if regKubeControllerManager.MatchString(pod.Name) {
 			kubeControllerManager = pod.Name
 		}
-		if kubeScheduler != "" && kubeControllerManager != "" {
+		if regSnapshotController.MatchString(pod.Name) {
+			snapshotControllerManager = pod.Name
+		}
+		if kubeScheduler != "" && kubeControllerManager != "" && snapshotControllerManager != "" {
 			break
 		}
 	}
-	if kubeScheduler == "" {
-		scheduler = false
-		klog.Warningf("Can't find kube-scheduler pod. Grabbing metrics from kube-scheduler is disabled.")
-	}
-	if kubeControllerManager == "" {
-		controllers = false
-		klog.Warningf("Can't find kube-controller-manager pod. Grabbing metrics from kube-controller-manager is disabled.")
-	}
-	if ec == nil {
+	if clusterAutoscaler && ec == nil {
 		klog.Warningf("Did not receive an external client interface. Grabbing metrics from ClusterAutoscaler is disabled.")
 	}

 	return &Grabber{
-		client:                    c,
-		externalClient:            ec,
-		grabFromAPIServer:         apiServer,
-		grabFromControllerManager: controllers,
-		grabFromKubelets:          kubelets,
-		grabFromScheduler:         scheduler,
-		grabFromClusterAutoscaler: clusterAutoscaler,
-		kubeScheduler:             kubeScheduler,
-		kubeControllerManager:     kubeControllerManager,
+		client:                     c,
+		externalClient:             ec,
+		config:                     config,
+		grabFromAPIServer:          apiServer,
+		grabFromControllerManager:  checkPodDebugHandlers(c, controllers, "kube-controller-manager", kubeControllerManager),
+		grabFromKubelets:           kubelets,
+		grabFromScheduler:          checkPodDebugHandlers(c, scheduler, "kube-scheduler", kubeScheduler),
+		grabFromClusterAutoscaler:  clusterAutoscaler,
+		grabFromSnapshotController: checkPodDebugHandlers(c, snapshotController, "snapshot-controller", snapshotControllerManager),
+		kubeScheduler:              kubeScheduler,
+		kubeControllerManager:      kubeControllerManager,
+		snapshotController:         snapshotControllerManager,
 	}, nil
 }

+func checkPodDebugHandlers(c clientset.Interface, requested bool, component, podName string) bool {
+	if !requested {
+		return false
+	}
+	if podName == "" {
+		klog.Warningf("Can't find %s pod. Grabbing metrics from %s is disabled.", component, component)
+		return false
+	}
+
+	// The debug handlers on the host where the pod runs might be disabled.
+	// We can check that indirectly by trying to retrieve log output.
+	limit := int64(1)
+	if _, err := c.CoreV1().Pods(metav1.NamespaceSystem).GetLogs(podName, &v1.PodLogOptions{LimitBytes: &limit}).DoRaw(context.TODO()); err != nil {
+		klog.Warningf("Can't retrieve log output of %s (%q). Debug handlers might be disabled in kubelet. Grabbing metrics from %s is disabled.",
+			podName, err, component)
+		return false
+	}
+
+	// Metrics gathering enabled.
+	return true
+}
+
 // HasControlPlanePods returns true if metrics grabber was able to find control-plane pods
 func (g *Grabber) HasControlPlanePods() bool {
 	return g.kubeScheduler != "" && g.kubeControllerManager != ""
@@ -149,20 +196,38 @@ func (g *Grabber) grabFromKubeletInternal(nodeName string, kubeletPort int) (Kub

 // GrabFromScheduler returns metrics from scheduler
 func (g *Grabber) GrabFromScheduler() (SchedulerMetrics, error) {
-	if g.kubeScheduler == "" {
-		return SchedulerMetrics{}, fmt.Errorf("kube-scheduler pod is not registered. Skipping Scheduler's metrics gathering")
+	if !g.grabFromScheduler {
+		return SchedulerMetrics{}, fmt.Errorf("kube-scheduler: %w", MetricsGrabbingDisabledError)
 	}
-	output, err := g.getMetricsFromPod(g.client, g.kubeScheduler, metav1.NamespaceSystem, insecureSchedulerPort)
+
+	var err error
+
+	g.waitForSchedulerReadyOnce.Do(func() {
+		if readyErr := e2epod.WaitForPodsReady(g.client, metav1.NamespaceSystem, g.kubeScheduler, 0); readyErr != nil {
+			err = fmt.Errorf("error waiting for kube-scheduler pod to be ready: %w", readyErr)
+		}
+	})
+	if err != nil {
+		return SchedulerMetrics{}, err
+	}
+
+	var lastMetricsFetchErr error
+	var output string
+	if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
+		output, lastMetricsFetchErr = g.getSecureMetricsFromPod(g.kubeScheduler, metav1.NamespaceSystem, kubeSchedulerPort)
+		return lastMetricsFetchErr == nil, nil
+	}); metricsWaitErr != nil {
+		err := fmt.Errorf("error waiting for kube-scheduler pod to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr)
+		return SchedulerMetrics{}, err
+	}
+
 	return parseSchedulerMetrics(output)
 }

 // GrabFromClusterAutoscaler returns metrics from cluster autoscaler
 func (g *Grabber) GrabFromClusterAutoscaler() (ClusterAutoscalerMetrics, error) {
 	if !g.HasControlPlanePods() && g.externalClient == nil {
-		return ClusterAutoscalerMetrics{}, fmt.Errorf("Did not find control-plane pods. Skipping ClusterAutoscaler's metrics gathering")
+		return ClusterAutoscalerMetrics{}, fmt.Errorf("ClusterAutoscaler: %w", MetricsGrabbingDisabledError)
 	}
 	var client clientset.Interface
 	var namespace string
@@ -182,38 +247,73 @@ func (g *Grabber) GrabFromClusterAutoscaler() (ClusterAutoscalerMetrics, error)

 // GrabFromControllerManager returns metrics from controller manager
 func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error) {
-	if g.kubeControllerManager == "" {
-		return ControllerManagerMetrics{}, fmt.Errorf("kube-controller-manager pod is not registered. Skipping ControllerManager's metrics gathering")
+	if !g.grabFromControllerManager {
+		return ControllerManagerMetrics{}, fmt.Errorf("kube-controller-manager: %w", MetricsGrabbingDisabledError)
 	}

 	var err error
-	podName := g.kubeControllerManager
-	g.waitForControllerManagerReadyOnce.Do(func() {
-		if readyErr := e2epod.WaitForPodsReady(g.client, metav1.NamespaceSystem, podName, 0); readyErr != nil {
-			err = fmt.Errorf("error waiting for controller manager pod to be ready: %w", readyErr)
-			return
-		}
-
-		var lastMetricsFetchErr error
-		if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
-			_, lastMetricsFetchErr = g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, insecureKubeControllerManagerPort)
-			return lastMetricsFetchErr == nil, nil
-		}); metricsWaitErr != nil {
-			err = fmt.Errorf("error waiting for controller manager pod to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr)
-			return
+
+	g.waitForControllerManagerReadyOnce.Do(func() {
+		if readyErr := e2epod.WaitForPodsReady(g.client, metav1.NamespaceSystem, g.kubeControllerManager, 0); readyErr != nil {
+			err = fmt.Errorf("error waiting for kube-controller-manager pod to be ready: %w", readyErr)
 		}
 	})
 	if err != nil {
 		return ControllerManagerMetrics{}, err
 	}

-	output, err := g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, insecureKubeControllerManagerPort)
-	if err != nil {
+	var output string
+	var lastMetricsFetchErr error
+	if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
+		output, lastMetricsFetchErr = g.getSecureMetricsFromPod(g.kubeControllerManager, metav1.NamespaceSystem, kubeControllerManagerPort)
+		return lastMetricsFetchErr == nil, nil
+	}); metricsWaitErr != nil {
+		err := fmt.Errorf("error waiting for kube-controller-manager to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr)
 		return ControllerManagerMetrics{}, err
 	}

 	return parseControllerManagerMetrics(output)
 }

+// GrabFromSnapshotController returns metrics from controller manager
+func (g *Grabber) GrabFromSnapshotController(podName string, port int) (SnapshotControllerMetrics, error) {
+	if !g.grabFromSnapshotController {
+		return SnapshotControllerMetrics{}, fmt.Errorf("volume-snapshot-controller: %w", MetricsGrabbingDisabledError)
+	}
+
+	// Use overrides if provided via test config flags.
+	// Otherwise, use the default volume-snapshot-controller pod name and port.
+	if podName == "" {
+		podName = g.snapshotController
+	}
+	if port == 0 {
+		port = snapshotControllerPort
+	}
+
+	var err error
+
+	g.waitForSnapshotControllerReadyOnce.Do(func() {
+		if readyErr := e2epod.WaitForPodsReady(g.client, metav1.NamespaceSystem, podName, 0); readyErr != nil {
+			err = fmt.Errorf("error waiting for volume-snapshot-controller pod to be ready: %w", readyErr)
+		}
+	})
+	if err != nil {
+		return SnapshotControllerMetrics{}, err
+	}
+
+	var output string
+	var lastMetricsFetchErr error
+	if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
+		output, lastMetricsFetchErr = g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, port)
+		return lastMetricsFetchErr == nil, nil
+	}); metricsWaitErr != nil {
+		err = fmt.Errorf("error waiting for volume-snapshot-controller pod to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr)
+		return SnapshotControllerMetrics{}, err
+	}
+
+	return parseSnapshotControllerMetrics(output)
+}
+
 // GrabFromAPIServer returns metrics from API server
 func (g *Grabber) GrabFromAPIServer() (APIServerMetrics, error) {
 	output, err := g.getMetricsFromAPIServer()
@@ -251,6 +351,14 @@ func (g *Grabber) Grab() (Collection, error) {
 			result.ControllerManagerMetrics = metrics
 		}
 	}
+	if g.grabFromSnapshotController {
+		metrics, err := g.GrabFromSnapshotController(g.snapshotController, snapshotControllerPort)
+		if err != nil {
+			errs = append(errs, err)
+		} else {
+			result.SnapshotControllerMetrics = metrics
+		}
+	}
 	if g.grabFromClusterAutoscaler {
 		metrics, err := g.GrabFromClusterAutoscaler()
 		if err != nil {
@@ -281,12 +389,13 @@ func (g *Grabber) Grab() (Collection, error) {
 	return result, nil
 }

+// getMetricsFromPod retrieves metrics data from an insecure port.
 func (g *Grabber) getMetricsFromPod(client clientset.Interface, podName string, namespace string, port int) (string, error) {
 	rawOutput, err := client.CoreV1().RESTClient().Get().
 		Namespace(namespace).
 		Resource("pods").
 		SubResource("proxy").
-		Name(fmt.Sprintf("%v:%v", podName, port)).
+		Name(fmt.Sprintf("%s:%d", podName, port)).
 		Suffix("metrics").
 		Do(context.TODO()).Raw()
 	if err != nil {
@@ -294,3 +403,50 @@ func (g *Grabber) getMetricsFromPod(client clientset.Interface, podName string,
 	}
 	return string(rawOutput), nil
 }
+
+// getSecureMetricsFromPod retrieves metrics from a pod that uses TLS
+// and checks client credentials. Conceptually this function is
+// similar to "kubectl port-forward" + "kubectl get --raw
+// https://localhost:<port>/metrics". It uses the same credentials
+// as kubelet.
+func (g *Grabber) getSecureMetricsFromPod(podName string, namespace string, port int) (string, error) {
+	dialer := e2epod.NewDialer(g.client, g.config)
+	metricConfig := rest.CopyConfig(g.config)
+	addr := e2epod.Addr{
+		Namespace: namespace,
+		PodName:   podName,
+		Port:      port,
+	}
+	metricConfig.Dial = func(ctx context.Context, network, address string) (net.Conn, error) {
+		return dialer.DialContainerPort(ctx, addr)
+	}
+	// This should make it possible verify the server, but while it
+	// got past the server name check, certificate validation
+	// still failed.
+	metricConfig.Host = addr.String()
+	metricConfig.ServerName = "localhost"
+	// Verifying the pod certificate with the same root CA
+	// as for the API server led to an error about "unknown root
+	// certificate". Disabling certificate checking on the client
+	// side gets around that and should be good enough for
+	// E2E testing.
+	metricConfig.Insecure = true
+	metricConfig.CAFile = ""
+	metricConfig.CAData = nil
+
+	// clientset.NewForConfig is used because
+	// metricClient.RESTClient() is directly usable, in contrast
+	// to the client constructed by rest.RESTClientFor().
+	metricClient, err := clientset.NewForConfig(metricConfig)
+	if err != nil {
+		return "", err
+	}
+
+	rawOutput, err := metricClient.RESTClient().Get().
+		AbsPath("metrics").
+		Do(context.TODO()).Raw()
+	if err != nil {
+		return "", err
+	}
+	return string(rawOutput), nil
+}
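Per the new MetricsGrabbingDisabledError doc comment, callers are expected to skip metric assertions rather than fail when grabbing is disabled for a component. A hedged sketch of that consumer-side pattern; grabber and the skipper helper are assumptions, not part of this diff:

	metrics, err := grabber.GrabFromScheduler()
	if errors.Is(err, e2emetrics.MetricsGrabbingDisabledError) {
		// Grabbing is an optional debug feature; skip instead of failing.
		e2eskipper.Skipf("scheduler metrics unavailable: %v", err)
	}
	if err != nil {
		framework.Failf("grabbing scheduler metrics: %v", err)
	}
	_ = metrics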
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/snapshot_controller_metrics.go (40 changes, generated, vendored, new file)

@@ -0,0 +1,40 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import "k8s.io/component-base/metrics/testutil"
+
+// SnapshotControllerMetrics is metrics for controller manager
+type SnapshotControllerMetrics testutil.Metrics
+
+// Equal returns true if all metrics are the same as the arguments.
+func (m *SnapshotControllerMetrics) Equal(o SnapshotControllerMetrics) bool {
+	return (*testutil.Metrics)(m).Equal(testutil.Metrics(o))
+}
+
+func newSnapshotControllerMetrics() SnapshotControllerMetrics {
+	result := testutil.NewMetrics()
+	return SnapshotControllerMetrics(result)
+}
+
+func parseSnapshotControllerMetrics(data string) (SnapshotControllerMetrics, error) {
+	result := newSnapshotControllerMetrics()
+	if err := testutil.ParseMetrics(data, (*testutil.Metrics)(&result)); err != nil {
+		return SnapshotControllerMetrics{}, err
+	}
+	return result, nil
+}
vendor/k8s.io/kubernetes/test/e2e/framework/node/resource.go (25 changes, generated, vendored)

@@ -561,6 +561,31 @@ func GetClusterZones(c clientset.Interface) (sets.String, error) {
 	return zones, nil
 }

+// GetSchedulableClusterZones returns the values of zone label collected from all nodes which are schedulable.
+func GetSchedulableClusterZones(c clientset.Interface) (sets.String, error) {
+	nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err)
+	}
+
+	// collect values of zone label from all nodes
+	zones := sets.NewString()
+	for _, node := range nodes.Items {
+		// We should have at least 1 node in the zone which is schedulable.
+		if !IsNodeSchedulable(&node) {
+			continue
+		}
+		if zone, found := node.Labels[v1.LabelFailureDomainBetaZone]; found {
+			zones.Insert(zone)
+		}
+
+		if zone, found := node.Labels[v1.LabelTopologyZone]; found {
+			zones.Insert(zone)
+		}
+	}
+	return zones, nil
+}
+
 // CreatePodsPerNodeForSimpleApp creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
 func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
 	nodes, err := GetBoundedReadySchedulableNodes(c, maxCount)
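A short usage sketch for the helper added above; c is assumed to be a configured clientset and e2enode is assumed to alias this package:

	zones, err := e2enode.GetSchedulableClusterZones(c)
	if err != nil {
		framework.Failf("listing schedulable zones: %v", err)
	}
	// sets.String de-duplicates zones reported under both the beta and GA labels.
	for _, zone := range zones.List() {
		framework.Logf("schedulable zone: %s", zone)
	}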
vendor/k8s.io/kubernetes/test/e2e/framework/node/wait.go (6 changes, generated, vendored)

@@ -214,8 +214,12 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
 		}
 		allNodes, err := c.CoreV1().Nodes().List(context.TODO(), opts)
 		if err != nil {
+			var terminalListNodesErr error
 			e2elog.Logf("Unexpected error listing nodes: %v", err)
-			return false, err
+			if attempt >= 3 {
+				terminalListNodesErr = err
+			}
+			return false, terminalListNodesErr
 		}
 		for _, node := range allNodes.Items {
 			if !readyForTests(&node, nonblockingTaints) {
vendor/k8s.io/kubernetes/test/e2e/framework/nodes_util.go (2 changes, generated, vendored)

@@ -34,7 +34,7 @@ import (
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 )

-const etcdImage = "3.4.13-0"
+const etcdImage = "3.5.0-0"

 // EtcdUpgrade upgrades etcd on GCE.
 func EtcdUpgrade(targetStorage, targetVersion string) error {
vendor/k8s.io/kubernetes/test/e2e/framework/pod/create.go (43 changes, generated, vendored)

@@ -19,7 +19,6 @@ package pod
 import (
 	"context"
 	"fmt"
-	"runtime"
 	"time"

 	v1 "k8s.io/api/core/v1"
@@ -154,15 +153,7 @@ func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.Persisten
 			RestartPolicy: v1.RestartPolicyOnFailure,
 		},
 	}
-	var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
-	var volumes = make([]v1.Volume, len(pvclaims))
-	for index, pvclaim := range pvclaims {
-		volumename := fmt.Sprintf("volume%v", index+1)
-		volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
-		volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
-	}
-	podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
-	podSpec.Spec.Volumes = volumes
+	setVolumes(&podSpec.Spec, pvclaims, nil /*inline volume sources*/, false /*PVCs readonly*/)
 	if nodeSelector != nil {
 		podSpec.Spec.NodeSelector = nodeSelector
 	}
@@ -175,11 +166,12 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
 	if podConfig.NS == "" {
 		return nil, fmt.Errorf("Cannot create pod with empty namespace")
 	}
-	if len(podConfig.Command) == 0 {
+	if len(podConfig.Command) == 0 && !NodeOSDistroIs("windows") {
 		podConfig.Command = "trap exit TERM; while true; do sleep 1; done"
 	}
+
 	podName := "pod-" + string(uuid.NewUUID())
-	if podConfig.FsGroup == nil && runtime.GOOS != "windows" {
+	if podConfig.FsGroup == nil && !NodeOSDistroIs("windows") {
 		podConfig.FsGroup = func(i int64) *int64 {
 			return &i
 		}(1000)
@@ -223,33 +215,42 @@ func MakePodSpec(podConfig *Config) *v1.PodSpec {
 		podSpec.SecurityContext.FSGroupChangePolicy = podConfig.PodFSGroupChangePolicy
 	}

+	setVolumes(podSpec, podConfig.PVCs, podConfig.InlineVolumeSources, podConfig.PVCsReadOnly)
+	SetNodeSelection(podSpec, podConfig.NodeSelection)
+	return podSpec
+}
+
+func setVolumes(podSpec *v1.PodSpec, pvcs []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, pvcsReadOnly bool) {
 	var volumeMounts = make([]v1.VolumeMount, 0)
 	var volumeDevices = make([]v1.VolumeDevice, 0)
-	var volumes = make([]v1.Volume, len(podConfig.PVCs)+len(podConfig.InlineVolumeSources))
+	var volumes = make([]v1.Volume, len(pvcs)+len(inlineVolumeSources))
 	volumeIndex := 0
-	for _, pvclaim := range podConfig.PVCs {
+	for _, pvclaim := range pvcs {
 		volumename := fmt.Sprintf("volume%v", volumeIndex+1)
 		if pvclaim.Spec.VolumeMode != nil && *pvclaim.Spec.VolumeMode == v1.PersistentVolumeBlock {
 			volumeDevices = append(volumeDevices, v1.VolumeDevice{Name: volumename, DevicePath: "/mnt/" + volumename})
 		} else {
 			volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
 		}

-		volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: podConfig.PVCsReadOnly}}}
+		volumes[volumeIndex] = v1.Volume{
+			Name: volumename,
+			VolumeSource: v1.VolumeSource{
+				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+					ClaimName: pvclaim.Name,
+					ReadOnly:  pvcsReadOnly,
+				},
+			},
+		}
 		volumeIndex++
 	}
-	for _, src := range podConfig.InlineVolumeSources {
+	for _, src := range inlineVolumeSources {
 		volumename := fmt.Sprintf("volume%v", volumeIndex+1)
 		// In-line volumes can be only filesystem, not block.
 		volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
 		volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: *src}
 		volumeIndex++
 	}

 	podSpec.Containers[0].VolumeMounts = volumeMounts
 	podSpec.Containers[0].VolumeDevices = volumeDevices
 	podSpec.Volumes = volumes
-
-	SetNodeSelection(podSpec, podConfig.NodeSelection)
-	return podSpec
 }
vendor/k8s.io/kubernetes/test/e2e/framework/pod/dial.go (215 changes, generated, vendored, new file)

@@ -0,0 +1,215 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pod
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"regexp"
+	"strconv"
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/httpstream"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/portforward"
+	"k8s.io/client-go/transport/spdy"
+	"k8s.io/klog/v2"
+)
+
+// NewTransport creates a transport which uses the port forward dialer.
+// URLs must use <namespace>.<pod>:<port> as host.
+func NewTransport(client kubernetes.Interface, restConfig *rest.Config) *http.Transport {
+	return &http.Transport{
+		DialContext: func(ctx context.Context, _, addr string) (net.Conn, error) {
+			dialer := NewDialer(client, restConfig)
+			a, err := ParseAddr(addr)
+			if err != nil {
+				return nil, err
+			}
+			return dialer.DialContainerPort(ctx, *a)
+		},
+	}
+}
+
+// NewDialer creates a dialer that supports connecting to container ports.
+func NewDialer(client kubernetes.Interface, restConfig *rest.Config) *Dialer {
+	return &Dialer{
+		client:     client,
+		restConfig: restConfig,
+	}
+}
+
+// Dialer holds the relevant parameters that are independent of a particular connection.
+type Dialer struct {
+	client     kubernetes.Interface
+	restConfig *rest.Config
+}
+
+// DialContainerPort connects to a certain container port in a pod.
+func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Conn, finalErr error) {
+	restClient := d.client.CoreV1().RESTClient()
+	restConfig := d.restConfig
+	if restConfig.GroupVersion == nil {
+		restConfig.GroupVersion = &schema.GroupVersion{}
+	}
+	if restConfig.NegotiatedSerializer == nil {
+		restConfig.NegotiatedSerializer = scheme.Codecs
+	}
+
+	// The setup code around the actual portforward is from
+	// https://github.com/kubernetes/kubernetes/blob/c652ffbe4a29143623a1aaec39f745575f7e43ad/staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go
+	req := restClient.Post().
+		Resource("pods").
+		Namespace(addr.Namespace).
+		Name(addr.PodName).
+		SubResource("portforward")
+	transport, upgrader, err := spdy.RoundTripperFor(restConfig)
+	if err != nil {
+		return nil, fmt.Errorf("create round tripper: %v", err)
+	}
+	dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL())
+
+	streamConn, _, err := dialer.Dial(portforward.PortForwardProtocolV1Name)
+	if err != nil {
+		return nil, fmt.Errorf("dialer failed: %v", err)
+	}
+	requestID := "1"
+	defer func() {
+		if finalErr != nil {
+			streamConn.Close()
+		}
+	}()
+
+	// create error stream
+	headers := http.Header{}
+	headers.Set(v1.StreamType, v1.StreamTypeError)
+	headers.Set(v1.PortHeader, fmt.Sprintf("%d", addr.Port))
+	headers.Set(v1.PortForwardRequestIDHeader, requestID)
+
+	// We're not writing to this stream, just reading an error message from it.
+	// This happens asynchronously.
+	errorStream, err := streamConn.CreateStream(headers)
+	if err != nil {
+		return nil, fmt.Errorf("error creating error stream: %v", err)
+	}
+	errorStream.Close()
+	go func() {
+		message, err := ioutil.ReadAll(errorStream)
+		switch {
+		case err != nil:
+			klog.ErrorS(err, "error reading from error stream")
+		case len(message) > 0:
+			klog.ErrorS(errors.New(string(message)), "an error occurred connecting to the remote port")
+		}
+	}()
+
+	// create data stream
+	headers.Set(v1.StreamType, v1.StreamTypeData)
+	dataStream, err := streamConn.CreateStream(headers)
+	if err != nil {
+		return nil, fmt.Errorf("error creating data stream: %v", err)
+	}
+
+	return &stream{
+		Stream:     dataStream,
+		streamConn: streamConn,
+	}, nil
+}
+
+// Addr contains all relevant parameters for a certain port in a pod.
+// The container should be running before connections are attempted,
+// otherwise the connection will fail.
+type Addr struct {
+	Namespace, PodName string
+	Port               int
+}
+
+var _ net.Addr = Addr{}
+
+func (a Addr) Network() string {
+	return "port-forwarding"
+}
+
+func (a Addr) String() string {
+	return fmt.Sprintf("%s.%s:%d", a.Namespace, a.PodName, a.Port)
+}
+
+// ParseAddr expects a <namespace>.<pod>:<port number> as produced
+// by Addr.String.
+func ParseAddr(addr string) (*Addr, error) {
+	parts := addrRegex.FindStringSubmatch(addr)
+	if parts == nil {
+		return nil, fmt.Errorf("%q: must match the format <namespace>.<pod>:<port number>", addr)
+	}
+	port, _ := strconv.Atoi(parts[3])
+	return &Addr{
+		Namespace: parts[1],
+		PodName:   parts[2],
+		Port:      port,
+	}, nil
+}
+
+var addrRegex = regexp.MustCompile(`^([^\.]+)\.([^:]+):(\d+)$`)
+
+type stream struct {
+	addr Addr
+	httpstream.Stream
+	streamConn httpstream.Connection
+}
+
+var _ net.Conn = &stream{}
+
+func (s *stream) Close() error {
+	s.Stream.Close()
+	s.streamConn.Close()
+	return nil
+}
+
+func (s *stream) LocalAddr() net.Addr {
+	return LocalAddr{}
+}
+
+func (s *stream) RemoteAddr() net.Addr {
+	return s.addr
+}
+
+func (s *stream) SetDeadline(t time.Time) error {
+	return nil
+}
+
+func (s *stream) SetReadDeadline(t time.Time) error {
+	return nil
+}
+
+func (s *stream) SetWriteDeadline(t time.Time) error {
+	return nil
+}
+
+type LocalAddr struct{}
+
+var _ net.Addr = LocalAddr{}
+
+func (l LocalAddr) Network() string { return "port-forwarding" }
+func (l LocalAddr) String() string  { return "apiserver" }
vendor/k8s.io/kubernetes/test/e2e/framework/pod/resource.go (2 changes, generated, vendored)

@@ -25,7 +25,6 @@ import (

 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
-
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -34,6 +33,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"
 	"k8s.io/kubectl/pkg/util/podutils"
+
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
vendor/k8s.io/kubernetes/test/e2e/framework/pod/utils.go (7 changes, generated, vendored)

@@ -37,14 +37,9 @@ func NodeOSDistroIs(distro string) bool {
 }

 // GenerateScriptCmd generates the corresponding command lines to execute a command.
-// Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh
 func GenerateScriptCmd(command string) []string {
 	var commands []string
-	if !NodeOSDistroIs("windows") {
-		commands = []string{"/bin/sh", "-c", command}
-	} else {
-		commands = []string{"powershell", "/c", command}
-	}
+	commands = []string{"/bin/sh", "-c", command}
 	return commands
 }
vendor/k8s.io/kubernetes/test/e2e/framework/pod/wait.go (15 changes, generated, vendored)

@@ -435,6 +435,21 @@ func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods
 	return wait.PollImmediate(poll, podRespondingTimeout, NewProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
 }

+// WaitForNumberOfPods waits up to timeout to ensure there are exact
+// `num` pods in namespace `ns`.
+// It returns the matching Pods or a timeout error.
+func WaitForNumberOfPods(c clientset.Interface, ns string, num int, timeout time.Duration) (pods *v1.PodList, err error) {
+	err = wait.PollImmediate(poll, timeout, func() (bool, error) {
+		pods, err = c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
+		// ignore intermittent network error
+		if err != nil {
+			return false, nil
+		}
+		return len(pods.Items) == num, nil
+	})
+	return
+}
+
 // WaitForPodsWithLabelScheduled waits for all matching pods to become scheduled and at least one
 // matching pod exists. Return the list of matching pods.
 func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
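A usage sketch for the new helper; c is an assumed clientset and e2epod aliases this package:

	// Wait up to two minutes for exactly three pods in the "default" namespace.
	pods, err := e2epod.WaitForNumberOfPods(c, "default", 3, 2*time.Minute)
	if err != nil {
		framework.Failf("expected exactly 3 pods: %v", err)
	}
	framework.Logf("observed %d pods", len(pods.Items))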
vendor/k8s.io/kubernetes/test/e2e/framework/pods.go (2 changes, generated, vendored)

@@ -181,7 +181,7 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
 		c := &pod.Spec.Containers[i]
 		if c.ImagePullPolicy == v1.PullAlways {
 			// If the image pull policy is PullAlways, the image doesn't need to be in
-			// the white list or pre-pulled, because the image is expected to be pulled
+			// the allow list or pre-pulled, because the image is expected to be pulled
 			// in the test anyway.
 			continue
 		}
vendor/k8s.io/kubernetes/test/e2e/framework/ports.go (4 changes, generated, vendored)

@@ -22,10 +22,6 @@ const (
 	// KubeletPort is the default port for the kubelet server on each host machine.
 	// May be overridden by a flag at startup.
 	KubeletPort = 10250
-	// InsecureKubeControllerManagerPort is the default port for the controller manager status server.
-	// May be overridden by a flag at startup.
-	// Deprecated: use the secure KubeControllerManagerPort instead.
-	InsecureKubeControllerManagerPort = 10252
 	// KubeControllerManagerPort is the default port for the controller manager status server.
 	// May be overridden by a flag at startup.
 	KubeControllerManagerPort = 10257
vendor/k8s.io/kubernetes/test/e2e/framework/provider.go (4 changes, generated, vendored)

@@ -21,8 +21,6 @@ import (
 	"os"
 	"sync"

-	"github.com/pkg/errors"
-
 	"k8s.io/api/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
 )
@@ -78,7 +76,7 @@ func SetupProviderConfig(providerName string) (ProviderInterface, error) {
 	defer mutex.Unlock()
 	factory, ok := providers[providerName]
 	if !ok {
-		return nil, errors.Wrapf(os.ErrNotExist, "The provider %s is unknown.", providerName)
+		return nil, fmt.Errorf("The provider %s is unknown: %w", providerName, os.ErrNotExist)
 	}
 	provider, err := factory()
vendor/k8s.io/kubernetes/test/e2e/framework/psp.go (8 changes, generated, vendored)

@@ -177,7 +177,13 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
 			Kind:      rbacv1.ServiceAccountKind,
 			Namespace: namespace,
 			Name:      "default",
-		})
+		},
+		rbacv1.Subject{
+			Kind:     rbacv1.GroupKind,
+			APIGroup: rbacv1.GroupName,
+			Name:     "system:serviceaccounts:" + namespace,
+		},
+	)
 	ExpectNoError(err)
 	ExpectNoError(e2eauth.WaitForNamedAuthorizationUpdate(kubeClient.AuthorizationV1(),
 		serviceaccount.MakeUsername(namespace, "default"), namespace, "use", podSecurityPolicyPrivileged,
vendor/k8s.io/kubernetes/test/e2e/framework/skipper/skipper.go (7 changes, generated, vendored)

@@ -49,6 +49,7 @@ var localStorageCapacityIsolation featuregate.Feature = "LocalStorageCapacityIso
 var (
 	downwardAPIHugePages featuregate.Feature = "DownwardAPIHugePages"
 	execProbeTimeout     featuregate.Feature = "ExecProbeTimeout"
+	csiMigration         featuregate.Feature = "CSIMigration"
 )

 func skipInternalf(caller int, format string, args ...interface{}) {
@@ -154,6 +155,12 @@ func SkipUnlessExecProbeTimeoutEnabled() {
 	}
 }

+func SkipIfCSIMigrationEnabled() {
+	if utilfeature.DefaultFeatureGate.Enabled(csiMigration) {
+		skipInternalf(1, "Only supported when %v feature is disabled", csiMigration)
+	}
+}
+
 // SkipIfMissingResource skips if the gvr resource is missing.
 func SkipIfMissingResource(dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, namespace string) {
 	resourceClient := dynamicClient.Resource(gvr).Namespace(namespace)
vendor/k8s.io/kubernetes/test/e2e/framework/ssh/ssh.go (44 changes, generated, vendored)

@@ -24,6 +24,7 @@ import (
 	"net"
 	"os"
 	"path/filepath"
+	"sync"
 	"time"

 	"github.com/onsi/gomega"
@@ -48,6 +49,9 @@ const (
 	// singleCallTimeout is how long to try single API calls (like 'get' or 'list'). Used to prevent
 	// transient failures from failing tests.
 	singleCallTimeout = 5 * time.Minute
+
+	// sshBastionEnvKey is the environment variable key for running SSH commands via bastion.
+	sshBastionEnvKey = "KUBE_SSH_BASTION"
 )

 // GetSigner returns an ssh.Signer for the provider ("gce", etc.) that can be
@@ -133,13 +137,45 @@ func NodeSSHHosts(c clientset.Interface) ([]string, error) {
 			len(hosts), len(nodelist.Items), nodelist)
 	}

-	sshHosts := make([]string, 0, len(hosts))
-	for _, h := range hosts {
-		sshHosts = append(sshHosts, net.JoinHostPort(h, SSHPort))
+	lenHosts := len(hosts)
+	wg := &sync.WaitGroup{}
+	wg.Add(lenHosts)
+	sshHosts := make([]string, 0, lenHosts)
+	var sshHostsLock sync.Mutex
+
+	for _, host := range hosts {
+		go func(host string) {
+			defer wg.Done()
+			if canConnect(host) {
+				e2elog.Logf("Assuming SSH on host %s", host)
+				sshHostsLock.Lock()
+				sshHosts = append(sshHosts, net.JoinHostPort(host, SSHPort))
+				sshHostsLock.Unlock()
+			} else {
+				e2elog.Logf("Skipping host %s because it does not run anything on port %s", host, SSHPort)
+			}
+		}(host)
 	}
+	wg.Wait()

 	return sshHosts, nil
 }

+// canConnect returns true if a network connection is possible to the SSHPort.
+func canConnect(host string) bool {
+	if _, ok := os.LookupEnv(sshBastionEnvKey); ok {
+		return true
+	}
+	hostPort := net.JoinHostPort(host, SSHPort)
+	conn, err := net.DialTimeout("tcp", hostPort, 3*time.Second)
+	if err != nil {
+		e2elog.Logf("cannot dial %s: %v", hostPort, err)
+		return false
+	}
+	conn.Close()
+	return true
+}
+
 // Result holds the execution result of SSH command
 type Result struct {
 	User string
@@ -176,7 +212,7 @@ func SSH(cmd, host, provider string) (Result, error) {
 		result.User = os.Getenv("USER")
 	}

-	if bastion := os.Getenv("KUBE_SSH_BASTION"); len(bastion) > 0 {
+	if bastion := os.Getenv(sshBastionEnvKey); len(bastion) > 0 {
 		stdout, stderr, code, err := runSSHCommandViaBastion(cmd, result.User, bastion, host, signer)
 		result.Stdout = stdout
 		result.Stderr = stderr
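The NodeSSHHosts change fans the reachability probe out across goroutines and guards the shared result slice with a mutex. A self-contained sketch of the same pattern outside the framework (hosts and port are illustrative):

	package main

	import (
		"fmt"
		"net"
		"sync"
		"time"
	)

	// probe reports whether anything accepts TCP connections on host:port
	// within the timeout, mirroring the canConnect helper above.
	func probe(host, port string) bool {
		conn, err := net.DialTimeout("tcp", net.JoinHostPort(host, port), 3*time.Second)
		if err != nil {
			return false
		}
		conn.Close()
		return true
	}

	func main() {
		hosts := []string{"127.0.0.1", "192.0.2.1"} // 192.0.2.0/24 is TEST-NET, expected to fail
		var (
			wg        sync.WaitGroup
			mu        sync.Mutex
			reachable []string
		)
		for _, h := range hosts {
			wg.Add(1)
			go func(h string) {
				defer wg.Done()
				if probe(h, "22") {
					mu.Lock() // guard the shared slice, as NodeSSHHosts now does
					reachable = append(reachable, h)
					mu.Unlock()
				}
			}(h)
		}
		wg.Wait()
		fmt.Println("reachable:", reachable)
	}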
vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go (15 changes, generated, vendored)

@@ -19,6 +19,7 @@ package framework
 import (
 	"crypto/rand"
 	"encoding/base64"
+	"errors"
 	"flag"
 	"fmt"
 	"io/ioutil"
@@ -29,7 +30,6 @@ import (
 	"time"

 	"github.com/onsi/ginkgo/config"
-	"github.com/pkg/errors"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
@@ -182,6 +182,12 @@ type TestContextType struct {

 	// DockerConfigFile is a file that contains credentials which can be used to pull images from certain private registries, needed for a test.
 	DockerConfigFile string
+
+	// SnapshotControllerPodName is the name used for identifying the snapshot controller pod.
+	SnapshotControllerPodName string
+
+	// SnapshotControllerHTTPPort the port used for communicating with the snapshot controller HTTP endpoint.
+	SnapshotControllerHTTPPort int
 }

 // NodeKillerConfig describes configuration of NodeKiller -- a utility to
@@ -223,6 +229,8 @@ type NodeTestContextType struct {
 	// the node e2e test. If empty, the default one (system.DefaultSpec) is
 	// used. The system specs are in test/e2e_node/system/specs/.
 	SystemSpecName string
+	// RestartKubelet restarts Kubelet unit when the process is killed.
+	RestartKubelet bool
 	// ExtraEnvs is a map of environment names to values.
 	ExtraEnvs map[string]string
 }
@@ -315,6 +323,9 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
 	flags.StringVar(&TestContext.ProgressReportURL, "progress-report-url", "", "The URL to POST progress updates to as the suite runs to assist in aiding integrations. If empty, no messages sent.")
 	flags.StringVar(&TestContext.SpecSummaryOutput, "spec-dump", "", "The file to dump all ginkgo.SpecSummary to after tests run. If empty, no objects are saved/printed.")
 	flags.StringVar(&TestContext.DockerConfigFile, "docker-config-file", "", "A file that contains credentials which can be used to pull images from certain private registries, needed for a test.")
+
+	flags.StringVar(&TestContext.SnapshotControllerPodName, "snapshot-controller-pod-name", "", "The pod name to use for identifying the snapshot controller in the kube-system namespace.")
+	flags.IntVar(&TestContext.SnapshotControllerHTTPPort, "snapshot-controller-http-port", 0, "The port to use for snapshot controller HTTP communication.")
 }

 // RegisterClusterFlags registers flags specific to the cluster e2e test suite.
@@ -473,7 +484,7 @@ func AfterReadingAllFlags(t *TestContextType) {
 	var err error
 	TestContext.CloudConfig.Provider, err = SetupProviderConfig(TestContext.Provider)
 	if err != nil {
-		if os.IsNotExist(errors.Cause(err)) {
+		if os.IsNotExist(errors.Unwrap(err)) {
 			// Provide a more helpful error message when the provider is unknown.
 			var providers []string
 			for _, name := range GetProviders() {
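Replacing errors.Cause with errors.Unwrap works here because SetupProviderConfig (see provider.go above) adds exactly one layer of %w wrapping; Unwrap peels a single layer, whereas pkg/errors' Cause walked the whole chain. A runnable sketch of the distinction:

	package main

	import (
		"errors"
		"fmt"
		"os"
	)

	func main() {
		// One layer of wrapping, as produced by SetupProviderConfig.
		wrapped := fmt.Errorf("The provider gce is unknown: %w", os.ErrNotExist)

		// errors.Unwrap removes exactly one layer, so the existing check still works.
		fmt.Println(os.IsNotExist(errors.Unwrap(wrapped))) // true

		// errors.Is walks the entire chain and is the more robust test.
		fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true
	}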
vendor/k8s.io/kubernetes/test/e2e/framework/testfiles/testdata/a/foo.txt (1 change, generated, vendored, new file)

@@ -0,0 +1 @@
+Hello World
56
vendor/k8s.io/kubernetes/test/e2e/framework/testfiles/testfiles.go
generated
vendored
56
vendor/k8s.io/kubernetes/test/e2e/framework/testfiles/testfiles.go
generated
vendored
@ -25,13 +25,14 @@ limitations under the License.
|
||||
package testfiles
|
||||
|
||||
import (
|
||||
"embed"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@ -147,32 +148,47 @@ func (r RootFileSource) DescribeFiles() string {
	return description
}

// BindataFileSource handles files stored in a package generated with bindata.
type BindataFileSource struct {
	Asset      func(string) ([]byte, error)
	AssetNames func() []string
// EmbeddedFileSource handles files stored in a package generated with bindata.
type EmbeddedFileSource struct {
	EmbeddedFS embed.FS
	Root       string
	fileList   []string
}

// ReadTestFile looks for an asset with the given path.
func (b BindataFileSource) ReadTestFile(filePath string) ([]byte, error) {
	fileBytes, err := b.Asset(filePath)
// ReadTestFile looks for an embedded file with the given path.
func (e EmbeddedFileSource) ReadTestFile(filepath string) ([]byte, error) {
	relativePath := strings.TrimPrefix(filepath, fmt.Sprintf("%s/", e.Root))

	b, err := e.EmbeddedFS.ReadFile(relativePath)
	if err != nil {
		// It would be nice to have a better way to detect
		// "not found" errors :-/
		if strings.HasSuffix(err.Error(), "not found") {
		if errors.Is(err, fs.ErrNotExist) {
			return nil, nil
		}
		return nil, err
	}
	return fileBytes, nil

	return b, nil
}

// DescribeFiles explains about gobindata and then lists all available files.
func (b BindataFileSource) DescribeFiles() string {
// DescribeFiles explains that it is looking inside an embedded filesystem
func (e EmbeddedFileSource) DescribeFiles() string {
	var lines []string
	lines = append(lines, "The following files are built into the test executable via gobindata. For questions on maintaining gobindata, contact the sig-testing group.")
	assets := b.AssetNames()
	sort.Strings(assets)
	lines = append(lines, assets...)
	description := strings.Join(lines, "\n ")
	return description
	lines = append(lines, "The following files are embedded into the test executable:")

	if len(e.fileList) == 0 {
		e.populateFileList()
	}
	lines = append(lines, e.fileList...)

	return strings.Join(lines, "\n\t")
}

func (e *EmbeddedFileSource) populateFileList() {
	fs.WalkDir(e.EmbeddedFS, ".", func(path string, d fs.DirEntry, err error) error {
		if !d.IsDir() {
			e.fileList = append(e.fileList, filepath.Join(e.Root, path))
		}

		return nil
	})
}

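A hedged usage sketch of the new source, assuming the vendored package is importable and an a/foo.txt fixture sits next to the file at build time; note that Root is a logical prefix that ReadTestFile strips before consulting the embedded filesystem, so it need not name a real directory:

package main

import (
	"embed"
	"fmt"

	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
)

//go:embed a
var testFS embed.FS // assumes ./a/foo.txt exists when compiling

func main() {
	src := e2etestfiles.EmbeddedFileSource{
		EmbeddedFS: testFS,
		Root:       "testdata", // callers ask for "testdata/..."; the prefix is trimmed
	}

	data, err := src.ReadTestFile("testdata/a/foo.txt")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", data) // Hello World

	// DescribeFiles walks the embedded tree and lists every file.
	fmt.Println(src.DescribeFiles())
}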
57
vendor/k8s.io/kubernetes/test/e2e/framework/timeouts.go
generated
vendored
@ -20,19 +20,20 @@ import "time"

const (
	// Default timeouts to be used in TimeoutContext
	podStartTimeout            = 5 * time.Minute
	podStartShortTimeout       = 2 * time.Minute
	podStartSlowTimeout        = 15 * time.Minute
	podDeleteTimeout           = 5 * time.Minute
	claimProvisionTimeout      = 5 * time.Minute
	claimProvisionShortTimeout = 1 * time.Minute
	claimBoundTimeout          = 3 * time.Minute
	pvReclaimTimeout           = 3 * time.Minute
	pvBoundTimeout             = 3 * time.Minute
	pvDeleteTimeout            = 3 * time.Minute
	pvDeleteSlowTimeout        = 20 * time.Minute
	snapshotCreateTimeout      = 5 * time.Minute
	snapshotDeleteTimeout      = 5 * time.Minute
	podStartTimeout                  = 5 * time.Minute
	podStartShortTimeout             = 2 * time.Minute
	podStartSlowTimeout              = 15 * time.Minute
	podDeleteTimeout                 = 5 * time.Minute
	claimProvisionTimeout            = 5 * time.Minute
	claimProvisionShortTimeout       = 1 * time.Minute
	claimBoundTimeout                = 3 * time.Minute
	pvReclaimTimeout                 = 3 * time.Minute
	pvBoundTimeout                   = 3 * time.Minute
	pvDeleteTimeout                  = 3 * time.Minute
	pvDeleteSlowTimeout              = 20 * time.Minute
	snapshotCreateTimeout            = 5 * time.Minute
	snapshotDeleteTimeout            = 5 * time.Minute
	snapshotControllerMetricsTimeout = 5 * time.Minute
)

// TimeoutContext contains timeout settings for several actions.
@ -77,23 +78,27 @@ type TimeoutContext struct {

	// SnapshotDelete is how long for snapshot to delete snapshotContent.
	SnapshotDelete time.Duration

	// SnapshotControllerMetrics is how long to wait for snapshot controller metrics.
	SnapshotControllerMetrics time.Duration
}

// NewTimeoutContextWithDefaults returns a TimeoutContext with default values.
func NewTimeoutContextWithDefaults() *TimeoutContext {
	return &TimeoutContext{
		PodStart:            podStartTimeout,
		PodStartShort:       podStartShortTimeout,
		PodStartSlow:        podStartSlowTimeout,
		PodDelete:           podDeleteTimeout,
		ClaimProvision:      claimProvisionTimeout,
		ClaimProvisionShort: claimProvisionShortTimeout,
		ClaimBound:          claimBoundTimeout,
		PVReclaim:           pvReclaimTimeout,
		PVBound:             pvBoundTimeout,
		PVDelete:            pvDeleteTimeout,
		PVDeleteSlow:        pvDeleteSlowTimeout,
		SnapshotCreate:      snapshotCreateTimeout,
		SnapshotDelete:      snapshotDeleteTimeout,
		PodStart:                  podStartTimeout,
		PodStartShort:             podStartShortTimeout,
		PodStartSlow:              podStartSlowTimeout,
		PodDelete:                 podDeleteTimeout,
		ClaimProvision:            claimProvisionTimeout,
		ClaimProvisionShort:       claimProvisionShortTimeout,
		ClaimBound:                claimBoundTimeout,
		PVReclaim:                 pvReclaimTimeout,
		PVBound:                   pvBoundTimeout,
		PVDelete:                  pvDeleteTimeout,
		PVDeleteSlow:              pvDeleteSlowTimeout,
		SnapshotCreate:            snapshotCreateTimeout,
		SnapshotDelete:            snapshotDeleteTimeout,
		SnapshotControllerMetrics: snapshotControllerMetricsTimeout,
	}
}

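Consumers pick up the new field automatically through the defaults constructor. A minimal sketch of reading the new default and overriding an unrelated timeout (the override value is an assumption for a slow backend):

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
)

func main() {
	timeouts := framework.NewTimeoutContextWithDefaults()

	// Hypothetical override for a slow storage backend.
	timeouts.ClaimProvision = 10 * time.Minute

	fmt.Println(timeouts.SnapshotControllerMetrics) // 5m0s, the new default
}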
13
vendor/k8s.io/kubernetes/test/e2e/framework/volume/OWNERS
generated
vendored
@ -1,20 +1,11 @@
# See the OWNERS docs at https://go.k8s.io/owners

approvers:
- saad-ali
- sig-storage-approvers
- rootfs
- gnufied
- jingxu97
- jsafrane
- msau42
reviewers:
- saad-ali
- rootfs
- gnufied
- jingxu97
- jsafrane
- msau42
- sig-storage-reviewers
- jeffvance
- copejon
- verult
- davidz627

32
vendor/k8s.io/kubernetes/test/e2e/framework/volume/fixtures.go
generated
vendored
@ -368,11 +368,7 @@ func runVolumeTesterPod(client clientset.Interface, timeouts *framework.TimeoutC
	var gracePeriod int64 = 1
	var command string

	if !framework.NodeOSDistroIs("windows") {
		command = "while true ; do sleep 2; done "
	} else {
		command = "while(1) {sleep 2}"
	}
	command = "while true ; do sleep 2; done "
	seLinuxOptions := &v1.SELinuxOptions{Level: "s0:c0,c1"}
	clientPod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
@ -572,47 +568,30 @@ func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fs
// generateWriteCmd is used by generateWriteBlockCmd and generateWriteFileCmd
func generateWriteCmd(content, path string) []string {
	var commands []string
	if !framework.NodeOSDistroIs("windows") {
		commands = []string{"/bin/sh", "-c", "echo '" + content + "' > " + path}
	} else {
		commands = []string{"powershell", "/c", "echo '" + content + "' > " + path}
	}
	commands = []string{"/bin/sh", "-c", "echo '" + content + "' > " + path}
	return commands
}

// generateReadBlockCmd generates the corresponding command lines to read from a block device with the given file path.
// Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh
func generateReadBlockCmd(fullPath string, numberOfCharacters int) []string {
	var commands []string
	if !framework.NodeOSDistroIs("windows") {
		commands = []string{"head", "-c", strconv.Itoa(numberOfCharacters), fullPath}
	} else {
		// TODO: is there a way on windows to get the first X bytes from a device?
		commands = []string{"powershell", "/c", "type " + fullPath}
	}
	commands = []string{"head", "-c", strconv.Itoa(numberOfCharacters), fullPath}
	return commands
}

// generateWriteBlockCmd generates the corresponding command lines to write to a block device the given content.
// Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh
func generateWriteBlockCmd(content, fullPath string) []string {
	return generateWriteCmd(content, fullPath)
}

// GenerateReadFileCmd generates the corresponding command lines to read from a file with the given file path.
// Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh
func GenerateReadFileCmd(fullPath string) []string {
	var commands []string
	if !framework.NodeOSDistroIs("windows") {
		commands = []string{"cat", fullPath}
	} else {
		commands = []string{"powershell", "/c", "type " + fullPath}
	}
	commands = []string{"cat", fullPath}
	return commands
}

// generateWriteFileCmd generates the corresponding command lines to write a file with the given content and file path.
// Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh
func generateWriteFileCmd(content, fullPath string) []string {
	return generateWriteCmd(content, fullPath)
}
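With the Windows branches gone, these helpers always emit POSIX shell argv slices. A standalone sketch of the resulting commands, with the helper bodies copied from the diff (the sample content, path, and device are assumptions):

package main

import (
	"fmt"
	"strconv"
)

// generateWriteCmd mirrors the vendored helper after this change:
// a single /bin/sh invocation that redirects the content into path.
func generateWriteCmd(content, path string) []string {
	return []string{"/bin/sh", "-c", "echo '" + content + "' > " + path}
}

// generateReadBlockCmd mirrors the vendored helper: head -c reads the
// first N bytes back from a block device.
func generateReadBlockCmd(fullPath string, numberOfCharacters int) []string {
	return []string{"head", "-c", strconv.Itoa(numberOfCharacters), fullPath}
}

func main() {
	fmt.Println(generateWriteCmd("dummy content", "/mnt/test/data"))
	// [/bin/sh -c echo 'dummy content' > /mnt/test/data]
	fmt.Println(generateReadBlockCmd("/dev/xvda", 13))
	// [head -c 13 /dev/xvda]
}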
@ -638,9 +617,6 @@ func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.Persi
// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
// is resolved. Otherwise there will be dependency issue.
func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string, error) {
	if framework.NodeOSDistroIs("windows") {
		return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "powershell", "/c", shExec)
	}
	return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
}