rebase: update kubernetes to v1.23.0

Update the Go dependency to the latest released Kubernetes version, i.e. v1.23.0.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Author: Madhu Rajanna
Date: 2021-12-08 19:20:47 +05:30
Committed by: mergify[bot]
Commit: 5762da3e91 (parent 42403e2ba7)
789 changed files with 49781 additions and 11501 deletions
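
Bumping k8s.io/kubernetes to a released tag also means pinning the staging repositories it depends on. Below is a minimal go.mod sketch of that pattern; the module list is illustrative only and assumes the usual v0.23.0 staging tags that correspond to Kubernetes v1.23.0, not this repository's full dependency set.

require (
    k8s.io/api v0.23.0
    k8s.io/apimachinery v0.23.0
    k8s.io/client-go v0.23.0
    k8s.io/kubernetes v1.23.0
)

// k8s.io/kubernetes pins its staging repos to v0.0.0, so every staging module
// pulled in transitively has to be replaced with the matching v0.23.0 tag.
replace (
    k8s.io/api => k8s.io/api v0.23.0
    k8s.io/apimachinery => k8s.io/apimachinery v0.23.0
    k8s.io/client-go => k8s.io/client-go v0.23.0
)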


@ -37,7 +37,6 @@ rules:
- k8s.io/kubernetes/pkg/controller/service
- k8s.io/kubernetes/pkg/controller/util/node
- k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util
- k8s.io/kubernetes/pkg/controller/volume/scheduling
- k8s.io/kubernetes/pkg/credentialprovider
- k8s.io/kubernetes/pkg/credentialprovider/aws
- k8s.io/kubernetes/pkg/credentialprovider/azure
@ -193,6 +192,7 @@ rules:
- k8s.io/kubernetes/pkg/scheduler/internal/parallelize
- k8s.io/kubernetes/pkg/scheduler/internal/queue
- k8s.io/kubernetes/pkg/scheduler/listers
- k8s.io/kubernetes/pkg/scheduler/testing
- k8s.io/kubernetes/pkg/scheduler/metrics
- k8s.io/kubernetes/pkg/scheduler/nodeinfo
- k8s.io/kubernetes/pkg/scheduler/util


@ -17,7 +17,6 @@ limitations under the License.
package framework
import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"reflect"
"runtime"
"sync"
@ -73,7 +72,7 @@ func RunCleanupActions() {
}()
// Run unlocked.
for _, fn := range list {
e2elog.Logf("Running Cleanup Action: %v", runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name())
Logf("Running Cleanup Action: %v", runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name())
fn()
}
}
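
For context, a minimal sketch of how a test registers one of the functions that RunCleanupActions iterates over, assuming the framework's existing AddCleanupAction helper and CleanupActionHandle type:

package e2e

import "k8s.io/kubernetes/test/e2e/framework"

// registerFixtureCleanup queues a teardown function; RunCleanupActions will
// invoke it later and log its name through Logf, as in the loop above.
func registerFixtureCleanup() framework.CleanupActionHandle {
    return framework.AddCleanupAction(func() {
        framework.Logf("tearing down shared test fixture")
    })
}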


@ -58,6 +58,7 @@ func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error)
const tty = false
Logf("ExecWithOptions: Clientset creation")
req := f.ClientSet.CoreV1().RESTClient().Post().
Resource("pods").
Name(options.PodName).
@ -74,8 +75,8 @@ func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error)
}, scheme.ParameterCodec)
var stdout, stderr bytes.Buffer
Logf("ExecWithOptions: execute(POST %s %s)", req.URL())
err = execute("POST", req.URL(), config, options.Stdin, &stdout, &stderr, tty)
if options.PreserveWhitespace {
return stdout.String(), stderr.String(), err
}
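
A hedged usage sketch of the method being instrumented here; it assumes a *framework.Framework and an already-running pod, and uses the ExecOptions fields the method reads (PodName, PreserveWhitespace, and friends):

package e2e

import (
    v1 "k8s.io/api/core/v1"

    "k8s.io/kubernetes/test/e2e/framework"
)

// execInPod runs a command in the pod's first container and returns stdout/stderr.
func execInPod(f *framework.Framework, pod *v1.Pod) (string, string, error) {
    return f.ExecWithOptions(framework.ExecOptions{
        Command:            []string{"cat", "/etc/resolv.conf"},
        Namespace:          pod.Namespace,
        PodName:            pod.Name,
        ContainerName:      pod.Spec.Containers[0].Name,
        CaptureStdout:      true,
        CaptureStderr:      true,
        PreserveWhitespace: false,
    })
}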


@ -49,6 +49,7 @@ func Failf(format string, args ...interface{}) {
skip := 2
log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
e2eginkgowrapper.Fail(nowStamp()+": "+msg, skip)
panic("unreachable")
}
// Fail is a replacement for ginkgo.Fail which logs the problem as it occurs


@ -318,7 +318,7 @@ func (g *Grabber) GrabFromSnapshotController(podName string, port int) (Snapshot
func (g *Grabber) GrabFromAPIServer() (APIServerMetrics, error) {
output, err := g.getMetricsFromAPIServer()
if err != nil {
return APIServerMetrics{}, nil
return APIServerMetrics{}, err
}
return parseAPIServerMetrics(output)
}


@ -563,18 +563,15 @@ func GetClusterZones(c clientset.Interface) (sets.String, error) {
// GetSchedulableClusterZones returns the values of zone label collected from all nodes which are schedulable.
func GetSchedulableClusterZones(c clientset.Interface) (sets.String, error) {
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
// GetReadySchedulableNodes already filters out tainted and unschedulable nodes.
nodes, err := GetReadySchedulableNodes(c)
if err != nil {
return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err)
return nil, fmt.Errorf("error getting nodes while attempting to list cluster zones: %v", err)
}
// collect values of zone label from all nodes
zones := sets.NewString()
for _, node := range nodes.Items {
// We should have at least 1 node in the zone which is schedulable.
if !IsNodeSchedulable(&node) {
continue
}
if zone, found := node.Labels[v1.LabelFailureDomainBetaZone]; found {
zones.Insert(zone)
}
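
As a usage sketch, a caller that needs a multi-zone cluster could gate on the schedulable zones returned here; this assumes the conventional e2eskipper alias for the skipper package that appears later in this commit:

package e2e

import (
    clientset "k8s.io/client-go/kubernetes"

    "k8s.io/kubernetes/test/e2e/framework"
    e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
)

// skipUnlessMultiZone skips the calling test when fewer than two schedulable zones exist.
func skipUnlessMultiZone(c clientset.Interface) {
    zones, err := framework.GetSchedulableClusterZones(c)
    framework.ExpectNoError(err)
    if zones.Len() < 2 {
        e2eskipper.Skipf("test requires nodes in at least 2 zones, found %d", zones.Len())
    }
}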


@ -34,7 +34,7 @@ import (
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)
const etcdImage = "3.5.0-0"
const etcdImage = "3.5.1-0"
// EtcdUpgrade upgrades etcd on GCE.
func EtcdUpgrade(targetStorage, targetVersion string) error {


@ -166,7 +166,7 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
if podConfig.NS == "" {
return nil, fmt.Errorf("Cannot create pod with empty namespace")
}
if len(podConfig.Command) == 0 && !NodeOSDistroIs("windows") {
if len(podConfig.Command) == 0 {
podConfig.Command = "trap exit TERM; while true; do sleep 1; done"
}


@ -19,6 +19,8 @@ package pod
import (
"context"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
@ -43,6 +45,11 @@ import (
// the pod has already reached completed state.
var errPodCompleted = fmt.Errorf("pod ran to completion")
// LabelLogOnPodFailure can be used to mark which Pods will have their logs logged in the case of
// a test failure. By default, if there are no Pods with this label, only the first 5 Pods will
// have their logs fetched.
const LabelLogOnPodFailure = "log-on-pod-failure"
// TODO: Move to its own subpkg.
// expectNoError checks if "err" is set, and if so, fails assertion while logging the error.
func expectNoError(err error, explain ...interface{}) {
@ -335,6 +342,23 @@ func podContainerStarted(c clientset.Interface, namespace, podName string, conta
}
}
func isContainerRunning(c clientset.Interface, namespace, podName, containerName string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, statuses := range [][]v1.ContainerStatus{pod.Status.ContainerStatuses, pod.Status.InitContainerStatuses, pod.Status.EphemeralContainerStatuses} {
for _, cs := range statuses {
if cs.Name == containerName {
return cs.State.Running != nil, nil
}
}
}
return false, nil
}
}
// LogPodStates logs basic info of provided pods for debugging.
func LogPodStates(pods []v1.Pod) {
// Find maximum widths for pod, node, and phase strings for column printing.
@ -388,14 +412,68 @@ func logPodTerminationMessages(pods []v1.Pod) {
}
}
// logPodLogs logs the container logs from pods in the given namespace. This can be helpful for debugging
// issues that do not cause the container to fail (e.g.: network connectivity issues)
// We will log the Pods that have the LabelLogOnPodFailure label. If there aren't any, we default to
// logging only the first 5 Pods. This requires the reportDir to be set, and the pods are logged into:
// {report_dir}/pods/{namespace}/{pod}/{container_name}/logs.txt
func logPodLogs(c clientset.Interface, namespace string, pods []v1.Pod, reportDir string) {
if reportDir == "" {
return
}
var logPods []v1.Pod
for _, pod := range pods {
if _, ok := pod.Labels[LabelLogOnPodFailure]; ok {
logPods = append(logPods, pod)
}
}
maxPods := len(logPods)
// There are no pods with the LabelLogOnPodFailure label, we default to the first 5 Pods.
if maxPods == 0 {
logPods = pods
maxPods = len(pods)
if maxPods > 5 {
maxPods = 5
}
}
tailLen := 42
for i := 0; i < maxPods; i++ {
pod := logPods[i]
for _, container := range pod.Spec.Containers {
logs, err := getPodLogsInternal(c, namespace, pod.Name, container.Name, false, nil, &tailLen)
if err != nil {
e2elog.Logf("Unable to fetch %s/%s/%s logs: %v", pod.Namespace, pod.Name, container.Name, err)
continue
}
logDir := filepath.Join(reportDir, namespace, pod.Name, container.Name)
err = os.MkdirAll(logDir, 0755)
if err != nil {
e2elog.Logf("Unable to create path '%s'. Err: %v", logDir, err)
continue
}
logPath := filepath.Join(logDir, "logs.txt")
err = os.WriteFile(logPath, []byte(logs), 0644)
if err != nil {
e2elog.Logf("Could not write the container logs in: %s. Err: %v", logPath, err)
}
}
}
}
// DumpAllPodInfoForNamespace logs all pod information for a given namespace.
func DumpAllPodInfoForNamespace(c clientset.Interface, namespace string) {
func DumpAllPodInfoForNamespace(c clientset.Interface, namespace, reportDir string) {
pods, err := c.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
e2elog.Logf("unable to fetch pod debug info: %v", err)
}
LogPodStates(pods.Items)
logPodTerminationMessages(pods.Items)
logPodLogs(c, namespace, pods.Items, reportDir)
}
// FilterNonRestartablePods filters out pods that will never get recreated if
@ -544,23 +622,23 @@ func checkPodsCondition(c clientset.Interface, ns string, podNames []string, tim
// GetPodLogs returns the logs of the specified container (namespace/pod/container).
func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, false, nil)
return getPodLogsInternal(c, namespace, podName, containerName, false, nil, nil)
}
// GetPodLogsSince returns the logs of the specified container (namespace/pod/container) since a timestamp.
func GetPodLogsSince(c clientset.Interface, namespace, podName, containerName string, since time.Time) (string, error) {
sinceTime := metav1.NewTime(since)
return getPodLogsInternal(c, namespace, podName, containerName, false, &sinceTime)
return getPodLogsInternal(c, namespace, podName, containerName, false, &sinceTime, nil)
}
// GetPreviousPodLogs returns the logs of the previous instance of the
// specified container (namespace/pod/container).
func GetPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, true, nil)
return getPodLogsInternal(c, namespace, podName, containerName, true, nil, nil)
}
// utility function for gomega Eventually
func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool, sinceTime *metav1.Time) (string, error) {
func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool, sinceTime *metav1.Time, tailLines *int) (string, error) {
request := c.CoreV1().RESTClient().Get().
Resource("pods").
Namespace(namespace).
@ -570,6 +648,9 @@ func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName
if sinceTime != nil {
request.Param("sinceTime", sinceTime.Format(time.RFC3339))
}
if tailLines != nil {
request.Param("tailLines", strconv.Itoa(*tailLines))
}
logs, err := request.Do(context.TODO()).Raw()
if err != nil {
return "", err


@ -21,7 +21,6 @@ import (
"context"
"errors"
"fmt"
"sync"
"text/tabwriter"
"time"
@ -114,8 +113,6 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
start := time.Now()
e2elog.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
timeout, minPods, ns)
wg := sync.WaitGroup{}
wg.Add(1)
var ignoreNotReady bool
badPods := []v1.Pod{}
desiredPods := 0
@ -550,3 +547,26 @@ func WaitForPodContainerToFail(c clientset.Interface, namespace, podName string,
func WaitForPodContainerStarted(c clientset.Interface, namespace, podName string, containerIndex int, timeout time.Duration) error {
return wait.PollImmediate(poll, timeout, podContainerStarted(c, namespace, podName, containerIndex))
}
// WaitForPodFailedReason wait for pod failed reason in status, for example "SysctlForbidden".
func WaitForPodFailedReason(c clientset.Interface, pod *v1.Pod, reason string, timeout time.Duration) error {
waitErr := wait.PollImmediate(poll, timeout, func() (bool, error) {
pod, err := c.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
if pod.Status.Reason == reason {
return true, nil
}
return false, nil
})
if waitErr != nil {
return fmt.Errorf("error waiting for pod SysctlForbidden status: %v", waitErr)
}
return nil
}
// WaitForContainerRunning waits for the given Pod container to have a state of running
func WaitForContainerRunning(c clientset.Interface, namespace, podName, containerName string, timeout time.Duration) error {
return wait.PollImmediate(poll, timeout, isContainerRunning(c, namespace, podName, containerName))
}
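
A short usage sketch for the new helper, assuming a test that expects the kubelet to reject a pod with a forbidden sysctl; the reason string is matched against pod.Status.Reason, as the helper's doc comment notes:

package e2e

import (
    "time"

    v1 "k8s.io/api/core/v1"
    clientset "k8s.io/client-go/kubernetes"

    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// expectSysctlForbidden waits until the pod reports the SysctlForbidden failure reason.
func expectSysctlForbidden(c clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
    return e2epod.WaitForPodFailedReason(c, pod, "SysctlForbidden", timeout)
}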


@ -18,6 +18,7 @@ package framework
import (
"context"
"encoding/json"
"fmt"
"regexp"
"sync"
@ -27,7 +28,9 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
@ -37,6 +40,7 @@ import (
"github.com/onsi/gomega"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
"k8s.io/kubernetes/pkg/kubelet/util/format"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
@ -146,6 +150,30 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
}))
}
// AddEphemeralContainerSync adds an EphemeralContainer to a pod and waits for it to be running.
func (c *PodClient) AddEphemeralContainerSync(pod *v1.Pod, ec *v1.EphemeralContainer, timeout time.Duration) error {
namespace := c.f.Namespace.Name
podJS, err := json.Marshal(pod)
ExpectNoError(err, "error creating JSON for pod %q", format.Pod(pod))
ecPod := pod.DeepCopy()
ecPod.Spec.EphemeralContainers = append(ecPod.Spec.EphemeralContainers, *ec)
ecJS, err := json.Marshal(ecPod)
ExpectNoError(err, "error creating JSON for pod with ephemeral container %q", format.Pod(pod))
patch, err := strategicpatch.CreateTwoWayMergePatch(podJS, ecJS, pod)
ExpectNoError(err, "error creating patch to add ephemeral container %q", format.Pod(pod))
// Clients may optimistically attempt to add an ephemeral container to determine whether the EphemeralContainers feature is enabled.
if _, err := c.Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "ephemeralcontainers"); err != nil {
return err
}
ExpectNoError(e2epod.WaitForContainerRunning(c.f.ClientSet, namespace, pod.Name, ec.Name, timeout))
return nil
}
// DeleteSync deletes the pod and wait for the pod to disappear for `timeout`. If the pod doesn't
// disappear before the timeout, it will fail the test.
func (c *PodClient) DeleteSync(name string, options metav1.DeleteOptions, timeout time.Duration) {


@ -19,8 +19,11 @@ package framework
import (
"context"
"fmt"
"strings"
"time"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/storage/utils"
"github.com/onsi/ginkgo"
@ -295,17 +298,40 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, timeouts *framework.Time
}
// create the PV resource. Fails test on error.
func createPV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
pv, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
func createPV(c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
var resultPV *v1.PersistentVolume
var lastCreateErr error
err := wait.PollImmediate(29*time.Second, timeouts.PVCreate, func() (done bool, err error) {
resultPV, lastCreateErr = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
if lastCreateErr != nil {
// If we hit a quota problem, we are not done and should retry again. This happens to be the quota failure string for GCP.
// If quota failure strings are found for other platforms, they can be added to improve reliability when running
// many parallel test jobs in a single cloud account. This corresponds to controller-like behavior and
// to what we would recommend for general clients.
if strings.Contains(lastCreateErr.Error(), `googleapi: Error 403: Quota exceeded for quota group`) {
return false, nil
}
// if it was not a quota failure, fail immediately
return false, lastCreateErr
}
return true, nil
})
// if we have an error from creating the PV, use that instead of a timeout error
if lastCreateErr != nil {
return nil, fmt.Errorf("PV Create API error: %v", err)
}
if err != nil {
return nil, fmt.Errorf("PV Create API error: %v", err)
}
return pv, nil
return resultPV, nil
}
// CreatePV creates the PV resource. Fails test on error.
func CreatePV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
return createPV(c, pv)
func CreatePV(c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
return createPV(c, timeouts, pv)
}
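
Since createPV/CreatePV now take a *framework.TimeoutContext, existing callers have to thread one through. A hedged sketch, assuming the framework instance exposes its timeouts as f.Timeouts (built from NewTimeoutContextWithDefaults) and the usual e2epv alias for this package:

package e2e

import (
    v1 "k8s.io/api/core/v1"

    "k8s.io/kubernetes/test/e2e/framework"
    e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
)

// createTestPV creates a PV using the suite's PVCreate timeout for the quota retry loop above.
func createTestPV(f *framework.Framework, pvSpec *v1.PersistentVolume) (*v1.PersistentVolume, error) {
    return e2epv.CreatePV(f.ClientSet, f.Timeouts, pvSpec)
}
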
// CreatePVC creates the PVC resource. Fails test on error.
@ -323,7 +349,7 @@ func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim)
// Note: in the pre-bind case the real PVC name, which is generated, is not
// known until after the PVC is instantiated. This is why the pvc is created
// before the pv.
func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
func CreatePVCPV(c clientset.Interface, timeouts *framework.TimeoutContext, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
// make the pvc spec
pvc := MakePersistentVolumeClaim(pvcConfig, ns)
preBindMsg := ""
@ -344,7 +370,7 @@ func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
if preBind {
pv.Spec.ClaimRef.Name = pvc.Name
}
pv, err = createPV(c, pv)
pv, err = createPV(c, timeouts, pv)
if err != nil {
return nil, pvc, err
}
@ -358,7 +384,7 @@ func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
// Note: in the pre-bind case the real PV name, which is generated, is not
// known until after the PV is instantiated. This is why the pv is created
// before the pvc.
func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
func CreatePVPVC(c clientset.Interface, timeouts *framework.TimeoutContext, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
preBindMsg := ""
if preBind {
preBindMsg = " pre-bound"
@ -370,7 +396,7 @@ func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
pvc := MakePersistentVolumeClaim(pvcConfig, ns)
// instantiate the pv
pv, err := createPV(c, pv)
pv, err := createPV(c, timeouts, pv)
if err != nil {
return nil, nil, err
}
@ -392,7 +418,7 @@ func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
// sees an error returned, it needs to decide what to do about entries in the maps.
// Note: when the test suite deletes the namespace orphaned pvcs and pods are deleted. However,
// orphaned pvs are not deleted and will remain after the suite completes.
func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig) (PVMap, PVCMap, error) {
func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig) (PVMap, PVCMap, error) {
pvMap := make(PVMap, numpvs)
pvcMap := make(PVCMap, numpvcs)
extraPVCs := 0
@ -405,7 +431,7 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
// create pvs and pvcs
for i := 0; i < pvsToCreate; i++ {
pv, pvc, err := CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
pv, pvc, err := CreatePVPVC(c, timeouts, pvConfig, pvcConfig, ns, false)
if err != nil {
return pvMap, pvcMap, err
}
@ -416,7 +442,7 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
// create extra pvs or pvcs as needed
for i := 0; i < extraPVs; i++ {
pv := MakePersistentVolume(pvConfig)
pv, err := createPV(c, pv)
pv, err := createPV(c, timeouts, pv)
if err != nil {
return pvMap, pvcMap, err
}


@ -43,15 +43,6 @@ import (
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)
// New local storage types to support local storage capacity isolation
var localStorageCapacityIsolation featuregate.Feature = "LocalStorageCapacityIsolation"
var (
downwardAPIHugePages featuregate.Feature = "DownwardAPIHugePages"
execProbeTimeout featuregate.Feature = "ExecProbeTimeout"
csiMigration featuregate.Feature = "CSIMigration"
)
func skipInternalf(caller int, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
framework.Logf(msg)
@ -127,6 +118,7 @@ func pruneStack(skip int) string {
// Skipf skips with information about why the test is being skipped.
func Skipf(format string, args ...interface{}) {
skipInternalf(1, format, args...)
panic("unreachable")
}
// SkipUnlessAtLeast skips if the value is less than the minValue.
@ -136,28 +128,17 @@ func SkipUnlessAtLeast(value int, minValue int, message string) {
}
}
// SkipUnlessLocalEphemeralStorageEnabled skips if the LocalStorageCapacityIsolation is not enabled.
func SkipUnlessLocalEphemeralStorageEnabled() {
if !utilfeature.DefaultFeatureGate.Enabled(localStorageCapacityIsolation) {
skipInternalf(1, "Only supported when %v feature is enabled", localStorageCapacityIsolation)
// SkipUnlessFeatureGateEnabled skips if the feature is disabled
func SkipUnlessFeatureGateEnabled(gate featuregate.Feature) {
if !utilfeature.DefaultFeatureGate.Enabled(gate) {
skipInternalf(1, "Only supported when %v feature is enabled", gate)
}
}
func SkipUnlessDownwardAPIHugePagesEnabled() {
if !utilfeature.DefaultFeatureGate.Enabled(downwardAPIHugePages) {
skipInternalf(1, "Only supported when %v feature is enabled", downwardAPIHugePages)
}
}
func SkipUnlessExecProbeTimeoutEnabled() {
if !utilfeature.DefaultFeatureGate.Enabled(execProbeTimeout) {
skipInternalf(1, "Only supported when %v feature is enabled", execProbeTimeout)
}
}
func SkipIfCSIMigrationEnabled() {
if utilfeature.DefaultFeatureGate.Enabled(csiMigration) {
skipInternalf(1, "Only supported when %v feature is disabled", csiMigration)
// SkipIfFeatureGateEnabled skips if the feature is enabled
func SkipIfFeatureGateEnabled(gate featuregate.Feature) {
if utilfeature.DefaultFeatureGate.Enabled(gate) {
skipInternalf(1, "Only supported when %v feature is disabled", gate)
}
}
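
Callers of the removed single-purpose skip helpers migrate to the generic ones by passing the gate constant explicitly; a sketch assuming the gate is referenced from k8s.io/kubernetes/pkg/features, where these feature gates are defined:

package e2e

import (
    "k8s.io/kubernetes/pkg/features"
    e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
)

// skipUnlessLocalEphemeralStorage replaces the removed SkipUnlessLocalEphemeralStorageEnabled helper.
func skipUnlessLocalEphemeralStorage() {
    e2eskipper.SkipUnlessFeatureGateEnabled(features.LocalStorageCapacityIsolation)
}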


@ -135,7 +135,6 @@ type TestContextType struct {
GatherMetricsAfterTest string
GatherSuiteMetricsAfterTest bool
MaxNodesToGather int
AllowGatheringProfiles bool
// If set to 'true' framework will gather ClusterAutoscaler metrics when gathering them for other components.
IncludeClusterAutoscalerMetrics bool
// Currently supported values are 'hr' for human-readable and 'json'. It's a comma separated list.
@ -188,6 +187,10 @@ type TestContextType struct {
// SnapshotControllerHTTPPort the port used for communicating with the snapshot controller HTTP endpoint.
SnapshotControllerHTTPPort int
// RequireDevices makes mandatory on the environment on which tests are run 1+ devices exposed through device plugins.
// With this enabled The e2e tests requiring devices for their operation can assume that if devices aren't reported, the test can fail
RequireDevices bool
}
// NodeKillerConfig describes configuration of NodeKiller -- a utility to
@ -292,7 +295,6 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
flags.IntVar(&TestContext.MaxNodesToGather, "max-nodes-to-gather-from", 20, "The maximum number of nodes to gather extended info from on test failure.")
flags.StringVar(&TestContext.GatherMetricsAfterTest, "gather-metrics-at-teardown", "false", "If set to 'true' framework will gather metrics from all components after each test. If set to 'master' only master component metrics would be gathered.")
flags.BoolVar(&TestContext.GatherSuiteMetricsAfterTest, "gather-suite-metrics-at-teardown", false, "If set to true framwork will gather metrics from all components after the whole test suite completes.")
flags.BoolVar(&TestContext.AllowGatheringProfiles, "allow-gathering-profiles", true, "If set to true framework will allow to gather CPU/memory allocation pprof profiles from the master.")
flags.BoolVar(&TestContext.IncludeClusterAutoscalerMetrics, "include-cluster-autoscaler", false, "If set to true, framework will include Cluster Autoscaler when gathering metrics.")
flags.StringVar(&TestContext.OutputPrintType, "output-print-type", "json", "Format in which summaries should be printed: 'hr' for human readable, 'json' for JSON ones.")
flags.BoolVar(&TestContext.DumpLogsOnFailure, "dump-logs-on-failure", true, "If set to true test will dump data about the namespace in which test was running.")
@ -338,6 +340,9 @@ func RegisterClusterFlags(flags *flag.FlagSet) {
flags.StringVar(&TestContext.KubeVolumeDir, "volume-dir", "/var/lib/kubelet", "Path to the directory containing the kubelet volumes.")
flags.StringVar(&TestContext.CertDir, "cert-dir", "", "Path to the directory containing the certs. Default is empty, which doesn't use certs.")
flags.StringVar(&TestContext.RepoRoot, "repo-root", "../../", "Root directory of kubernetes repository, for finding test files.")
// NOTE: Node E2E tests have this flag defined as well, but true by default.
// If this becomes true as well, they should be refactored into RegisterCommonFlags.
flags.BoolVar(&TestContext.PrepullImages, "prepull-images", false, "If true, prepull images so image pull failures do not cause test failures.")
flags.StringVar(&TestContext.Provider, "provider", "", "The name of the Kubernetes provider (gce, gke, local, skeleton (the fallback if not set), etc.)")
flags.StringVar(&TestContext.Tooling, "tooling", "", "The tooling in use (kops, gke, etc.)")
flags.StringVar(&TestContext.OutputDir, "e2e-output-dir", "/tmp", "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.")


@ -29,6 +29,7 @@ const (
claimBoundTimeout = 3 * time.Minute
pvReclaimTimeout = 3 * time.Minute
pvBoundTimeout = 3 * time.Minute
pvCreateTimeout = 3 * time.Minute
pvDeleteTimeout = 3 * time.Minute
pvDeleteSlowTimeout = 20 * time.Minute
snapshotCreateTimeout = 5 * time.Minute
@ -67,6 +68,9 @@ type TimeoutContext struct {
// PVBound is how long PVs have to become bound.
PVBound time.Duration
// PVCreate is how long PVs have to be created.
PVCreate time.Duration
// PVDelete is how long PVs have to become deleted.
PVDelete time.Duration
@ -95,6 +99,7 @@ func NewTimeoutContextWithDefaults() *TimeoutContext {
ClaimBound: claimBoundTimeout,
PVReclaim: pvReclaimTimeout,
PVBound: pvBoundTimeout,
PVCreate: pvCreateTimeout,
PVDelete: pvDeleteTimeout,
PVDeleteSlow: pvDeleteSlowTimeout,
SnapshotCreate: snapshotCreateTimeout,


@ -63,6 +63,7 @@ import (
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"
netutils "k8s.io/utils/net"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
@ -898,7 +899,7 @@ func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
return c.CoreV1().Events(ns).List(context.TODO(), opts)
}, namespace)
e2epod.DumpAllPodInfoForNamespace(c, namespace)
e2epod.DumpAllPodInfoForNamespace(c, namespace, TestContext.ReportDir)
// If cluster is large, then the following logs are basically useless, because:
// 1. it takes tens of minutes or hours to grab all of them
@ -1265,7 +1266,7 @@ func getControlPlaneAddresses(c clientset.Interface) ([]string, []string, []stri
if err != nil {
Failf("Failed to parse hostname: %v", err)
}
if net.ParseIP(hostURL.Host) != nil {
if netutils.ParseIPSloppy(hostURL.Host) != nil {
externalIPs = append(externalIPs, hostURL.Host)
} else {
hostnames = append(hostnames, hostURL.Host)
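
The switch from net.ParseIP to netutils.ParseIPSloppy preserves acceptance of IPv4 addresses with leading zeros, which Go 1.17's net.ParseIP started rejecting. A small standalone illustration, assuming k8s.io/utils/net behaves as its "sloppy" parsers are documented to:

package main

import (
    "fmt"
    "net"

    netutils "k8s.io/utils/net"
)

func main() {
    host := "010.002.003.004" // leading zeros, still seen in older configs
    fmt.Println(net.ParseIP(host) != nil)            // false on Go 1.17+
    fmt.Println(netutils.ParseIPSloppy(host) != nil) // true: keeps the pre-1.17 leniency
}
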
@ -1376,7 +1377,7 @@ retriesLoop:
// NOTE the test may need access to the events to see what's going on, such as a change in status
actualWatchEvents := scenario(resourceWatch)
errs := sets.NewString()
ExpectEqual(len(expectedWatchEvents) <= len(actualWatchEvents), true, "Error: actual watch events amount (%d) must be greater than or equal to expected watch events amount (%d)", len(actualWatchEvents), len(expectedWatchEvents))
gomega.Expect(len(expectedWatchEvents)).To(gomega.BeNumerically("<=", len(actualWatchEvents)), "Did not get enough watch events")
totalValidWatchEvents := 0
foundEventIndexes := map[int]*int{}
@ -1405,7 +1406,9 @@ retriesLoop:
fmt.Println("invariants violated:\n", strings.Join(errs.List(), "\n - "))
continue retriesLoop
}
ExpectEqual(errs.Len() > 0, false, strings.Join(errs.List(), "\n - "))
if errs.Len() > 0 {
Failf("Unexpected error(s): %v", strings.Join(errs.List(), "\n - "))
}
ExpectEqual(totalValidWatchEvents, len(expectedWatchEvents), "Error: there must be an equal amount of total valid watch events (%d) and expected watch events (%d)", totalValidWatchEvents, len(expectedWatchEvents))
break retriesLoop
}


@ -458,14 +458,14 @@ func runVolumeTesterPod(client clientset.Interface, timeouts *framework.TimeoutC
return clientPod, nil
}
func testVolumeContent(f *framework.Framework, pod *v1.Pod, fsGroup *int64, fsType string, tests []Test) {
func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string, fsGroup *int64, fsType string, tests []Test) {
ginkgo.By("Checking that text file contents are perfect.")
for i, test := range tests {
if test.Mode == v1.PersistentVolumeBlock {
// Block: check content
deviceName := fmt.Sprintf("/opt/%d", i)
commands := generateReadBlockCmd(deviceName, len(test.ExpectedContent))
_, err := framework.LookForStringInPodExec(pod.Namespace, pod.Name, commands, test.ExpectedContent, time.Minute)
commands := GenerateReadBlockCmd(deviceName, len(test.ExpectedContent))
_, err := framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)
// Check that it's a real block device
@ -474,7 +474,7 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, fsGroup *int64, fsTy
// Filesystem: check content
fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
commands := GenerateReadFileCmd(fileName)
_, err := framework.LookForStringInPodExec(pod.Namespace, pod.Name, commands, test.ExpectedContent, time.Minute)
_, err := framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)
// Check that a directory has been mounted
@ -485,14 +485,14 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, fsGroup *int64, fsTy
// Filesystem: check fsgroup
if fsGroup != nil {
ginkgo.By("Checking fsGroup is correct.")
_, err = framework.LookForStringInPodExec(pod.Namespace, pod.Name, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
_, err = framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
framework.ExpectNoError(err, "failed: getting the right privileges in the file %v", int(*fsGroup))
}
// Filesystem: check fsType
if fsType != "" {
ginkgo.By("Checking fsType is correct.")
_, err = framework.LookForStringInPodExec(pod.Namespace, pod.Name, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
_, err = framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
framework.ExpectNoError(err, "failed: getting the right fsType %s", fsType)
}
}
@ -531,7 +531,23 @@ func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64,
e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
}()
testVolumeContent(f, clientPod, fsGroup, fsType, tests)
testVolumeContent(f, clientPod, "", fsGroup, fsType, tests)
ginkgo.By("Repeating the test on an ephemeral container (if enabled)")
ec := &v1.EphemeralContainer{
EphemeralContainerCommon: v1.EphemeralContainerCommon(clientPod.Spec.Containers[0]),
}
ec.Name = "volume-ephemeral-container"
err = f.PodClient().AddEphemeralContainerSync(clientPod, ec, timeouts.PodStart)
// The API server will return NotFound for the subresource when the feature is disabled
// BEGIN TODO: remove after EphemeralContainers feature gate is retired
if apierrors.IsNotFound(err) {
framework.Logf("Skipping ephemeral container re-test because feature is disabled (error: %q)", err)
return
}
// END TODO: remove after EphemeralContainers feature gate is retired
framework.ExpectNoError(err, "failed to add ephemeral container for re-test")
testVolumeContent(f, clientPod, ec.Name, fsGroup, fsType, tests)
}
// InjectContent inserts index.html with given content into given volume. It does so by
@ -572,7 +588,7 @@ func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fs
// Check that the data have been really written in this pod.
// This tests non-persistent volume types
testVolumeContent(f, injectorPod, fsGroup, fsType, tests)
testVolumeContent(f, injectorPod, "", fsGroup, fsType, tests)
}
// generateWriteCmd is used by generateWriteBlockCmd and generateWriteFileCmd
@ -583,7 +599,7 @@ func generateWriteCmd(content, path string) []string {
}
// generateReadBlockCmd generates the corresponding command lines to read from a block device with the given file path.
func generateReadBlockCmd(fullPath string, numberOfCharacters int) []string {
func GenerateReadBlockCmd(fullPath string, numberOfCharacters int) []string {
var commands []string
commands = []string{"head", "-c", strconv.Itoa(numberOfCharacters), fullPath}
return commands