rebase: update kubernetes to v1.23.0

Update the Kubernetes Go dependencies to the latest released
version, i.e. v1.23.0.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Authored by Madhu Rajanna on 2021-12-08 19:20:47 +05:30; committed by mergify[bot].
Parent 42403e2ba7, commit 5762da3e91.
789 changed files with 49781 additions and 11501 deletions.

View File

@ -37,7 +37,6 @@ rules:
- k8s.io/kubernetes/pkg/controller/service
- k8s.io/kubernetes/pkg/controller/util/node
- k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util
- k8s.io/kubernetes/pkg/controller/volume/scheduling
- k8s.io/kubernetes/pkg/credentialprovider
- k8s.io/kubernetes/pkg/credentialprovider/aws
- k8s.io/kubernetes/pkg/credentialprovider/azure
@ -193,6 +192,7 @@ rules:
- k8s.io/kubernetes/pkg/scheduler/internal/parallelize
- k8s.io/kubernetes/pkg/scheduler/internal/queue
- k8s.io/kubernetes/pkg/scheduler/listers
- k8s.io/kubernetes/pkg/scheduler/testing
- k8s.io/kubernetes/pkg/scheduler/metrics
- k8s.io/kubernetes/pkg/scheduler/nodeinfo
- k8s.io/kubernetes/pkg/scheduler/util

View File

@ -17,7 +17,6 @@ limitations under the License.
package framework
import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"reflect"
"runtime"
"sync"
@ -73,7 +72,7 @@ func RunCleanupActions() {
}()
// Run unlocked.
for _, fn := range list {
e2elog.Logf("Running Cleanup Action: %v", runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name())
Logf("Running Cleanup Action: %v", runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name())
fn()
}
}

View File

@ -58,6 +58,7 @@ func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error)
const tty = false
Logf("ExecWithOptions: Clientset creation")
req := f.ClientSet.CoreV1().RESTClient().Post().
Resource("pods").
Name(options.PodName).
@ -74,8 +75,8 @@ func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error)
}, scheme.ParameterCodec)
var stdout, stderr bytes.Buffer
Logf("ExecWithOptions: execute(POST %s %s)", req.URL())
err = execute("POST", req.URL(), config, options.Stdin, &stdout, &stderr, tty)
if options.PreserveWhitespace {
return stdout.String(), stderr.String(), err
}

View File

@ -49,6 +49,7 @@ func Failf(format string, args ...interface{}) {
skip := 2
log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
e2eginkgowrapper.Fail(nowStamp()+": "+msg, skip)
panic("unreachable")
}
// Fail is a replacement for ginkgo.Fail which logs the problem as it occurs

View File

@ -318,7 +318,7 @@ func (g *Grabber) GrabFromSnapshotController(podName string, port int) (Snapshot
func (g *Grabber) GrabFromAPIServer() (APIServerMetrics, error) {
output, err := g.getMetricsFromAPIServer()
if err != nil {
return APIServerMetrics{}, nil
return APIServerMetrics{}, err
}
return parseAPIServerMetrics(output)
}

View File

@ -563,18 +563,15 @@ func GetClusterZones(c clientset.Interface) (sets.String, error) {
// GetSchedulableClusterZones returns the values of zone label collected from all nodes which are schedulable.
func GetSchedulableClusterZones(c clientset.Interface) (sets.String, error) {
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
// GetReadySchedulableNodes already filters out tainted and unschedulable nodes.
nodes, err := GetReadySchedulableNodes(c)
if err != nil {
return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err)
return nil, fmt.Errorf("error getting nodes while attempting to list cluster zones: %v", err)
}
// collect values of zone label from all nodes
zones := sets.NewString()
for _, node := range nodes.Items {
// We should have at least 1 node in the zone which is schedulable.
if !IsNodeSchedulable(&node) {
continue
}
if zone, found := node.Labels[v1.LabelFailureDomainBetaZone]; found {
zones.Insert(zone)
}
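
GetSchedulableClusterZones now delegates node filtering to GetReadySchedulableNodes instead of listing and filtering nodes itself. A minimal caller sketch, assuming the framework package import; the clientset would come from the test's Framework instance:

package mytests

import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// listSchedulableZones is a hypothetical helper that returns the zones backing schedulable nodes.
func listSchedulableZones(c clientset.Interface) ([]string, error) {
	zones, err := framework.GetSchedulableClusterZones(c)
	if err != nil {
		return nil, err
	}
	return zones.List(), nil // sets.String -> sorted slice
}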

View File

@ -34,7 +34,7 @@ import (
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)
const etcdImage = "3.5.0-0"
const etcdImage = "3.5.1-0"
// EtcdUpgrade upgrades etcd on GCE.
func EtcdUpgrade(targetStorage, targetVersion string) error {

View File

@ -166,7 +166,7 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
if podConfig.NS == "" {
return nil, fmt.Errorf("Cannot create pod with empty namespace")
}
if len(podConfig.Command) == 0 && !NodeOSDistroIs("windows") {
if len(podConfig.Command) == 0 {
podConfig.Command = "trap exit TERM; while true; do sleep 1; done"
}

View File

@ -19,6 +19,8 @@ package pod
import (
"context"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
@ -43,6 +45,11 @@ import (
// the pod has already reached completed state.
var errPodCompleted = fmt.Errorf("pod ran to completion")
// LabelLogOnPodFailure can be used to mark which Pods will have their logs logged in the case of
// a test failure. By default, if there are no Pods with this label, only the first 5 Pods will
// have their logs fetched.
const LabelLogOnPodFailure = "log-on-pod-failure"
// TODO: Move to its own subpkg.
// expectNoError checks if "err" is set, and if so, fails assertion while logging the error.
func expectNoError(err error, explain ...interface{}) {
@ -335,6 +342,23 @@ func podContainerStarted(c clientset.Interface, namespace, podName string, conta
}
}
func isContainerRunning(c clientset.Interface, namespace, podName, containerName string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, statuses := range [][]v1.ContainerStatus{pod.Status.ContainerStatuses, pod.Status.InitContainerStatuses, pod.Status.EphemeralContainerStatuses} {
for _, cs := range statuses {
if cs.Name == containerName {
return cs.State.Running != nil, nil
}
}
}
return false, nil
}
}
// LogPodStates logs basic info of provided pods for debugging.
func LogPodStates(pods []v1.Pod) {
// Find maximum widths for pod, node, and phase strings for column printing.
@ -388,14 +412,68 @@ func logPodTerminationMessages(pods []v1.Pod) {
}
}
// logPodLogs logs the container logs from pods in the given namespace. This can be helpful for debugging
// issues that do not cause the container to fail (e.g.: network connectivity issues)
// We will log the Pods that have the LabelLogOnPodFailure label. If there aren't any, we default to
// logging only the first 5 Pods. This requires the reportDir to be set, and the pods are logged into:
// {report_dir}/pods/{namespace}/{pod}/{container_name}/logs.txt
func logPodLogs(c clientset.Interface, namespace string, pods []v1.Pod, reportDir string) {
if reportDir == "" {
return
}
var logPods []v1.Pod
for _, pod := range pods {
if _, ok := pod.Labels[LabelLogOnPodFailure]; ok {
logPods = append(logPods, pod)
}
}
maxPods := len(logPods)
// If there are no pods with the LabelLogOnPodFailure label, we default to the first 5 Pods.
if maxPods == 0 {
logPods = pods
maxPods = len(pods)
if maxPods > 5 {
maxPods = 5
}
}
tailLen := 42
for i := 0; i < maxPods; i++ {
pod := logPods[i]
for _, container := range pod.Spec.Containers {
logs, err := getPodLogsInternal(c, namespace, pod.Name, container.Name, false, nil, &tailLen)
if err != nil {
e2elog.Logf("Unable to fetch %s/%s/%s logs: %v", pod.Namespace, pod.Name, container.Name, err)
continue
}
logDir := filepath.Join(reportDir, namespace, pod.Name, container.Name)
err = os.MkdirAll(logDir, 0755)
if err != nil {
e2elog.Logf("Unable to create path '%s'. Err: %v", logDir, err)
continue
}
logPath := filepath.Join(logDir, "logs.txt")
err = os.WriteFile(logPath, []byte(logs), 0644)
if err != nil {
e2elog.Logf("Could not write the container logs in: %s. Err: %v", logPath, err)
}
}
}
}
// DumpAllPodInfoForNamespace logs all pod information for a given namespace.
func DumpAllPodInfoForNamespace(c clientset.Interface, namespace string) {
func DumpAllPodInfoForNamespace(c clientset.Interface, namespace, reportDir string) {
pods, err := c.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
e2elog.Logf("unable to fetch pod debug info: %v", err)
}
LogPodStates(pods.Items)
logPodTerminationMessages(pods.Items)
logPodLogs(c, namespace, pods.Items, reportDir)
}
// FilterNonRestartablePods filters out pods that will never get recreated if
@ -544,23 +622,23 @@ func checkPodsCondition(c clientset.Interface, ns string, podNames []string, tim
// GetPodLogs returns the logs of the specified container (namespace/pod/container).
func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, false, nil)
return getPodLogsInternal(c, namespace, podName, containerName, false, nil, nil)
}
// GetPodLogsSince returns the logs of the specified container (namespace/pod/container) since a timestamp.
func GetPodLogsSince(c clientset.Interface, namespace, podName, containerName string, since time.Time) (string, error) {
sinceTime := metav1.NewTime(since)
return getPodLogsInternal(c, namespace, podName, containerName, false, &sinceTime)
return getPodLogsInternal(c, namespace, podName, containerName, false, &sinceTime, nil)
}
// GetPreviousPodLogs returns the logs of the previous instance of the
// specified container (namespace/pod/container).
func GetPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, true, nil)
return getPodLogsInternal(c, namespace, podName, containerName, true, nil, nil)
}
// utility function for gomega Eventually
func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool, sinceTime *metav1.Time) (string, error) {
func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool, sinceTime *metav1.Time, tailLines *int) (string, error) {
request := c.CoreV1().RESTClient().Get().
Resource("pods").
Namespace(namespace).
@ -570,6 +648,9 @@ func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName
if sinceTime != nil {
request.Param("sinceTime", sinceTime.Format(time.RFC3339))
}
if tailLines != nil {
request.Param("tailLines", strconv.Itoa(*tailLines))
}
logs, err := request.Do(context.TODO()).Raw()
if err != nil {
return "", err

View File

@ -21,7 +21,6 @@ import (
"context"
"errors"
"fmt"
"sync"
"text/tabwriter"
"time"
@ -114,8 +113,6 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
start := time.Now()
e2elog.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
timeout, minPods, ns)
wg := sync.WaitGroup{}
wg.Add(1)
var ignoreNotReady bool
badPods := []v1.Pod{}
desiredPods := 0
@ -550,3 +547,26 @@ func WaitForPodContainerToFail(c clientset.Interface, namespace, podName string,
func WaitForPodContainerStarted(c clientset.Interface, namespace, podName string, containerIndex int, timeout time.Duration) error {
return wait.PollImmediate(poll, timeout, podContainerStarted(c, namespace, podName, containerIndex))
}
// WaitForPodFailedReason wait for pod failed reason in status, for example "SysctlForbidden".
func WaitForPodFailedReason(c clientset.Interface, pod *v1.Pod, reason string, timeout time.Duration) error {
waitErr := wait.PollImmediate(poll, timeout, func() (bool, error) {
pod, err := c.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
if pod.Status.Reason == reason {
return true, nil
}
return false, nil
})
if waitErr != nil {
return fmt.Errorf("error waiting for pod SysctlForbidden status: %v", waitErr)
}
return nil
}
// WaitForContainerRunning waits for the given Pod container to have a state of running
func WaitForContainerRunning(c clientset.Interface, namespace, podName, containerName string, timeout time.Duration) error {
return wait.PollImmediate(poll, timeout, isContainerRunning(c, namespace, podName, containerName))
}
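
A short sketch of how tests might call the two new wait helpers, assuming the e2epod import alias; the reason and container name are illustrative:

package mytests

import (
	"time"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// expectSysctlForbidden waits for the pod to be failed with the given status.Reason.
func expectSysctlForbidden(c clientset.Interface, pod *v1.Pod) error {
	return e2epod.WaitForPodFailedReason(c, pod, "SysctlForbidden", 3*time.Minute)
}

// waitForDebugger waits until the named container (regular, init, or ephemeral) is running.
func waitForDebugger(c clientset.Interface, pod *v1.Pod) error {
	return e2epod.WaitForContainerRunning(c, pod.Namespace, pod.Name, "debugger", 3*time.Minute)
}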

View File

@ -18,6 +18,7 @@ package framework
import (
"context"
"encoding/json"
"fmt"
"regexp"
"sync"
@ -27,7 +28,9 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
@ -37,6 +40,7 @@ import (
"github.com/onsi/gomega"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
"k8s.io/kubernetes/pkg/kubelet/util/format"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)
@ -146,6 +150,30 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
}))
}
// AddEphemeralContainerSync adds an EphemeralContainer to a pod and waits for it to be running.
func (c *PodClient) AddEphemeralContainerSync(pod *v1.Pod, ec *v1.EphemeralContainer, timeout time.Duration) error {
namespace := c.f.Namespace.Name
podJS, err := json.Marshal(pod)
ExpectNoError(err, "error creating JSON for pod %q", format.Pod(pod))
ecPod := pod.DeepCopy()
ecPod.Spec.EphemeralContainers = append(ecPod.Spec.EphemeralContainers, *ec)
ecJS, err := json.Marshal(ecPod)
ExpectNoError(err, "error creating JSON for pod with ephemeral container %q", format.Pod(pod))
patch, err := strategicpatch.CreateTwoWayMergePatch(podJS, ecJS, pod)
ExpectNoError(err, "error creating patch to add ephemeral container %q", format.Pod(pod))
// Clients may optimistically attempt to add an ephemeral container to determine whether the EphemeralContainers feature is enabled.
if _, err := c.Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "ephemeralcontainers"); err != nil {
return err
}
ExpectNoError(e2epod.WaitForContainerRunning(c.f.ClientSet, namespace, pod.Name, ec.Name, timeout))
return nil
}
// DeleteSync deletes the pod and wait for the pod to disappear for `timeout`. If the pod doesn't
// disappear before the timeout, it will fail the test.
func (c *PodClient) DeleteSync(name string, options metav1.DeleteOptions, timeout time.Duration) {
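
A sketch of how a test might use AddEphemeralContainerSync to attach a debug container to a running pod, assuming a Framework instance f; the container name, image, and command are illustrative. When the EphemeralContainers feature is disabled, the API error (NotFound for the subresource) is returned without waiting:

package mytests

import (
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// addDebugContainer is a hypothetical helper; it patches the ephemeralcontainers
// subresource and then waits for the new container to reach the running state.
func addDebugContainer(f *framework.Framework, pod *v1.Pod) error {
	ec := &v1.EphemeralContainer{
		EphemeralContainerCommon: v1.EphemeralContainerCommon{
			Name:    "debugger",
			Image:   "busybox:1.29",
			Command: []string{"sleep", "3600"},
		},
	}
	return f.PodClient().AddEphemeralContainerSync(pod, ec, 5*time.Minute)
}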

View File

@ -19,8 +19,11 @@ package framework
import (
"context"
"fmt"
"strings"
"time"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/storage/utils"
"github.com/onsi/ginkgo"
@ -295,17 +298,40 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, timeouts *framework.Time
}
// create the PV resource. Fails test on error.
func createPV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
pv, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
func createPV(c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
var resultPV *v1.PersistentVolume
var lastCreateErr error
err := wait.PollImmediate(29*time.Second, timeouts.PVCreate, func() (done bool, err error) {
resultPV, lastCreateErr = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
if lastCreateErr != nil {
// If we hit a quota problem, we are not done and should retry again. This happens to be the quota failure string for GCP.
// If quota failure strings are found for other platforms, they can be added to improve reliability when running
// many parallel test jobs in a single cloud account. This corresponds to controller-like behavior and
// to what we would recommend for general clients.
if strings.Contains(lastCreateErr.Error(), `googleapi: Error 403: Quota exceeded for quota group`) {
return false, nil
}
// if it was not a quota failure, fail immediately
return false, lastCreateErr
}
return true, nil
})
// if we have an error from creating the PV, use that instead of a timeout error
if lastCreateErr != nil {
return nil, fmt.Errorf("PV Create API error: %v", err)
}
if err != nil {
return nil, fmt.Errorf("PV Create API error: %v", err)
}
return pv, nil
return resultPV, nil
}
// CreatePV creates the PV resource. Fails test on error.
func CreatePV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
return createPV(c, pv)
func CreatePV(c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
return createPV(c, timeouts, pv)
}
// CreatePVC creates the PVC resource. Fails test on error.
@ -323,7 +349,7 @@ func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim)
// Note: in the pre-bind case the real PVC name, which is generated, is not
// known until after the PVC is instantiated. This is why the pvc is created
// before the pv.
func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
func CreatePVCPV(c clientset.Interface, timeouts *framework.TimeoutContext, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
// make the pvc spec
pvc := MakePersistentVolumeClaim(pvcConfig, ns)
preBindMsg := ""
@ -344,7 +370,7 @@ func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
if preBind {
pv.Spec.ClaimRef.Name = pvc.Name
}
pv, err = createPV(c, pv)
pv, err = createPV(c, timeouts, pv)
if err != nil {
return nil, pvc, err
}
@ -358,7 +384,7 @@ func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
// Note: in the pre-bind case the real PV name, which is generated, is not
// known until after the PV is instantiated. This is why the pv is created
// before the pvc.
func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
func CreatePVPVC(c clientset.Interface, timeouts *framework.TimeoutContext, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
preBindMsg := ""
if preBind {
preBindMsg = " pre-bound"
@ -370,7 +396,7 @@ func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
pvc := MakePersistentVolumeClaim(pvcConfig, ns)
// instantiate the pv
pv, err := createPV(c, pv)
pv, err := createPV(c, timeouts, pv)
if err != nil {
return nil, nil, err
}
@ -392,7 +418,7 @@ func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
// sees an error returned, it needs to decide what to do about entries in the maps.
// Note: when the test suite deletes the namespace orphaned pvcs and pods are deleted. However,
// orphaned pvs are not deleted and will remain after the suite completes.
func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig) (PVMap, PVCMap, error) {
func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig) (PVMap, PVCMap, error) {
pvMap := make(PVMap, numpvs)
pvcMap := make(PVCMap, numpvcs)
extraPVCs := 0
@ -405,7 +431,7 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
// create pvs and pvcs
for i := 0; i < pvsToCreate; i++ {
pv, pvc, err := CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
pv, pvc, err := CreatePVPVC(c, timeouts, pvConfig, pvcConfig, ns, false)
if err != nil {
return pvMap, pvcMap, err
}
@ -416,7 +442,7 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
// create extra pvs or pvcs as needed
for i := 0; i < extraPVs; i++ {
pv := MakePersistentVolume(pvConfig)
pv, err := createPV(c, pv)
pv, err := createPV(c, timeouts, pv)
if err != nil {
return pvMap, pvcMap, err
}
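
createPV now retries on the GCE quota error for up to the new PVCreate timeout, and the exported CreatePV / CreatePVCPV / CreatePVPVC / CreatePVsPVCs helpers all take a *framework.TimeoutContext. A minimal caller sketch, assuming the e2epv import alias and that the test's Framework instance carries the timeouts:

package mytests

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
)

// createTestPV is a hypothetical wrapper; the PersistentVolume spec comes from the test.
func createTestPV(f *framework.Framework, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
	// f.Timeouts.PVCreate bounds the quota-retry loop inside createPV.
	return e2epv.CreatePV(f.ClientSet, f.Timeouts, pv)
}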

View File

@ -43,15 +43,6 @@ import (
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)
// New local storage types to support local storage capacity isolation
var localStorageCapacityIsolation featuregate.Feature = "LocalStorageCapacityIsolation"
var (
downwardAPIHugePages featuregate.Feature = "DownwardAPIHugePages"
execProbeTimeout featuregate.Feature = "ExecProbeTimeout"
csiMigration featuregate.Feature = "CSIMigration"
)
func skipInternalf(caller int, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
framework.Logf(msg)
@ -127,6 +118,7 @@ func pruneStack(skip int) string {
// Skipf skips with information about why the test is being skipped.
func Skipf(format string, args ...interface{}) {
skipInternalf(1, format, args...)
panic("unreachable")
}
// SkipUnlessAtLeast skips if the value is less than the minValue.
@ -136,28 +128,17 @@ func SkipUnlessAtLeast(value int, minValue int, message string) {
}
}
// SkipUnlessLocalEphemeralStorageEnabled skips if the LocalStorageCapacityIsolation is not enabled.
func SkipUnlessLocalEphemeralStorageEnabled() {
if !utilfeature.DefaultFeatureGate.Enabled(localStorageCapacityIsolation) {
skipInternalf(1, "Only supported when %v feature is enabled", localStorageCapacityIsolation)
// SkipUnlessFeatureGateEnabled skips if the feature is disabled
func SkipUnlessFeatureGateEnabled(gate featuregate.Feature) {
if !utilfeature.DefaultFeatureGate.Enabled(gate) {
skipInternalf(1, "Only supported when %v feature is enabled", gate)
}
}
func SkipUnlessDownwardAPIHugePagesEnabled() {
if !utilfeature.DefaultFeatureGate.Enabled(downwardAPIHugePages) {
skipInternalf(1, "Only supported when %v feature is enabled", downwardAPIHugePages)
}
}
func SkipUnlessExecProbeTimeoutEnabled() {
if !utilfeature.DefaultFeatureGate.Enabled(execProbeTimeout) {
skipInternalf(1, "Only supported when %v feature is enabled", execProbeTimeout)
}
}
func SkipIfCSIMigrationEnabled() {
if utilfeature.DefaultFeatureGate.Enabled(csiMigration) {
skipInternalf(1, "Only supported when %v feature is disabled", csiMigration)
// SkipIfFeatureGateEnabled skips if the feature is enabled
func SkipIfFeatureGateEnabled(gate featuregate.Feature) {
if utilfeature.DefaultFeatureGate.Enabled(gate) {
skipInternalf(1, "Only supported when %v feature is disabled", gate)
}
}
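
The two generic helpers replace the per-feature skip functions removed above; callers now pass the feature gate name directly. A migration sketch, assuming the e2eskipper import alias for test/e2e/framework/skipper; the gate names are declared locally for illustration:

package mytests

import (
	"k8s.io/component-base/featuregate"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
)

// Feature gate names, declared here purely for illustration.
const (
	localStorageCapacityIsolation featuregate.Feature = "LocalStorageCapacityIsolation"
	csiMigration                  featuregate.Feature = "CSIMigration"
)

func skipExamples() {
	// Before: SkipUnlessLocalEphemeralStorageEnabled()
	e2eskipper.SkipUnlessFeatureGateEnabled(localStorageCapacityIsolation)

	// Before: SkipIfCSIMigrationEnabled()
	e2eskipper.SkipIfFeatureGateEnabled(csiMigration)
}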

View File

@ -135,7 +135,6 @@ type TestContextType struct {
GatherMetricsAfterTest string
GatherSuiteMetricsAfterTest bool
MaxNodesToGather int
AllowGatheringProfiles bool
// If set to 'true' framework will gather ClusterAutoscaler metrics when gathering them for other components.
IncludeClusterAutoscalerMetrics bool
// Currently supported values are 'hr' for human-readable and 'json'. It's a comma separated list.
@ -188,6 +187,10 @@ type TestContextType struct {
// SnapshotControllerHTTPPort the port used for communicating with the snapshot controller HTTP endpoint.
SnapshotControllerHTTPPort int
// RequireDevices makes it mandatory for the environment on which the tests run to expose 1+ devices through device plugins.
// With this enabled, e2e tests that require devices for their operation can assume that, if no devices are reported, the test may fail.
RequireDevices bool
}
// NodeKillerConfig describes configuration of NodeKiller -- a utility to
@ -292,7 +295,6 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
flags.IntVar(&TestContext.MaxNodesToGather, "max-nodes-to-gather-from", 20, "The maximum number of nodes to gather extended info from on test failure.")
flags.StringVar(&TestContext.GatherMetricsAfterTest, "gather-metrics-at-teardown", "false", "If set to 'true' framework will gather metrics from all components after each test. If set to 'master' only master component metrics would be gathered.")
flags.BoolVar(&TestContext.GatherSuiteMetricsAfterTest, "gather-suite-metrics-at-teardown", false, "If set to true framework will gather metrics from all components after the whole test suite completes.")
flags.BoolVar(&TestContext.AllowGatheringProfiles, "allow-gathering-profiles", true, "If set to true framework will allow to gather CPU/memory allocation pprof profiles from the master.")
flags.BoolVar(&TestContext.IncludeClusterAutoscalerMetrics, "include-cluster-autoscaler", false, "If set to true, framework will include Cluster Autoscaler when gathering metrics.")
flags.StringVar(&TestContext.OutputPrintType, "output-print-type", "json", "Format in which summaries should be printed: 'hr' for human readable, 'json' for JSON ones.")
flags.BoolVar(&TestContext.DumpLogsOnFailure, "dump-logs-on-failure", true, "If set to true test will dump data about the namespace in which test was running.")
@ -338,6 +340,9 @@ func RegisterClusterFlags(flags *flag.FlagSet) {
flags.StringVar(&TestContext.KubeVolumeDir, "volume-dir", "/var/lib/kubelet", "Path to the directory containing the kubelet volumes.")
flags.StringVar(&TestContext.CertDir, "cert-dir", "", "Path to the directory containing the certs. Default is empty, which doesn't use certs.")
flags.StringVar(&TestContext.RepoRoot, "repo-root", "../../", "Root directory of kubernetes repository, for finding test files.")
// NOTE: Node E2E tests have this flag defined as well, but true by default.
// If this becomes true as well, they should be refactored into RegisterCommonFlags.
flags.BoolVar(&TestContext.PrepullImages, "prepull-images", false, "If true, prepull images so image pull failures do not cause test failures.")
flags.StringVar(&TestContext.Provider, "provider", "", "The name of the Kubernetes provider (gce, gke, local, skeleton (the fallback if not set), etc.)")
flags.StringVar(&TestContext.Tooling, "tooling", "", "The tooling in use (kops, gke, etc.)")
flags.StringVar(&TestContext.OutputDir, "e2e-output-dir", "/tmp", "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.")
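
RequireDevices is exposed on framework.TestContext like the other options above; a device-plugin test could consult it to decide between skipping and failing when no devices show up. A hedged sketch; the skip-versus-fail policy shown here is an illustration, not the framework's own behaviour:

package mytests

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
)

// requireDevicesOrSkip is a hypothetical guard for device-plugin tests.
func requireDevicesOrSkip(devicesReported bool) {
	if devicesReported {
		return
	}
	if framework.TestContext.RequireDevices {
		framework.Failf("no devices reported, but RequireDevices is set for this environment")
	}
	e2eskipper.Skipf("no devices reported; skipping device-dependent test")
}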

View File

@ -29,6 +29,7 @@ const (
claimBoundTimeout = 3 * time.Minute
pvReclaimTimeout = 3 * time.Minute
pvBoundTimeout = 3 * time.Minute
pvCreateTimeout = 3 * time.Minute
pvDeleteTimeout = 3 * time.Minute
pvDeleteSlowTimeout = 20 * time.Minute
snapshotCreateTimeout = 5 * time.Minute
@ -67,6 +68,9 @@ type TimeoutContext struct {
// PVBound is how long PVs have to become bound.
PVBound time.Duration
// PVCreate is how long PVs have to be created.
PVCreate time.Duration
// PVDelete is how long PVs have to become deleted.
PVDelete time.Duration
@ -95,6 +99,7 @@ func NewTimeoutContextWithDefaults() *TimeoutContext {
ClaimBound: claimBoundTimeout,
PVReclaim: pvReclaimTimeout,
PVBound: pvBoundTimeout,
PVCreate: pvCreateTimeout,
PVDelete: pvDeleteTimeout,
PVDeleteSlow: pvDeleteSlowTimeout,
SnapshotCreate: snapshotCreateTimeout,
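
PVCreate defaults to the new pvCreateTimeout (3 minutes) via NewTimeoutContextWithDefaults and can be overridden per suite. A small sketch:

package mytests

import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
)

// slowPVTimeouts is a hypothetical customization for providers where PV creation is slow.
func slowPVTimeouts() *framework.TimeoutContext {
	t := framework.NewTimeoutContextWithDefaults()
	t.PVCreate = 10 * time.Minute // default is 3 minutes
	return t
}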

View File

@ -63,6 +63,7 @@ import (
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"
netutils "k8s.io/utils/net"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
@ -898,7 +899,7 @@ func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
return c.CoreV1().Events(ns).List(context.TODO(), opts)
}, namespace)
e2epod.DumpAllPodInfoForNamespace(c, namespace)
e2epod.DumpAllPodInfoForNamespace(c, namespace, TestContext.ReportDir)
// If cluster is large, then the following logs are basically useless, because:
// 1. it takes tens of minutes or hours to grab all of them
@ -1265,7 +1266,7 @@ func getControlPlaneAddresses(c clientset.Interface) ([]string, []string, []stri
if err != nil {
Failf("Failed to parse hostname: %v", err)
}
if net.ParseIP(hostURL.Host) != nil {
if netutils.ParseIPSloppy(hostURL.Host) != nil {
externalIPs = append(externalIPs, hostURL.Host)
} else {
hostnames = append(hostnames, hostURL.Host)
@ -1376,7 +1377,7 @@ retriesLoop:
// NOTE the test may need access to the events to see what's going on, such as a change in status
actualWatchEvents := scenario(resourceWatch)
errs := sets.NewString()
ExpectEqual(len(expectedWatchEvents) <= len(actualWatchEvents), true, "Error: actual watch events amount (%d) must be greater than or equal to expected watch events amount (%d)", len(actualWatchEvents), len(expectedWatchEvents))
gomega.Expect(len(expectedWatchEvents)).To(gomega.BeNumerically("<=", len(actualWatchEvents)), "Did not get enough watch events")
totalValidWatchEvents := 0
foundEventIndexes := map[int]*int{}
@ -1405,7 +1406,9 @@ retriesLoop:
fmt.Println("invariants violated:\n", strings.Join(errs.List(), "\n - "))
continue retriesLoop
}
ExpectEqual(errs.Len() > 0, false, strings.Join(errs.List(), "\n - "))
if errs.Len() > 0 {
Failf("Unexpected error(s): %v", strings.Join(errs.List(), "\n - "))
}
ExpectEqual(totalValidWatchEvents, len(expectedWatchEvents), "Error: there must be an equal amount of total valid watch events (%d) and expected watch events (%d)", totalValidWatchEvents, len(expectedWatchEvents))
break retriesLoop
}
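
The switch from net.ParseIP to netutils.ParseIPSloppy keeps accepting IPv4 addresses with leading zeros, which net.ParseIP started rejecting in Go 1.17. A tiny illustration using the k8s.io/utils/net package; the sample address is made up:

package mytests

import (
	"fmt"
	"net"

	netutils "k8s.io/utils/net"
)

// compareParsers shows why the sloppy parser is used for host strings that may
// still contain zero-padded octets.
func compareParsers() {
	const host = "010.001.002.003"
	fmt.Println(net.ParseIP(host))            // <nil> with Go >= 1.17
	fmt.Println(netutils.ParseIPSloppy(host)) // 10.1.2.3
}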

View File

@ -458,14 +458,14 @@ func runVolumeTesterPod(client clientset.Interface, timeouts *framework.TimeoutC
return clientPod, nil
}
func testVolumeContent(f *framework.Framework, pod *v1.Pod, fsGroup *int64, fsType string, tests []Test) {
func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string, fsGroup *int64, fsType string, tests []Test) {
ginkgo.By("Checking that text file contents are perfect.")
for i, test := range tests {
if test.Mode == v1.PersistentVolumeBlock {
// Block: check content
deviceName := fmt.Sprintf("/opt/%d", i)
commands := generateReadBlockCmd(deviceName, len(test.ExpectedContent))
_, err := framework.LookForStringInPodExec(pod.Namespace, pod.Name, commands, test.ExpectedContent, time.Minute)
commands := GenerateReadBlockCmd(deviceName, len(test.ExpectedContent))
_, err := framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)
// Check that it's a real block device
@ -474,7 +474,7 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, fsGroup *int64, fsTy
// Filesystem: check content
fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
commands := GenerateReadFileCmd(fileName)
_, err := framework.LookForStringInPodExec(pod.Namespace, pod.Name, commands, test.ExpectedContent, time.Minute)
_, err := framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)
// Check that a directory has been mounted
@ -485,14 +485,14 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, fsGroup *int64, fsTy
// Filesystem: check fsgroup
if fsGroup != nil {
ginkgo.By("Checking fsGroup is correct.")
_, err = framework.LookForStringInPodExec(pod.Namespace, pod.Name, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
_, err = framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
framework.ExpectNoError(err, "failed: getting the right privileges in the file %v", int(*fsGroup))
}
// Filesystem: check fsType
if fsType != "" {
ginkgo.By("Checking fsType is correct.")
_, err = framework.LookForStringInPodExec(pod.Namespace, pod.Name, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
_, err = framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
framework.ExpectNoError(err, "failed: getting the right fsType %s", fsType)
}
}
@ -531,7 +531,23 @@ func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64,
e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
}()
testVolumeContent(f, clientPod, fsGroup, fsType, tests)
testVolumeContent(f, clientPod, "", fsGroup, fsType, tests)
ginkgo.By("Repeating the test on an ephemeral container (if enabled)")
ec := &v1.EphemeralContainer{
EphemeralContainerCommon: v1.EphemeralContainerCommon(clientPod.Spec.Containers[0]),
}
ec.Name = "volume-ephemeral-container"
err = f.PodClient().AddEphemeralContainerSync(clientPod, ec, timeouts.PodStart)
// The API server will return NotFound for the subresource when the feature is disabled
// BEGIN TODO: remove after EphemeralContainers feature gate is retired
if apierrors.IsNotFound(err) {
framework.Logf("Skipping ephemeral container re-test because feature is disabled (error: %q)", err)
return
}
// END TODO: remove after EphemeralContainers feature gate is retired
framework.ExpectNoError(err, "failed to add ephemeral container for re-test")
testVolumeContent(f, clientPod, ec.Name, fsGroup, fsType, tests)
}
// InjectContent inserts index.html with given content into given volume. It does so by
@ -572,7 +588,7 @@ func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fs
// Check that the data have been really written in this pod.
// This tests non-persistent volume types
testVolumeContent(f, injectorPod, fsGroup, fsType, tests)
testVolumeContent(f, injectorPod, "", fsGroup, fsType, tests)
}
// generateWriteCmd is used by generateWriteBlockCmd and generateWriteFileCmd
@ -583,7 +599,7 @@ func generateWriteCmd(content, path string) []string {
}
// generateReadBlockCmd generates the corresponding command lines to read from a block device with the given file path.
func generateReadBlockCmd(fullPath string, numberOfCharacters int) []string {
func GenerateReadBlockCmd(fullPath string, numberOfCharacters int) []string {
var commands []string
commands = []string{"head", "-c", strconv.Itoa(numberOfCharacters), fullPath}
return commands
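
GenerateReadBlockCmd is now exported so code outside the package can build the same read command, and the content checks can target a specific container (needed for the ephemeral-container re-test above). A hedged sketch, assuming the e2evolume alias for test/e2e/framework/volume:

package mytests

import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
)

// checkBlockDeviceContent is a hypothetical helper mirroring testVolumeContent's block branch.
// An empty containerName targets the pod's default container.
func checkBlockDeviceContent(ns, podName, containerName, devicePath, expected string) error {
	cmd := e2evolume.GenerateReadBlockCmd(devicePath, len(expected))
	_, err := framework.LookForStringInPodExecToContainer(ns, podName, containerName, cmd, expected, time.Minute)
	return err
}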

View File

@ -70,6 +70,7 @@ type LocalTestResource struct {
// LocalTestResourceManager represents interface to create/destroy local test resources on node
type LocalTestResourceManager interface {
Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource
ExpandBlockDevice(ltr *LocalTestResource, mbToAdd int) error
Remove(ltr *LocalTestResource)
}
@ -289,6 +290,21 @@ func (l *ltrMgr) cleanupLocalVolumeGCELocalSSD(ltr *LocalTestResource) {
framework.ExpectNoError(err)
}
func (l *ltrMgr) expandLocalVolumeBlockFS(ltr *LocalTestResource, mbToAdd int) error {
ddCmd := fmt.Sprintf("dd if=/dev/zero of=%s/file conv=notrunc oflag=append bs=1M count=%d", ltr.loopDir, mbToAdd)
loopDev := l.findLoopDevice(ltr.loopDir, ltr.Node)
losetupCmd := fmt.Sprintf("losetup -c %s", loopDev)
return l.hostExec.IssueCommand(fmt.Sprintf("%s && %s", ddCmd, losetupCmd), ltr.Node)
}
func (l *ltrMgr) ExpandBlockDevice(ltr *LocalTestResource, mbtoAdd int) error {
switch ltr.VolumeType {
case LocalVolumeBlockFS:
return l.expandLocalVolumeBlockFS(ltr, mbtoAdd)
}
return fmt.Errorf("Failed to expand local test resource, unsupported volume type: %s", ltr.VolumeType)
}
func (l *ltrMgr) Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource {
var ltr *LocalTestResource
switch volumeType {
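
ExpandBlockDevice is added to the LocalTestResourceManager interface; only LocalVolumeBlockFS resources support it, growing the loop device's backing file with dd and re-reading its size with losetup -c. A sketch written against the interface, assuming the storage utils import path:

package mytests

import (
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

// growLocalVolume is a hypothetical caller; ltr must have been created with the
// LocalVolumeBlockFS volume type, otherwise an "unsupported volume type" error is returned.
func growLocalVolume(mgr utils.LocalTestResourceManager, ltr *utils.LocalTestResource) error {
	return mgr.ExpandBlockDevice(ltr, 100 /* MB appended to the backing file */)
}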

View File

@ -53,7 +53,7 @@ func NewAdmissionWebhookServer(handler http.Handler) (string, func(), error) {
// AdmissionWebhookHandler creates a HandlerFunc that decodes/encodes AdmissionReview and performs
// given admit function
func AdmissionWebhookHandler(t *testing.T, admit func(*v1beta1.AdmissionReview) error) http.HandlerFunc {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
data, err := ioutil.ReadAll(r.Body)
if err != nil {
@ -82,7 +82,7 @@ func AdmissionWebhookHandler(t *testing.T, admit func(*v1beta1.AdmissionReview)
if err := json.NewEncoder(w).Encode(review); err != nil {
t.Errorf("Marshal of response failed with error: %v", err)
}
})
}
}
// LocalhostCert was generated from crypto/tls/generate_cert.go with the following command:

View File

@ -117,7 +117,7 @@ func waitForDeploymentCompleteMaybeCheckRolling(c clientset.Interface, d *apps.D
func checkRollingUpdateStatus(c clientset.Interface, deployment *apps.Deployment, logf LogfFn) (string, error) {
var reason string
oldRSs, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.AppsV1())
oldRSs, allOldRSs, newRS, err := GetAllReplicaSets(deployment, c)
if err != nil {
return "", err
}
@ -152,6 +152,40 @@ func checkRollingUpdateStatus(c clientset.Interface, deployment *apps.Deployment
return "", nil
}
// GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and ReplicaSetList from client interface.
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
// The third returned value is the new replica set, and it may be nil if it doesn't exist yet.
func GetAllReplicaSets(deployment *apps.Deployment, c clientset.Interface) ([]*apps.ReplicaSet, []*apps.ReplicaSet, *apps.ReplicaSet, error) {
rsList, err := deploymentutil.ListReplicaSets(deployment, deploymentutil.RsListFromClient(c.AppsV1()))
if err != nil {
return nil, nil, nil, err
}
oldRSes, allOldRSes := deploymentutil.FindOldReplicaSets(deployment, rsList)
newRS := deploymentutil.FindNewReplicaSet(deployment, rsList)
return oldRSes, allOldRSes, newRS, nil
}
// GetOldReplicaSets returns the old replica sets targeted by the given Deployment; get PodList and ReplicaSetList from client interface.
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
func GetOldReplicaSets(deployment *apps.Deployment, c clientset.Interface) ([]*apps.ReplicaSet, []*apps.ReplicaSet, error) {
rsList, err := deploymentutil.ListReplicaSets(deployment, deploymentutil.RsListFromClient(c.AppsV1()))
if err != nil {
return nil, nil, err
}
oldRSes, allOldRSes := deploymentutil.FindOldReplicaSets(deployment, rsList)
return oldRSes, allOldRSes, nil
}
// GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface.
// Returns nil if the new replica set doesn't exist yet.
func GetNewReplicaSet(deployment *apps.Deployment, c clientset.Interface) (*apps.ReplicaSet, error) {
rsList, err := deploymentutil.ListReplicaSets(deployment, deploymentutil.RsListFromClient(c.AppsV1()))
if err != nil {
return nil, err
}
return deploymentutil.FindNewReplicaSet(deployment, rsList), nil
}
// Waits for the deployment to complete, and check rolling update strategy isn't broken at any times.
// Rolling update strategy should not be broken during a rolling update.
func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *apps.Deployment, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
@ -180,7 +214,7 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
return false, err
}
// The new ReplicaSet needs to be non-nil and contain the pod-template-hash label
newRS, err = deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
newRS, err = GetNewReplicaSet(deployment, c)
if err != nil {
return false, err
}
@ -223,7 +257,7 @@ func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName,
}
// Check revision of the new replica set of this deployment
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
newRS, err := GetNewReplicaSet(deployment, c)
if err != nil {
return fmt.Errorf("unable to get new replicaset of deployment %s during revision check: %v", deploymentName, err)
}
@ -344,7 +378,7 @@ func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, r
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("deployment %q never updated with the desired condition and reason, latest deployment conditions: %+v", deployment.Name, deployment.Status.Conditions)
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.AppsV1())
_, allOldRSs, newRS, err := GetAllReplicaSets(deployment, c)
if err == nil {
LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, logf)
LogPodsOfDeployment(c, deployment, append(allOldRSs, newRS), logf)
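
The new GetAllReplicaSets / GetOldReplicaSets / GetNewReplicaSet wrappers take a clientset.Interface directly instead of the typed AppsV1 getter that deploymentutil expects. A minimal caller sketch, assuming the testutils alias for k8s.io/kubernetes/test/utils:

package mytests

import (
	apps "k8s.io/api/apps/v1"
	clientset "k8s.io/client-go/kubernetes"
	testutils "k8s.io/kubernetes/test/utils"
)

// newReplicaSetOf is a hypothetical helper; the result is nil when the deployment's
// new ReplicaSet does not exist yet.
func newReplicaSetOf(c clientset.Interface, d *apps.Deployment) (*apps.ReplicaSet, error) {
	return testutils.GetNewReplicaSet(d, c)
}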

View File

@ -31,16 +31,13 @@ import (
// RegistryList holds public and private image registries
type RegistryList struct {
GcAuthenticatedRegistry string `yaml:"gcAuthenticatedRegistry"`
E2eRegistry string `yaml:"e2eRegistry"`
PromoterE2eRegistry string `yaml:"promoterE2eRegistry"`
BuildImageRegistry string `yaml:"buildImageRegistry"`
InvalidRegistry string `yaml:"invalidRegistry"`
GcEtcdRegistry string `yaml:"gcEtcdRegistry"`
GcRegistry string `yaml:"gcRegistry"`
SigStorageRegistry string `yaml:"sigStorageRegistry"`
GcrReleaseRegistry string `yaml:"gcrReleaseRegistry"`
PrivateRegistry string `yaml:"privateRegistry"`
SampleRegistry string `yaml:"sampleRegistry"`
MicrosoftRegistry string `yaml:"microsoftRegistry"`
DockerLibraryRegistry string `yaml:"dockerLibraryRegistry"`
CloudProviderGcpRegistry string `yaml:"cloudProviderGcpRegistry"`
@ -91,7 +88,6 @@ func initReg() RegistryList {
var (
initRegistry = RegistryList{
GcAuthenticatedRegistry: "gcr.io/authenticated-image-pulling",
E2eRegistry: "gcr.io/kubernetes-e2e-test-images",
PromoterE2eRegistry: "k8s.gcr.io/e2e-test-images",
BuildImageRegistry: "k8s.gcr.io/build-image",
InvalidRegistry: "invalid.com/invalid",
@ -99,8 +95,6 @@ var (
GcRegistry: "k8s.gcr.io",
SigStorageRegistry: "k8s.gcr.io/sig-storage",
PrivateRegistry: "gcr.io/k8s-authenticated-test",
SampleRegistry: "gcr.io/google-samples",
GcrReleaseRegistry: "gcr.io/gke-release",
MicrosoftRegistry: "mcr.microsoft.com",
DockerLibraryRegistry: "docker.io/library",
CloudProviderGcpRegistry: "k8s.gcr.io/cloud-provider-gcp",
@ -204,48 +198,48 @@ const (
func initImageConfigs(list RegistryList) (map[int]Config, map[int]Config) {
configs := map[int]Config{}
configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.32"}
configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.33"}
configs[AgnhostPrivate] = Config{list.PrivateRegistry, "agnhost", "2.6"}
configs[AuthenticatedAlpine] = Config{list.GcAuthenticatedRegistry, "alpine", "3.7"}
configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"}
configs[APIServer] = Config{list.PromoterE2eRegistry, "sample-apiserver", "1.17.4"}
configs[AppArmorLoader] = Config{list.PromoterE2eRegistry, "apparmor-loader", "1.3"}
configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.29-1"}
configs[APIServer] = Config{list.PromoterE2eRegistry, "sample-apiserver", "1.17.5"}
configs[AppArmorLoader] = Config{list.PromoterE2eRegistry, "apparmor-loader", "1.4"}
configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.29-2"}
configs[CheckMetadataConcealment] = Config{list.PromoterE2eRegistry, "metadata-concealment", "1.6"}
configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"}
configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.2"}
configs[DebianIptables] = Config{list.BuildImageRegistry, "debian-iptables", "buster-v1.6.7"}
configs[EchoServer] = Config{list.PromoterE2eRegistry, "echoserver", "2.3"}
configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.4.13-0"}
configs[GlusterDynamicProvisioner] = Config{list.PromoterE2eRegistry, "glusterdynamic-provisioner", "v1.0"}
configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-1"}
configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-1"}
configs[DebianIptables] = Config{list.BuildImageRegistry, "debian-iptables", "bullseye-v1.1.0"}
configs[EchoServer] = Config{list.PromoterE2eRegistry, "echoserver", "2.4"}
configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.1-0"}
configs[GlusterDynamicProvisioner] = Config{list.PromoterE2eRegistry, "glusterdynamic-provisioner", "v1.3"}
configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-2"}
configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-2"}
configs[InvalidRegistryImage] = Config{list.InvalidRegistry, "alpine", "3.1"}
configs[IpcUtils] = Config{list.PromoterE2eRegistry, "ipc-utils", "1.2"}
configs[JessieDnsutils] = Config{list.PromoterE2eRegistry, "jessie-dnsutils", "1.4"}
configs[Kitten] = Config{list.PromoterE2eRegistry, "kitten", "1.4"}
configs[Nautilus] = Config{list.PromoterE2eRegistry, "nautilus", "1.4"}
configs[IpcUtils] = Config{list.PromoterE2eRegistry, "ipc-utils", "1.3"}
configs[JessieDnsutils] = Config{list.PromoterE2eRegistry, "jessie-dnsutils", "1.5"}
configs[Kitten] = Config{list.PromoterE2eRegistry, "kitten", "1.5"}
configs[Nautilus] = Config{list.PromoterE2eRegistry, "nautilus", "1.5"}
configs[NFSProvisioner] = Config{list.SigStorageRegistry, "nfs-provisioner", "v2.2.2"}
configs[Nginx] = Config{list.PromoterE2eRegistry, "nginx", "1.14-1"}
configs[NginxNew] = Config{list.PromoterE2eRegistry, "nginx", "1.15-1"}
configs[NodePerfNpbEp] = Config{list.PromoterE2eRegistry, "node-perf/npb-ep", "1.1"}
configs[NodePerfNpbIs] = Config{list.PromoterE2eRegistry, "node-perf/npb-is", "1.1"}
configs[Nginx] = Config{list.PromoterE2eRegistry, "nginx", "1.14-2"}
configs[NginxNew] = Config{list.PromoterE2eRegistry, "nginx", "1.15-2"}
configs[NodePerfNpbEp] = Config{list.PromoterE2eRegistry, "node-perf/npb-ep", "1.2"}
configs[NodePerfNpbIs] = Config{list.PromoterE2eRegistry, "node-perf/npb-is", "1.2"}
configs[NodePerfTfWideDeep] = Config{list.PromoterE2eRegistry, "node-perf/tf-wide-deep", "1.1"}
configs[Nonewprivs] = Config{list.PromoterE2eRegistry, "nonewprivs", "1.3"}
configs[NonRoot] = Config{list.PromoterE2eRegistry, "nonroot", "1.1"}
configs[NonRoot] = Config{list.PromoterE2eRegistry, "nonroot", "1.2"}
// Pause - when these values are updated, also update cmd/kubelet/app/options/container_runtime.go
configs[Pause] = Config{list.GcRegistry, "pause", "3.5"}
configs[Pause] = Config{list.GcRegistry, "pause", "3.6"}
configs[Perl] = Config{list.PromoterE2eRegistry, "perl", "5.26"}
configs[PrometheusDummyExporter] = Config{list.GcRegistry, "prometheus-dummy-exporter", "v0.1.0"}
configs[PrometheusToSd] = Config{list.GcRegistry, "prometheus-to-sd", "v0.5.0"}
configs[Redis] = Config{list.PromoterE2eRegistry, "redis", "5.0.5-alpine"}
configs[Redis] = Config{list.PromoterE2eRegistry, "redis", "5.0.5-1"}
configs[RegressionIssue74839] = Config{list.PromoterE2eRegistry, "regression-issue-74839", "1.2"}
configs[ResourceConsumer] = Config{list.PromoterE2eRegistry, "resource-consumer", "1.9"}
configs[ResourceConsumer] = Config{list.PromoterE2eRegistry, "resource-consumer", "1.10"}
configs[SdDummyExporter] = Config{list.GcRegistry, "sd-dummy-exporter", "v0.2.0"}
configs[VolumeNFSServer] = Config{list.PromoterE2eRegistry, "volume/nfs", "1.2"}
configs[VolumeISCSIServer] = Config{list.PromoterE2eRegistry, "volume/iscsi", "2.2"}
configs[VolumeGlusterServer] = Config{list.PromoterE2eRegistry, "volume/gluster", "1.2"}
configs[VolumeRBDServer] = Config{list.PromoterE2eRegistry, "volume/rbd", "1.0.3"}
configs[VolumeNFSServer] = Config{list.PromoterE2eRegistry, "volume/nfs", "1.3"}
configs[VolumeISCSIServer] = Config{list.PromoterE2eRegistry, "volume/iscsi", "2.3"}
configs[VolumeGlusterServer] = Config{list.PromoterE2eRegistry, "volume/gluster", "1.3"}
configs[VolumeRBDServer] = Config{list.PromoterE2eRegistry, "volume/rbd", "1.0.4"}
configs[WindowsServer] = Config{list.MicrosoftRegistry, "windows", "1809"}
// if requested, map all the SHAs into a known format based on the input
@ -383,18 +377,12 @@ func replaceRegistryInImageURLWithList(imageURL string, reg RegistryList) (strin
}
switch registryAndUser {
case initRegistry.E2eRegistry:
registryAndUser = reg.E2eRegistry
case initRegistry.GcRegistry:
registryAndUser = reg.GcRegistry
case initRegistry.SigStorageRegistry:
registryAndUser = reg.SigStorageRegistry
case initRegistry.PrivateRegistry:
registryAndUser = reg.PrivateRegistry
case initRegistry.SampleRegistry:
registryAndUser = reg.SampleRegistry
case initRegistry.GcrReleaseRegistry:
registryAndUser = reg.GcrReleaseRegistry
case initRegistry.InvalidRegistry:
registryAndUser = reg.InvalidRegistry
case initRegistry.MicrosoftRegistry:
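
With the e2e, sample, and gcr-release registries dropped from RegistryList and most image versions bumped, tests should keep resolving images through the manifest rather than hard-coding registries or tags. A small sketch, assuming the usual imageutils alias; the printed URLs depend on the registry list in effect (for example, a KUBE_TEST_REPO_LIST override):

package mytests

import (
	"fmt"

	imageutils "k8s.io/kubernetes/test/utils/image"
)

// printCommonImages resolves a few images through the manifest instead of hard-coding them.
func printCommonImages() {
	fmt.Println(imageutils.GetE2EImage(imageutils.Agnhost)) // e.g. k8s.gcr.io/e2e-test-images/agnhost:2.33
	fmt.Println(imageutils.GetE2EImage(imageutils.BusyBox)) // e.g. k8s.gcr.io/e2e-test-images/busybox:1.29-2
	fmt.Println(imageutils.GetPauseImageName())             // e.g. k8s.gcr.io/pause:3.6
}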

View File

@ -181,6 +181,9 @@ type RCConfig struct {
ConfigMapNames []string
ServiceAccountTokenProjections int
// Additional containers to run in the pod
AdditionalContainers []v1.Container
}
func (rc *RCConfig) RCConfigLog(fmt string, args ...interface{}) {
@ -343,6 +346,10 @@ func (config *DeploymentConfig) create() error {
},
}
if len(config.AdditionalContainers) > 0 {
deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, config.AdditionalContainers...)
}
if len(config.SecretNames) > 0 {
attachSecrets(&deployment.Spec.Template, config.SecretNames)
}
@ -425,6 +432,10 @@ func (config *ReplicaSetConfig) create() error {
},
}
if len(config.AdditionalContainers) > 0 {
rs.Spec.Template.Spec.Containers = append(rs.Spec.Template.Spec.Containers, config.AdditionalContainers...)
}
if len(config.SecretNames) > 0 {
attachSecrets(&rs.Spec.Template, config.SecretNames)
}
@ -618,6 +629,10 @@ func (config *RCConfig) create() error {
},
}
if len(config.AdditionalContainers) > 0 {
rc.Spec.Template.Spec.Containers = append(rc.Spec.Template.Spec.Containers, config.AdditionalContainers...)
}
if len(config.SecretNames) > 0 {
attachSecrets(rc.Spec.Template, config.SecretNames)
}
@ -1304,7 +1319,7 @@ func MakePodSpec() v1.PodSpec {
return v1.PodSpec{
Containers: []v1.Container{{
Name: "pause",
Image: "k8s.gcr.io/pause:3.5",
Image: "k8s.gcr.io/pause:3.6",
Ports: []v1.ContainerPort{{ContainerPort: 80}},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
@ -1726,7 +1741,7 @@ type DaemonConfig struct {
func (config *DaemonConfig) Run() error {
if config.Image == "" {
config.Image = "k8s.gcr.io/pause:3.5"
config.Image = "k8s.gcr.io/pause:3.6"
}
nameLabel := map[string]string{
"name": config.Name + "-daemon",