mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 18:43:34 +00:00)

Commit: Fresh dep ensure
vendor/k8s.io/kubernetes/test/e2e/apps/BUILD (generated, vendored) | 45

@@ -37,7 +37,28 @@ go_library(
"//pkg/controller/replication:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/util/pointer:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
@@ -45,27 +66,7 @@ go_library(
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)
vendor/k8s.io/kubernetes/test/e2e/apps/OWNERS (generated, vendored) | 2

@@ -5,3 +5,5 @@ approvers:
- mfojtik
reviewers:
- sig-apps-reviewers
labels:
- sig/apps
vendor/k8s.io/kubernetes/test/e2e/apps/cronjob.go (generated, vendored) | 70

@@ -34,6 +34,7 @@ import (
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/controller/job"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)

const (
@@ -179,8 +180,8 @@ var _ = SIGDescribe("CronJob", func() {
Expect(err).NotTo(HaveOccurred())

By("Ensuring no unexpected event has happened")
err = checkNoEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"})
Expect(err).NotTo(HaveOccurred())
err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"})
Expect(err).To(HaveOccurred())

By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
@@ -213,13 +214,13 @@ var _ = SIGDescribe("CronJob", func() {
Expect(err).To(HaveOccurred())
Expect(errors.IsNotFound(err)).To(BeTrue())

By("Ensuring there are no active jobs in the cronjob")
err = waitForNoJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, true)
By("Ensuring the job is not in the cronjob active list")
err = waitForJobNotActive(f.ClientSet, f.Namespace.Name, cronJob.Name, job.Name)
Expect(err).NotTo(HaveOccurred())

By("Ensuring MissingJob event has occurred")
err = checkNoEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"})
Expect(err).To(HaveOccurred())
err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"})
Expect(err).NotTo(HaveOccurred())

By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
@@ -298,7 +299,7 @@ func newTestCronJob(name, schedule string, concurrencyPolicy batchv1beta1.Concur
Containers: []v1.Container{
{
Name: "c",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
VolumeMounts: []v1.VolumeMount{
{
MountPath: "/data",
@@ -335,7 +336,7 @@ func deleteCronJob(c clientset.Interface, ns, name string) error {
// Wait for at least given amount of active jobs.
func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
curr, err := c.BatchV1beta1().CronJobs(ns).Get(cronJobName, metav1.GetOptions{})
curr, err := getCronJob(c, ns, cronJobName)
if err != nil {
return false, err
}
@@ -349,7 +350,7 @@ func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int
// empty after the timeout.
func waitForNoJobs(c clientset.Interface, ns, jobName string, failIfNonEmpty bool) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
curr, err := c.BatchV1beta1().CronJobs(ns).Get(jobName, metav1.GetOptions{})
curr, err := getCronJob(c, ns, jobName)
if err != nil {
return false, err
}
@@ -362,6 +363,23 @@ func waitForNoJobs(c clientset.Interface, ns, jobName string, failIfNonEmpty boo
})
}

// Wait till a given job actually goes away from the Active list for a given cronjob
func waitForJobNotActive(c clientset.Interface, ns, cronJobName, jobName string) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
curr, err := getCronJob(c, ns, cronJobName)
if err != nil {
return false, err
}

for _, j := range curr.Status.Active {
if j.Name == jobName {
return false, nil
}
}
return true, nil
})
}

// Wait for a job to not exist by listing jobs explicitly.
func waitForJobNotExist(c clientset.Interface, ns string, targetJob *batchv1.Job) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
@@ -425,24 +443,26 @@ func waitForAnyFinishedJob(c clientset.Interface, ns string) error {
})
}

// checkNoEventWithReason checks no events with a reason within a list has occurred
func checkNoEventWithReason(c clientset.Interface, ns, cronJobName string, reasons []string) error {
sj, err := c.BatchV1beta1().CronJobs(ns).Get(cronJobName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("Error in getting cronjob %s/%s: %v", ns, cronJobName, err)
}
events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, sj)
if err != nil {
return fmt.Errorf("Error in listing events: %s", err)
}
for _, e := range events.Items {
for _, reason := range reasons {
if e.Reason == reason {
return fmt.Errorf("Found event with reason %s: %#v", reason, e)
// waitForEventWithReason waits for events with a reason within a list has occurred
func waitForEventWithReason(c clientset.Interface, ns, cronJobName string, reasons []string) error {
return wait.Poll(framework.Poll, 30*time.Second, func() (bool, error) {
sj, err := getCronJob(c, ns, cronJobName)
if err != nil {
return false, err
}
events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, sj)
if err != nil {
return false, err
}
for _, e := range events.Items {
for _, reason := range reasons {
if e.Reason == reason {
return true, nil
}
}
}
}
return nil
return false, nil
})
}

// filterNotDeletedJobs returns the job list without any jobs that are pending
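Note on the cronjob.go changes: the new helpers above (waitForJobNotActive, waitForEventWithReason) both follow the wait.Poll pattern from k8s.io/apimachinery. A minimal, self-contained sketch of that contract follows; the interval, timeout, and checkActiveList lookup are illustrative placeholders, not part of the vendored test code.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// checkActiveList is a stand-in for a real API lookup such as
// getCronJob(c, ns, name).Status.Active in the test above.
func checkActiveList() []string { return nil }

func main() {
	// Poll every 2s for up to 1m. The condition returns (done, err):
	// (true, nil) stops successfully, a non-nil err aborts immediately,
	// and (false, nil) keeps polling until the timeout.
	err := wait.Poll(2*time.Second, 1*time.Minute, func() (bool, error) {
		return len(checkActiveList()) == 0, nil
	})
	if err != nil {
		fmt.Println("job still active after timeout:", err)
	}
}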
vendor/k8s.io/kubernetes/test/e2e/apps/daemon_restart.go (generated, vendored) | 2

@@ -281,7 +281,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
// Requires master ssh access.
framework.SkipUnlessProviderIs("gce", "aws")
restarter := NewRestartConfig(
framework.GetMasterHost(), "kube-scheduler", ports.SchedulerPort, restartPollInterval, restartTimeout)
framework.GetMasterHost(), "kube-scheduler", ports.InsecureSchedulerPort, restartPollInterval, restartTimeout)

// Create pods while the scheduler is down and make sure the scheduler picks them up by
// scaling the rc to the same size.
vendor/k8s.io/kubernetes/test/e2e/apps/daemon_set.go (generated, vendored) | 39

@@ -24,8 +24,7 @@ import (

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
apierrs "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
@@ -54,6 +53,10 @@ const (
daemonsetColorLabel = daemonsetLabelPrefix + "color"
)

// The annotation key scheduler.alpha.kubernetes.io/node-selector is for assigning
// node selectors labels to namespaces
var NamespaceNodeSelectors = []string{"scheduler.alpha.kubernetes.io/node-selector"}

// This test must be run in serial because it assumes the Daemon Set pods will
// always get scheduled. If we run other tests in parallel, this may not
// happen. In the future, running in parallel may work if we have an eviction
@@ -100,7 +103,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ns = f.Namespace.Name

c = f.ClientSet
err := clearDaemonSetNodeLabels(c)

updatedNS, err := updateNamespaceAnnotations(c, ns)
Expect(err).NotTo(HaveOccurred())

ns = updatedNS.Name

err = clearDaemonSetNodeLabels(c)
Expect(err).NotTo(HaveOccurred())
})

@@ -495,6 +504,26 @@ func clearDaemonSetNodeLabels(c clientset.Interface) error {
return nil
}

// updateNamespaceAnnotations sets node selectors related annotations on tests namespaces to empty
func updateNamespaceAnnotations(c clientset.Interface, nsName string) (*v1.Namespace, error) {
nsClient := c.CoreV1().Namespaces()

ns, err := nsClient.Get(nsName, metav1.GetOptions{})
if err != nil {
return nil, err
}

if ns.Annotations == nil {
ns.Annotations = make(map[string]string)
}

for _, n := range NamespaceNodeSelectors {
ns.Annotations[n] = ""
}

return nsClient.Update(ns)
}

func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) {
nodeClient := c.CoreV1().Nodes()
var newNode *v1.Node
@@ -520,7 +549,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
newLabels, _ = separateDaemonSetNodeLabels(newNode.Labels)
return true, err
}
if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
if se, ok := err.(*apierrors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
framework.Logf("failed to update node due to resource version conflict")
return false, nil
}
@@ -734,7 +763,7 @@ func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *a
func waitFailedDaemonPodDeleted(c clientset.Interface, pod *v1.Pod) func() (bool, error) {
return func() (bool, error) {
if _, err := c.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err != nil {
if errors.IsNotFound(err) {
if apierrors.IsNotFound(err) {
return true, nil
}
return false, fmt.Errorf("failed to get failed daemon pod %q: %v", pod.Name, err)
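Note on the daemon_set.go changes: the errors import is consolidated under the apierrors alias, and both call sites classify API errors the same way. A small sketch of that classification pattern follows, using the real apierrors helpers but an illustrative classifyError wrapper that is not part of the vendored code.

package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// classifyError distinguishes "object is gone" from "update conflict",
// mirroring the checks in waitFailedDaemonPodDeleted and setDaemonSetNodeLabels.
func classifyError(err error) string {
	if err == nil {
		return "ok"
	}
	if apierrors.IsNotFound(err) {
		return "not found: treat the pod as already deleted"
	}
	if se, ok := err.(*apierrors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
		return "resource version conflict: retry the update"
	}
	return "unexpected: " + err.Error()
}

func main() {
	fmt.Println(classifyError(nil))
}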
vendor/k8s.io/kubernetes/test/e2e/apps/deployment.go (generated, vendored) | 37

@@ -38,9 +38,9 @@ import (
clientset "k8s.io/client-go/kubernetes"
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
"k8s.io/kubernetes/test/e2e/framework"
testutil "k8s.io/kubernetes/test/utils"
utilpointer "k8s.io/utils/pointer"
)

const (
@@ -70,16 +70,35 @@ var _ = SIGDescribe("Deployment", func() {
It("deployment reaping should cascade to its replica sets and pods", func() {
testDeleteDeployment(f)
})
It("RollingUpdateDeployment should delete old pods and create new ones", func() {
/*
Testname: Deployment RollingUpdate
Description: A conformant Kubernetes distribution MUST support the Deployment with RollingUpdate strategy.
*/
framework.ConformanceIt("RollingUpdateDeployment should delete old pods and create new ones", func() {
testRollingUpdateDeployment(f)
})
It("RecreateDeployment should delete old pods and create new ones", func() {
/*
Testname: Deployment Recreate
Description: A conformant Kubernetes distribution MUST support the Deployment with Recreate strategy.
*/
framework.ConformanceIt("RecreateDeployment should delete old pods and create new ones", func() {
testRecreateDeployment(f)
})
It("deployment should delete old replica sets", func() {
/*
Testname: Deployment RevisionHistoryLimit
Description: A conformant Kubernetes distribution MUST clean up Deployment's ReplicaSets based on
the Deployment's `.spec.revisionHistoryLimit`.
*/
framework.ConformanceIt("deployment should delete old replica sets", func() {
testDeploymentCleanUpPolicy(f)
})
It("deployment should support rollover", func() {
/*
Testname: Deployment Rollover
Description: A conformant Kubernetes distribution MUST support Deployment rollover,
i.e. allow arbitrary number of changes to desired state during rolling update
before the rollout finishes.
*/
framework.ConformanceIt("deployment should support rollover", func() {
testRolloverDeployment(f)
})
It("deployment should support rollback", func() {
@@ -91,7 +110,13 @@ var _ = SIGDescribe("Deployment", func() {
It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func() {
testDeploymentsControllerRef(f)
})
It("deployment should support proportional scaling", func() {
/*
Testname: Deployment Proportional Scaling
Description: A conformant Kubernetes distribution MUST support Deployment
proportional scaling, i.e. proportionally scale a Deployment's ReplicaSets
when a Deployment is scaled.
*/
framework.ConformanceIt("deployment should support proportional scaling", func() {
testProportionalScalingDeployment(f)
})
// TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues
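Note on the deployment.go changes: the pointer helpers move from k8s.io/kubernetes/pkg/util/pointer to the standalone k8s.io/utils/pointer module, with the utilpointer alias kept at the call sites. A minimal sketch of typical usage follows; the replica count is an arbitrary value chosen for illustration.

package main

import (
	"fmt"

	utilpointer "k8s.io/utils/pointer"
)

func main() {
	// Replicas fields in apps/v1 specs are *int32, so a literal needs a helper.
	replicas := utilpointer.Int32Ptr(3)
	fmt.Println(*replicas)
}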
vendor/k8s.io/kubernetes/test/e2e/apps/job.go (generated, vendored) | 6

@@ -44,7 +44,7 @@ var _ = SIGDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())

By("Ensuring job reaches completions")
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
Expect(err).NotTo(HaveOccurred())
})

@@ -63,7 +63,7 @@ var _ = SIGDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())

By("Ensuring job reaches completions")
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
Expect(err).NotTo(HaveOccurred())
})

@@ -84,7 +84,7 @@ var _ = SIGDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())

By("Ensuring job reaches completions")
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
Expect(err).NotTo(HaveOccurred())
})
vendor/k8s.io/kubernetes/test/e2e/apps/network_partition.go (generated, vendored) | 29

@@ -106,12 +106,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
f := framework.NewDefaultFramework("network-partition")
var c clientset.Interface
var ns string
ignoreLabels := framework.ImagePullerLabels

BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
_, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
_, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
Expect(err).NotTo(HaveOccurred())

// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
@@ -197,11 +196,14 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
go controller.Run(stopCh)

By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
host := framework.GetNodeExternalIP(&node)
master := framework.GetMasterAddress(c)
host, err := framework.GetNodeExternalIP(&node)
framework.ExpectNoError(err)
masterAddresses := framework.GetAllMasterAddresses(c)
defer func() {
By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
framework.UnblockNetwork(host, master)
for _, masterAddress := range masterAddresses {
framework.UnblockNetwork(host, masterAddress)
}

if CurrentGinkgoTestDescription().Failed {
return
@@ -214,7 +216,9 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
}
}()

framework.BlockNetwork(host, master)
for _, masterAddress := range masterAddresses {
framework.BlockNetwork(host, masterAddress)
}

By("Expect to observe node and pod status change from Ready to NotReady after network partition")
expectNodeReadiness(false, newNode)
@@ -574,11 +578,14 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
go controller.Run(stopCh)

By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
host := framework.GetNodeExternalIP(&node)
master := framework.GetMasterAddress(c)
host, err := framework.GetNodeExternalIP(&node)
framework.ExpectNoError(err)
masterAddresses := framework.GetAllMasterAddresses(c)
defer func() {
By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
framework.UnblockNetwork(host, master)
for _, masterAddress := range masterAddresses {
framework.UnblockNetwork(host, masterAddress)
}

if CurrentGinkgoTestDescription().Failed {
return
@@ -588,7 +595,9 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
expectNodeReadiness(true, newNode)
}()

framework.BlockNetwork(host, master)
for _, masterAddress := range masterAddresses {
framework.BlockNetwork(host, masterAddress)
}

By("Expect to observe node and pod status change from Ready to NotReady after network partition")
expectNodeReadiness(false, newNode)
vendor/k8s.io/kubernetes/test/e2e/apps/rc.go (generated, vendored) | 14

@@ -60,11 +60,21 @@ var _ = SIGDescribe("ReplicationController", func() {
testReplicationControllerConditionCheck(f)
})

It("should adopt matching pods on creation", func() {
/*
Release : v1.13
Testname: Replication Controller, adopt matching pods
Description: An ownerless Pod is created, then a Replication Controller (RC) is created whose label selector will match the Pod. The RC MUST either adopt the Pod or delete and replace it with a new Pod
*/
framework.ConformanceIt("should adopt matching pods on creation", func() {
testRCAdoptMatchingOrphans(f)
})

It("should release no longer matching pods", func() {
/*
Release : v1.13
Testname: Replication Controller, release pods
Description: A Replication Controller (RC) is created, and its Pods are created. When the labels on one of the Pods change to no longer match the RC's label selector, the RC MUST release the Pod and update the Pod's owner references.
*/
framework.ConformanceIt("should release no longer matching pods", func() {
testRCReleaseControlledNotMatching(f)
})
})
vendor/k8s.io/kubernetes/test/e2e/apps/replica_set.go (generated, vendored) | 7

@@ -103,7 +103,12 @@ var _ = SIGDescribe("ReplicaSet", func() {
testReplicaSetConditionCheck(f)
})

It("should adopt matching pods on creation and release no longer matching pods", func() {
/*
Release : v1.13
Testname: Replica Set, adopt matching pods and release non matching pods
Description: A Pod is created, then a Replica Set (RS) whose label selector will match the Pod. The RS MUST either adopt the Pod or delete and replace it with a new Pod. When the labels on one of the Pods owned by the RS change to no longer match the RS's label selector, the RS MUST release the Pod and update the Pod's owner references
*/
framework.ConformanceIt("should adopt matching pods on creation and release no longer matching pods", func() {
testRSAdoptMatchingAndReleaseNotMatching(f)
})
})
vendor/k8s.io/kubernetes/test/e2e/apps/statefulset.go (generated, vendored) | 259

@@ -17,6 +17,7 @@ limitations under the License.
package apps

import (
"context"
"fmt"
"strings"
"time"
@@ -31,6 +32,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -111,7 +113,7 @@ var _ = SIGDescribe("StatefulSet", func() {
By("Verifying statefulset set proper service name")
framework.ExpectNoError(sst.CheckServiceName(ss, headlessSvcName))

cmd := "echo $(hostname) > /data/hostname; sync;"
cmd := "echo $(hostname) | dd of=/data/hostname conv=fsync"
By("Running " + cmd + " in all stateful pods")
framework.ExpectNoError(sst.ExecInStatefulPods(ss, cmd))

@@ -248,6 +250,14 @@ var _ = SIGDescribe("StatefulSet", func() {
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
})

// This can't be Conformance yet because it depends on a default
// StorageClass and a dynamic provisioner.
It("should perform rolling updates and roll backs of template modifications with PVCs", func() {
By("Creating a new StatefulSet with PVCs")
*(ss.Spec.Replicas) = 3
rollbackTest(c, ns, ss)
})

/*
Release : v1.9
Testname: StatefulSet, Rolling Update
@@ -256,116 +266,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ConformanceIt("should perform rolling updates and roll backs of template modifications", func() {
By("Creating a new StatefulSet")
ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
sst := framework.NewStatefulSetTester(c)
sst.SetHttpProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).To(Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
ss.Namespace, ss.Name, updateRevision, currentRevision))
pods := sst.GetPodList(ss)
for i := range pods.Items {
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
currentRevision))
}
sst.SortStatefulPods(pods)
sst.BreakPodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
newImage := NewNginxImage
oldImage := ss.Spec.Template.Spec.Containers[0].Image

By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage))
Expect(oldImage).NotTo(Equal(newImage), "Incorrect test setup: should update to a different image")
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
Expect(err).NotTo(HaveOccurred())

By("Creating a new revision")
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
"Current revision should not equal update revision during rolling update")

By("Updating Pods in reverse ordinal order")
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
sst.RestorePodHttpProbe(ss, &pods.Items[1])
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
updateRevision))
for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(newImage),
fmt.Sprintf(" Pod %s/%s has image %s not have new image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
newImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(updateRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to update revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
updateRevision))
}

By("Rolling back to a previous revision")
sst.BreakPodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
priorRevision := currentRevision
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = oldImage
})
Expect(err).NotTo(HaveOccurred())
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
"Current revision should not equal update revision during roll back")
Expect(priorRevision).To(Equal(updateRevision),
"Prior revision should equal update revision during roll back")

By("Rolling back update in reverse ordinal order")
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
sst.RestorePodHttpProbe(ss, &pods.Items[1])
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(priorRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
updateRevision))

for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(oldImage),
fmt.Sprintf("Pod %s/%s has image %s not equal to previous image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
oldImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(priorRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to prior revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
priorRevision))
}
rollbackTest(c, ns, ss)
})

/*
@@ -700,7 +601,9 @@ var _ = SIGDescribe("StatefulSet", func() {

By("Verifying that stateful set " + ssName + " was scaled up in order")
expectedOrder := []string{ssName + "-0", ssName + "-1", ssName + "-2"}
_, err = watch.Until(framework.StatefulSetTimeout, watcher, func(event watch.Event) (bool, error) {
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulSetTimeout)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) {
if event.Type != watch.Added {
return false, nil
}
@@ -731,7 +634,9 @@ var _ = SIGDescribe("StatefulSet", func() {

By("Verifying that stateful set " + ssName + " was scaled down in reverse order")
expectedOrder = []string{ssName + "-2", ssName + "-1", ssName + "-0"}
_, err = watch.Until(framework.StatefulSetTimeout, watcher, func(event watch.Event) (bool, error) {
ctx, cancel = watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulSetTimeout)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) {
if event.Type != watch.Deleted {
return false, nil
}
@@ -810,7 +715,7 @@ var _ = SIGDescribe("StatefulSet", func() {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
Ports: []v1.ContainerPort{conflictingPort},
},
},
@@ -837,8 +742,10 @@ var _ = SIGDescribe("StatefulSet", func() {
By("Waiting until stateful pod " + statefulPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name)
w, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: statefulPodName}))
framework.ExpectNoError(err)
// we need to get UID from pod in any state and wait until stateful set controller will remove pod atleast once
_, err = watch.Until(framework.StatefulPodTimeout, w, func(event watch.Event) (bool, error) {
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulPodTimeout)
defer cancel()
// we need to get UID from pod in any state and wait until stateful set controller will remove pod at least once
_, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
pod := event.Object.(*v1.Pod)
switch event.Type {
case watch.Deleted:
@@ -862,7 +769,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ExpectNoError(err)

By("Waiting when stateful pod " + statefulPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state")
// we may catch delete event, thats why we are waiting for running phase like this, and not with watch.Until
// we may catch delete event, that's why we are waiting for running phase like this, and not with watchtools.UntilWithoutRetry
Eventually(func() error {
statefulPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(statefulPodName, metav1.GetOptions{})
if err != nil {
@@ -1076,7 +983,7 @@ func (m *mysqlGaleraTester) deploy(ns string) *apps.StatefulSet {
func (m *mysqlGaleraTester) write(statefulPodIndex int, kv map[string]string) {
name := fmt.Sprintf("%v-%d", m.ss.Name, statefulPodIndex)
for k, v := range kv {
cmd := fmt.Sprintf("use statefulset; insert into foo (k, v) values (\"%v\", \"%v\");", k, v)
cmd := fmt.Sprintf("use statefulset; insert into foo (k, v) values (\"%v\", \"%v\");", k, v)
framework.Logf(m.mysqlExec(cmd, m.ss.Namespace, name))
}
}
@@ -1176,3 +1083,119 @@ func pollReadWithTimeout(statefulPod statefulPodTester, statefulPodNumber int, k
}
return err
}

// This function is used by two tests to test StatefulSet rollbacks: one using
// PVCs and one using no storage.
func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
sst := framework.NewStatefulSetTester(c)
sst.SetHttpProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).To(Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
ss.Namespace, ss.Name, updateRevision, currentRevision))
pods := sst.GetPodList(ss)
for i := range pods.Items {
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
currentRevision))
}
sst.SortStatefulPods(pods)
err = sst.BreakPodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
newImage := NewNginxImage
oldImage := ss.Spec.Template.Spec.Containers[0].Image

By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage))
Expect(oldImage).NotTo(Equal(newImage), "Incorrect test setup: should update to a different image")
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
Expect(err).NotTo(HaveOccurred())

By("Creating a new revision")
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
"Current revision should not equal update revision during rolling update")

By("Updating Pods in reverse ordinal order")
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
err = sst.RestorePodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
updateRevision))
for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(newImage),
fmt.Sprintf(" Pod %s/%s has image %s not have new image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
newImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(updateRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to update revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
updateRevision))
}

By("Rolling back to a previous revision")
err = sst.BreakPodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
priorRevision := currentRevision
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = oldImage
})
Expect(err).NotTo(HaveOccurred())
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
"Current revision should not equal update revision during roll back")
Expect(priorRevision).To(Equal(updateRevision),
"Prior revision should equal update revision during roll back")

By("Rolling back update in reverse ordinal order")
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
sst.RestorePodHttpProbe(ss, &pods.Items[1])
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(priorRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
updateRevision))

for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(oldImage),
fmt.Sprintf("Pod %s/%s has image %s not equal to previous image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
oldImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(priorRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to prior revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
priorRevision))
}
}
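Note on the statefulset.go changes: watch.Until(timeout, ...) is replaced by watchtools.UntilWithoutRetry driven by a context created with watchtools.ContextWithOptionalTimeout. A self-contained sketch of that pattern follows, using a fake watcher and an arbitrary 5-minute timeout rather than the framework values.

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/watch"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForFirstAdd returns once the first Added event is observed,
// or with an error when the context-bound timeout expires.
func waitForFirstAdd(w watch.Interface) error {
	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	_, err := watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
		return event.Type == watch.Added, nil
	})
	return err
}

func main() {
	// A fake watcher stands in for a real client-go watch in this sketch.
	fw := watch.NewFake()
	go fw.Add(nil)
	fmt.Println(waitForFirstAdd(fw))
}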
vendor/k8s.io/kubernetes/test/e2e/apps/types.go (generated, vendored) | 4

@@ -31,7 +31,7 @@ var (
CronJobGroupVersionResourceBeta = schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"}
NautilusImage = imageutils.GetE2EImage(imageutils.Nautilus)
KittenImage = imageutils.GetE2EImage(imageutils.Kitten)
NginxImage = imageutils.GetE2EImage(imageutils.NginxSlim)
NewNginxImage = imageutils.GetE2EImage(imageutils.NginxSlimNew)
NginxImage = imageutils.GetE2EImage(imageutils.Nginx)
NewNginxImage = imageutils.GetE2EImage(imageutils.NginxNew)
RedisImage = imageutils.GetE2EImage(imageutils.Redis)
)