vendor updates

Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 18:43:34 +00:00.
vendor/k8s.io/kubernetes/test/e2e/apps/BUILD | 8 (generated, vendored)
@@ -33,13 +33,13 @@ go_library(
"//pkg/controller/daemon:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/job:go_default_library",
"//pkg/controller/node:go_default_library",
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/controller/replicaset:go_default_library",
"//pkg/controller/replication:go_default_library",
"//pkg/kubectl:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/scheduler/schedulercache:go_default_library",
"//pkg/util/pointer:go_default_library",
"//plugin/pkg/scheduler/schedulercache:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
@@ -47,14 +47,12 @@ go_library(
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

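The BUILD dependency changes above track two import-path moves in the vendored tree: the node controller is now consumed from pkg/controller/nodelifecycle, and schedulercache moved from plugin/pkg/scheduler to pkg/scheduler. A minimal Go sketch of a caller using the relocated schedulercache package, assuming the vendored k8s.io/kubernetes tree resolves these imports (the empty node is a placeholder):

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	// Previously imported as "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache".
	"k8s.io/kubernetes/pkg/scheduler/schedulercache"
)

func main() {
	// Build a scheduler-cache view of a single placeholder node, mirroring how
	// canScheduleOnNode in daemon_set.go uses the package.
	node := &v1.Node{}
	nodeInfo := schedulercache.NewNodeInfo()
	nodeInfo.SetNode(node)
	fmt.Printf("cache tracks %d pods on the placeholder node\n", len(nodeInfo.Pods()))
}
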
vendor/k8s.io/kubernetes/test/e2e/apps/cronjob.go | 4 (generated, vendored)
@@ -222,7 +222,7 @@ var _ = SIGDescribe("CronJob", func() {
err = waitForNoJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, true)
Expect(err).NotTo(HaveOccurred())

By("Ensuring MissingJob event has occured")
By("Ensuring MissingJob event has occurred")
err = checkNoEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"})
Expect(err).To(HaveOccurred())

@@ -430,7 +430,7 @@ func waitForAnyFinishedJob(c clientset.Interface, ns string) error {
})
}

// checkNoEventWithReason checks no events with a reason within a list has occured
// checkNoEventWithReason checks no events with a reason within a list has occurred
func checkNoEventWithReason(c clientset.Interface, ns, cronJobName string, reasons []string) error {
sj, err := c.BatchV1beta1().CronJobs(ns).Get(cronJobName, metav1.GetOptions{})
if err != nil {

vendor/k8s.io/kubernetes/test/e2e/apps/daemon_restart.go | 8 (generated, vendored)
@@ -249,7 +249,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
// Requires master ssh access.
framework.SkipUnlessProviderIs("gce", "aws")
restarter := NewRestartConfig(
framework.GetMasterHost(), "kube-controller", ports.ControllerManagerPort, restartPollInterval, restartTimeout)
framework.GetMasterHost(), "kube-controller", ports.InsecureKubeControllerManagerPort, restartPollInterval, restartTimeout)
restarter.restart()

// The intent is to ensure the replication controller manager has observed and reported status of
@@ -257,7 +257,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
// that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC
// to the same size achieves this, because the scale operation advances the RC's sequence number
// and awaits it to be observed and reported back in the RC's status.
framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods, true)
framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods, true)

// Only check the keys, the pods can be different if the kubelet updated it.
// TODO: Can it really?
@@ -288,9 +288,9 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
restarter.kill()
// This is best effort to try and create pods while the scheduler is down,
// since we don't know exactly when it is restarted after the kill signal.
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods+5, false))
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods+5, false))
restarter.waitUp()
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods+5, true))
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods+5, true))
})

It("Kubelet should not restart containers across restart", func() {

vendor/k8s.io/kubernetes/test/e2e/apps/daemon_set.go | 196 (generated, vendored)
@@ -22,10 +22,8 @@ import (
"strings"
"time"

apps "k8s.io/api/apps/v1beta1"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -39,7 +37,7 @@ import (
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/daemon"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
@@ -67,7 +65,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {

AfterEach(func() {
// Clean up
daemonsets, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{})
daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred(), "unable to dump DaemonSets")
if daemonsets != nil && len(daemonsets.Items) > 0 {
for _, ds := range daemonsets.Items {
@@ -80,7 +78,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
}
}
if daemonsets, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
framework.Logf("daemonset: %s", runtime.EncodeOrDie(legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), daemonsets))
} else {
framework.Logf("unable to dump daemonsets: %v", err)
@@ -114,7 +112,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
label := map[string]string{daemonsetNameLabel: dsName}

By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
Expect(err).NotTo(HaveOccurred())

By("Check that daemon pods launch on every node of the cluster.")
@@ -138,7 +136,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.Logf("Creating daemon %q with a node selector", dsName)
ds := newDaemonSet(dsName, image, complexLabel)
ds.Spec.Template.Spec.NodeSelector = nodeSelector
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
Expect(err).NotTo(HaveOccurred())

By("Initially, daemon pods should not be running on any nodes.")
@@ -167,7 +165,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate")
patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`,
daemonsetColorLabel, greenNode.Labels[daemonsetColorLabel])
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
Expect(err).NotTo(HaveOccurred(), "error patching daemon set")
daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels)
Expect(len(daemonSetLabels)).To(Equal(1))
@@ -199,7 +197,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
},
},
}
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
Expect(err).NotTo(HaveOccurred())

By("Initially, daemon pods should not be running on any nodes.")
@@ -229,7 +227,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
label := map[string]string{daemonsetNameLabel: dsName}

By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName))
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
Expect(err).NotTo(HaveOccurred())

By("Check that daemon pods launch on every node of the cluster.")
@@ -253,54 +251,43 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
label := map[string]string{daemonsetNameLabel: dsName}

framework.Logf("Creating simple daemon set %s", dsName)
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
Expect(err).NotTo(HaveOccurred())
Expect(ds.Spec.TemplateGeneration).To(Equal(int64(1)))

By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")

By("Make sure all daemon pods have correct template generation 1")
templateGeneration := "1"
err = checkDaemonPodsTemplateGeneration(c, ns, label, "1")
Expect(err).NotTo(HaveOccurred())

// Check history and labels
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
waitForHistoryCreated(c, ns, label, 1)
first := curHistory(listDaemonHistories(c, ns, label), ds)
firstHash := first.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
firstHash := first.Labels[apps.DefaultDaemonSetUniqueLabelKey]
Expect(first.Revision).To(Equal(int64(1)))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash, templateGeneration)
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash)

By("Update daemon pods image.")
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
Expect(err).NotTo(HaveOccurred())
Expect(ds.Spec.TemplateGeneration).To(Equal(int64(2)))

By("Check that daemon pods images aren't updated.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0))
Expect(err).NotTo(HaveOccurred())

By("Make sure all daemon pods have correct template generation 1")
err = checkDaemonPodsTemplateGeneration(c, ns, label, templateGeneration)
Expect(err).NotTo(HaveOccurred())

By("Check that daemon pods are still running on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")

// Check history and labels
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
waitForHistoryCreated(c, ns, label, 2)
cur := curHistory(listDaemonHistories(c, ns, label), ds)
Expect(cur.Revision).To(Equal(int64(2)))
Expect(cur.Labels[extensions.DefaultDaemonSetUniqueLabelKey]).NotTo(Equal(firstHash))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash, templateGeneration)
Expect(cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]).NotTo(Equal(firstHash))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash)
})

It("Should update pod when spec was updated and update strategy is RollingUpdate", func() {
@@ -309,11 +296,9 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
templateGeneration := int64(999)
framework.Logf("Creating simple daemon set %s with templateGeneration %d", dsName, templateGeneration)
ds := newDaemonSet(dsName, image, label)
ds.Spec.TemplateGeneration = templateGeneration
ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
Expect(err).NotTo(HaveOccurred())
Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))

By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
@@ -324,20 +309,19 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
Expect(err).NotTo(HaveOccurred())

// Check history and labels
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
waitForHistoryCreated(c, ns, label, 1)
cur := curHistory(listDaemonHistories(c, ns, label), ds)
hash := cur.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
hash := cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
Expect(cur.Revision).To(Equal(int64(1)))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash, fmt.Sprint(templateGeneration))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash)

By("Update daemon pods image.")
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
Expect(err).NotTo(HaveOccurred())
templateGeneration++
Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))

By("Check that daemon pods images are updated.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1))
@@ -352,90 +336,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")

// Check history and labels
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
waitForHistoryCreated(c, ns, label, 2)
cur = curHistory(listDaemonHistories(c, ns, label), ds)
hash = cur.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
hash = cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
Expect(cur.Revision).To(Equal(int64(2)))
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash, fmt.Sprint(templateGeneration))
})

It("Should adopt existing pods when creating a RollingUpdate DaemonSet regardless of templateGeneration", func() {
label := map[string]string{daemonsetNameLabel: dsName}

// 1. Create a RollingUpdate DaemonSet
templateGeneration := int64(999)
framework.Logf("Creating simple RollingUpdate DaemonSet %s with templateGeneration %d", dsName, templateGeneration)
ds := newDaemonSet(dsName, image, label)
ds.Spec.TemplateGeneration = templateGeneration
ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
Expect(err).NotTo(HaveOccurred())
Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))

framework.Logf("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")

framework.Logf("Make sure all daemon pods have correct template generation %d", templateGeneration)
err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
Expect(err).NotTo(HaveOccurred())

// 2. Orphan DaemonSet pods
framework.Logf("Deleting DaemonSet %s and orphaning its pods and history", dsName)
deleteDaemonSetAndOrphan(c, ds)

// 3. Adopt DaemonSet pods (no restart)
newDSName := "adopt"
framework.Logf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newDSName)
newDS := newDaemonSet(newDSName, image, label)
newDS.Spec.TemplateGeneration = templateGeneration
newDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
newDS, err = c.ExtensionsV1beta1().DaemonSets(ns).Create(newDS)
Expect(err).NotTo(HaveOccurred())
Expect(newDS.Spec.TemplateGeneration).To(Equal(templateGeneration))
Expect(apiequality.Semantic.DeepEqual(newDS.Spec.Template, ds.Spec.Template)).To(BeTrue(), "DaemonSet template should match to adopt pods")

framework.Logf("Wait for pods and history to be adopted by DaemonSet %s", newDS.Name)
waitDaemonSetAdoption(c, newDS, ds.Name, templateGeneration)

// 4. Orphan DaemonSet pods again
framework.Logf("Deleting DaemonSet %s and orphaning its pods and history", newDSName)
deleteDaemonSetAndOrphan(c, newDS)

// 5. Adopt DaemonSet pods (no restart) as long as template matches, even when templateGeneration doesn't match
newAdoptDSName := "adopt-template-matches"
framework.Logf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newAdoptDSName)
newAdoptDS := newDaemonSet(newAdoptDSName, image, label)
newAdoptDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
newAdoptDS, err = c.ExtensionsV1beta1().DaemonSets(ns).Create(newAdoptDS)
Expect(err).NotTo(HaveOccurred())
Expect(newAdoptDS.Spec.TemplateGeneration).To(Equal(int64(1)))
Expect(newAdoptDS.Spec.TemplateGeneration).NotTo(Equal(templateGeneration))
Expect(apiequality.Semantic.DeepEqual(newAdoptDS.Spec.Template, newDS.Spec.Template)).To(BeTrue(), "DaemonSet template should match to adopt pods")

framework.Logf(fmt.Sprintf("Wait for pods and history to be adopted by DaemonSet %s", newAdoptDS.Name))
waitDaemonSetAdoption(c, newAdoptDS, ds.Name, templateGeneration)

// 6. Orphan DaemonSet pods again
framework.Logf("Deleting DaemonSet %s and orphaning its pods and history", newAdoptDSName)
deleteDaemonSetAndOrphan(c, newAdoptDS)

// 7. Adopt DaemonSet pods (no restart) as long as templateGeneration matches, even when template doesn't match
newAdoptDSName = "adopt-template-generation-matches"
framework.Logf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newAdoptDSName)
newAdoptDS = newDaemonSet(newAdoptDSName, image, label)
newAdoptDS.Spec.Template.Spec.Containers[0].Name = "not-match"
newAdoptDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
newAdoptDS.Spec.TemplateGeneration = templateGeneration
newAdoptDS, err = c.ExtensionsV1beta1().DaemonSets(ns).Create(newAdoptDS)
Expect(err).NotTo(HaveOccurred())
Expect(newAdoptDS.Spec.TemplateGeneration).To(Equal(templateGeneration))
Expect(apiequality.Semantic.DeepEqual(newAdoptDS.Spec.Template, newDS.Spec.Template)).NotTo(BeTrue(), "DaemonSet template should not match")

framework.Logf("Wait for pods and history to be adopted by DaemonSet %s", newAdoptDS.Name)
waitDaemonSetAdoption(c, newAdoptDS, ds.Name, templateGeneration)
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash)
})

It("Should rollback without unnecessary restarts", func() {
@@ -445,8 +352,8 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.Logf("Create a RollingUpdate DaemonSet")
label := map[string]string{daemonsetNameLabel: dsName}
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
Expect(err).NotTo(HaveOccurred())

framework.Logf("Check that daemon pods launch on every node of the cluster")
@@ -456,7 +363,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.Logf("Update the DaemonSet to trigger a rollout")
// We use a nonexistent image here, so that we make sure it won't finish
newImage := "foo:non-existent"
newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *extensions.DaemonSet) {
newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
Expect(err).NotTo(HaveOccurred())
@@ -483,7 +390,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
Expect(len(newPods)).NotTo(Equal(0))

framework.Logf("Roll back the DaemonSet before rollout is complete")
rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *extensions.DaemonSet) {
rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
update.Spec.Template.Spec.Containers[0].Image = image
})
Expect(err).NotTo(HaveOccurred())
@@ -511,11 +418,11 @@ func getDaemonSetImagePatch(containerName, containerImage string) string {

// deleteDaemonSetAndOrphan deletes the given DaemonSet and orphans all its dependents.
// It also checks that all dependents are orphaned, and the DaemonSet is deleted.
func deleteDaemonSetAndOrphan(c clientset.Interface, ds *extensions.DaemonSet) {
func deleteDaemonSetAndOrphan(c clientset.Interface, ds *apps.DaemonSet) {
trueVar := true
deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(ds.UID))
err := c.ExtensionsV1beta1().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions)
err := c.AppsV1().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions)
Expect(err).NotTo(HaveOccurred())

err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsOrphaned(c, ds.Namespace, ds.Spec.Template.Labels))
@@ -526,12 +433,12 @@ func deleteDaemonSetAndOrphan(c clientset.Interface, ds *extensions.DaemonSet) {
Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet to be deleted")
}

func newDaemonSet(dsName, image string, label map[string]string) *extensions.DaemonSet {
return &extensions.DaemonSet{
func newDaemonSet(dsName, image string, label map[string]string) *apps.DaemonSet {
return &apps.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: dsName,
},
Spec: extensions.DaemonSetSpec{
Spec: apps.DaemonSetSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: label,
@@ -623,7 +530,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
return newNode, nil
}

func checkDaemonPodOnNodes(f *framework.Framework, ds *extensions.DaemonSet, nodeNames []string) func() (bool, error) {
func checkDaemonPodOnNodes(f *framework.Framework, ds *apps.DaemonSet, nodeNames []string) func() (bool, error) {
return func() (bool, error) {
podList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
@@ -662,14 +569,14 @@ func checkDaemonPodOnNodes(f *framework.Framework, ds *extensions.DaemonSet, nod
}
}

func checkRunningOnAllNodes(f *framework.Framework, ds *extensions.DaemonSet) func() (bool, error) {
func checkRunningOnAllNodes(f *framework.Framework, ds *apps.DaemonSet) func() (bool, error) {
return func() (bool, error) {
nodeNames := schedulableNodes(f.ClientSet, ds)
return checkDaemonPodOnNodes(f, ds, nodeNames)()
}
}

func schedulableNodes(c clientset.Interface, ds *extensions.DaemonSet) []string {
func schedulableNodes(c clientset.Interface, ds *apps.DaemonSet) []string {
nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err)
nodeNames := make([]string, 0)
@@ -696,7 +603,7 @@ func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]st
}

// canScheduleOnNode checks if a given DaemonSet can schedule pods on the given node
func canScheduleOnNode(node v1.Node, ds *extensions.DaemonSet) bool {
func canScheduleOnNode(node v1.Node, ds *apps.DaemonSet) bool {
newPod := daemon.NewPod(ds, node.Name)
nodeInfo := schedulercache.NewNodeInfo()
nodeInfo.SetNode(&node)
@@ -708,12 +615,12 @@ func canScheduleOnNode(node v1.Node, ds *extensions.DaemonSet) bool {
return fit
}

func checkRunningOnNoNodes(f *framework.Framework, ds *extensions.DaemonSet) func() (bool, error) {
func checkRunningOnNoNodes(f *framework.Framework, ds *apps.DaemonSet) func() (bool, error) {
return checkDaemonPodOnNodes(f, ds, make([]string, 0))
}

func checkDaemonStatus(f *framework.Framework, dsName string) error {
ds, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).Get(dsName, metav1.GetOptions{})
ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(dsName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("Could not get daemon set from v1.")
}
@@ -724,7 +631,7 @@ func checkDaemonStatus(f *framework.Framework, dsName string) error {
return nil
}

func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *extensions.DaemonSet, image string, maxUnavailable int) func() (bool, error) {
func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *apps.DaemonSet, image string, maxUnavailable int) func() (bool, error) {
return func() (bool, error) {
podList, err := c.CoreV1().Pods(ds.Namespace).List(metav1.ListOptions{})
if err != nil {
@@ -770,7 +677,7 @@ func checkDaemonPodsTemplateGeneration(c clientset.Interface, ns string, label m
if !controller.IsPodActive(&pod) {
continue
}
podTemplateGeneration := pod.Labels[extensions.DaemonSetTemplateGenerationKey]
podTemplateGeneration := pod.Labels[apps.DeprecatedTemplateGeneration]
if podTemplateGeneration != templateGeneration {
return fmt.Errorf("expected pod %s/%s template generation %s, but got %s", pod.Namespace, pod.Name, templateGeneration, podTemplateGeneration)
}
@@ -780,7 +687,7 @@ func checkDaemonPodsTemplateGeneration(c clientset.Interface, ns string, label m

func checkDaemonSetDeleted(c clientset.Interface, ns, name string) func() (bool, error) {
return func() (bool, error) {
_, err := c.ExtensionsV1beta1().DaemonSets(ns).Get(name, metav1.GetOptions{})
_, err := c.AppsV1().DaemonSets(ns).Get(name, metav1.GetOptions{})
if !apierrs.IsNotFound(err) {
return false, err
}
@@ -840,7 +747,7 @@ func checkDaemonSetHistoryAdopted(c clientset.Interface, ns string, dsUID types.
}
}

func waitDaemonSetAdoption(c clientset.Interface, ds *extensions.DaemonSet, podPrefix string, podTemplateGeneration int64) {
func waitDaemonSetAdoption(c clientset.Interface, ds *apps.DaemonSet, podPrefix string, podTemplateGeneration int64) {
ns := ds.Namespace
label := ds.Spec.Template.Labels

@@ -868,16 +775,13 @@ func checkDaemonSetPodsName(c clientset.Interface, ns, prefix string, label map[
return nil
}

func checkDaemonSetPodsLabels(podList *v1.PodList, hash, templateGeneration string) {
func checkDaemonSetPodsLabels(podList *v1.PodList, hash string) {
for _, pod := range podList.Items {
podHash := pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
podTemplate := pod.Labels[extensions.DaemonSetTemplateGenerationKey]
podHash := pod.Labels[apps.DefaultDaemonSetUniqueLabelKey]
Expect(len(podHash)).To(BeNumerically(">", 0))
if len(hash) > 0 {
Expect(podHash).To(Equal(hash))
}
Expect(len(podTemplate)).To(BeNumerically(">", 0))
Expect(podTemplate).To(Equal(templateGeneration))
}
}

@@ -902,19 +806,19 @@ func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]st
func listDaemonHistories(c clientset.Interface, ns string, label map[string]string) *apps.ControllerRevisionList {
selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
historyList, err := c.AppsV1beta1().ControllerRevisions(ns).List(options)
historyList, err := c.AppsV1().ControllerRevisions(ns).List(options)
Expect(err).NotTo(HaveOccurred())
Expect(len(historyList.Items)).To(BeNumerically(">", 0))
return historyList
}

func curHistory(historyList *apps.ControllerRevisionList, ds *extensions.DaemonSet) *apps.ControllerRevision {
func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *apps.ControllerRevision {
var curHistory *apps.ControllerRevision
foundCurHistories := 0
for i := range historyList.Items {
history := &historyList.Items[i]
// Every history should have the hash label
Expect(len(history.Labels[extensions.DefaultDaemonSetUniqueLabelKey])).To(BeNumerically(">", 0))
Expect(len(history.Labels[apps.DefaultDaemonSetUniqueLabelKey])).To(BeNumerically(">", 0))
match, err := daemon.Match(ds, history)
Expect(err).NotTo(HaveOccurred())
if match {

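Almost every hunk in daemon_set.go is the same mechanical conversion: DaemonSet objects and clients move from extensions/v1beta1 (and apps/v1beta1 ControllerRevisions) to apps/v1, and the templateGeneration-based checks give way to the controller-revision hash label. A minimal sketch of the pattern the file converges on; the name, labels and image are illustrative, and the Create signature assumes the pre-context client-go vendored in this tree:

package example

import (
	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

func createExampleDaemonSet(c clientset.Interface, ns string) (*apps.DaemonSet, error) {
	label := map[string]string{"daemonset-name": "example-ds"}
	ds := &apps.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{Name: "example-ds"},
		Spec: apps.DaemonSetSpec{
			// apps/v1 requires an explicit selector that matches the template labels.
			Selector: &metav1.LabelSelector{MatchLabels: label},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: label},
				Spec: v1.PodSpec{
					Containers: []v1.Container{{Name: "app", Image: "k8s.gcr.io/pause:3.1"}},
				},
			},
			// The RollingUpdate strategy type now comes from the apps group as well.
			UpdateStrategy: apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType},
		},
	}
	// Create through the apps/v1 client instead of c.ExtensionsV1beta1().
	return c.AppsV1().DaemonSets(ns).Create(ds)
}
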
vendor/k8s.io/kubernetes/test/e2e/apps/deployment.go | 143 (generated, vendored)
@@ -92,6 +92,9 @@ var _ = SIGDescribe("Deployment", func() {
It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func() {
testDeploymentsControllerRef(f)
})
It("deployment should support proportional scaling", func() {
testProportionalScalingDeployment(f)
})
// TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues
// See https://github.com/kubernetes/kubernetes/issues/29229
})
@@ -423,7 +426,7 @@ func testRolloverDeployment(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred())
framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
err = framework.WaitForDeploymentUpdatedReplicasLTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
err = framework.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
// Check if it's updated to revision 1 correctly
framework.Logf("Check revision of new replica set for deployment %q", deploymentName)
err = framework.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
@@ -760,11 +763,18 @@ func testDeploymentsControllerRef(f *framework.Framework) {
err = framework.WaitForDeploymentComplete(c, deploy)
Expect(err).NotTo(HaveOccurred())

framework.Logf("Checking its ReplicaSet has the right controllerRef")
framework.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName)
rsList := listDeploymentReplicaSets(c, ns, podLabels)
Expect(len(rsList.Items)).Should(Equal(1))

framework.Logf("Obtaining the ReplicaSet's UID")
orphanedRSUID := rsList.Items[0].UID

framework.Logf("Checking the ReplicaSet has the right controllerRef")
err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
Expect(err).NotTo(HaveOccurred())

framework.Logf("Deleting Deployment %q and orphaning its ReplicaSets", deploymentName)
framework.Logf("Deleting Deployment %q and orphaning its ReplicaSet", deploymentName)
err = orphanDeploymentReplicaSets(c, deploy)
Expect(err).NotTo(HaveOccurred())

@@ -783,6 +793,133 @@ func testDeploymentsControllerRef(f *framework.Framework) {
framework.Logf("Waiting for the ReplicaSet to have the right controllerRef")
err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
Expect(err).NotTo(HaveOccurred())

framework.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName)
rsList = listDeploymentReplicaSets(c, ns, podLabels)
Expect(len(rsList.Items)).Should(Equal(1))

framework.Logf("Verifying the ReplicaSet has the same UID as the orphaned ReplicaSet")
Expect(rsList.Items[0].UID).Should(Equal(orphanedRSUID))
}

// testProportionalScalingDeployment tests that when a RollingUpdate Deployment is scaled in the middle
// of a rollout (either in progress or paused), then the Deployment will balance additional replicas
// in existing active ReplicaSets (ReplicaSets with more than 0 replica) in order to mitigate risk.
func testProportionalScalingDeployment(f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet

podLabels := map[string]string{"name": NginxImageName}
replicas := int32(10)

// Create a nginx deployment.
deploymentName := "nginx-deployment"
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
d.Spec.Strategy.RollingUpdate = new(extensions.RollingUpdateDeployment)
d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)

framework.Logf("Creating deployment %q", deploymentName)
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())

framework.Logf("Waiting for observed generation %d", deployment.Generation)
Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred())

// Verify that the required pods have come up.
framework.Logf("Waiting for all required pods to come up")
err = framework.VerifyPodsRunning(c, ns, NginxImageName, false, *(deployment.Spec.Replicas))
Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)

framework.Logf("Waiting for deployment %q to complete", deployment.Name)
Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())

firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
Expect(err).NotTo(HaveOccurred())

// Update the deployment with a non-existent image so that the new replica set
// will be blocked to simulate a partial rollout.
framework.Logf("Updating deployment %q with a non-existent image", deploymentName)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
update.Spec.Template.Spec.Containers[0].Image = "nginx:404"
})
Expect(err).NotTo(HaveOccurred())

framework.Logf("Waiting for observed generation %d", deployment.Generation)
Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred())

// Checking state of first rollout's replicaset.
maxUnavailable, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, int(*(deployment.Spec.Replicas)), false)
Expect(err).NotTo(HaveOccurred())

// First rollout's replicaset should have Deployment's (replicas - maxUnavailable) = 10 - 2 = 8 available replicas.
minAvailableReplicas := replicas - int32(maxUnavailable)
framework.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas)
Expect(framework.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)).NotTo(HaveOccurred())

// First rollout's replicaset should have .spec.replicas = 8 too.
framework.Logf("Waiting for the first rollout's replicaset to have .spec.replicas = %d", minAvailableReplicas)
Expect(framework.WaitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas)).NotTo(HaveOccurred())

// The desired replicas wait makes sure that the RS controller has created expected number of pods.
framework.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
firstRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForReplicaSetDesiredReplicas(c.ExtensionsV1beta1(), firstRS)
Expect(err).NotTo(HaveOccurred())

// Checking state of second rollout's replicaset.
secondRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
Expect(err).NotTo(HaveOccurred())

maxSurge, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), false)
Expect(err).NotTo(HaveOccurred())

// Second rollout's replicaset should have 0 available replicas.
framework.Logf("Verifying that the second rollout's replicaset has .status.availableReplicas = 0")
Expect(secondRS.Status.AvailableReplicas).Should(Equal(int32(0)))

// Second rollout's replicaset should have Deployment's (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas.
newReplicas := replicas + int32(maxSurge) - minAvailableReplicas
framework.Logf("Waiting for the second rollout's replicaset to have .spec.replicas = %d", newReplicas)
Expect(framework.WaitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas)).NotTo(HaveOccurred())

// The desired replicas wait makes sure that the RS controller has created expected number of pods.
framework.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
secondRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForReplicaSetDesiredReplicas(c.ExtensionsV1beta1(), secondRS)
Expect(err).NotTo(HaveOccurred())

// Check the deployment's minimum availability.
framework.Logf("Verifying that deployment %q has minimum required number of available replicas", deploymentName)
if deployment.Status.AvailableReplicas < minAvailableReplicas {
Expect(fmt.Errorf("observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, minAvailableReplicas)).NotTo(HaveOccurred())
}

// Scale the deployment to 30 replicas.
newReplicas = int32(30)
framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
update.Spec.Replicas = &newReplicas
})
Expect(err).NotTo(HaveOccurred())

framework.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName)
firstRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
secondRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())

// First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas.
// Note that 12 comes from rounding (30-10)*(8/13) to nearest integer.
framework.Logf("Verifying that first rollout's replicaset has .spec.replicas = 20")
Expect(framework.WaitForReplicaSetTargetSpecReplicas(c, firstRS, 20)).NotTo(HaveOccurred())

// Second rollout's replicaset should have .spec.replicas = 5 + (30-10)*(5/13) = 5 + 8 = 13 replicas.
// Note that 8 comes from rounding (30-10)*(5/13) to nearest integer.
framework.Logf("Verifying that second rollout's replicaset has .spec.replicas = 13")
Expect(framework.WaitForReplicaSetTargetSpecReplicas(c, secondRS, 13)).NotTo(HaveOccurred())
}

func checkDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) error {

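The new proportional-scaling test encodes its expectations as arithmetic in the comments above: 10 replicas with maxSurge=3 and maxUnavailable=2 give an 8/5 split across the two ReplicaSets, and scaling to 30 splits the extra 20 replicas into 20/13. A self-contained sketch of that arithmetic; it mirrors the comments' nearest-integer rounding and is only a simplified model, since the real deployment controller also distributes rounding leftovers:

package main

import (
	"fmt"
	"math"
)

// proportionalShare returns rsReplicas plus its share of the added replicas,
// weighted by its fraction of the current total and rounded to the nearest integer.
func proportionalShare(rsReplicas, currentTotal, added int) int {
	return rsReplicas + int(math.Round(float64(added)*float64(rsReplicas)/float64(currentTotal)))
}

func main() {
	replicas, maxSurge, maxUnavailable := 10, 3, 2

	firstRS := replicas - maxUnavailable      // 10 - 2 = 8 replicas in the old ReplicaSet
	secondRS := replicas + maxSurge - firstRS // 10 + 3 - 8 = 5 replicas in the new ReplicaSet
	currentTotal := firstRS + secondRS        // 13

	added := 30 - replicas // scale the Deployment from 10 to 30
	fmt.Println(proportionalShare(firstRS, currentTotal, added))  // 8 + round(20*8/13) = 20
	fmt.Println(proportionalShare(secondRS, currentTotal, added)) // 5 + round(20*5/13) = 13
}
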
vendor/k8s.io/kubernetes/test/e2e/apps/disruption.go | 13 (generated, vendored)
@@ -30,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)

// schedulingTimeout is longer specifically because sometimes we need to wait
@@ -64,7 +65,7 @@ var _ = SIGDescribe("DisruptionController", func() {
// Since disruptionAllowed starts out 0, if we see it ever become positive,
// that means the controller is working.
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
pdb, err := cs.Policy().PodDisruptionBudgets(ns).Get("foo", metav1.GetOptions{})
pdb, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get("foo", metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -226,7 +227,7 @@ func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable
MinAvailable: &minAvailable,
},
}
_, err := cs.Policy().PodDisruptionBudgets(ns).Create(&pdb)
_, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb)
Expect(err).NotTo(HaveOccurred())
}

@@ -241,7 +242,7 @@ func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, maxUnavail
MaxUnavailable: &maxUnavailable,
},
}
_, err := cs.Policy().PodDisruptionBudgets(ns).Create(&pdb)
_, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb)
Expect(err).NotTo(HaveOccurred())
}

@@ -257,7 +258,7 @@ func createPodsOrDie(cs kubernetes.Interface, ns string, n int) {
Containers: []v1.Container{
{
Name: "busybox",
Image: "gcr.io/google_containers/echoserver:1.6",
Image: imageutils.GetE2EImage(imageutils.EchoServer),
},
},
RestartPolicy: v1.RestartPolicyAlways,
@@ -301,7 +302,7 @@ func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) {
func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclusive bool) {
container := v1.Container{
Name: "busybox",
Image: "gcr.io/google_containers/echoserver:1.6",
Image: imageutils.GetE2EImage(imageutils.EchoServer),
}
if exclusive {
container.Ports = []v1.ContainerPort{
@@ -330,6 +331,6 @@ func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclu
},
}

_, err := cs.Extensions().ReplicaSets(ns).Create(rs)
_, err := cs.ExtensionsV1beta1().ReplicaSets(ns).Create(rs)
framework.ExpectNoError(err, "Creating replica set %q in namespace %q", rs.Name, ns)
}

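The disruption.go hunks replace the untyped cs.Policy() and cs.Extensions() group accessors with their versioned equivalents. A minimal sketch of the createPDBMinAvailableOrDie pattern after that change; the PDB selector is illustrative, and the Create signature assumes the pre-context client-go vendored here:

package example

import (
	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes"
)

func createMinAvailablePDB(cs kubernetes.Interface, ns string, minAvailable intstr.IntOrString) error {
	pdb := &policyv1beta1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: policyv1beta1.PodDisruptionBudgetSpec{
			// Illustrative selector; the e2e test selects its own pod labels.
			Selector:     &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
			MinAvailable: &minAvailable,
		},
	}
	// Versioned accessor instead of the deprecated cs.Policy() shortcut.
	_, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(pdb)
	return err
}

Calling createMinAvailablePDB(cs, ns, intstr.FromInt(2)) would ask the disruption controller to keep at least two matching pods available.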
vendor/k8s.io/kubernetes/test/e2e/apps/job.go | 7 (generated, vendored)
@@ -77,12 +77,15 @@ var _ = SIGDescribe("Job", func() {
// Worst case analysis: 15 failures, each taking 1 minute to
// run due to some slowness, 1 in 2^15 chance of happening,
// causing test flake. Should be very rare.
job := framework.NewTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, completions, nil, 999)
// With the introduction of backoff limit and high failure rate this
// is hitting its timeout, the 3 is a reasonable that should make this
// test less flaky, for now.
job := framework.NewTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, 3, nil, 999)
job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())

By("Ensuring job reaches completions")
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
Expect(err).NotTo(HaveOccurred())
})

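The worst-case comment above reasons that a randomly succeeding pod has to fail 15 times in a row before the job flakes, which happens with probability 1/2^15. A quick check of that number (plain arithmetic, nothing Kubernetes-specific):

package main

import "fmt"

func main() {
	pFail := 0.5            // the test pod succeeds or fails with equal probability
	consecutiveFailures := 15
	p := 1.0
	for i := 0; i < consecutiveFailures; i++ {
		p *= pFail
	}
	// Prints roughly 0.000031, i.e. about 1 in 32768 runs.
	fmt.Printf("P(15 straight failures) = %.6f (about 1 in %d)\n", p, 1<<uint(consecutiveFailures))
}
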
vendor/k8s.io/kubernetes/test/e2e/apps/network_partition.go | 13 (generated, vendored)
@@ -33,7 +33,7 @@ import (
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
api "k8s.io/kubernetes/pkg/apis/core"
nodepkg "k8s.io/kubernetes/pkg/controller/node"
nodepkg "k8s.io/kubernetes/pkg/controller/nodelifecycle"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
@@ -104,25 +104,20 @@ func newPodOnNode(c clientset.Interface, namespace, podName, nodeName string) er

var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
f := framework.NewDefaultFramework("network-partition")
var systemPodsNo int32
var c clientset.Interface
var ns string
ignoreLabels := framework.ImagePullerLabels
var group string

BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
systemPods, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
_, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
Expect(err).NotTo(HaveOccurred())
systemPodsNo = int32(len(systemPods))

// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
framework.SkipUnlessProviderIs("gke", "aws")
if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
} else {
group = framework.TestContext.CloudConfig.NodeInstanceGroup
}
})

@@ -371,7 +366,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
petMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
ps := framework.NewStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
_, err := c.AppsV1beta1().StatefulSets(ns).Create(ps)
_, err := c.AppsV1().StatefulSets(ns).Create(ps)
Expect(err).NotTo(HaveOccurred())

pst := framework.NewStatefulSetTester(c)
@@ -387,7 +382,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {

It("should not reschedule stateful pods if there is a network partition [Slow] [Disruptive]", func() {
ps := framework.NewStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels)
_, err := c.AppsV1beta1().StatefulSets(ns).Create(ps)
_, err := c.AppsV1().StatefulSets(ns).Create(ps)
Expect(err).NotTo(HaveOccurred())

pst := framework.NewStatefulSetTester(c)

90
vendor/k8s.io/kubernetes/test/e2e/apps/statefulset.go
generated
vendored
90
vendor/k8s.io/kubernetes/test/e2e/apps/statefulset.go
generated
vendored
@ -23,8 +23,7 @@ import (
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
apps "k8s.io/api/apps/v1beta1"
|
||||
appsv1beta2 "k8s.io/api/apps/v1beta2"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
klabels "k8s.io/apimachinery/pkg/labels"
|
||||
@ -89,13 +88,15 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
framework.DeleteAllStatefulSets(c, ns)
|
||||
})
|
||||
|
||||
// This can't be Conformance yet because it depends on a default
|
||||
// StorageClass and a dynamic provisioner.
|
||||
It("should provide basic identity", func() {
|
||||
By("Creating statefulset " + ssName + " in namespace " + ns)
|
||||
*(ss.Spec.Replicas) = 3
|
||||
sst := framework.NewStatefulSetTester(c)
|
||||
sst.PauseNewPods(ss)
|
||||
|
||||
_, err := c.AppsV1beta1().StatefulSets(ns).Create(ss)
|
||||
_, err := c.AppsV1().StatefulSets(ns).Create(ss)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Saturating stateful set " + ss.Name)
|
||||
@ -126,6 +127,8 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
framework.ExpectNoError(sst.ExecInStatefulPods(ss, cmd))
|
||||
})
|
||||
|
||||
// This can't be Conformance yet because it depends on a default
|
||||
// StorageClass and a dynamic provisioner.
|
||||
It("should adopt matching orphans and release non-matching pods", func() {
|
||||
By("Creating statefulset " + ssName + " in namespace " + ns)
|
||||
*(ss.Spec.Replicas) = 1
|
||||
@ -135,7 +138,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
// Replace ss with the one returned from Create() so it has the UID.
|
||||
// Save Kind since it won't be populated in the returned ss.
|
||||
kind := ss.Kind
|
||||
ss, err := c.AppsV1beta1().StatefulSets(ns).Create(ss)
|
||||
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
ss.Kind = kind
|
||||
|
||||
@ -209,13 +212,15 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
)).To(Succeed(), "wait for pod %q to be readopted", pod.Name)
|
||||
})
|
||||
|
||||
// This can't be Conformance yet because it depends on a default
|
||||
// StorageClass and a dynamic provisioner.
|
||||
It("should not deadlock when a pod's predecessor fails", func() {
|
||||
By("Creating statefulset " + ssName + " in namespace " + ns)
|
||||
*(ss.Spec.Replicas) = 2
|
||||
sst := framework.NewStatefulSetTester(c)
|
||||
sst.PauseNewPods(ss)
|
||||
|
||||
_, err := c.AppsV1beta1().StatefulSets(ns).Create(ss)
|
||||
_, err := c.AppsV1().StatefulSets(ns).Create(ss)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
sst.WaitForRunning(1, 0, ss)
|
||||
@ -243,12 +248,20 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
|
||||
})
|
||||
|
||||
It("should perform rolling updates and roll backs of template modifications", func() {
|
||||
/*
|
||||
Testname: StatefulSet-RollingUpdate
|
||||
Description: StatefulSet MUST support the RollingUpdate strategy to automatically replace Pods
|
||||
one at a time when the Pod template changes. The StatefulSet's status MUST indicate the
|
||||
CurrentRevision and UpdateRevision. If the template is changed to match a prior revision,
|
||||
StatefulSet MUST detect this as a rollback instead of creating a new revision.
|
||||
This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
|
||||
*/
|
||||
framework.ConformanceIt("should perform rolling updates and roll backs of template modifications", func() {
|
||||
By("Creating a new StatefulSet")
|
||||
ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
|
||||
sst := framework.NewStatefulSetTester(c)
|
||||
sst.SetHttpProbe(ss)
|
||||
ss, err := c.AppsV1beta1().StatefulSets(ns).Create(ss)
|
||||
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
|
||||
ss = sst.WaitForStatus(ss)
|
||||
@ -292,7 +305,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
|
||||
ss, pods = sst.WaitForRollingUpdate(ss)
|
||||
Expect(ss.Status.CurrentRevision).To(Equal(updateRevision),
|
||||
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal updste revision %s on update completion",
|
||||
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
|
||||
ss.Namespace,
|
||||
ss.Name,
|
||||
ss.Status.CurrentRevision,
|
||||
@ -325,7 +338,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
ss = sst.WaitForStatus(ss)
|
||||
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
|
||||
Expect(currentRevision).NotTo(Equal(updateRevision),
|
||||
"Current revision should not equal update revision during roll bakc")
|
||||
"Current revision should not equal update revision during roll back")
|
||||
Expect(priorRevision).To(Equal(updateRevision),
|
||||
"Prior revision should equal update revision during roll back")
|
||||
|
||||
@ -358,7 +371,14 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
}
|
||||
})
|
||||
|
||||
It("should perform canary updates and phased rolling updates of template modifications", func() {
|
||||
/*
|
||||
Testname: StatefulSet-RollingUpdatePartition
|
||||
Description: StatefulSet's RollingUpdate strategy MUST support the Partition parameter for
|
||||
canaries and phased rollouts. If a Pod is deleted while a rolling update is in progress,
|
||||
StatefulSet MUST restore the Pod without violating the Partition.
|
||||
This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
|
||||
*/
|
||||
framework.ConformanceIt("should perform canary updates and phased rolling updates of template modifications", func() {
|
||||
By("Creating a new StaefulSet")
|
||||
ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
|
||||
sst := framework.NewStatefulSetTester(c)
|
||||
@ -373,7 +393,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
}()}
|
||||
}(),
|
||||
}
|
||||
ss, err := c.AppsV1beta1().StatefulSets(ns).Create(ss)
|
||||
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
|
||||
ss = sst.WaitForStatus(ss)
|
||||
@ -422,7 +442,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
currentRevision))
|
||||
}
|
||||
|
||||
By("By performing a canary update")
|
||||
By("Performing a canary update")
|
||||
ss.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
|
||||
Type: apps.RollingUpdateStatefulSetStrategyType,
|
||||
RollingUpdate: func() *apps.RollingUpdateStatefulSetStrategy {
|
||||
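The anonymous-function literals in the hunk above build this structure less directly; as a sketch using the apps/v1 types (the helper name `withPartition` is illustrative), a partitioned canary strategy looks roughly like this:

package example

import (
	appsv1 "k8s.io/api/apps/v1"
)

// withPartition configures a canary rollout: Pods with an ordinal greater than
// or equal to the partition are updated to the new template, lower ordinals
// keep the old one. Setting partition == replicas updates nothing yet;
// lowering it step by step phases the rollout across the set.
func withPartition(ss *appsv1.StatefulSet, partition int32) *appsv1.StatefulSet {
	ss.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
		Type: appsv1.RollingUpdateStatefulSetStrategyType,
		RollingUpdate: &appsv1.RollingUpdateStatefulSetStrategy{
			Partition: &partition,
		},
	}
	return ss
}

Ordinals below the partition keep the old template even if their Pods are deleted and recreated, which is exactly the invariant the test checks.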
@ -567,6 +587,8 @@ var _ = SIGDescribe("StatefulSet", func() {

})

// Do not mark this as Conformance.
// The legacy OnDelete strategy only exists for backward compatibility with pre-v1 APIs.
It("should implement legacy replacement when the update strategy is OnDelete", func() {
By("Creating a new StatefulSet")
ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
@ -575,7 +597,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ss.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
Type: apps.OnDeleteStatefulSetStrategyType,
}
ss, err := c.AppsV1beta1().StatefulSets(ns).Create(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
@ -647,7 +669,14 @@ var _ = SIGDescribe("StatefulSet", func() {
}
})

It("Scaling should happen in predictable order and halt if any stateful pod is unhealthy", func() {
/*
Testname: StatefulSet-Scaling
Description: StatefulSet MUST create Pods in ascending order by ordinal index when scaling up,
and delete Pods in descending order when scaling down. Scaling up or down MUST pause if any
Pods belonging to the StatefulSet are unhealthy.
This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
*/
framework.ConformanceIt("Scaling should happen in predictable order and halt if any stateful pod is unhealthy", func() {
psLabels := klabels.Set(labels)
By("Initializing watcher for selector " + psLabels.String())
watcher, err := f.ClientSet.CoreV1().Pods(ns).Watch(metav1.ListOptions{
@ -659,7 +688,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
sst := framework.NewStatefulSetTester(c)
sst.SetHttpProbe(ss)
ss, err = c.AppsV1beta1().StatefulSets(ns).Create(ss)
ss, err = c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())

By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
@ -723,7 +752,12 @@ var _ = SIGDescribe("StatefulSet", func() {
Expect(err).NotTo(HaveOccurred())
})

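The StatefulSet-Scaling behaviour described above is triggered with nothing more than an update to Spec.Replicas. A sketch assuming the pre-context client-go signatures of this vendored snapshot; `scaleStatefulSet` is an illustrative helper, not the framework's:

package example

import (
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleStatefulSet changes the replica count through the apps/v1 client. With
// the default OrderedReady pod management policy the controller adds Pods in
// ascending ordinal order, removes them in descending order, and pauses in
// either direction while any owned Pod is not Running and Ready.
// A real caller would typically re-Get the object first to avoid update conflicts.
func scaleStatefulSet(c kubernetes.Interface, ns string, ss *appsv1.StatefulSet, replicas int32) (*appsv1.StatefulSet, error) {
	ss.Spec.Replicas = &replicas
	return c.AppsV1().StatefulSets(ns).Update(ss)
}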
It("Burst scaling should run to completion even with unhealthy pods", func() {
|
||||
/*
|
||||
Testname: StatefulSet-BurstScaling
|
||||
Description: StatefulSet MUST support the Parallel PodManagementPolicy for burst scaling.
|
||||
This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
|
||||
*/
|
||||
framework.ConformanceIt("Burst scaling should run to completion even with unhealthy pods", func() {
|
||||
psLabels := klabels.Set(labels)
|
||||
|
||||
By("Creating stateful set " + ssName + " in namespace " + ns)
|
||||
@ -731,7 +765,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
ss.Spec.PodManagementPolicy = apps.ParallelPodManagement
|
||||
sst := framework.NewStatefulSetTester(c)
|
||||
sst.SetHttpProbe(ss)
|
||||
ss, err := c.AppsV1beta1().StatefulSets(ns).Create(ss)
|
||||
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
|
||||
@ -761,7 +795,13 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
sst.WaitForStatusReplicas(ss, 0)
|
||||
})
|
||||
|
||||
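The Parallel policy that StatefulSet-BurstScaling exercises is a single field on the spec. A sketch with an illustrative helper name, using the apps/v1 types:

package example

import (
	appsv1 "k8s.io/api/apps/v1"
)

// withParallelPodManagement opts out of ordered startup and teardown: the
// controller may create or delete all Pods at once ("burst" scaling) instead
// of waiting for each ordinal to become Ready, which is why the test expects
// scaling to finish even while some Pods are deliberately unhealthy.
func withParallelPodManagement(ss *appsv1.StatefulSet) *appsv1.StatefulSet {
	ss.Spec.PodManagementPolicy = appsv1.ParallelPodManagement
	return ss
}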
It("Should recreate evicted statefulset", func() {
|
||||
/*
|
||||
Testname: StatefulSet-RecreateFailedPod
|
||||
Description: StatefulSet MUST delete and recreate Pods it owns that go into a Failed state,
|
||||
such as when they are rejected or evicted by a Node.
|
||||
This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
|
||||
*/
|
||||
framework.ConformanceIt("Should recreate evicted statefulset", func() {
|
||||
podName := "test-pod"
|
||||
statefulPodName := ssName + "-0"
|
||||
By("Looking for a node to schedule stateful set and pod")
|
||||
@ -793,7 +833,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
statefulPodContainer := &ss.Spec.Template.Spec.Containers[0]
|
||||
statefulPodContainer.Ports = append(statefulPodContainer.Ports, conflictingPort)
|
||||
ss.Spec.Template.Spec.NodeName = node.Name
|
||||
_, err = f.ClientSet.AppsV1beta1().StatefulSets(f.Namespace.Name).Create(ss)
|
||||
_, err = f.ClientSet.AppsV1().StatefulSets(f.Namespace.Name).Create(ss)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Waiting until pod " + podName + " will start running in namespace " + f.Namespace.Name)
|
||||
@ -844,7 +884,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
return nil
|
||||
}, framework.StatefulPodTimeout, 2*time.Second).Should(BeNil())
|
||||
})
|
||||
|
||||
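The `conflictingPort` appended above is what forces the stateful Pod into a Failed state: a standalone Pod and the StatefulSet's Pod declare the same HostPort and are pinned to the same node, so the second one is rejected. A sketch of such a port declaration; the name and number here are illustrative, not the test's actual values:

package example

import (
	v1 "k8s.io/api/core/v1"
)

// conflictingHostPort returns a container port that claims a host port. Adding
// it to two Pods scheduled onto the same node guarantees one of them fails,
// which lets the test observe the StatefulSet controller delete and recreate
// its Pod.
func conflictingHostPort() v1.ContainerPort {
	return v1.ContainerPort{
		Name:          "conflict",
		HostPort:      21017,
		ContainerPort: 21017,
	}
}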
/* Comment it for now until scale sub-resource is finalized in ref:pull/53679 for scale-sub resource specific comment.
It("should have a working scale subresource", func() {
By("Creating statefulset " + ssName + " in namespace " + ns)
ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, labels)
@ -858,6 +898,7 @@ var _ = SIGDescribe("StatefulSet", func() {
By("getting scale subresource")
scale := framework.NewStatefulSetScale(ss)
scaleResult := &appsv1beta2.Scale{}

err = c.AppsV1beta2().RESTClient().Get().AbsPath("/apis/apps/v1beta2").Namespace(ns).Resource("statefulsets").Name(ssName).SubResource("scale").Do().Into(scale)
if err != nil {
framework.Failf("Failed to get scale subresource: %v", err)
@ -881,6 +922,7 @@ var _ = SIGDescribe("StatefulSet", func() {
}
Expect(*(ss.Spec.Replicas)).To(Equal(int32(2)))
})
*/
})

framework.KubeDescribe("Deploy clustered applications [Feature:StatefulSet] [Slow]", func() {
@ -900,21 +942,29 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.DeleteAllStatefulSets(c, ns)
})

// Do not mark this as Conformance.
// StatefulSet Conformance should not be dependent on specific applications.
It("should creating a working zookeeper cluster", func() {
appTester.statefulPod = &zookeeperTester{tester: sst}
appTester.run()
})

// Do not mark this as Conformance.
// StatefulSet Conformance should not be dependent on specific applications.
It("should creating a working redis cluster", func() {
appTester.statefulPod = &redisTester{tester: sst}
appTester.run()
})

// Do not mark this as Conformance.
// StatefulSet Conformance should not be dependent on specific applications.
It("should creating a working mysql cluster", func() {
appTester.statefulPod = &mysqlGaleraTester{tester: sst}
appTester.run()
})

// Do not mark this as Conformance.
// StatefulSet Conformance should not be dependent on specific applications.
It("should creating a working CockroachDB cluster", func() {
appTester.statefulPod = &cockroachDBTester{tester: sst}
appTester.run()