vendor update for CSI 0.3.0

This commit is contained in:
gman
2018-07-18 16:47:22 +02:00
parent 6f484f92fc
commit 8ea659f0d5
6810 changed files with 438061 additions and 193861 deletions

View File

@ -25,20 +25,18 @@ go_library(
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/apps:go_default_library",
"//pkg/apis/batch:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/daemon:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/job:go_default_library",
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/controller/replicaset:go_default_library",
"//pkg/controller/replication:go_default_library",
"//pkg/kubectl:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/scheduler/schedulercache:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/util/pointer:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",

View File

@ -33,7 +33,6 @@ import (
"k8s.io/kubernetes/pkg/api/legacyscheme"
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/controller/job"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/test/e2e/framework"
)
@ -51,7 +50,7 @@ var _ = SIGDescribe("CronJob", func() {
successCommand := []string{"/bin/true"}
BeforeEach(func() {
framework.SkipIfMissingResource(f.ClientPool, CronJobGroupVersionResourceBeta, f.Namespace.Name)
framework.SkipIfMissingResource(f.DynamicClient, CronJobGroupVersionResourceBeta, f.Namespace.Name)
})
// multiple jobs running at once
@ -207,11 +206,7 @@ var _ = SIGDescribe("CronJob", func() {
By("Deleting the job")
job := cronJob.Status.Active[0]
reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
timeout := 1 * time.Minute
err = reaper.Stop(f.Namespace.Name, job.Name, timeout, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name))
By("Ensuring job was deleted")
_, err = framework.GetJob(f.ClientSet, f.Namespace.Name, job.Name)

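The removal above is an instance of a pattern repeated throughout this commit: synchronous deletion through a kubectl reaper is replaced by the framework's GC-backed helper. A minimal sketch of the before/after, using only the calls visible in this diff:

// Old style (removed): build a reaper for the resource kind and stop it synchronously.
reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
err = reaper.Stop(f.Namespace.Name, job.Name, 1*time.Minute, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())

// New style: delete via the external clientset and let the garbage collector
// clean up dependents; the helper blocks until the GC has finished.
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name))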
View File

@ -34,6 +34,7 @@ import (
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -202,7 +203,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
InternalClient: f.InternalClientset,
Name: rcName,
Namespace: ns,
Image: framework.GetPauseImageName(f.ClientSet),
Image: imageutils.GetPauseImageName(),
Replicas: numPods,
CreatedPods: &[]*v1.Pod{},
}
@ -257,7 +258,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
// that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC
// to the same size achieves this, because the scale operation advances the RC's sequence number
// and waits for it to be observed and reported back in the RC's status.
framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods, true)
framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods, true)
// Only check the keys, the pods can be different if the kubelet updated it.
// TODO: Can it really?
@ -288,9 +289,9 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
restarter.kill()
// This is a best-effort attempt to create pods while the scheduler is down,
// since we don't know exactly when it is restarted after the kill signal.
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods+5, false))
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, false))
restarter.waitUp()
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods+5, true))
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, true))
})
It("Kubelet should not restart containers across restart", func() {

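Two framework API changes drive the edits in this file: pause-image lookup no longer needs a clientset (it moved to the image utilities), and ScaleRC drops its internal-clientset parameter in favor of the ScalesGetter. A sketch of the updated call sites, matching the lines above:

// The pause image name is now resolved from the image registry helpers,
// with no clientset argument.
image := imageutils.GetPauseImageName()

// ScaleRC now takes the external clientset plus the scales getter; the
// f.InternalClientset argument is gone.
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods, true))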
View File

@ -24,6 +24,7 @@ import (
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@ -34,10 +35,8 @@ import (
"k8s.io/kubernetes/pkg/api/legacyscheme"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/daemon"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
@ -69,22 +68,19 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
Expect(err).NotTo(HaveOccurred(), "unable to dump DaemonSets")
if daemonsets != nil && len(daemonsets.Items) > 0 {
for _, ds := range daemonsets.Items {
By(fmt.Sprintf("Deleting DaemonSet %q with reaper", ds.Name))
dsReaper, err := kubectl.ReaperFor(extensionsinternal.Kind("DaemonSet"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
err = dsReaper.Stop(f.Namespace.Name, ds.Name, 0, nil)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Deleting DaemonSet %q", ds.Name))
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name))
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
}
}
if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
framework.Logf("daemonset: %s", runtime.EncodeOrDie(legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), daemonsets))
framework.Logf("daemonset: %s", runtime.EncodeOrDie(legacyscheme.Codecs.LegacyCodec(legacyscheme.Scheme.PrioritizedVersionsAllGroups()...), daemonsets))
} else {
framework.Logf("unable to dump daemonsets: %v", err)
}
if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
framework.Logf("pods: %s", runtime.EncodeOrDie(legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), pods))
framework.Logf("pods: %s", runtime.EncodeOrDie(legacyscheme.Codecs.LegacyCodec(legacyscheme.Scheme.PrioritizedVersionsAllGroups()...), pods))
} else {
framework.Logf("unable to dump pods: %v", err)
}
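The codec change above follows from the API registry being folded into the scheme: version enumeration moves from legacyscheme.Registry.EnabledVersions() to legacyscheme.Scheme.PrioritizedVersionsAllGroups(). A minimal sketch of building the legacy codec for debug dumps with the new call:

// Build a codec that can encode any group/version known to the scheme,
// then serialize the list for logging.
codec := legacyscheme.Codecs.LegacyCodec(legacyscheme.Scheme.PrioritizedVersionsAllGroups()...)
framework.Logf("daemonset: %s", runtime.EncodeOrDie(codec, daemonsets))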
@ -108,7 +104,12 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
Expect(err).NotTo(HaveOccurred())
})
It("should run and stop simple daemon", func() {
/*
Testname: DaemonSet-Creation
Description: A conformant Kubernetes distribution MUST support the creation of DaemonSets. When a DaemonSet
Pod is deleted, the DaemonSet controller MUST create a replacement Pod.
*/
framework.ConformanceIt("should run and stop simple daemon", func() {
label := map[string]string{daemonsetNameLabel: dsName}
By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
@ -130,7 +131,12 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")
})
It("should run and stop complex daemon", func() {
/*
Testname: DaemonSet-NodeSelection
Description: A conformant Kubernetes distribution MUST support DaemonSet Pod node selection via label
selectors.
*/
framework.ConformanceIt("should run and stop complex daemon", func() {
complexLabel := map[string]string{daemonsetNameLabel: dsName}
nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
framework.Logf("Creating daemon %q with a node selector", dsName)
@ -175,6 +181,8 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
Expect(err).NotTo(HaveOccurred())
})
// We defer adding this test to conformance pending the disposition of moving DaemonSet scheduling logic to the
// default scheduler.
It("should run and stop complex daemon with node affinity", func() {
complexLabel := map[string]string{daemonsetNameLabel: dsName}
nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
@ -223,7 +231,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
})
It("should retry creating failed daemon pods", func() {
/*
Testname: DaemonSet-FailedPodCreation
Description: A conformant Kubernetes distribution MUST create new DaemonSet Pods when they fail.
*/
framework.ConformanceIt("should retry creating failed daemon pods", func() {
label := map[string]string{daemonsetNameLabel: dsName}
By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName))
@ -245,13 +257,21 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
Expect(err).NotTo(HaveOccurred(), "error failing a daemon pod")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")
By("Wait for the failed daemon pod to be completely deleted.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, waitFailedDaemonPodDeleted(c, &pod))
Expect(err).NotTo(HaveOccurred(), "error waiting for the failed daemon pod to be completely deleted")
})
It("Should not update pod when spec was updated and update strategy is OnDelete", func() {
// This test should not be added to conformance. We will consider deprecating OnDelete when the
// extensions/v1beta1 and apps/v1beta1 APIs are removed.
It("should not update pod when spec was updated and update strategy is OnDelete", func() {
label := map[string]string{daemonsetNameLabel: dsName}
framework.Logf("Creating simple daemon set %s", dsName)
ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.OnDeleteDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
Expect(err).NotTo(HaveOccurred())
By("Check that daemon pods launch on every node of the cluster.")
@ -290,11 +310,14 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash)
})
It("Should update pod when spec was updated and update strategy is RollingUpdate", func() {
/*
Testname: DaemonSet-RollingUpdate
Description: A conformant Kubernetes distribution MUST support DaemonSet RollingUpdates.
*/
framework.ConformanceIt("should update pod when spec was updated and update strategy is RollingUpdate", func() {
label := map[string]string{daemonsetNameLabel: dsName}
templateGeneration := int64(999)
framework.Logf("Creating simple daemon set %s with templateGeneration %d", dsName, templateGeneration)
framework.Logf("Creating simple daemon set %s", dsName)
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
@ -304,10 +327,6 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
By(fmt.Sprintf("Make sure all daemon pods have correct template generation %d", templateGeneration))
err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
Expect(err).NotTo(HaveOccurred())
// Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
@ -321,16 +340,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
Expect(err).NotTo(HaveOccurred())
templateGeneration++
By("Check that daemon pods images are updated.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1))
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Make sure all daemon pods have correct template generation %d", templateGeneration))
err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
Expect(err).NotTo(HaveOccurred())
By("Check that daemon pods are still running on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
@ -345,7 +359,12 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash)
})
It("Should rollback without unnecessary restarts", func() {
/*
Testname: DaemonSet-Rollback
Description: A conformant Kubernetes distribution MUST support automated, minimally disruptive
rollback of updates to a DaemonSet.
*/
framework.ConformanceIt("should rollback without unnecessary restarts", func() {
// Skip clusters with only one node, where we cannot have a half-done DaemonSet rollout for this test
framework.SkipUnlessNodeCountIsAtLeast(2)
@ -416,29 +435,15 @@ func getDaemonSetImagePatch(containerName, containerImage string) string {
return fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":"%s","image":"%s"}]}}}}`, containerName, containerImage)
}
// deleteDaemonSetAndOrphan deletes the given DaemonSet and orphans all its dependents.
// It also checks that all dependents are orphaned, and the DaemonSet is deleted.
func deleteDaemonSetAndOrphan(c clientset.Interface, ds *apps.DaemonSet) {
trueVar := true
deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(ds.UID))
err := c.AppsV1().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions)
Expect(err).NotTo(HaveOccurred())
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsOrphaned(c, ds.Namespace, ds.Spec.Template.Labels))
Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be orphaned")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetHistoryOrphaned(c, ds.Namespace, ds.Spec.Template.Labels))
Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet history to be orphaned")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetDeleted(c, ds.Namespace, ds.Name))
Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet to be deleted")
}
func newDaemonSet(dsName, image string, label map[string]string) *apps.DaemonSet {
return &apps.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: dsName,
},
Spec: apps.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: label,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: label,
@ -670,111 +675,6 @@ func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *apps.DaemonS
}
}
func checkDaemonPodsTemplateGeneration(c clientset.Interface, ns string, label map[string]string, templateGeneration string) error {
pods := listDaemonPods(c, ns, label)
for _, pod := range pods.Items {
// We don't care about inactive pods
if !controller.IsPodActive(&pod) {
continue
}
podTemplateGeneration := pod.Labels[apps.DeprecatedTemplateGeneration]
if podTemplateGeneration != templateGeneration {
return fmt.Errorf("expected pod %s/%s template generation %s, but got %s", pod.Namespace, pod.Name, templateGeneration, podTemplateGeneration)
}
}
return nil
}
func checkDaemonSetDeleted(c clientset.Interface, ns, name string) func() (bool, error) {
return func() (bool, error) {
_, err := c.AppsV1().DaemonSets(ns).Get(name, metav1.GetOptions{})
if !apierrs.IsNotFound(err) {
return false, err
}
return true, nil
}
}
func checkDaemonSetPodsOrphaned(c clientset.Interface, ns string, label map[string]string) func() (bool, error) {
return func() (bool, error) {
pods := listDaemonPods(c, ns, label)
for _, pod := range pods.Items {
// This pod is orphaned only when controller ref is cleared
if controllerRef := metav1.GetControllerOf(&pod); controllerRef != nil {
return false, nil
}
}
return true, nil
}
}
func checkDaemonSetHistoryOrphaned(c clientset.Interface, ns string, label map[string]string) func() (bool, error) {
return func() (bool, error) {
histories := listDaemonHistories(c, ns, label)
for _, history := range histories.Items {
// This history is orphaned only when controller ref is cleared
if controllerRef := metav1.GetControllerOf(&history); controllerRef != nil {
return false, nil
}
}
return true, nil
}
}
func checkDaemonSetPodsAdopted(c clientset.Interface, ns string, dsUID types.UID, label map[string]string) func() (bool, error) {
return func() (bool, error) {
pods := listDaemonPods(c, ns, label)
for _, pod := range pods.Items {
// This pod is adopted only when its controller ref is updated
if controllerRef := metav1.GetControllerOf(&pod); controllerRef == nil || controllerRef.UID != dsUID {
return false, nil
}
}
return true, nil
}
}
func checkDaemonSetHistoryAdopted(c clientset.Interface, ns string, dsUID types.UID, label map[string]string) func() (bool, error) {
return func() (bool, error) {
histories := listDaemonHistories(c, ns, label)
for _, history := range histories.Items {
// This history is adopted only when its controller ref is updated
if controllerRef := metav1.GetControllerOf(&history); controllerRef == nil || controllerRef.UID != dsUID {
return false, nil
}
}
return true, nil
}
}
func waitDaemonSetAdoption(c clientset.Interface, ds *apps.DaemonSet, podPrefix string, podTemplateGeneration int64) {
ns := ds.Namespace
label := ds.Spec.Template.Labels
err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsAdopted(c, ns, ds.UID, label))
Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be adopted")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetHistoryAdopted(c, ns, ds.UID, label))
Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet history to be adopted")
framework.Logf("Make sure no daemon pod updated its template generation %d", podTemplateGeneration)
err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(podTemplateGeneration))
Expect(err).NotTo(HaveOccurred())
framework.Logf("Make sure no pods are recreated by looking at their names")
err = checkDaemonSetPodsName(c, ns, podPrefix, label)
Expect(err).NotTo(HaveOccurred())
}
func checkDaemonSetPodsName(c clientset.Interface, ns, prefix string, label map[string]string) error {
pods := listDaemonPods(c, ns, label)
for _, pod := range pods.Items {
if !strings.HasPrefix(pod.Name, prefix) {
return fmt.Errorf("expected pod %s name to be prefixed with %q", pod.Name, prefix)
}
}
return nil
}
func checkDaemonSetPodsLabels(podList *v1.PodList, hash string) {
for _, pod := range podList.Items {
podHash := pod.Labels[apps.DefaultDaemonSetUniqueLabelKey]
@ -830,3 +730,15 @@ func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *a
Expect(curHistory).NotTo(BeNil())
return curHistory
}
func waitFailedDaemonPodDeleted(c clientset.Interface, pod *v1.Pod) func() (bool, error) {
return func() (bool, error) {
if _, err := c.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err != nil {
if errors.IsNotFound(err) {
return true, nil
}
return false, fmt.Errorf("failed to get failed daemon pod %q: %v", pod.Name, err)
}
return false, nil
}
}
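waitFailedDaemonPodDeleted returns a closure of type func() (bool, error), i.e. a wait.ConditionFunc, so the test step added above can hand it directly to the poller; polling ends successfully on NotFound and propagates any other Get error:

// Poll until the failed daemon pod is fully gone or the timeout expires.
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, waitFailedDaemonPodDeleted(c, &pod))
Expect(err).NotTo(HaveOccurred(), "error waiting for the failed daemon pod to be completely deleted")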

View File

@ -25,6 +25,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
@ -35,10 +36,8 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/kubectl"
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
"k8s.io/kubernetes/test/e2e/framework"
testutil "k8s.io/kubernetes/test/utils"
@ -50,7 +49,7 @@ const (
)
var (
nilRs *extensions.ReplicaSet
nilRs *apps.ReplicaSet
)
var _ = SIGDescribe("Deployment", func() {
@ -100,7 +99,7 @@ var _ = SIGDescribe("Deployment", func() {
})
func failureTrap(c clientset.Interface, ns string) {
deployments, err := c.ExtensionsV1beta1().Deployments(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
deployments, err := c.AppsV1().Deployments(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil {
framework.Logf("Could not list Deployments in namespace %q: %v", ns, err)
return
@ -109,7 +108,7 @@ func failureTrap(c clientset.Interface, ns string) {
d := deployments.Items[i]
framework.Logf(spew.Sprintf("Deployment %q:\n%+v\n", d.Name, d))
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(&d, c.ExtensionsV1beta1())
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(&d, c.AppsV1())
if err != nil {
framework.Logf("Could not list ReplicaSets for Deployment %q: %v", d.Name, err)
return
@ -126,7 +125,7 @@ func failureTrap(c clientset.Interface, ns string) {
return
}
framework.Logf("Log out all the ReplicaSets if there is no deployment created")
rss, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
rss, err := c.AppsV1().ReplicaSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil {
framework.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err)
return
@ -158,27 +157,22 @@ func newDeploymentRollback(name string, annotations map[string]string, revision
}
}
func stopDeployment(c clientset.Interface, internalClient internalclientset.Interface, ns, deploymentName string) {
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
func stopDeployment(c clientset.Interface, ns, deploymentName string) {
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
framework.Logf("Deleting deployment %s", deploymentName)
reaper, err := kubectl.ReaperFor(extensionsinternal.Kind("Deployment"), internalClient)
Expect(err).NotTo(HaveOccurred())
timeout := 1 * time.Minute
err = reaper.Stop(ns, deployment.Name, timeout, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(c, appsinternal.Kind("Deployment"), ns, deployment.Name))
framework.Logf("Ensuring deployment %s was deleted", deploymentName)
_, err = c.ExtensionsV1beta1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
_, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
Expect(err).To(HaveOccurred())
Expect(errors.IsNotFound(err)).To(BeTrue())
framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
Expect(err).NotTo(HaveOccurred())
options := metav1.ListOptions{LabelSelector: selector.String()}
rss, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(options)
rss, err := c.AppsV1().ReplicaSets(ns).List(options)
Expect(err).NotTo(HaveOccurred())
Expect(rss.Items).Should(HaveLen(0))
framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
@ -201,15 +195,14 @@ func stopDeployment(c clientset.Interface, internalClient internalclientset.Inte
func testDeleteDeployment(f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet
internalClient := f.InternalClientset
deploymentName := "test-new-deployment"
podLabels := map[string]string{"name": NginxImageName}
replicas := int32(1)
framework.Logf("Creating simple deployment %s", deploymentName)
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
deploy, err := c.AppsV1().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
// Wait for it to be updated to revision 1
@ -219,12 +212,12 @@ func testDeleteDeployment(f *framework.Framework) {
err = framework.WaitForDeploymentComplete(c, deploy)
Expect(err).NotTo(HaveOccurred())
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
Expect(err).NotTo(HaveOccurred())
Expect(newRS).NotTo(Equal(nilRs))
stopDeployment(c, internalClient, ns, deploymentName)
stopDeployment(c, ns, deploymentName)
}
func testRollingUpdateDeployment(f *framework.Framework) {
@ -245,7 +238,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
rs := newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage)
rs.Annotations = annotations
framework.Logf("Creating replica set %q (going to be adopted)", rs.Name)
_, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(rs)
_, err := c.AppsV1().ReplicaSets(ns).Create(rs)
Expect(err).NotTo(HaveOccurred())
// Verify that the required pods have come up.
err = framework.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
@ -254,8 +247,8 @@ func testRollingUpdateDeployment(f *framework.Framework) {
// Create a deployment to delete nginx pods and instead bring up redis pods.
deploymentName := "test-rolling-update-deployment"
framework.Logf("Creating deployment %q", deploymentName)
d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType)
deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType)
deploy, err := c.AppsV1().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
// Wait for it to be updated to revision 3546343826724305833.
@ -269,15 +262,11 @@ func testRollingUpdateDeployment(f *framework.Framework) {
// There should be 1 old RS (nginx-controller, which is adopted)
framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.ExtensionsV1beta1())
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
Expect(err).NotTo(HaveOccurred())
Expect(len(allOldRSs)).Should(Equal(1))
// The old RS should contain pod-template-hash in its selector, label, and template label
Expect(len(allOldRSs[0].Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
Expect(len(allOldRSs[0].Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
Expect(len(allOldRSs[0].Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
}
func testRecreateDeployment(f *framework.Framework) {
@ -287,8 +276,8 @@ func testRecreateDeployment(f *framework.Framework) {
// Create a deployment that brings up redis pods.
deploymentName := "test-recreate-deployment"
framework.Logf("Creating deployment %q", deploymentName)
d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, extensions.RecreateDeploymentStrategyType)
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, apps.RecreateDeploymentStrategyType)
deployment, err := c.AppsV1().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
// Wait for it to be updated to revision 1
@ -301,7 +290,7 @@ func testRecreateDeployment(f *framework.Framework) {
// Update deployment to delete redis pods and bring up nginx pods.
framework.Logf("Triggering a new rollout for deployment %q", deploymentName)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *extensions.Deployment) {
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = NginxImageName
update.Spec.Template.Spec.Containers[0].Image = NginxImage
})
@ -324,7 +313,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
rsName := "test-cleanup-controller"
replicas := int32(1)
revisionHistoryLimit := utilpointer.Int32Ptr(0)
_, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage))
_, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage))
Expect(err).NotTo(HaveOccurred())
// Verify that the required pods have come up.
@ -371,9 +360,9 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
}
}
}()
d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType)
d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType)
d.Spec.RevisionHistoryLimit = revisionHistoryLimit
_, err = c.ExtensionsV1beta1().Deployments(ns).Create(d)
_, err = c.AppsV1().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName))
@ -395,7 +384,7 @@ func testRolloverDeployment(f *framework.Framework) {
rsName := "test-rollover-controller"
rsReplicas := int32(1)
_, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage))
_, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage))
Expect(err).NotTo(HaveOccurred())
// Verify that the required pods have come up.
err = framework.VerifyPodsRunning(c, ns, podName, false, rsReplicas)
@ -410,19 +399,19 @@ func testRolloverDeployment(f *framework.Framework) {
deploymentName, deploymentImageName := "test-rollover-deployment", "redis-slave"
deploymentReplicas := int32(1)
deploymentImage := "gcr.io/google_samples/gb-redisslave:nonexistent"
deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
deploymentStrategyType := apps.RollingUpdateDeploymentStrategyType
framework.Logf("Creating deployment %q", deploymentName)
newDeployment := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{
newDeployment.Spec.Strategy.RollingUpdate = &apps.RollingUpdateDeployment{
MaxUnavailable: intOrStrP(0),
MaxSurge: intOrStrP(1),
}
newDeployment.Spec.MinReadySeconds = int32(10)
_, err = c.ExtensionsV1beta1().Deployments(ns).Create(newDeployment)
_, err = c.AppsV1().Deployments(ns).Create(newDeployment)
Expect(err).NotTo(HaveOccurred())
// Verify that the pods were scaled up and down as expected.
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
@ -433,17 +422,17 @@ func testRolloverDeployment(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred())
framework.Logf("Ensure that both replica sets have 1 created replica")
oldRS, err := c.ExtensionsV1beta1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
oldRS, err := c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
ensureReplicas(oldRS, int32(1))
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
Expect(err).NotTo(HaveOccurred())
ensureReplicas(newRS, int32(1))
// The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up redis pods.
framework.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName)
updatedDeploymentImageName, updatedDeploymentImage := RedisImageName, RedisImage
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *extensions.Deployment) {
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
})
@ -464,16 +453,16 @@ func testRolloverDeployment(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred())
framework.Logf("Ensure that both old replica sets have no replicas")
oldRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
oldRS, err = c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
ensureReplicas(oldRS, int32(0))
// Not really the new replica set anymore but we GET by name so that's fine.
newRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{})
newRS, err = c.AppsV1().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
ensureReplicas(newRS, int32(0))
}
func ensureReplicas(rs *extensions.ReplicaSet, replicas int32) {
func ensureReplicas(rs *apps.ReplicaSet, replicas int32) {
Expect(*rs.Spec.Replicas).Should(Equal(replicas))
Expect(rs.Status.Replicas).Should(Equal(replicas))
}
@ -493,12 +482,12 @@ func testRollbackDeployment(f *framework.Framework) {
deploymentName, deploymentImageName := "test-rollback-deployment", NginxImageName
deploymentReplicas := int32(1)
deploymentImage := NginxImage
deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
deploymentStrategyType := apps.RollingUpdateDeploymentStrategyType
framework.Logf("Creating deployment %s", deploymentName)
d := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
createAnnotation := map[string]string{"action": "create", "author": "node"}
d.Annotations = createAnnotation
deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
deploy, err := c.AppsV1().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
// Wait for it to be updated to revision 1
@ -516,7 +505,7 @@ func testRollbackDeployment(f *framework.Framework) {
updatedDeploymentImage := RedisImage
updatedDeploymentImageName := RedisImageName
updateAnnotation := map[string]string{"action": "update", "log": "I need to update it"}
deployment, err := framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
deployment, err := framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
update.Annotations = updateAnnotation
@ -619,7 +608,7 @@ func testRollbackDeployment(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred())
}
func randomScale(d *extensions.Deployment, i int) {
func randomScale(d *apps.Deployment, i int) {
switch r := rand.Float32(); {
case r < 0.3:
framework.Logf("%02d: scaling up", i)
@ -644,12 +633,12 @@ func testIterativeDeployments(f *framework.Framework) {
// Create a nginx deployment.
deploymentName := "nginx"
thirty := int32(30)
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
d.Spec.ProgressDeadlineSeconds = &thirty
d.Spec.RevisionHistoryLimit = &two
d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
framework.Logf("Creating deployment %q", deploymentName)
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
deployment, err := c.AppsV1().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
iterations := 20
@ -662,7 +651,7 @@ func testIterativeDeployments(f *framework.Framework) {
case n < 0.2:
// trigger a new deployment
framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
newEnv := v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)}
update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
randomScale(update, i)
@ -672,16 +661,18 @@ func testIterativeDeployments(f *framework.Framework) {
case n < 0.4:
// rollback to the previous version
framework.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
rollbackTo := &extensions.RollbackConfig{Revision: 0}
update.Spec.RollbackTo = rollbackTo
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
if update.Annotations == nil {
update.Annotations = make(map[string]string)
}
update.Annotations[apps.DeprecatedRollbackTo] = "0"
})
Expect(err).NotTo(HaveOccurred())
case n < 0.6:
// just scaling
framework.Logf("%02d: scaling deployment %q", i, deployment.Name)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
randomScale(update, i)
})
Expect(err).NotTo(HaveOccurred())
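The rollback branch above changes because apps/v1 has no spec.rollbackTo field; the request is now expressed through the deprecated annotation, which the controller reads and acts on. A sketch contrasting the two forms (the old field shown for reference only):

deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
	// Old (extensions/v1beta1): a typed spec field.
	//   update.Spec.RollbackTo = &extensions.RollbackConfig{Revision: 0}
	// New (apps/v1): the deprecated annotation carries the target revision;
	// "0" asks for a rollback to the previous revision.
	if update.Annotations == nil {
		update.Annotations = make(map[string]string)
	}
	update.Annotations[apps.DeprecatedRollbackTo] = "0"
})
Expect(err).NotTo(HaveOccurred())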
@ -690,14 +681,14 @@ func testIterativeDeployments(f *framework.Framework) {
// toggling the deployment
if deployment.Spec.Paused {
framework.Logf("%02d: pausing deployment %q", i, deployment.Name)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
update.Spec.Paused = true
randomScale(update, i)
})
Expect(err).NotTo(HaveOccurred())
} else {
framework.Logf("%02d: resuming deployment %q", i, deployment.Name)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
update.Spec.Paused = false
randomScale(update, i)
})
@ -731,10 +722,10 @@ func testIterativeDeployments(f *framework.Framework) {
}
// unpause the deployment if we end up pausing it
deployment, err = c.ExtensionsV1beta1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
deployment, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
if deployment.Spec.Paused {
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
update.Spec.Paused = false
})
}
@ -746,7 +737,7 @@ func testIterativeDeployments(f *framework.Framework) {
Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())
framework.Logf("Checking deployment %q for a complete condition", deploymentName)
Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, extensions.DeploymentProgressing)).NotTo(HaveOccurred())
Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing)).NotTo(HaveOccurred())
}
func testDeploymentsControllerRef(f *framework.Framework) {
@ -757,8 +748,8 @@ func testDeploymentsControllerRef(f *framework.Framework) {
framework.Logf("Creating Deployment %q", deploymentName)
podLabels := map[string]string{"name": NginxImageName}
replicas := int32(1)
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
deploy, err := c.AppsV1().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForDeploymentComplete(c, deploy)
Expect(err).NotTo(HaveOccurred())
@ -784,8 +775,8 @@ func testDeploymentsControllerRef(f *framework.Framework) {
deploymentName = "test-adopt-deployment"
framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
d = framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
deploy, err = c.ExtensionsV1beta1().Deployments(ns).Create(d)
d = framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
deploy, err = c.AppsV1().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForDeploymentComplete(c, deploy)
Expect(err).NotTo(HaveOccurred())
@ -814,13 +805,13 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Create a nginx deployment.
deploymentName := "nginx-deployment"
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
d.Spec.Strategy.RollingUpdate = new(extensions.RollingUpdateDeployment)
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
d.Spec.Strategy.RollingUpdate = new(apps.RollingUpdateDeployment)
d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
framework.Logf("Creating deployment %q", deploymentName)
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
deployment, err := c.AppsV1().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Waiting for observed generation %d", deployment.Generation)
@ -834,13 +825,13 @@ func testProportionalScalingDeployment(f *framework.Framework) {
framework.Logf("Waiting for deployment %q to complete", deployment.Name)
Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())
firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
Expect(err).NotTo(HaveOccurred())
// Update the deployment with a non-existent image so that the new replica set
// will be blocked to simulate a partial rollout.
framework.Logf("Updating deployment %q with a non-existent image", deploymentName)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Image = "nginx:404"
})
Expect(err).NotTo(HaveOccurred())
@ -863,13 +854,13 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// The desired replicas wait makes sure that the RS controller has created expected number of pods.
framework.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
firstRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForReplicaSetDesiredReplicas(c.ExtensionsV1beta1(), firstRS)
err = framework.WaitForReplicaSetDesiredReplicas(c.AppsV1(), firstRS)
Expect(err).NotTo(HaveOccurred())
// Checking state of second rollout's replicaset.
secondRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
secondRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
Expect(err).NotTo(HaveOccurred())
maxSurge, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), false)
@ -886,9 +877,9 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// The desired replicas wait makes sure that the RS controller has created expected number of pods.
framework.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
secondRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForReplicaSetDesiredReplicas(c.ExtensionsV1beta1(), secondRS)
err = framework.WaitForReplicaSetDesiredReplicas(c.AppsV1(), secondRS)
Expect(err).NotTo(HaveOccurred())
// Check the deployment's minimum availability.
@ -900,15 +891,15 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Scale the deployment to 30 replicas.
newReplicas = int32(30)
framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
update.Spec.Replicas = &newReplicas
})
Expect(err).NotTo(HaveOccurred())
framework.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName)
firstRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
secondRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas.
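The arithmetic in that comment is proportional scaling: each ReplicaSet receives a share of the added replicas proportional to its current size. A worked sketch with the test's numbers (variable names are illustrative):

oldDesired := int32(8)    // first rollout's ReplicaSet after the partial rollout
totalDesired := int32(13) // 8 (first RS) + 5 (second, image-blocked RS)
added := int32(30 - 10)   // replicas added by the scale-up
// Integer math: 8 + 20*8/13 = 8 + 12 = 20 replicas for the first RS.
firstShare := oldDesired + added*oldDesired/totalDesired
_ = firstShare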
@ -946,18 +937,18 @@ func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label m
}
}
func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *extensions.ReplicaSetList {
func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *apps.ReplicaSetList {
selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
rsList, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(options)
rsList, err := c.AppsV1().ReplicaSets(ns).List(options)
Expect(err).NotTo(HaveOccurred())
Expect(len(rsList.Items)).To(BeNumerically(">", 0))
return rsList
}
func orphanDeploymentReplicaSets(c clientset.Interface, d *extensions.Deployment) error {
func orphanDeploymentReplicaSets(c clientset.Interface, d *apps.Deployment) error {
trueVar := true
deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(d.UID))
return c.ExtensionsV1beta1().Deployments(d.Namespace).Delete(d.Name, deleteOptions)
return c.AppsV1().Deployments(d.Namespace).Delete(d.Name, deleteOptions)
}

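Most of this file is the mechanical extensions/v1beta1-to-apps/v1 migration: typed objects switch from extensions.* to apps.*, and every client call switches from c.ExtensionsV1beta1() to c.AppsV1(). A condensed sketch of the pattern as it appears above:

// Create, read, and mutate a Deployment through the apps/v1 client.
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
deploy, err := c.AppsV1().Deployments(ns).Create(d)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForDeploymentComplete(c, deploy)
Expect(err).NotTo(HaveOccurred())

// Mutating helpers now take *apps.Deployment callbacks.
deployment, err := framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
	update.Spec.Template.Spec.Containers[0].Image = NginxImage
})
Expect(err).NotTo(HaveOccurred())
framework.Logf("updated deployment %q", deployment.Name)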
View File

@ -24,7 +24,6 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
@ -111,11 +110,7 @@ var _ = SIGDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())
By("delete a job")
reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
timeout := 1 * time.Minute
err = reaper.Stop(f.Namespace.Name, job.Name, timeout, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name))
By("Ensuring job was deleted")
_, err = framework.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
@ -179,7 +174,8 @@ var _ = SIGDescribe("Job", func() {
It("should exceed backoffLimit", func() {
By("Creating a job")
job := framework.NewTestJob("fail", "backofflimit", v1.RestartPolicyNever, 1, 1, nil, 0)
backoff := 1
job := framework.NewTestJob("fail", "backofflimit", v1.RestartPolicyNever, 1, 1, nil, int32(backoff))
job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
By("Ensuring job exceed backofflimit")
@ -187,11 +183,18 @@ var _ = SIGDescribe("Job", func() {
err = framework.WaitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, framework.JobTimeout, "BackoffLimitExceeded")
Expect(err).NotTo(HaveOccurred())
By("Checking that only one pod created and status is failed")
By(fmt.Sprintf("Checking that %d pods were created and have a failed status", backoff+1))
pods, err := framework.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
Expect(err).NotTo(HaveOccurred())
Expect(pods.Items).To(HaveLen(1))
pod := pods.Items[0]
Expect(pod.Status.Phase).To(Equal(v1.PodFailed))
// Expect(pods.Items).To(HaveLen(backoff + 1))
// Due to NumRequeues not being stable enough, especially with failed status
// updates, we need to allow more than backoff+1 pods.
// TODO: revert this back to the check above once https://github.com/kubernetes/kubernetes/issues/64787 is fixed
if len(pods.Items) < backoff+1 {
framework.Failf("Not enough pods created: expected at least %d, got %#v", backoff+1, pods.Items)
}
for _, pod := range pods.Items {
Expect(pod.Status.Phase).To(Equal(v1.PodFailed))
}
})
})

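The reworked backoff test above creates the Job with an explicit backoffLimit and then asserts only a lower bound on the number of failed pods, since requeue counting is not stable (kubernetes/kubernetes#64787). The essential shape:

backoff := 1
job := framework.NewTestJob("fail", "backofflimit", v1.RestartPolicyNever, 1, 1, nil, int32(backoff))
// ...create the Job and wait for the "BackoffLimitExceeded" condition...

pods, err := framework.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
Expect(err).NotTo(HaveOccurred())
// At least backoff+1 pods must exist, and every one of them must have failed.
if len(pods.Items) < backoff+1 {
	framework.Failf("Not enough pods created: expected at least %d, got %#v", backoff+1, pods.Items)
}
for _, pod := range pods.Items {
	Expect(pod.Status.Phase).To(Equal(v1.PodFailed))
}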
View File

@ -233,9 +233,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-net"
common.NewSVCByName(c, ns, name)
replicas := int32(framework.TestContext.CloudConfig.NumNodes)
numNodes, err := framework.NumberOfRegisteredNodes(f.ClientSet)
framework.ExpectNoError(err)
replicas := int32(numNodes)
common.NewRCByName(c, ns, name, replicas, nil)
err := framework.VerifyPods(c, ns, name, true, replicas)
err = framework.VerifyPods(c, ns, name, true, replicas)
Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")
By("choose a node with at least one pod - we will block some network traffic on this node")
@ -298,9 +300,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
gracePeriod := int64(30)
common.NewSVCByName(c, ns, name)
replicas := int32(framework.TestContext.CloudConfig.NumNodes)
numNodes, err := framework.NumberOfRegisteredNodes(f.ClientSet)
framework.ExpectNoError(err)
replicas := int32(numNodes)
common.NewRCByName(c, ns, name, replicas, &gracePeriod)
err := framework.VerifyPods(c, ns, name, true, replicas)
err = framework.VerifyPods(c, ns, name, true, replicas)
Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")
By("choose a node with at least one pod - we will block some network traffic on this node")
@ -371,10 +375,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
pst := framework.NewStatefulSetTester(c)
nn := framework.TestContext.CloudConfig.NumNodes
nodeNames, err := framework.CheckNodesReady(f.ClientSet, framework.NodeReadyInitialTimeout, nn)
nn, err := framework.NumberOfRegisteredNodes(f.ClientSet)
framework.ExpectNoError(err)
common.RestartNodes(f.ClientSet, nodeNames)
nodes, err := framework.CheckNodesReady(f.ClientSet, nn, framework.NodeReadyInitialTimeout)
framework.ExpectNoError(err)
common.RestartNodes(f.ClientSet, nodes)
By("waiting for pods to be running again")
pst.WaitForRunningAndReady(*ps.Spec.Replicas, ps)

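Node counts in this file now come from the live cluster instead of the static test context, and CheckNodesReady's signature changed: the expected count now precedes the timeout, and the ready nodes are returned for the restart helper. The updated sequence, as used above:

// Ask the API server how many nodes are registered rather than trusting
// CloudConfig.NumNodes from the test context.
numNodes, err := framework.NumberOfRegisteredNodes(f.ClientSet)
framework.ExpectNoError(err)

// New argument order: expected count, then timeout; ready nodes come back.
nodes, err := framework.CheckNodesReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
framework.ExpectNoError(err)
common.RestartNodes(f.ClientSet, nodes)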
View File

@ -38,6 +38,12 @@ import (
var _ = SIGDescribe("ReplicationController", func() {
f := framework.NewDefaultFramework("replication-controller")
/*
Release : v1.9
Testname: Replication Controller, run basic image
Description: Replication Controller MUST create a Pod with a basic image and MUST run the service with the provided image. The image MUST be tested by dialing into the service, which listens over TCP, UDP, and HTTP.
*/
framework.ConformanceIt("should serve a basic image on each replica with a public image ", func() {
TestReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage)
})

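The conformance promotions in this commit all follow one convention: a structured comment (Release, Testname, Description) sits directly above the spec, and It is replaced by framework.ConformanceIt so the test is tagged for the conformance suite. A schematic sketch (angle-bracketed parts are placeholders):

/*
	Release : v1.9
	Testname: <Area>, <behavior under test>
	Description: <What a conformant distribution MUST or SHOULD do.>
*/
framework.ConformanceIt("should <observable behavior>", func() {
	// body unchanged from the former It(...) block
})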
View File

@ -20,8 +20,8 @@ import (
"fmt"
"time"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -36,13 +36,17 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
)
func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageName string, image string) *extensions.ReplicaSet {
func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageName string, image string) *apps.ReplicaSet {
zero := int64(0)
return &extensions.ReplicaSet{
return &apps.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: rsName,
Name: rsName,
Labels: rsPodLabels,
},
Spec: extensions.ReplicaSetSpec{
Spec: apps.ReplicaSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: rsPodLabels,
},
Replicas: &replicas,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
@ -78,6 +82,11 @@ func newPodQuota(name, number string) *v1.ResourceQuota {
var _ = SIGDescribe("ReplicaSet", func() {
f := framework.NewDefaultFramework("replicaset")
/*
Release : v1.9
Testname: Replica Set, run basic image
Description: Create a ReplicaSet with a Pod and a single Container. Make sure that the Pod is running. The Pod SHOULD send a valid response when queried.
*/
framework.ConformanceIt("should serve a basic image on each replica with a public image ", func() {
testReplicaSetServeImageOrFail(f, "basic", framework.ServeHostnameImage)
})
@ -111,7 +120,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
framework.Logf("Creating ReplicaSet %s", name)
newRS := newRS(name, replicas, map[string]string{"name": name}, name, image)
newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
_, err := f.ClientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).Create(newRS)
_, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(newRS)
Expect(err).NotTo(HaveOccurred())
// Check that pods for the new RS were created.
@ -187,14 +196,14 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", name))
rs := newRS(name, 3, map[string]string{"name": name}, NginxImageName, NginxImage)
rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Create(rs)
rs, err = c.AppsV1().ReplicaSets(namespace).Create(rs)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Checking replica set %q has the desired failure condition set", name))
generation := rs.Generation
conditions := rs.Status.Conditions
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
rs, err = c.AppsV1().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
@ -204,7 +213,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
}
conditions = rs.Status.Conditions
cond := replicaset.GetCondition(rs.Status, extensions.ReplicaSetReplicaFailure)
cond := replicaset.GetCondition(rs.Status, apps.ReplicaSetReplicaFailure)
return cond != nil, nil
})
@ -214,7 +223,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Scaling down replica set %q to satisfy pod quota", name))
rs, err = framework.UpdateReplicaSetWithRetries(c, namespace, name, func(update *extensions.ReplicaSet) {
rs, err = framework.UpdateReplicaSetWithRetries(c, namespace, name, func(update *apps.ReplicaSet) {
x := int32(2)
update.Spec.Replicas = &x
})
@ -224,7 +233,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
generation = rs.Generation
conditions = rs.Status.Conditions
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
rs, err = c.AppsV1().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
@ -234,7 +243,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
}
conditions = rs.Status.Conditions
cond := replicaset.GetCondition(rs.Status, extensions.ReplicaSetReplicaFailure)
cond := replicaset.GetCondition(rs.Status, apps.ReplicaSetReplicaFailure)
return cond == nil, nil
})
if err == wait.ErrWaitTimeout {
@ -267,7 +276,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
replicas := int32(1)
rsSt := newRS(name, replicas, map[string]string{"name": name}, name, NginxImageName)
rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}}
rs, err := f.ClientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).Create(rsSt)
rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(rsSt)
Expect(err).NotTo(HaveOccurred())
By("Then the orphan pod is adopted")

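Unlike extensions/v1beta1, apps/v1 does not default spec.selector from the template labels, which is why newRS now sets object labels and an explicit LabelSelector. A sketch of the minimal apps/v1 ReplicaSet shape implied by the constructor above:

rs := &apps.ReplicaSet{
	ObjectMeta: metav1.ObjectMeta{Name: rsName, Labels: rsPodLabels},
	Spec: apps.ReplicaSetSpec{
		// apps/v1 requires an explicit selector matching the template labels.
		Selector: &metav1.LabelSelector{MatchLabels: rsPodLabels},
		Replicas: &replicas,
		Template: v1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{Labels: rsPodLabels},
			Spec:       v1.PodSpec{Containers: []v1.Container{{Name: imageName, Image: image}}},
		},
	},
}
rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(rs)
Expect(err).NotTo(HaveOccurred())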
View File

@ -249,12 +249,9 @@ var _ = SIGDescribe("StatefulSet", func() {
})
/*
Testname: StatefulSet-RollingUpdate
Description: StatefulSet MUST support the RollingUpdate strategy to automatically replace Pods
one at a time when the Pod template changes. The StatefulSet's status MUST indicate the
CurrentRevision and UpdateRevision. If the template is changed to match a prior revision,
StatefulSet MUST detect this as a rollback instead of creating a new revision.
This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
Release : v1.9
Testname: StatefulSet, Rolling Update
Description: StatefulSet MUST support the RollingUpdate strategy to automatically replace Pods one at a time when the Pod template changes. The StatefulSet's status MUST indicate the CurrentRevision and UpdateRevision. If the template is changed to match a prior revision, StatefulSet MUST detect this as a rollback instead of creating a new revision. This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
*/
framework.ConformanceIt("should perform rolling updates and roll backs of template modifications", func() {
By("Creating a new StatefulSet")
@ -372,11 +369,9 @@ var _ = SIGDescribe("StatefulSet", func() {
})
/*
Testname: StatefulSet-RollingUpdatePartition
Description: StatefulSet's RollingUpdate strategy MUST support the Partition parameter for
canaries and phased rollouts. If a Pod is deleted while a rolling update is in progress,
StatefulSet MUST restore the Pod without violating the Partition.
This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
Release : v1.9
Testname: StatefulSet, Rolling Update with Partition
Description: StatefulSet's RollingUpdate strategy MUST support the Partition parameter for canaries and phased rollouts. If a Pod is deleted while a rolling update is in progress, StatefulSet MUST restore the Pod without violating the Partition. This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
*/
framework.ConformanceIt("should perform canary updates and phased rolling updates of template modifications", func() {
By("Creating a new StatefulSet")
@ -670,11 +665,9 @@ var _ = SIGDescribe("StatefulSet", func() {
})
/*
Testname: StatefulSet-Scaling
Description: StatefulSet MUST create Pods in ascending order by ordinal index when scaling up,
and delete Pods in descending order when scaling down. Scaling up or down MUST pause if any
Pods belonging to the StatefulSet are unhealthy.
This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
Release : v1.9
Testname: StatefulSet, Scaling
Description: StatefulSet MUST create Pods in ascending order by ordinal index when scaling up, and delete Pods in descending order when scaling down. Scaling up or down MUST pause if any Pods belonging to the StatefulSet are unhealthy. This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
*/
framework.ConformanceIt("Scaling should happen in predictable order and halt if any stateful pod is unhealthy", func() {
psLabels := klabels.Set(labels)
@ -753,9 +746,9 @@ var _ = SIGDescribe("StatefulSet", func() {
})
/*
Testname: StatefulSet-BurstScaling
Description: StatefulSet MUST support the Parallel PodManagementPolicy for burst scaling.
This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
Release : v1.9
Testname: StatefulSet, Burst Scaling
Description: StatefulSet MUST support the Parallel PodManagementPolicy for burst scaling. This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
*/
framework.ConformanceIt("Burst scaling should run to completion even with unhealthy pods", func() {
psLabels := klabels.Set(labels)
@ -796,10 +789,9 @@ var _ = SIGDescribe("StatefulSet", func() {
})
/*
Testname: StatefulSet-RecreateFailedPod
Description: StatefulSet MUST delete and recreate Pods it owns that go into a Failed state,
such as when they are rejected or evicted by a Node.
This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
Release : v1.9
Testname: StatefulSet, Recreate Failed Pod
Description: StatefulSet MUST delete and recreate Pods it owns that go into a Failed state, such as when they are rejected or evicted by a Node. This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
*/
framework.ConformanceIt("Should recreate evicted statefulset", func() {
podName := "test-pod"