mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 18:43:34 +00:00

vendor files
87  vendor/k8s.io/kubernetes/test/e2e/apps/BUILD  generated  vendored  Normal file
@@ -0,0 +1,87 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "cronjob.go",
        "daemon_restart.go",
        "daemon_set.go",
        "deployment.go",
        "disruption.go",
        "framework.go",
        "job.go",
        "network_partition.go",
        "rc.go",
        "replica_set.go",
        "statefulset.go",
        "types.go",
    ],
    importpath = "k8s.io/kubernetes/test/e2e/apps",
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/apis/batch:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/apis/extensions:go_default_library",
        "//pkg/client/clientset_generated/internalclientset:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/controller/daemon:go_default_library",
        "//pkg/controller/deployment/util:go_default_library",
        "//pkg/controller/job:go_default_library",
        "//pkg/controller/node:go_default_library",
        "//pkg/controller/replicaset:go_default_library",
        "//pkg/controller/replication:go_default_library",
        "//pkg/kubectl:go_default_library",
        "//pkg/master/ports:go_default_library",
        "//pkg/util/pointer:go_default_library",
        "//plugin/pkg/scheduler/schedulercache:go_default_library",
        "//test/e2e/common:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/utils:go_default_library",
        "//test/utils/image:go_default_library",
        "//vendor/github.com/davecgh/go-spew/spew:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//vendor/github.com/onsi/gomega:go_default_library",
        "//vendor/k8s.io/api/apps/v1beta1:go_default_library",
        "//vendor/k8s.io/api/apps/v1beta2:go_default_library",
        "//vendor/k8s.io/api/batch/v1:go_default_library",
        "//vendor/k8s.io/api/batch/v1beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
7  vendor/k8s.io/kubernetes/test/e2e/apps/OWNERS  generated  vendored  Executable file
@@ -0,0 +1,7 @@
approvers:
- janetkuo
- nikhiljindal
- kargakis
- mfojtik
reviewers:
- sig-apps-reviewers
476  vendor/k8s.io/kubernetes/test/e2e/apps/cronjob.go  generated  vendored  Normal file
@@ -0,0 +1,476 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apps

import (
	"fmt"
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	batchv1 "k8s.io/api/batch/v1"
	batchv1beta1 "k8s.io/api/batch/v1beta1"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	batchinternal "k8s.io/kubernetes/pkg/apis/batch"
	"k8s.io/kubernetes/pkg/controller/job"
	"k8s.io/kubernetes/pkg/kubectl"
	"k8s.io/kubernetes/test/e2e/framework"
)

const (
	// How long to wait for a cronjob
	cronJobTimeout = 5 * time.Minute
)

var _ = SIGDescribe("CronJob", func() {
	f := framework.NewDefaultFramework("cronjob")

	sleepCommand := []string{"sleep", "300"}

	// Pod will complete instantly
	successCommand := []string{"/bin/true"}

	BeforeEach(func() {
		framework.SkipIfMissingResource(f.ClientPool, CronJobGroupVersionResourceBeta, f.Namespace.Name)
	})

	// multiple jobs running at once
	It("should schedule multiple jobs concurrently", func() {
		By("Creating a cronjob")
		cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
			sleepCommand, nil)
		cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring more than one job is running at a time")
		err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 2)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring at least two running jobs exist by listing jobs explicitly")
		jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
		Expect(err).NotTo(HaveOccurred())
		activeJobs, _ := filterActiveJobs(jobs)
		Expect(len(activeJobs) >= 2).To(BeTrue())

		By("Removing cronjob")
		err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
		Expect(err).NotTo(HaveOccurred())
	})

	// suspended should not schedule jobs
	It("should not schedule jobs when suspended [Slow]", func() {
		By("Creating a suspended cronjob")
		cronJob := newTestCronJob("suspended", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
			sleepCommand, nil)
		t := true
		cronJob.Spec.Suspend = &t
		cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring no jobs are scheduled")
		err = waitForNoJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, false)
		Expect(err).To(HaveOccurred())

		By("Ensuring no job exists by listing jobs explicitly")
		jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
		Expect(err).NotTo(HaveOccurred())
		Expect(jobs.Items).To(HaveLen(0))

		By("Removing cronjob")
		err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
		Expect(err).NotTo(HaveOccurred())
	})

	// only single active job is allowed for ForbidConcurrent
	It("should not schedule new jobs when ForbidConcurrent [Slow]", func() {
		By("Creating a ForbidConcurrent cronjob")
		cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1beta1.ForbidConcurrent,
			sleepCommand, nil)
		cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring a job is scheduled")
		err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring exactly one is scheduled")
		cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
		Expect(err).NotTo(HaveOccurred())
		Expect(cronJob.Status.Active).Should(HaveLen(1))

		By("Ensuring exactly one running job exists by listing jobs explicitly")
		jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
		Expect(err).NotTo(HaveOccurred())
		activeJobs, _ := filterActiveJobs(jobs)
		Expect(activeJobs).To(HaveLen(1))

		By("Ensuring no more jobs are scheduled")
		err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 2)
		Expect(err).To(HaveOccurred())

		By("Removing cronjob")
		err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
		Expect(err).NotTo(HaveOccurred())
	})

	// only single active job is allowed for ReplaceConcurrent
	It("should replace jobs when ReplaceConcurrent", func() {
		By("Creating a ReplaceConcurrent cronjob")
		cronJob := newTestCronJob("replace", "*/1 * * * ?", batchv1beta1.ReplaceConcurrent,
			sleepCommand, nil)
		cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring a job is scheduled")
		err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring exactly one is scheduled")
		cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
		Expect(err).NotTo(HaveOccurred())
		Expect(cronJob.Status.Active).Should(HaveLen(1))

		By("Ensuring exactly one running job exists by listing jobs explicitly")
		jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
		Expect(err).NotTo(HaveOccurred())
		activeJobs, _ := filterActiveJobs(jobs)
		Expect(activeJobs).To(HaveLen(1))

		By("Ensuring the job is replaced with a new one")
		err = waitForJobReplaced(f.ClientSet, f.Namespace.Name, jobs.Items[0].Name)
		Expect(err).NotTo(HaveOccurred())

		By("Removing cronjob")
		err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
		Expect(err).NotTo(HaveOccurred())
	})

	// shouldn't give us unexpected warnings
	It("should not emit unexpected warnings", func() {
		By("Creating a cronjob")
		cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
			nil, nil)
		cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring at least two jobs and at least one finished job exists by listing jobs explicitly")
		err = waitForJobsAtLeast(f.ClientSet, f.Namespace.Name, 2)
		Expect(err).NotTo(HaveOccurred())
		err = waitForAnyFinishedJob(f.ClientSet, f.Namespace.Name)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring no unexpected event has happened")
		err = checkNoEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"})
		Expect(err).NotTo(HaveOccurred())

		By("Removing cronjob")
		err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
		Expect(err).NotTo(HaveOccurred())
	})

	// deleted jobs should be removed from the active list
	It("should remove from active list jobs that have been deleted", func() {
		By("Creating a ForbidConcurrent cronjob")
		cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1beta1.ForbidConcurrent,
			sleepCommand, nil)
		cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring a job is scheduled")
		err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring exactly one is scheduled")
		cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
		Expect(err).NotTo(HaveOccurred())
		Expect(cronJob.Status.Active).Should(HaveLen(1))

		By("Deleting the job")
		job := cronJob.Status.Active[0]
		reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
		Expect(err).NotTo(HaveOccurred())
		timeout := 1 * time.Minute
		err = reaper.Stop(f.Namespace.Name, job.Name, timeout, metav1.NewDeleteOptions(0))
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job was deleted")
		_, err = framework.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
		Expect(err).To(HaveOccurred())
		Expect(errors.IsNotFound(err)).To(BeTrue())

		By("Ensuring there are no active jobs in the cronjob")
		err = waitForNoJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, true)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring MissingJob event has occurred")
		err = checkNoEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"})
		Expect(err).To(HaveOccurred())

		By("Removing cronjob")
		err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
		Expect(err).NotTo(HaveOccurred())
	})

	// cleanup of successful finished jobs, with limit of one successful job
	It("should delete successful finished jobs with limit of one successful job", func() {
		By("Creating an AllowConcurrent cronjob with custom history limits")
		successLimit := int32(1)
		cronJob := newTestCronJob("concurrent-limit", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
			successCommand, &successLimit)
		cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
		Expect(err).NotTo(HaveOccurred())

		// Job is going to complete instantly: do not check for an active job
		// as we are most likely to miss it

		By("Ensuring a finished job exists")
		err = waitForAnyFinishedJob(f.ClientSet, f.Namespace.Name)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring a finished job exists by listing jobs explicitly")
		jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
		Expect(err).NotTo(HaveOccurred())
		_, finishedJobs := filterActiveJobs(jobs)
		Expect(len(finishedJobs) == 1).To(BeTrue())

		// Job should get deleted when the next job finishes the next minute
		By("Ensuring this job does not exist anymore")
		err = waitForJobNotExist(f.ClientSet, f.Namespace.Name, finishedJobs[0])
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring there is 1 finished job by listing jobs explicitly")
		jobs, err = f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
		Expect(err).NotTo(HaveOccurred())
		_, finishedJobs = filterActiveJobs(jobs)
		Expect(len(finishedJobs) == 1).To(BeTrue())

		By("Removing cronjob")
		err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
		Expect(err).NotTo(HaveOccurred())
	})
})

// newTestCronJob returns a cronjob which does one of several testing behaviors.
func newTestCronJob(name, schedule string, concurrencyPolicy batchv1beta1.ConcurrencyPolicy,
	command []string, successfulJobsHistoryLimit *int32) *batchv1beta1.CronJob {
	parallelism := int32(1)
	completions := int32(1)
	sj := &batchv1beta1.CronJob{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		TypeMeta: metav1.TypeMeta{
			Kind: "CronJob",
		},
		Spec: batchv1beta1.CronJobSpec{
			Schedule:          schedule,
			ConcurrencyPolicy: concurrencyPolicy,
			JobTemplate: batchv1beta1.JobTemplateSpec{
				Spec: batchv1.JobSpec{
					Parallelism: &parallelism,
					Completions: &completions,
					Template: v1.PodTemplateSpec{
						Spec: v1.PodSpec{
							RestartPolicy: v1.RestartPolicyOnFailure,
							Volumes: []v1.Volume{
								{
									Name: "data",
									VolumeSource: v1.VolumeSource{
										EmptyDir: &v1.EmptyDirVolumeSource{},
									},
								},
							},
							Containers: []v1.Container{
								{
									Name:  "c",
									Image: "busybox",
									VolumeMounts: []v1.VolumeMount{
										{
											MountPath: "/data",
											Name:      "data",
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}
	sj.Spec.SuccessfulJobsHistoryLimit = successfulJobsHistoryLimit
	if command != nil {
		sj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Command = command
	}
	return sj
}

func createCronJob(c clientset.Interface, ns string, cronJob *batchv1beta1.CronJob) (*batchv1beta1.CronJob, error) {
	return c.BatchV1beta1().CronJobs(ns).Create(cronJob)
}

func getCronJob(c clientset.Interface, ns, name string) (*batchv1beta1.CronJob, error) {
	return c.BatchV1beta1().CronJobs(ns).Get(name, metav1.GetOptions{})
}

func deleteCronJob(c clientset.Interface, ns, name string) error {
	return c.BatchV1beta1().CronJobs(ns).Delete(name, nil)
}

// Wait for at least given amount of active jobs.
func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int) error {
	return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
		curr, err := c.BatchV1beta1().CronJobs(ns).Get(cronJobName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return len(curr.Status.Active) >= active, nil
	})
}

// Wait for jobs to appear in the active list of a cronjob or not.
// When failIfNonEmpty is set, this fails if the active set of jobs is still non-empty after
// the timeout. When failIfNonEmpty is not set, this fails if the active set of jobs is still
// empty after the timeout.
func waitForNoJobs(c clientset.Interface, ns, jobName string, failIfNonEmpty bool) error {
	return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
		curr, err := c.BatchV1beta1().CronJobs(ns).Get(jobName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}

		if failIfNonEmpty {
			return len(curr.Status.Active) == 0, nil
		} else {
			return len(curr.Status.Active) != 0, nil
		}
	})
}

// Wait for a job to not exist by listing jobs explicitly.
func waitForJobNotExist(c clientset.Interface, ns string, targetJob *batchv1.Job) error {
	return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
		jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		_, finishedJobs := filterActiveJobs(jobs)
		for _, job := range finishedJobs {
			if targetJob.Namespace == job.Namespace && targetJob.Name == job.Name {
				return false, nil
			}
		}
		return true, nil
	})
}

// Wait for a job to be replaced with a new one.
func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error {
	return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
		jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		// Ignore Jobs pending deletion, since deletion of Jobs is now asynchronous.
		aliveJobs := filterNotDeletedJobs(jobs)
		if len(aliveJobs) > 1 {
			return false, fmt.Errorf("More than one job is running %+v", jobs.Items)
		} else if len(aliveJobs) == 0 {
			framework.Logf("Warning: Found 0 jobs in namespace %v", ns)
			return false, nil
		}
		return aliveJobs[0].Name != previousJobName, nil
	})
}

// waitForJobsAtLeast waits for at least a number of jobs to appear.
func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error {
	return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
		jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		return len(jobs.Items) >= atLeast, nil
	})
}

// waitForAnyFinishedJob waits for any completed job to appear.
func waitForAnyFinishedJob(c clientset.Interface, ns string) error {
	return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
		jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		for i := range jobs.Items {
			if job.IsJobFinished(&jobs.Items[i]) {
				return true, nil
			}
		}
		return false, nil
	})
}

// checkNoEventWithReason checks that no event with a reason within the given list has occurred
func checkNoEventWithReason(c clientset.Interface, ns, cronJobName string, reasons []string) error {
	sj, err := c.BatchV1beta1().CronJobs(ns).Get(cronJobName, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("Error in getting cronjob %s/%s: %v", ns, cronJobName, err)
	}
	events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, sj)
	if err != nil {
		return fmt.Errorf("Error in listing events: %s", err)
	}
	for _, e := range events.Items {
		for _, reason := range reasons {
			if e.Reason == reason {
				return fmt.Errorf("Found event with reason %s: %#v", reason, e)
			}
		}
	}
	return nil
}

// filterNotDeletedJobs returns the job list without any jobs that are pending
// deletion.
func filterNotDeletedJobs(jobs *batchv1.JobList) []*batchv1.Job {
	var alive []*batchv1.Job
	for i := range jobs.Items {
		job := &jobs.Items[i]
		if job.DeletionTimestamp == nil {
			alive = append(alive, job)
		}
	}
	return alive
}

func filterActiveJobs(jobs *batchv1.JobList) (active []*batchv1.Job, finished []*batchv1.Job) {
	for i := range jobs.Items {
		j := jobs.Items[i]
		if !job.IsJobFinished(&j) {
			active = append(active, &j)
		} else {
			finished = append(finished, &j)
		}
	}
	return
}
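Aside: all of the wait helpers in cronjob.go above (waitForActiveJobs, waitForNoJobs, waitForJobReplaced, waitForJobsAtLeast, waitForAnyFinishedJob) are built on the same wait.Poll idiom from k8s.io/apimachinery: invoke a condition function at a fixed interval until it returns true, returns an error, or the timeout elapses. A minimal, self-contained sketch of that pattern; the time-based condition here is made up for illustration and stands in for the real API checks:

	package main

	import (
		"fmt"
		"time"

		"k8s.io/apimachinery/pkg/util/wait"
	)

	func main() {
		deadline := time.Now().Add(3 * time.Second)
		// wait.Poll calls the condition every 500ms for up to 10s.
		// Returning (true, nil) stops polling successfully; a non-nil
		// error aborts immediately; (false, nil) means "keep waiting".
		err := wait.Poll(500*time.Millisecond, 10*time.Second, func() (bool, error) {
			fmt.Println("polling...")
			return time.Now().After(deadline), nil
		})
		fmt.Println("poll finished, err =", err)
	}

Because a non-nil error aborts the poll at once, the helpers above propagate List/Get errors instead of swallowing them, so an API outage fails the test quickly rather than burning the whole five-minute cronJobTimeout.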
315  vendor/k8s.io/kubernetes/test/e2e/apps/daemon_restart.go  generated  vendored  Normal file
@@ -0,0 +1,315 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apps

import (
	"fmt"
	"strconv"
	"time"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/watch"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/kubernetes/pkg/master/ports"
	"k8s.io/kubernetes/test/e2e/framework"
	testutils "k8s.io/kubernetes/test/utils"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// This test primarily checks 2 things:
// 1. Daemons restart automatically within some sane time (10m).
// 2. They don't take abnormal actions when restarted in the steady state.
//	- Controller manager shouldn't overshoot replicas
//	- Kubelet shouldn't restart containers
//	- Scheduler should continue assigning hosts to new pods

const (
	restartPollInterval = 5 * time.Second
	restartTimeout      = 10 * time.Minute
	numPods             = 10
	sshPort             = 22
	ADD                 = "ADD"
	DEL                 = "DEL"
	UPDATE              = "UPDATE"
)

// restartDaemonConfig is a config to restart a running daemon on a node, and wait till
// it comes back up. It uses ssh to send a SIGTERM to the daemon.
type restartDaemonConfig struct {
	nodeName     string
	daemonName   string
	healthzPort  int
	pollInterval time.Duration
	pollTimeout  time.Duration
}

// NewRestartConfig creates a restartDaemonConfig for the given node and daemon.
func NewRestartConfig(nodeName, daemonName string, healthzPort int, pollInterval, pollTimeout time.Duration) *restartDaemonConfig {
	if !framework.ProviderIs("gce") {
		framework.Logf("WARNING: SSH through the restart config might not work on %s", framework.TestContext.Provider)
	}
	return &restartDaemonConfig{
		nodeName:     nodeName,
		daemonName:   daemonName,
		healthzPort:  healthzPort,
		pollInterval: pollInterval,
		pollTimeout:  pollTimeout,
	}
}

func (r *restartDaemonConfig) String() string {
	return fmt.Sprintf("Daemon %v on node %v", r.daemonName, r.nodeName)
}

// waitUp polls healthz of the daemon till it returns "ok" or the polling hits the pollTimeout
func (r *restartDaemonConfig) waitUp() {
	framework.Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r)
	healthzCheck := fmt.Sprintf(
		"curl -s -o /dev/null -I -w \"%%{http_code}\" http://localhost:%v/healthz", r.healthzPort)

	err := wait.Poll(r.pollInterval, r.pollTimeout, func() (bool, error) {
		result, err := framework.NodeExec(r.nodeName, healthzCheck)
		framework.ExpectNoError(err)
		if result.Code == 0 {
			httpCode, err := strconv.Atoi(result.Stdout)
			if err != nil {
				framework.Logf("Unable to parse healthz http return code: %v", err)
			} else if httpCode == 200 {
				return true, nil
			}
		}
		framework.Logf("node %v exec command, '%v' failed with exitcode %v: \n\tstdout: %v\n\tstderr: %v",
			r.nodeName, healthzCheck, result.Code, result.Stdout, result.Stderr)
		return false, nil
	})
	framework.ExpectNoError(err, "%v did not respond with a 200 via %v within %v", r, healthzCheck, r.pollTimeout)
}

// kill sends a SIGTERM to the daemon
func (r *restartDaemonConfig) kill() {
	framework.Logf("Killing %v", r)
	_, err := framework.NodeExec(r.nodeName, fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName))
	Expect(err).NotTo(HaveOccurred())
}

// Restart checks if the daemon is up, kills it, and waits till it comes back up
func (r *restartDaemonConfig) restart() {
	r.waitUp()
	r.kill()
	r.waitUp()
}

// podTracker records a serial history of events that might've affected pods.
type podTracker struct {
	cache.ThreadSafeStore
}

func (p *podTracker) remember(pod *v1.Pod, eventType string) {
	if eventType == UPDATE && pod.Status.Phase == v1.PodRunning {
		return
	}
	p.Add(fmt.Sprintf("[%v] %v: %v", time.Now(), eventType, pod.Name), pod)
}

func (p *podTracker) String() (msg string) {
	for _, k := range p.ListKeys() {
		obj, exists := p.Get(k)
		if !exists {
			continue
		}
		pod := obj.(*v1.Pod)
		msg += fmt.Sprintf("%v Phase %v Host %v\n", k, pod.Status.Phase, pod.Spec.NodeName)
	}
	return
}

func newPodTracker() *podTracker {
	return &podTracker{cache.NewThreadSafeStore(
		cache.Indexers{}, cache.Indices{})}
}

// replacePods replaces content of the store with the given pods.
func replacePods(pods []*v1.Pod, store cache.Store) {
	found := make([]interface{}, 0, len(pods))
	for i := range pods {
		found = append(found, pods[i])
	}
	framework.ExpectNoError(store.Replace(found, "0"))
}

// getContainerRestarts returns the count of container restarts across all pods matching the given labelSelector,
// and a list of nodenames across which these containers restarted.
func getContainerRestarts(c clientset.Interface, ns string, labelSelector labels.Selector) (int, []string) {
	options := metav1.ListOptions{LabelSelector: labelSelector.String()}
	pods, err := c.CoreV1().Pods(ns).List(options)
	framework.ExpectNoError(err)
	failedContainers := 0
	containerRestartNodes := sets.NewString()
	for _, p := range pods.Items {
		for _, v := range testutils.FailedContainers(&p) {
			failedContainers = failedContainers + v.Restarts
			containerRestartNodes.Insert(p.Spec.NodeName)
		}
	}
	return failedContainers, containerRestartNodes.List()
}

var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {

	f := framework.NewDefaultFramework("daemonrestart")
	rcName := "daemonrestart" + strconv.Itoa(numPods) + "-" + string(uuid.NewUUID())
	labelSelector := labels.Set(map[string]string{"name": rcName}).AsSelector()
	existingPods := cache.NewStore(cache.MetaNamespaceKeyFunc)
	var ns string
	var config testutils.RCConfig
	var controller cache.Controller
	var newPods cache.Store
	var stopCh chan struct{}
	var tracker *podTracker

	BeforeEach(func() {
		// These tests require SSH
		framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
		ns = f.Namespace.Name

		// All the restart tests need an rc and a watch on pods of the rc.
		// Additionally some of them might scale the rc during the test.
		config = testutils.RCConfig{
			Client:         f.ClientSet,
			InternalClient: f.InternalClientset,
			Name:           rcName,
			Namespace:      ns,
			Image:          framework.GetPauseImageName(f.ClientSet),
			Replicas:       numPods,
			CreatedPods:    &[]*v1.Pod{},
		}
		Expect(framework.RunRC(config)).NotTo(HaveOccurred())
		replacePods(*config.CreatedPods, existingPods)

		stopCh = make(chan struct{})
		tracker = newPodTracker()
		newPods, controller = cache.NewInformer(
			&cache.ListWatch{
				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
					options.LabelSelector = labelSelector.String()
					obj, err := f.ClientSet.CoreV1().Pods(ns).List(options)
					return runtime.Object(obj), err
				},
				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
					options.LabelSelector = labelSelector.String()
					return f.ClientSet.CoreV1().Pods(ns).Watch(options)
				},
			},
			&v1.Pod{},
			0,
			cache.ResourceEventHandlerFuncs{
				AddFunc: func(obj interface{}) {
					tracker.remember(obj.(*v1.Pod), ADD)
				},
				UpdateFunc: func(oldObj, newObj interface{}) {
					tracker.remember(newObj.(*v1.Pod), UPDATE)
				},
				DeleteFunc: func(obj interface{}) {
					tracker.remember(obj.(*v1.Pod), DEL)
				},
			},
		)
		go controller.Run(stopCh)
	})

	AfterEach(func() {
		close(stopCh)
	})

	It("Controller Manager should not create/delete replicas across restart", func() {

		// Requires master ssh access.
		framework.SkipUnlessProviderIs("gce", "aws")
		restarter := NewRestartConfig(
			framework.GetMasterHost(), "kube-controller", ports.ControllerManagerPort, restartPollInterval, restartTimeout)
		restarter.restart()

		// The intent is to ensure the replication controller manager has observed and reported status of
		// the replication controller at least once since the manager restarted, so that we can determine
		// that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC
		// to the same size achieves this, because the scale operation advances the RC's sequence number
		// and awaits it to be observed and reported back in the RC's status.
		framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods, true)

		// Only check the keys, the pods can be different if the kubelet updated it.
		// TODO: Can it really?
		existingKeys := sets.NewString()
		newKeys := sets.NewString()
		for _, k := range existingPods.ListKeys() {
			existingKeys.Insert(k)
		}
		for _, k := range newPods.ListKeys() {
			newKeys.Insert(k)
		}
		if len(newKeys.List()) != len(existingKeys.List()) ||
			!newKeys.IsSuperset(existingKeys) {
			framework.Failf("RcManager created/deleted pods after restart \n\n %+v", tracker)
		}
	})

	It("Scheduler should continue assigning pods to nodes across restart", func() {

		// Requires master ssh access.
		framework.SkipUnlessProviderIs("gce", "aws")
		restarter := NewRestartConfig(
			framework.GetMasterHost(), "kube-scheduler", ports.SchedulerPort, restartPollInterval, restartTimeout)

		// Create pods while the scheduler is down and make sure the scheduler picks them up by
		// scaling the rc to the same size.
		restarter.waitUp()
		restarter.kill()
		// This is best effort to try and create pods while the scheduler is down,
		// since we don't know exactly when it is restarted after the kill signal.
		framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods+5, false))
		restarter.waitUp()
		framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods+5, true))
	})

	It("Kubelet should not restart containers across restart", func() {

		nodeIPs, err := framework.GetNodePublicIps(f.ClientSet)
		framework.ExpectNoError(err)
		preRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
		if preRestarts != 0 {
			framework.Logf("WARNING: Non-zero container restart count: %d across nodes %v", preRestarts, badNodes)
		}
		for _, ip := range nodeIPs {
			restarter := NewRestartConfig(
				ip, "kubelet", ports.KubeletReadOnlyPort, restartPollInterval, restartTimeout)
			restarter.restart()
		}
		postRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
		if postRestarts != preRestarts {
			framework.DumpNodeDebugInfo(f.ClientSet, badNodes, framework.Logf)
			framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker)
		}
	})
})
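Aside: podTracker above embeds client-go's ThreadSafeStore and keys each entry by a timestamped event description, so the store doubles as an event log that can be dumped when a test fails. A minimal sketch of that store usage; the keys and plain string values here are hypothetical stand-ins for the real *v1.Pod snapshots:

	package main

	import (
		"fmt"

		"k8s.io/client-go/tools/cache"
	)

	func main() {
		// Same constructor the real tracker uses: no indexers, no indices.
		store := cache.NewThreadSafeStore(cache.Indexers{}, cache.Indices{})
		// Key encodes when and what happened; value is the object snapshot.
		store.Add("[t0] ADD: pod-a", "pod-a snapshot")
		store.Add("[t1] DEL: pod-a", "pod-a snapshot")
		for _, k := range store.ListKeys() {
			if obj, ok := store.Get(k); ok {
				fmt.Println(k, "->", obj)
			}
		}
	}

Note that ListKeys carries no ordering guarantee; the timestamp embedded in each key is what keeps the dumped history interpretable.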
928  vendor/k8s.io/kubernetes/test/e2e/apps/daemon_set.go  generated  vendored  Normal file
@@ -0,0 +1,928 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apps
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
apps "k8s.io/api/apps/v1beta1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
apiequality "k8s.io/apimachinery/pkg/api/equality"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/daemon"
|
||||
"k8s.io/kubernetes/pkg/kubectl"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
// this should not be a multiple of 5, because node status updates
|
||||
// every 5 seconds. See https://github.com/kubernetes/kubernetes/pull/14915.
|
||||
dsRetryPeriod = 1 * time.Second
|
||||
dsRetryTimeout = 5 * time.Minute
|
||||
|
||||
daemonsetLabelPrefix = "daemonset-"
|
||||
daemonsetNameLabel = daemonsetLabelPrefix + "name"
|
||||
daemonsetColorLabel = daemonsetLabelPrefix + "color"
|
||||
)
|
||||
|
||||
// This test must be run in serial because it assumes the Daemon Set pods will
|
||||
// always get scheduled. If we run other tests in parallel, this may not
|
||||
// happen. In the future, running in parallel may work if we have an eviction
|
||||
// model which lets the DS controller kick out other pods to make room.
|
||||
// See http://issues.k8s.io/21767 for more details
|
||||
var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
var f *framework.Framework
|
||||
|
||||
AfterEach(func() {
|
||||
// Clean up
|
||||
daemonsets, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "unable to dump DaemonSets")
|
||||
if daemonsets != nil && len(daemonsets.Items) > 0 {
|
||||
for _, ds := range daemonsets.Items {
|
||||
By(fmt.Sprintf("Deleting DaemonSet %q with reaper", ds.Name))
|
||||
dsReaper, err := kubectl.ReaperFor(extensionsinternal.Kind("DaemonSet"), f.InternalClientset)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = dsReaper.Stop(f.Namespace.Name, ds.Name, 0, nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
|
||||
}
|
||||
}
|
||||
if daemonsets, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
|
||||
framework.Logf("daemonset: %s", runtime.EncodeOrDie(legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), daemonsets))
|
||||
} else {
|
||||
framework.Logf("unable to dump daemonsets: %v", err)
|
||||
}
|
||||
if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
|
||||
framework.Logf("pods: %s", runtime.EncodeOrDie(legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), pods))
|
||||
} else {
|
||||
framework.Logf("unable to dump pods: %v", err)
|
||||
}
|
||||
err = clearDaemonSetNodeLabels(f.ClientSet)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
f = framework.NewDefaultFramework("daemonsets")
|
||||
|
||||
image := framework.ServeHostnameImage
|
||||
dsName := "daemon-set"
|
||||
|
||||
var ns string
|
||||
var c clientset.Interface
|
||||
|
||||
BeforeEach(func() {
|
||||
ns = f.Namespace.Name
|
||||
|
||||
c = f.ClientSet
|
||||
err := clearDaemonSetNodeLabels(c)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("should run and stop simple daemon", func() {
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
|
||||
By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
|
||||
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Stop a daemon pod, check that the daemon pod is revived.")
|
||||
podList := listDaemonPods(c, ns, label)
|
||||
pod := podList.Items[0]
|
||||
err = c.CoreV1().Pods(ns).Delete(pod.Name, nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")
|
||||
})
|
||||
|
||||
It("should run and stop complex daemon", func() {
|
||||
complexLabel := map[string]string{daemonsetNameLabel: dsName}
|
||||
nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
|
||||
framework.Logf("Creating daemon %q with a node selector", dsName)
|
||||
ds := newDaemonSet(dsName, image, complexLabel)
|
||||
ds.Spec.Template.Spec.NodeSelector = nodeSelector
|
||||
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Initially, daemon pods should not be running on any nodes.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
|
||||
|
||||
By("Change node label to blue, check that daemon pod is launched.")
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
|
||||
newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
|
||||
Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
|
||||
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
|
||||
Expect(len(daemonSetLabels)).To(Equal(1))
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name}))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Update the node label to green, and wait for daemons to be unscheduled")
|
||||
nodeSelector[daemonsetColorLabel] = "green"
|
||||
greenNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
|
||||
Expect(err).NotTo(HaveOccurred(), "error removing labels on node")
|
||||
Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))).
|
||||
NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
|
||||
|
||||
By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate")
|
||||
patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`,
|
||||
daemonsetColorLabel, greenNode.Labels[daemonsetColorLabel])
|
||||
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
|
||||
Expect(err).NotTo(HaveOccurred(), "error patching daemon set")
|
||||
daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels)
|
||||
Expect(len(daemonSetLabels)).To(Equal(1))
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{greenNode.Name}))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("should run and stop complex daemon with node affinity", func() {
|
||||
complexLabel := map[string]string{daemonsetNameLabel: dsName}
|
||||
nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
|
||||
framework.Logf("Creating daemon %q with a node affinity", dsName)
|
||||
ds := newDaemonSet(dsName, image, complexLabel)
|
||||
ds.Spec.Template.Spec.Affinity = &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: daemonsetColorLabel,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{nodeSelector[daemonsetColorLabel]},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Initially, daemon pods should not be running on any nodes.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
|
||||
|
||||
By("Change node label to blue, check that daemon pod is launched.")
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
|
||||
newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
|
||||
Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
|
||||
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
|
||||
Expect(len(daemonSetLabels)).To(Equal(1))
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name}))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Remove the node label and wait for daemons to be unscheduled")
|
||||
_, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
|
||||
Expect(err).NotTo(HaveOccurred(), "error removing labels on node")
|
||||
Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))).
|
||||
NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
|
||||
})
|
||||
|
||||
It("should retry creating failed daemon pods", func() {
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
|
||||
By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName))
|
||||
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.")
|
||||
podList := listDaemonPods(c, ns, label)
|
||||
pod := podList.Items[0]
|
||||
pod.ResourceVersion = ""
|
||||
pod.Status.Phase = v1.PodFailed
|
||||
_, err = c.CoreV1().Pods(ns).UpdateStatus(&pod)
|
||||
Expect(err).NotTo(HaveOccurred(), "error failing a daemon pod")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")
|
||||
})
|
||||
|
||||
It("Should not update pod when spec was updated and update strategy is OnDelete", func() {
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
|
||||
framework.Logf("Creating simple daemon set %s", dsName)
|
||||
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(ds.Spec.TemplateGeneration).To(Equal(int64(1)))
|
||||
|
||||
By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
|
||||
By("Make sure all daemon pods have correct template generation 1")
|
||||
templateGeneration := "1"
|
||||
err = checkDaemonPodsTemplateGeneration(c, ns, label, "1")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Check history and labels
|
||||
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
waitForHistoryCreated(c, ns, label, 1)
|
||||
first := curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
firstHash := first.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
|
||||
Expect(first.Revision).To(Equal(int64(1)))
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash, templateGeneration)
|
||||
|
||||
By("Update daemon pods image.")
|
||||
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
|
||||
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(ds.Spec.TemplateGeneration).To(Equal(int64(2)))
|
||||
|
||||
By("Check that daemon pods images aren't updated.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Make sure all daemon pods have correct template generation 1")
|
||||
err = checkDaemonPodsTemplateGeneration(c, ns, label, templateGeneration)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Check that daemon pods are still running on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
|
||||
// Check history and labels
|
||||
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
waitForHistoryCreated(c, ns, label, 2)
|
||||
cur := curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
Expect(cur.Revision).To(Equal(int64(2)))
|
||||
Expect(cur.Labels[extensions.DefaultDaemonSetUniqueLabelKey]).NotTo(Equal(firstHash))
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash, templateGeneration)
|
||||
})
|
||||
|
||||
It("Should update pod when spec was updated and update strategy is RollingUpdate", func() {
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
|
||||
templateGeneration := int64(999)
|
||||
framework.Logf("Creating simple daemon set %s with templateGeneration %d", dsName, templateGeneration)
|
||||
ds := newDaemonSet(dsName, image, label)
|
||||
ds.Spec.TemplateGeneration = templateGeneration
|
||||
ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
|
||||
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))
|
||||
|
||||
By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
|
||||
By(fmt.Sprintf("Make sure all daemon pods have correct template generation %d", templateGeneration))
|
||||
err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Check history and labels
|
||||
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
waitForHistoryCreated(c, ns, label, 1)
|
||||
cur := curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
hash := cur.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
|
||||
Expect(cur.Revision).To(Equal(int64(1)))
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash, fmt.Sprint(templateGeneration))
|
||||
|
||||
By("Update daemon pods image.")
|
||||
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
|
||||
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
templateGeneration++
|
||||
Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))
|
||||
|
||||
By("Check that daemon pods images are updated.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Make sure all daemon pods have correct template generation %d", templateGeneration))
|
||||
err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Check that daemon pods are still running on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
|
||||
// Check history and labels
|
||||
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
waitForHistoryCreated(c, ns, label, 2)
|
||||
cur = curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
hash = cur.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
|
||||
Expect(cur.Revision).To(Equal(int64(2)))
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash, fmt.Sprint(templateGeneration))
|
||||
})
|
||||
|
||||
It("Should adopt existing pods when creating a RollingUpdate DaemonSet regardless of templateGeneration", func() {
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
|
||||
// 1. Create a RollingUpdate DaemonSet
|
||||
templateGeneration := int64(999)
|
||||
framework.Logf("Creating simple RollingUpdate DaemonSet %s with templateGeneration %d", dsName, templateGeneration)
|
||||
ds := newDaemonSet(dsName, image, label)
|
||||
ds.Spec.TemplateGeneration = templateGeneration
|
||||
ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
|
||||
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))
|
||||
|
||||
framework.Logf("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
|
||||
framework.Logf("Make sure all daemon pods have correct template generation %d", templateGeneration)
|
||||
err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// 2. Orphan DaemonSet pods
|
||||
framework.Logf("Deleting DaemonSet %s and orphaning its pods and history", dsName)
|
||||
deleteDaemonSetAndOrphan(c, ds)
|
||||
|
||||
// 3. Adopt DaemonSet pods (no restart)
|
||||
newDSName := "adopt"
|
||||
framework.Logf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newDSName)
|
||||
newDS := newDaemonSet(newDSName, image, label)
|
||||
newDS.Spec.TemplateGeneration = templateGeneration
|
||||
newDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
|
||||
newDS, err = c.ExtensionsV1beta1().DaemonSets(ns).Create(newDS)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(newDS.Spec.TemplateGeneration).To(Equal(templateGeneration))
|
||||
Expect(apiequality.Semantic.DeepEqual(newDS.Spec.Template, ds.Spec.Template)).To(BeTrue(), "DaemonSet template should match to adopt pods")
|
||||
|
||||
framework.Logf("Wait for pods and history to be adopted by DaemonSet %s", newDS.Name)
|
||||
waitDaemonSetAdoption(c, newDS, ds.Name, templateGeneration)
|
||||
|
||||
// 4. Orphan DaemonSet pods again
|
||||
framework.Logf("Deleting DaemonSet %s and orphaning its pods and history", newDSName)
|
||||
deleteDaemonSetAndOrphan(c, newDS)
|
||||
|
||||
// 5. Adopt DaemonSet pods (no restart) as long as template matches, even when templateGeneration doesn't match
|
||||
newAdoptDSName := "adopt-template-matches"
|
||||
framework.Logf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newAdoptDSName)
|
||||
newAdoptDS := newDaemonSet(newAdoptDSName, image, label)
|
||||
newAdoptDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
|
||||
newAdoptDS, err = c.ExtensionsV1beta1().DaemonSets(ns).Create(newAdoptDS)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(newAdoptDS.Spec.TemplateGeneration).To(Equal(int64(1)))
|
||||
Expect(newAdoptDS.Spec.TemplateGeneration).NotTo(Equal(templateGeneration))
|
||||
Expect(apiequality.Semantic.DeepEqual(newAdoptDS.Spec.Template, newDS.Spec.Template)).To(BeTrue(), "DaemonSet template should match to adopt pods")
|
||||
|
||||
framework.Logf(fmt.Sprintf("Wait for pods and history to be adopted by DaemonSet %s", newAdoptDS.Name))
|
||||
waitDaemonSetAdoption(c, newAdoptDS, ds.Name, templateGeneration)
|
||||
|
||||
// 6. Orphan DaemonSet pods again
|
||||
framework.Logf("Deleting DaemonSet %s and orphaning its pods and history", newAdoptDSName)
|
||||
deleteDaemonSetAndOrphan(c, newAdoptDS)
|
||||
|
||||
// 7. Adopt DaemonSet pods (no restart) as long as templateGeneration matches, even when template doesn't match
|
||||
newAdoptDSName = "adopt-template-generation-matches"
|
||||
framework.Logf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newAdoptDSName)
|
||||
newAdoptDS = newDaemonSet(newAdoptDSName, image, label)
|
||||
newAdoptDS.Spec.Template.Spec.Containers[0].Name = "not-match"
|
||||
newAdoptDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
|
||||
newAdoptDS.Spec.TemplateGeneration = templateGeneration
|
||||
newAdoptDS, err = c.ExtensionsV1beta1().DaemonSets(ns).Create(newAdoptDS)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(newAdoptDS.Spec.TemplateGeneration).To(Equal(templateGeneration))
|
||||
Expect(apiequality.Semantic.DeepEqual(newAdoptDS.Spec.Template, newDS.Spec.Template)).NotTo(BeTrue(), "DaemonSet template should not match")
|
||||
|
||||
framework.Logf("Wait for pods and history to be adopted by DaemonSet %s", newAdoptDS.Name)
|
||||
waitDaemonSetAdoption(c, newAdoptDS, ds.Name, templateGeneration)
|
||||
})
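
	// Editorial note (hedged, not from the original source): steps 3, 5, and 7
	// above encode the v1beta1 adoption rule this test exercises: orphaned
	// daemon pods are adopted without a restart when either the pod template
	// or the templateGeneration of the new DaemonSet matches theirs.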

	It("Should rollback without unnecessary restarts", func() {
		// Skip clusters with only one node, where we cannot have a half-done DaemonSet rollout for this test
		framework.SkipUnlessNodeCountIsAtLeast(2)

		framework.Logf("Create a RollingUpdate DaemonSet")
		label := map[string]string{daemonsetNameLabel: dsName}
		ds := newDaemonSet(dsName, image, label)
		ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
		ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
		Expect(err).NotTo(HaveOccurred())

		framework.Logf("Check that daemon pods launch on every node of the cluster")
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")

		framework.Logf("Update the DaemonSet to trigger a rollout")
		// We use a nonexistent image here to make sure the rollout won't finish
		newImage := "foo:non-existent"
		newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *extensions.DaemonSet) {
			update.Spec.Template.Spec.Containers[0].Image = newImage
		})
		Expect(err).NotTo(HaveOccurred())

		// Make sure we're in the middle of a rollout
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkAtLeastOneNewPod(c, ns, label, newImage))
		Expect(err).NotTo(HaveOccurred())

		pods := listDaemonPods(c, ns, label)
		var existingPods, newPods []*v1.Pod
		for i := range pods.Items {
			pod := pods.Items[i]
			image := pod.Spec.Containers[0].Image
			switch image {
			case ds.Spec.Template.Spec.Containers[0].Image:
				existingPods = append(existingPods, &pod)
			case newDS.Spec.Template.Spec.Containers[0].Image:
				newPods = append(newPods, &pod)
			default:
				framework.Failf("unexpected pod found, image = %s", image)
			}
		}
		Expect(len(existingPods)).NotTo(Equal(0))
		Expect(len(newPods)).NotTo(Equal(0))

		framework.Logf("Roll back the DaemonSet before rollout is complete")
		rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *extensions.DaemonSet) {
			update.Spec.Template.Spec.Containers[0].Image = image
		})
		Expect(err).NotTo(HaveOccurred())

		framework.Logf("Make sure DaemonSet rollback is complete")
		err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1))
		Expect(err).NotTo(HaveOccurred())

		// After the rollback is done, compare the current pods with the old pods seen during rollout to make sure they weren't restarted
		pods = listDaemonPods(c, ns, label)
		rollbackPods := map[string]bool{}
		for _, pod := range pods.Items {
			rollbackPods[pod.Name] = true
		}
		for _, pod := range existingPods {
			Expect(rollbackPods[pod.Name]).To(BeTrue(), fmt.Sprintf("unexpected restart of pod %s", pod.Name))
		}
	})
})

// getDaemonSetImagePatch generates a strategic merge patch for updating a DaemonSet's container image
func getDaemonSetImagePatch(containerName, containerImage string) string {
	return fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":"%s","image":"%s"}]}}}}`, containerName, containerImage)
}
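
// exampleImagePatch is an editorial sketch, not used by the suite: it shows
// the patch getDaemonSetImagePatch produces for a container named "app" (the
// name and image here are illustrative assumptions). Strategic merge patches
// address containers by name, so only that container's image field is
// replaced when the patch is applied.
func exampleImagePatch() string {
	// Yields: {"spec":{"template":{"spec":{"containers":[{"name":"app","image":"redis"}]}}}}
	return getDaemonSetImagePatch("app", "redis")
}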

// deleteDaemonSetAndOrphan deletes the given DaemonSet and orphans all its dependents.
// It also checks that all dependents are orphaned and that the DaemonSet is deleted.
func deleteDaemonSetAndOrphan(c clientset.Interface, ds *extensions.DaemonSet) {
	trueVar := true
	deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
	deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(ds.UID))
	err := c.ExtensionsV1beta1().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions)
	Expect(err).NotTo(HaveOccurred())

	err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsOrphaned(c, ds.Namespace, ds.Spec.Template.Labels))
	Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be orphaned")
	err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetHistoryOrphaned(c, ds.Namespace, ds.Spec.Template.Labels))
	Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet history to be orphaned")
	err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetDeleted(c, ds.Namespace, ds.Name))
	Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet to be deleted")
}
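
// deleteDaemonSetAndOrphanAlt is an editorial sketch, not used by the suite:
// the same orphaning delete expressed through the newer PropagationPolicy
// field, which supersedes the deprecated OrphanDependents flag used above.
func deleteDaemonSetAndOrphanAlt(c clientset.Interface, ds *extensions.DaemonSet) error {
	orphan := metav1.DeletePropagationOrphan
	return c.ExtensionsV1beta1().DaemonSets(ds.Namespace).Delete(ds.Name, &metav1.DeleteOptions{PropagationPolicy: &orphan})
}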

func newDaemonSet(dsName, image string, label map[string]string) *extensions.DaemonSet {
	return &extensions.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{
			Name: dsName,
		},
		Spec: extensions.DaemonSetSpec{
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: label,
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  "app",
							Image: image,
							Ports: []v1.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	}
}

func listDaemonPods(c clientset.Interface, ns string, label map[string]string) *v1.PodList {
	selector := labels.Set(label).AsSelector()
	options := metav1.ListOptions{LabelSelector: selector.String()}
	podList, err := c.CoreV1().Pods(ns).List(options)
	Expect(err).NotTo(HaveOccurred())
	Expect(len(podList.Items)).To(BeNumerically(">", 0))
	return podList
}

func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, map[string]string) {
	daemonSetLabels := map[string]string{}
	otherLabels := map[string]string{}
	for k, v := range labels {
		if strings.HasPrefix(k, daemonsetLabelPrefix) {
			daemonSetLabels[k] = v
		} else {
			otherLabels[k] = v
		}
	}
	return daemonSetLabels, otherLabels
}
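
// exampleSeparateLabels is an editorial sketch, not used by the suite: labels
// carrying the test's daemonsetLabelPrefix go to the first map, everything
// else to the second (the key and node name below are illustrative).
func exampleSeparateLabels() {
	dsLabels, other := separateDaemonSetNodeLabels(map[string]string{
		daemonsetLabelPrefix + "color": "blue", // test-owned, ends up in dsLabels
		"kubernetes.io/hostname":       "n1",   // pre-existing, ends up in other
	})
	fmt.Println(len(dsLabels), len(other)) // prints: 1 1
}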

func clearDaemonSetNodeLabels(c clientset.Interface) error {
	nodeList := framework.GetReadySchedulableNodesOrDie(c)
	for _, node := range nodeList.Items {
		_, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
		if err != nil {
			return err
		}
	}
	return nil
}

func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) {
	nodeClient := c.CoreV1().Nodes()
	var newNode *v1.Node
	var newLabels map[string]string
	err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, func() (bool, error) {
		node, err := nodeClient.Get(nodeName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}

		// remove all labels this test is creating
		daemonSetLabels, otherLabels := separateDaemonSetNodeLabels(node.Labels)
		if reflect.DeepEqual(daemonSetLabels, labels) {
			newNode = node
			return true, nil
		}
		node.Labels = otherLabels
		for k, v := range labels {
			node.Labels[k] = v
		}
		newNode, err = nodeClient.Update(node)
		if err == nil {
			newLabels, _ = separateDaemonSetNodeLabels(newNode.Labels)
			return true, err
		}
		if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
			framework.Logf("failed to update node due to resource version conflict")
			return false, nil
		}
		return false, err
	})
	if err != nil {
		return nil, err
	} else if len(newLabels) != len(labels) {
		return nil, fmt.Errorf("could not set DaemonSet test labels as expected")
	}

	return newNode, nil
}
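
// Editorial note (hedged): the poll loop above is the standard optimistic
// concurrency pattern: on a 409 Conflict the node is re-read and the update
// retried. The same conflict check can be written with the helper from the
// apimachinery errors package:
//
//	if apierrs.IsConflict(err) {
//		return false, nil // retry with a fresh copy of the node
//	}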

func checkDaemonPodOnNodes(f *framework.Framework, ds *extensions.DaemonSet, nodeNames []string) func() (bool, error) {
	return func() (bool, error) {
		podList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
		if err != nil {
			framework.Logf("could not get the pod list: %v", err)
			return false, nil
		}
		pods := podList.Items

		nodesToPodCount := make(map[string]int)
		for _, pod := range pods {
			if !metav1.IsControlledBy(&pod, ds) {
				continue
			}
			if pod.DeletionTimestamp != nil {
				continue
			}
			if podutil.IsPodAvailable(&pod, ds.Spec.MinReadySeconds, metav1.Now()) {
				nodesToPodCount[pod.Spec.NodeName] += 1
			}
		}
		framework.Logf("Number of nodes with available pods: %d", len(nodesToPodCount))

		// Ensure that exactly 1 pod is running on each node in nodeNames.
		for _, nodeName := range nodeNames {
			if nodesToPodCount[nodeName] != 1 {
				framework.Logf("Node %s is running %d daemon pods, expected 1", nodeName, nodesToPodCount[nodeName])
				return false, nil
			}
		}

		framework.Logf("Number of running nodes: %d, number of available pods: %d", len(nodeNames), len(nodesToPodCount))
		// Ensure that the sizes of the lists are the same. We've verified that every element of nodeNames is in
		// nodesToPodCount, so verifying the lengths are equal ensures that there aren't pods running on any
		// other nodes.
		return len(nodesToPodCount) == len(nodeNames), nil
	}
}

func checkRunningOnAllNodes(f *framework.Framework, ds *extensions.DaemonSet) func() (bool, error) {
	return func() (bool, error) {
		nodeNames := schedulableNodes(f.ClientSet, ds)
		return checkDaemonPodOnNodes(f, ds, nodeNames)()
	}
}

func schedulableNodes(c clientset.Interface, ds *extensions.DaemonSet) []string {
	nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
	framework.ExpectNoError(err)
	nodeNames := make([]string, 0)
	for _, node := range nodeList.Items {
		if !canScheduleOnNode(node, ds) {
			framework.Logf("DaemonSet pods can't tolerate node %s with taints %+v, skip checking this node", node.Name, node.Spec.Taints)
			continue
		}
		nodeNames = append(nodeNames, node.Name)
	}
	return nodeNames
}

func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]string, newImage string) func() (bool, error) {
	return func() (bool, error) {
		pods := listDaemonPods(c, ns, label)
		for _, pod := range pods.Items {
			if pod.Spec.Containers[0].Image == newImage {
				return true, nil
			}
		}
		return false, nil
	}
}

// canScheduleOnNode checks if a given DaemonSet can schedule pods on the given node
func canScheduleOnNode(node v1.Node, ds *extensions.DaemonSet) bool {
	newPod := daemon.NewPod(ds, node.Name)
	nodeInfo := schedulercache.NewNodeInfo()
	nodeInfo.SetNode(&node)
	fit, _, err := daemon.Predicates(newPod, nodeInfo)
	if err != nil {
		framework.Failf("Can't test DaemonSet predicates for node %s: %v", node.Name, err)
		return false
	}
	return fit
}

func checkRunningOnNoNodes(f *framework.Framework, ds *extensions.DaemonSet) func() (bool, error) {
	return checkDaemonPodOnNodes(f, ds, make([]string, 0))
}

func checkDaemonStatus(f *framework.Framework, dsName string) error {
	ds, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).Get(dsName, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("could not get DaemonSet %q: %v", dsName, err)
	}
	desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
	if desired != scheduled && desired != ready {
		return fmt.Errorf("error in daemon status. DesiredScheduled: %d, CurrentScheduled: %d, Ready: %d", desired, scheduled, ready)
	}
	return nil
}

func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *extensions.DaemonSet, image string, maxUnavailable int) func() (bool, error) {
	return func() (bool, error) {
		podList, err := c.CoreV1().Pods(ds.Namespace).List(metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		pods := podList.Items

		unavailablePods := 0
		nodesToUpdatedPodCount := make(map[string]int)
		for _, pod := range pods {
			if !metav1.IsControlledBy(&pod, ds) {
				continue
			}
			podImage := pod.Spec.Containers[0].Image
			if podImage != image {
				framework.Logf("Wrong image for pod: %s. Expected: %s, got: %s.", pod.Name, image, podImage)
			} else {
				nodesToUpdatedPodCount[pod.Spec.NodeName] += 1
			}
			if !podutil.IsPodAvailable(&pod, ds.Spec.MinReadySeconds, metav1.Now()) {
				framework.Logf("Pod %s is not available", pod.Name)
				unavailablePods++
			}
		}
		if unavailablePods > maxUnavailable {
			return false, fmt.Errorf("number of unavailable pods: %d is greater than maxUnavailable: %d", unavailablePods, maxUnavailable)
		}
		// Make sure every daemon pod on the node has been updated
		nodeNames := schedulableNodes(c, ds)
		for _, node := range nodeNames {
			if nodesToUpdatedPodCount[node] == 0 {
				return false, nil
			}
		}
		return true, nil
	}
}
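
// Editorial note (hedged): maxUnavailable above is an absolute pod count, not
// a percentage. The checker tolerates at most that many unavailable daemon
// pods while still requiring every schedulable node to run at least one pod
// with the updated image before it reports success.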

func checkDaemonPodsTemplateGeneration(c clientset.Interface, ns string, label map[string]string, templateGeneration string) error {
	pods := listDaemonPods(c, ns, label)
	for _, pod := range pods.Items {
		// We don't care about inactive pods
		if !controller.IsPodActive(&pod) {
			continue
		}
		podTemplateGeneration := pod.Labels[extensions.DaemonSetTemplateGenerationKey]
		if podTemplateGeneration != templateGeneration {
			return fmt.Errorf("expected pod %s/%s template generation %s, but got %s", pod.Namespace, pod.Name, templateGeneration, podTemplateGeneration)
		}
	}
	return nil
}

func checkDaemonSetDeleted(c clientset.Interface, ns, name string) func() (bool, error) {
	return func() (bool, error) {
		_, err := c.ExtensionsV1beta1().DaemonSets(ns).Get(name, metav1.GetOptions{})
		if !apierrs.IsNotFound(err) {
			return false, err
		}
		return true, nil
	}
}

func checkDaemonSetPodsOrphaned(c clientset.Interface, ns string, label map[string]string) func() (bool, error) {
	return func() (bool, error) {
		pods := listDaemonPods(c, ns, label)
		for _, pod := range pods.Items {
			// This pod is orphaned only when its controller ref is cleared
			if controllerRef := metav1.GetControllerOf(&pod); controllerRef != nil {
				return false, nil
			}
		}
		return true, nil
	}
}

func checkDaemonSetHistoryOrphaned(c clientset.Interface, ns string, label map[string]string) func() (bool, error) {
	return func() (bool, error) {
		histories := listDaemonHistories(c, ns, label)
		for _, history := range histories.Items {
			// This history is orphaned only when its controller ref is cleared
			if controllerRef := metav1.GetControllerOf(&history); controllerRef != nil {
				return false, nil
			}
		}
		return true, nil
	}
}

func checkDaemonSetPodsAdopted(c clientset.Interface, ns string, dsUID types.UID, label map[string]string) func() (bool, error) {
	return func() (bool, error) {
		pods := listDaemonPods(c, ns, label)
		for _, pod := range pods.Items {
			// This pod is adopted only when its controller ref is updated
			if controllerRef := metav1.GetControllerOf(&pod); controllerRef == nil || controllerRef.UID != dsUID {
				return false, nil
			}
		}
		return true, nil
	}
}
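
// exampleIsAdoptedBy is an editorial sketch, not used by the suite: adoption
// at the API level means the object carries a controller owner reference
// whose UID points at the adopting DaemonSet.
func exampleIsAdoptedBy(pod *v1.Pod, dsUID types.UID) bool {
	ref := metav1.GetControllerOf(pod)
	return ref != nil && ref.UID == dsUID
}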

func checkDaemonSetHistoryAdopted(c clientset.Interface, ns string, dsUID types.UID, label map[string]string) func() (bool, error) {
	return func() (bool, error) {
		histories := listDaemonHistories(c, ns, label)
		for _, history := range histories.Items {
			// This history is adopted only when its controller ref is updated
			if controllerRef := metav1.GetControllerOf(&history); controllerRef == nil || controllerRef.UID != dsUID {
				return false, nil
			}
		}
		return true, nil
	}
}

func waitDaemonSetAdoption(c clientset.Interface, ds *extensions.DaemonSet, podPrefix string, podTemplateGeneration int64) {
	ns := ds.Namespace
	label := ds.Spec.Template.Labels

	err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsAdopted(c, ns, ds.UID, label))
	Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be adopted")
	err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetHistoryAdopted(c, ns, ds.UID, label))
	Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet history to be adopted")

	framework.Logf("Make sure no daemon pod changed its template generation; it should still be %d", podTemplateGeneration)
	err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(podTemplateGeneration))
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Make sure no pods are recreated by looking at their names")
	err = checkDaemonSetPodsName(c, ns, podPrefix, label)
	Expect(err).NotTo(HaveOccurred())
}

func checkDaemonSetPodsName(c clientset.Interface, ns, prefix string, label map[string]string) error {
	pods := listDaemonPods(c, ns, label)
	for _, pod := range pods.Items {
		if !strings.HasPrefix(pod.Name, prefix) {
			return fmt.Errorf("expected pod %s name to be prefixed %q", pod.Name, prefix)
		}
	}
	return nil
}

func checkDaemonSetPodsLabels(podList *v1.PodList, hash, templateGeneration string) {
	for _, pod := range podList.Items {
		podHash := pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
		podTemplate := pod.Labels[extensions.DaemonSetTemplateGenerationKey]
		Expect(len(podHash)).To(BeNumerically(">", 0))
		if len(hash) > 0 {
			Expect(podHash).To(Equal(hash))
		}
		Expect(len(podTemplate)).To(BeNumerically(">", 0))
		Expect(podTemplate).To(Equal(templateGeneration))
	}
}

func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]string, numHistory int) {
	listHistoryFn := func() (bool, error) {
		selector := labels.Set(label).AsSelector()
		options := metav1.ListOptions{LabelSelector: selector.String()}
		historyList, err := c.AppsV1beta1().ControllerRevisions(ns).List(options)
		if err != nil {
			return false, err
		}
		if len(historyList.Items) == numHistory {
			return true, nil
		}
		framework.Logf("%d/%d controllerrevisions created.", len(historyList.Items), numHistory)
		return false, nil
	}
	err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, listHistoryFn)
	Expect(err).NotTo(HaveOccurred(), "error waiting for controllerrevisions to be created")
}

func listDaemonHistories(c clientset.Interface, ns string, label map[string]string) *apps.ControllerRevisionList {
	selector := labels.Set(label).AsSelector()
	options := metav1.ListOptions{LabelSelector: selector.String()}
	historyList, err := c.AppsV1beta1().ControllerRevisions(ns).List(options)
	Expect(err).NotTo(HaveOccurred())
	Expect(len(historyList.Items)).To(BeNumerically(">", 0))
	return historyList
}

func curHistory(historyList *apps.ControllerRevisionList, ds *extensions.DaemonSet) *apps.ControllerRevision {
	var curHistory *apps.ControllerRevision
	foundCurHistories := 0
	for i := range historyList.Items {
		history := &historyList.Items[i]
		// Every history should have the hash label
		Expect(len(history.Labels[extensions.DefaultDaemonSetUniqueLabelKey])).To(BeNumerically(">", 0))
		match, err := daemon.Match(ds, history)
		Expect(err).NotTo(HaveOccurred())
		if match {
			curHistory = history
			foundCurHistories++
		}
	}
	Expect(foundCurHistories).To(Equal(1))
	Expect(curHistory).NotTo(BeNil())
	return curHistory
}
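
// Editorial note (hedged): each ControllerRevision snapshots a DaemonSet pod
// template, so daemon.Match compares the live template against that snapshot;
// exactly one stored revision is expected to match the current spec, which is
// what the assertions above encode.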
826
vendor/k8s.io/kubernetes/test/e2e/apps/deployment.go
generated
vendored
Normal file
@ -0,0 +1,826 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apps

import (
	"fmt"
	"math/rand"
	"time"

	"github.com/davecgh/go-spew/spew"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/watch"
	clientset "k8s.io/client-go/kubernetes"
	extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
	"k8s.io/kubernetes/pkg/kubectl"
	utilpointer "k8s.io/kubernetes/pkg/util/pointer"
	"k8s.io/kubernetes/test/e2e/framework"
	testutil "k8s.io/kubernetes/test/utils"
)

const (
	dRetryPeriod  = 2 * time.Second
	dRetryTimeout = 5 * time.Minute
)

var (
	nilRs *extensions.ReplicaSet
)

var _ = SIGDescribe("Deployment", func() {
	var ns string
	var c clientset.Interface

	AfterEach(func() {
		failureTrap(c, ns)
	})

	f := framework.NewDefaultFramework("deployment")

	BeforeEach(func() {
		c = f.ClientSet
		ns = f.Namespace.Name
	})

	It("deployment reaping should cascade to its replica sets and pods", func() {
		testDeleteDeployment(f)
	})
	It("RollingUpdateDeployment should delete old pods and create new ones", func() {
		testRollingUpdateDeployment(f)
	})
	It("RecreateDeployment should delete old pods and create new ones", func() {
		testRecreateDeployment(f)
	})
	It("deployment should delete old replica sets", func() {
		testDeploymentCleanUpPolicy(f)
	})
	It("deployment should support rollover", func() {
		testRolloverDeployment(f)
	})
	It("deployment should support rollback", func() {
		testRollbackDeployment(f)
	})
	It("iterative rollouts should eventually progress", func() {
		testIterativeDeployments(f)
	})
	It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func() {
		testDeploymentsControllerRef(f)
	})
	// TODO: add tests that cover deployment.Spec.MinReadySeconds once we solve clock-skew issues
	// See https://github.com/kubernetes/kubernetes/issues/29229
})

func failureTrap(c clientset.Interface, ns string) {
	deployments, err := c.ExtensionsV1beta1().Deployments(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
	if err != nil {
		framework.Logf("Could not list Deployments in namespace %q: %v", ns, err)
		return
	}
	for i := range deployments.Items {
		d := deployments.Items[i]

		framework.Logf(spew.Sprintf("Deployment %q:\n%+v\n", d.Name, d))
		_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(&d, c.ExtensionsV1beta1())
		if err != nil {
			framework.Logf("Could not list ReplicaSets for Deployment %q: %v", d.Name, err)
			return
		}
		testutil.LogReplicaSetsOfDeployment(&d, allOldRSs, newRS, framework.Logf)
		rsList := allOldRSs
		if newRS != nil {
			rsList = append(rsList, newRS)
		}
		testutil.LogPodsOfDeployment(c, &d, rsList, framework.Logf)
	}
	// We need to print all the ReplicaSets only when no Deployment objects were created
	if len(deployments.Items) != 0 {
		return
	}
	framework.Logf("Log out all the ReplicaSets if there is no deployment created")
	rss, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
	if err != nil {
		framework.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err)
		return
	}
	for _, rs := range rss.Items {
		framework.Logf(spew.Sprintf("ReplicaSet %q:\n%+v\n", rs.Name, rs))
		selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
		if err != nil {
			framework.Logf("failed to get selector of ReplicaSet %s: %v", rs.Name, err)
		}
		options := metav1.ListOptions{LabelSelector: selector.String()}
		podList, err := c.CoreV1().Pods(rs.Namespace).List(options)
		if err != nil {
			framework.Logf("Could not list Pods of ReplicaSet %q: %v", rs.Name, err)
			continue
		}
		for _, pod := range podList.Items {
			framework.Logf(spew.Sprintf("pod: %q:\n%+v\n", pod.Name, pod))
		}
	}
}

func intOrStrP(num int) *intstr.IntOrString {
	v := intstr.FromInt(num)
	return &v
}
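
// Usage sketch (editorial; the literal values are illustrative): rolling
// update parameters take *intstr.IntOrString, so plain ints need this adapter.
var exampleRollingUpdateParams = extensions.RollingUpdateDeployment{
	MaxUnavailable: intOrStrP(0), // never dip below the desired replica count
	MaxSurge:       intOrStrP(1), // allow one extra pod during the rollout
}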

func newDeploymentRollback(name string, annotations map[string]string, revision int64) *extensions.DeploymentRollback {
	return &extensions.DeploymentRollback{
		Name:               name,
		UpdatedAnnotations: annotations,
		RollbackTo:         extensions.RollbackConfig{Revision: revision},
	}
}
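
// exampleRollbackToPrevious is an editorial sketch, not used by the suite:
// revision 0 means "roll back to the previous revision", and the rollback is
// submitted through the Deployments Rollback subresource, exactly as the
// tests below do.
func exampleRollbackToPrevious(c clientset.Interface, ns, name string) error {
	rb := newDeploymentRollback(name, nil, 0)
	return c.ExtensionsV1beta1().Deployments(ns).Rollback(rb)
}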

func stopDeployment(c clientset.Interface, internalClient internalclientset.Interface, ns, deploymentName string) {
	deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Deleting deployment %s", deploymentName)
	reaper, err := kubectl.ReaperFor(extensionsinternal.Kind("Deployment"), internalClient)
	Expect(err).NotTo(HaveOccurred())
	timeout := 1 * time.Minute

	err = reaper.Stop(ns, deployment.Name, timeout, metav1.NewDeleteOptions(0))
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Ensuring deployment %s was deleted", deploymentName)
	_, err = c.ExtensionsV1beta1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
	Expect(err).To(HaveOccurred())
	Expect(errors.IsNotFound(err)).To(BeTrue())
	framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
	selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
	Expect(err).NotTo(HaveOccurred())
	options := metav1.ListOptions{LabelSelector: selector.String()}
	rss, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(options)
	Expect(err).NotTo(HaveOccurred())
	Expect(rss.Items).Should(HaveLen(0))
	framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
	var pods *v1.PodList
	if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
		pods, err = c.CoreV1().Pods(ns).List(options)
		if err != nil {
			return false, err
		}
		// Pods may be created by overlapping deployments right after this deployment is deleted, ignore them
		if len(pods.Items) == 0 {
			return true, nil
		}
		return false, nil
	}); err != nil {
		framework.Failf("Failed to remove deployment %s pods: %v\nRemaining pods: %+v", deploymentName, err, pods)
	}
}

func testDeleteDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	internalClient := f.InternalClientset

	deploymentName := "test-new-deployment"
	podLabels := map[string]string{"name": NginxImageName}
	replicas := int32(1)
	framework.Logf("Creating simple deployment %s", deploymentName)
	d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
	d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
	deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())

	// Wait for it to be updated to revision 1
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", NginxImage)
	Expect(err).NotTo(HaveOccurred())

	err = framework.WaitForDeploymentComplete(c, deploy)
	Expect(err).NotTo(HaveOccurred())

	deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
	Expect(err).NotTo(HaveOccurred())
	Expect(newRS).NotTo(Equal(nilRs))
	stopDeployment(c, internalClient, ns, deploymentName)
}

func testRollingUpdateDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	// Create nginx pods.
	deploymentPodLabels := map[string]string{"name": "sample-pod"}
	rsPodLabels := map[string]string{
		"name": "sample-pod",
		"pod":  NginxImageName,
	}

	rsName := "test-rolling-update-controller"
	replicas := int32(1)
	rsRevision := "3546343826724305832"
	annotations := make(map[string]string)
	annotations[deploymentutil.RevisionAnnotation] = rsRevision
	rs := newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage)
	rs.Annotations = annotations
	framework.Logf("Creating replica set %q (going to be adopted)", rs.Name)
	_, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(rs)
	Expect(err).NotTo(HaveOccurred())
	// Verify that the required pods have come up.
	err = framework.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
	Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)

	// Create a deployment to delete nginx pods and instead bring up redis pods.
	deploymentName := "test-rolling-update-deployment"
	framework.Logf("Creating deployment %q", deploymentName)
	d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType)
	deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())

	// Wait for it to be updated to revision 3546343826724305833.
	framework.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name)
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", RedisImage)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Ensuring status for deployment %q is as expected", deploy.Name)
	err = framework.WaitForDeploymentComplete(c, deploy)
	Expect(err).NotTo(HaveOccurred())

	// There should be 1 old RS (nginx-controller, which is adopted)
	framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
	deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	_, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.ExtensionsV1beta1())
	Expect(err).NotTo(HaveOccurred())
	Expect(len(allOldRSs)).Should(Equal(1))
	// The old RS should contain pod-template-hash in its selector, label, and template label
	Expect(len(allOldRSs[0].Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
	Expect(len(allOldRSs[0].Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
	Expect(len(allOldRSs[0].Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
}

func testRecreateDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet

	// Create a deployment that brings up redis pods.
	deploymentName := "test-recreate-deployment"
	framework.Logf("Creating deployment %q", deploymentName)
	d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, extensions.RecreateDeploymentStrategyType)
	deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())

	// Wait for it to be updated to revision 1
	framework.Logf("Waiting for deployment %q to be updated to revision 1", deploymentName)
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", RedisImage)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Waiting for deployment %q to complete", deploymentName)
	Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())

	// Update the deployment to delete redis pods and bring up nginx pods.
	framework.Logf("Triggering a new rollout for deployment %q", deploymentName)
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *extensions.Deployment) {
		update.Spec.Template.Spec.Containers[0].Name = NginxImageName
		update.Spec.Template.Spec.Containers[0].Image = NginxImage
	})
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Watching deployment %q to verify that new pods will not run alongside old pods", deploymentName)
	Expect(framework.WatchRecreateDeployment(c, deployment)).NotTo(HaveOccurred())
}

// testDeploymentCleanUpPolicy tests that deployment supports cleanup policy
func testDeploymentCleanUpPolicy(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	// Create nginx pods.
	deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
	rsPodLabels := map[string]string{
		"name": "cleanup-pod",
		"pod":  NginxImageName,
	}
	rsName := "test-cleanup-controller"
	replicas := int32(1)
	revisionHistoryLimit := utilpointer.Int32Ptr(0)
	_, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage))
	Expect(err).NotTo(HaveOccurred())

	// Verify that the required pods have come up.
	err = framework.VerifyPodsRunning(c, ns, "cleanup-pod", false, replicas)
	Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)

	// Create a deployment to delete nginx pods and instead bring up redis pods.
	deploymentName := "test-cleanup-deployment"
	framework.Logf("Creating deployment %s", deploymentName)

	pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
	Expect(err).NotTo(HaveOccurred(), "Failed to query for pods: %v", err)

	options := metav1.ListOptions{
		ResourceVersion: pods.ListMeta.ResourceVersion,
	}
	stopCh := make(chan struct{})
	defer close(stopCh)
	w, err := c.CoreV1().Pods(ns).Watch(options)
	Expect(err).NotTo(HaveOccurred())
	go func() {
		// There should be only one pod being created, which is the pod with the redis image.
		// The old RS shouldn't create a new pod once the deployment controller adds the pod-template-hash label to its selector.
		numPodCreation := 1
		for {
			select {
			case event := <-w.ResultChan():
				if event.Type != watch.Added {
					continue
				}
				numPodCreation--
				if numPodCreation < 0 {
					framework.Failf("Expect only one pod creation, the second creation event: %#v\n", event)
				}
				pod, ok := event.Object.(*v1.Pod)
				if !ok {
					framework.Failf("Expect event Object to be a pod")
				}
				if pod.Spec.Containers[0].Name != RedisImageName {
					framework.Failf("Expect the created pod to have container name %s, got pod %#v\n", RedisImageName, pod)
				}
			case <-stopCh:
				return
			}
		}
	}()
	d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType)
	d.Spec.RevisionHistoryLimit = revisionHistoryLimit
	_, err = c.ExtensionsV1beta1().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())

	By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName))
	err = framework.WaitForDeploymentOldRSsNum(c, ns, deploymentName, int(*revisionHistoryLimit))
	Expect(err).NotTo(HaveOccurred())
}

// testRolloverDeployment tests that deployment supports rollover,
// i.e. we can change desired state and kick off a rolling update, then change desired state again before it finishes.
func testRolloverDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	podName := "rollover-pod"
	deploymentPodLabels := map[string]string{"name": podName}
	rsPodLabels := map[string]string{
		"name": podName,
		"pod":  NginxImageName,
	}

	rsName := "test-rollover-controller"
	rsReplicas := int32(1)
	_, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage))
	Expect(err).NotTo(HaveOccurred())
	// Verify that the required pods have come up.
	err = framework.VerifyPodsRunning(c, ns, podName, false, rsReplicas)
	Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)

	// Wait for the replica set to become ready before adopting it.
	framework.Logf("Waiting for pods owned by replica set %q to become ready", rsName)
	Expect(framework.WaitForReadyReplicaSet(c, ns, rsName)).NotTo(HaveOccurred())

	// Create a deployment to delete nginx pods and instead bring up redis-slave pods.
	// We use a nonexistent image here so that the rollout won't finish.
	deploymentName, deploymentImageName := "test-rollover-deployment", "redis-slave"
	deploymentReplicas := int32(1)
	deploymentImage := "gcr.io/google_samples/gb-redisslave:nonexistent"
	deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
	framework.Logf("Creating deployment %q", deploymentName)
	newDeployment := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
	newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{
		MaxUnavailable: intOrStrP(0),
		MaxSurge:       intOrStrP(1),
	}
	newDeployment.Spec.MinReadySeconds = int32(10)
	_, err = c.ExtensionsV1beta1().Deployments(ns).Create(newDeployment)
	Expect(err).NotTo(HaveOccurred())

	// Verify that the pods were scaled up and down as expected.
	deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
	// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
	err = framework.WaitForDeploymentUpdatedReplicasLTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())
	// Check if it's updated to revision 1 correctly
	framework.Logf("Check revision of new replica set for deployment %q", deploymentName)
	err = framework.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Ensure that both replica sets have 1 created replica")
	oldRS, err := c.ExtensionsV1beta1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	ensureReplicas(oldRS, int32(1))
	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
	Expect(err).NotTo(HaveOccurred())
	ensureReplicas(newRS, int32(1))

	// The deployment is stuck; update it to roll over the above 2 ReplicaSets and bring up redis pods.
	framework.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName)
	updatedDeploymentImageName, updatedDeploymentImage := RedisImageName, RedisImage
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *extensions.Deployment) {
		update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
		update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
	})
	Expect(err).NotTo(HaveOccurred())

	// Use observedGeneration to determine if the controller noticed the pod template update.
	framework.Logf("Wait for deployment %q to be observed by the deployment controller", deploymentName)
	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())

	// Wait for it to be updated to revision 2
	framework.Logf("Wait for revision update of deployment %q to 2", deploymentName)
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Make sure deployment %q is complete", deploymentName)
	err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Ensure that both old replica sets have no replicas")
	oldRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	ensureReplicas(oldRS, int32(0))
	// Not really the new replica set anymore, but we GET by name so that's fine.
	newRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	ensureReplicas(newRS, int32(0))
}

func ensureReplicas(rs *extensions.ReplicaSet, replicas int32) {
	Expect(*rs.Spec.Replicas).Should(Equal(replicas))
	Expect(rs.Status.Replicas).Should(Equal(replicas))
}

// testRollbackDeployment tests that a deployment is created (revision 1) and updated (revision 2), and
// then rolled back to revision 1 (which updates the template back to revision 1's and records it as revision 3),
// and then rolled back to the last revision (revision 4, which comes from revision 2).
// Then a rollback to revision 10 (which doesn't exist in history) should fail,
// and finally a rollback of the current deployment (revision 4) to revision 4 should be a no-op.
func testRollbackDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	podName := "nginx"
	deploymentPodLabels := map[string]string{"name": podName}

	// 1. Create a deployment to create nginx pods.
	deploymentName, deploymentImageName := "test-rollback-deployment", NginxImageName
	deploymentReplicas := int32(1)
	deploymentImage := NginxImage
	deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
	framework.Logf("Creating deployment %s", deploymentName)
	d := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
	createAnnotation := map[string]string{"action": "create", "author": "node"}
	d.Annotations = createAnnotation
	deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())

	// Wait for it to be updated to revision 1
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
	Expect(err).NotTo(HaveOccurred())

	err = framework.WaitForDeploymentComplete(c, deploy)
	Expect(err).NotTo(HaveOccurred())

	// Current newRS annotation should be "create"
	err = framework.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation)
	Expect(err).NotTo(HaveOccurred())

	// 2. Update the deployment to create redis pods.
	updatedDeploymentImage := RedisImage
	updatedDeploymentImageName := RedisImageName
	updateAnnotation := map[string]string{"action": "update", "log": "I need to update it"}
	deployment, err := framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
		update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
		update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
		update.Annotations = updateAnnotation
	})
	Expect(err).NotTo(HaveOccurred())

	// Use observedGeneration to determine if the controller noticed the pod template update.
	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())

	// Wait for it to be updated to revision 2
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
	Expect(err).NotTo(HaveOccurred())

	err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
	Expect(err).NotTo(HaveOccurred())

	// Current newRS annotation should be "update"
	err = framework.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation)
	Expect(err).NotTo(HaveOccurred())

	// 3. Update the deploymentRollback to roll back to revision 1
	revision := int64(1)
	framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
	rollback := newDeploymentRollback(deploymentName, nil, revision)
	err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())

	// Wait for the deployment to start rolling back
	err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// TODO: report RollbackDone in deployment status and check it here

	// Wait for it to be updated to revision 3
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage)
	Expect(err).NotTo(HaveOccurred())

	err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
	Expect(err).NotTo(HaveOccurred())

	// Current newRS annotation should be "create", after the rollback
	err = framework.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation)
	Expect(err).NotTo(HaveOccurred())

	// 4. Update the deploymentRollback to roll back to the last revision
	revision = 0
	framework.Logf("rolling back deployment %s to last revision", deploymentName)
	rollback = newDeploymentRollback(deploymentName, nil, revision)
	err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())

	err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
	Expect(err).NotTo(HaveOccurred())

	// Wait for it to be updated to revision 4
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
	Expect(err).NotTo(HaveOccurred())

	err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
	Expect(err).NotTo(HaveOccurred())

	// Current newRS annotation should be "update", after the rollback
	err = framework.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation)
	Expect(err).NotTo(HaveOccurred())

	// 5. Update the deploymentRollback to roll back to revision 10
	// Since there's no revision 10 in history, it should stay at revision 4
	revision = 10
	framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
	rollback = newDeploymentRollback(deploymentName, nil, revision)
	err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())

	// Wait for the deployment to start rolling back
	err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// TODO: report RollbackRevisionNotFound in deployment status and check it here

	// The pod template shouldn't change since there's no revision 10
	// Check that it's still revision 4 and still has the old pod template
	err = framework.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
	Expect(err).NotTo(HaveOccurred())

	// 6. Update the deploymentRollback to roll back to revision 4
	// Since it's already at revision 4, it should be a no-op
	revision = 4
	framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
	rollback = newDeploymentRollback(deploymentName, nil, revision)
	err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())

	// Wait for the deployment to start rolling back
	err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// TODO: report RollbackTemplateUnchanged in deployment status and check it here

	// The pod template shouldn't change since it's already at revision 4
	// Check that it's still revision 4 and still has the old pod template
	err = framework.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
	Expect(err).NotTo(HaveOccurred())
}

func randomScale(d *extensions.Deployment, i int) {
	switch r := rand.Float32(); {
	case r < 0.3:
		framework.Logf("%02d: scaling up", i)
		*(d.Spec.Replicas)++
	case r < 0.6:
		if *(d.Spec.Replicas) > 1 {
			framework.Logf("%02d: scaling down", i)
			*(d.Spec.Replicas)--
		}
	}
}
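
// Editorial note (hedged): with r uniform in [0, 1), randomScale scales up by
// one replica on roughly 30% of calls, scales down on roughly 30% (but never
// below one replica), and leaves the count unchanged on the remaining ~40%.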
|
||||
|
||||
func testIterativeDeployments(f *framework.Framework) {
    ns := f.Namespace.Name
    c := f.ClientSet

    podLabels := map[string]string{"name": NginxImageName}
    replicas := int32(6)
    zero := int64(0)
    two := int32(2)

    // Create an nginx deployment.
    deploymentName := "nginx"
    thirty := int32(30)
    d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
    d.Spec.ProgressDeadlineSeconds = &thirty
    d.Spec.RevisionHistoryLimit = &two
    d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
    framework.Logf("Creating deployment %q", deploymentName)
    deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
    Expect(err).NotTo(HaveOccurred())

    iterations := 20
    for i := 0; i < iterations; i++ {
        if r := rand.Float32(); r < 0.6 {
            time.Sleep(time.Duration(float32(i) * r * float32(time.Second)))
        }

        switch n := rand.Float32(); {
        case n < 0.2:
            // trigger a new deployment
            framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
            deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
                newEnv := v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)}
                update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
                randomScale(update, i)
            })
            Expect(err).NotTo(HaveOccurred())

        case n < 0.4:
            // rollback to the previous version
            framework.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name)
            deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
                rollbackTo := &extensions.RollbackConfig{Revision: 0}
                update.Spec.RollbackTo = rollbackTo
            })
            Expect(err).NotTo(HaveOccurred())

        case n < 0.6:
            // just scaling
            framework.Logf("%02d: scaling deployment %q", i, deployment.Name)
            deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
                randomScale(update, i)
            })
            Expect(err).NotTo(HaveOccurred())

        case n < 0.8:
            // toggling the deployment: the original branches set Paused to the
            // value it already had, so the toggle never toggled; resume when
            // paused and pause when running.
            if deployment.Spec.Paused {
                framework.Logf("%02d: resuming deployment %q", i, deployment.Name)
                deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
                    update.Spec.Paused = false
                    randomScale(update, i)
                })
                Expect(err).NotTo(HaveOccurred())
            } else {
                framework.Logf("%02d: pausing deployment %q", i, deployment.Name)
                deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
                    update.Spec.Paused = true
                    randomScale(update, i)
                })
                Expect(err).NotTo(HaveOccurred())
            }

        default:
            // arbitrarily delete deployment pods
            framework.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name)
            selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
            Expect(err).NotTo(HaveOccurred())
            opts := metav1.ListOptions{LabelSelector: selector.String()}
            podList, err := c.CoreV1().Pods(ns).List(opts)
            Expect(err).NotTo(HaveOccurred())
            if len(podList.Items) == 0 {
                framework.Logf("%02d: no deployment pods to delete", i)
                continue
            }
            for p := range podList.Items {
                if rand.Float32() < 0.5 {
                    continue
                }
                name := podList.Items[p].Name
                framework.Logf("%02d: deleting deployment pod %q", i, name)
                err := c.CoreV1().Pods(ns).Delete(name, nil)
                if err != nil && !errors.IsNotFound(err) {
                    Expect(err).NotTo(HaveOccurred())
                }
            }
        }
    }

    // unpause the deployment if we end up pausing it
    deployment, err = c.ExtensionsV1beta1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
    Expect(err).NotTo(HaveOccurred())
    if deployment.Spec.Paused {
        deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
            update.Spec.Paused = false
        })
    }

    framework.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
    Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred())

    framework.Logf("Waiting for deployment %q status", deploymentName)
    Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())

    framework.Logf("Checking deployment %q for a complete condition", deploymentName)
    Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, extensions.DeploymentProgressing)).NotTo(HaveOccurred())
}

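// testDeploymentsControllerRef verifies that a deployment's ReplicaSet carries
// the expected controllerRef, that deleting the deployment with orphaning
// clears that controllerRef, and that a new deployment can re-adopt the
// orphaned ReplicaSet.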
func testDeploymentsControllerRef(f *framework.Framework) {
    ns := f.Namespace.Name
    c := f.ClientSet

    deploymentName := "test-orphan-deployment"
    framework.Logf("Creating Deployment %q", deploymentName)
    podLabels := map[string]string{"name": NginxImageName}
    replicas := int32(1)
    d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
    deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
    Expect(err).NotTo(HaveOccurred())
    err = framework.WaitForDeploymentComplete(c, deploy)
    Expect(err).NotTo(HaveOccurred())

    framework.Logf("Checking its ReplicaSet has the right controllerRef")
    err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
    Expect(err).NotTo(HaveOccurred())

    framework.Logf("Deleting Deployment %q and orphaning its ReplicaSets", deploymentName)
    err = orphanDeploymentReplicaSets(c, deploy)
    Expect(err).NotTo(HaveOccurred())

    By("Wait for the ReplicaSet to be orphaned")
    err = wait.Poll(dRetryPeriod, dRetryTimeout, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels))
    Expect(err).NotTo(HaveOccurred(), "error waiting for Deployment ReplicaSet to be orphaned")

    deploymentName = "test-adopt-deployment"
    framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
    d = framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
    deploy, err = c.ExtensionsV1beta1().Deployments(ns).Create(d)
    Expect(err).NotTo(HaveOccurred())
    err = framework.WaitForDeploymentComplete(c, deploy)
    Expect(err).NotTo(HaveOccurred())

    framework.Logf("Waiting for the ReplicaSet to have the right controllerRef")
    err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
    Expect(err).NotTo(HaveOccurred())
}

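// checkDeploymentReplicaSetsControllerRef returns an error if any ReplicaSet
// matching the label is not controlled by the deployment with the given UID.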
func checkDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) error {
    rsList := listDeploymentReplicaSets(c, ns, label)
    for _, rs := range rsList.Items {
        // This rs is adopted only when its controller ref is updated
        if controllerRef := metav1.GetControllerOf(&rs); controllerRef == nil || controllerRef.UID != uid {
            return fmt.Errorf("ReplicaSet %s has unexpected controllerRef %v", rs.Name, controllerRef)
        }
    }
    return nil
}

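// waitDeploymentReplicaSetsOrphaned returns a poll condition that reports true
// once every matching ReplicaSet has had its controllerRef cleared.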
func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label map[string]string) func() (bool, error) {
    return func() (bool, error) {
        rsList := listDeploymentReplicaSets(c, ns, label)
        for _, rs := range rsList.Items {
            // This rs is orphaned only when controller ref is cleared
            if controllerRef := metav1.GetControllerOf(&rs); controllerRef != nil {
                return false, nil
            }
        }
        return true, nil
    }
}

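// listDeploymentReplicaSets lists the ReplicaSets matching the given label and
// fails the test if the call errors or no ReplicaSets are found.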
func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *extensions.ReplicaSetList {
    selector := labels.Set(label).AsSelector()
    options := metav1.ListOptions{LabelSelector: selector.String()}
    rsList, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(options)
    Expect(err).NotTo(HaveOccurred())
    Expect(len(rsList.Items)).To(BeNumerically(">", 0))
    return rsList
}

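// orphanDeploymentReplicaSets deletes the deployment with OrphanDependents set,
// so its ReplicaSets survive the deletion and lose their controllerRef.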
func orphanDeploymentReplicaSets(c clientset.Interface, d *extensions.Deployment) error {
    trueVar := true
    deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
    deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(d.UID))
    return c.ExtensionsV1beta1().Deployments(d.Namespace).Delete(d.Name, deleteOptions)
}
335
vendor/k8s.io/kubernetes/test/e2e/apps/disruption.go
generated
vendored
Normal file
@ -0,0 +1,335 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apps

import (
    "fmt"
    "time"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
    "k8s.io/api/core/v1"
    extensions "k8s.io/api/extensions/v1beta1"
    policy "k8s.io/api/policy/v1beta1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/intstr"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/test/e2e/framework"
)

// schedulingTimeout is longer specifically because sometimes we need to wait
// awhile to guarantee that we've been patient waiting for something ordinary
// to happen: a pod to get scheduled and move into Ready
const (
    bigClusterSize    = 7
    schedulingTimeout = 10 * time.Minute
    timeout           = 60 * time.Second
)

var _ = SIGDescribe("DisruptionController", func() {
    f := framework.NewDefaultFramework("disruption")
    var ns string
    var cs kubernetes.Interface

    BeforeEach(func() {
        cs = f.ClientSet
        ns = f.Namespace.Name
    })

    It("should create a PodDisruptionBudget", func() {
        createPDBMinAvailableOrDie(cs, ns, intstr.FromString("1%"))
    })

    It("should update PodDisruptionBudget status", func() {
        createPDBMinAvailableOrDie(cs, ns, intstr.FromInt(2))

        createPodsOrDie(cs, ns, 3)
        waitForPodsOrDie(cs, ns, 3)

        // Since disruptionAllowed starts out 0, if we see it ever become positive,
        // that means the controller is working.
        err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
            pdb, err := cs.Policy().PodDisruptionBudgets(ns).Get("foo", metav1.GetOptions{})
            if err != nil {
                return false, err
            }
            return pdb.Status.PodDisruptionsAllowed > 0, nil
        })
        Expect(err).NotTo(HaveOccurred())
    })

    evictionCases := []struct {
        description        string
        minAvailable       intstr.IntOrString
        maxUnavailable     intstr.IntOrString
        podCount           int
        replicaSetSize     int32
        shouldDeny         bool
        exclusive          bool
        skipForBigClusters bool
    }{
        {
            description:    "no PDB",
            minAvailable:   intstr.FromString(""),
            maxUnavailable: intstr.FromString(""),
            podCount:       1,
            shouldDeny:     false,
        }, {
            description:    "too few pods, absolute",
            minAvailable:   intstr.FromInt(2),
            maxUnavailable: intstr.FromString(""),
            podCount:       2,
            shouldDeny:     true,
        }, {
            description:    "enough pods, absolute",
            minAvailable:   intstr.FromInt(2),
            maxUnavailable: intstr.FromString(""),
            podCount:       3,
            shouldDeny:     false,
        }, {
            description:    "enough pods, replicaSet, percentage",
            minAvailable:   intstr.FromString("90%"),
            maxUnavailable: intstr.FromString(""),
            replicaSetSize: 10,
            exclusive:      false,
            shouldDeny:     false,
        }, {
            description:    "too few pods, replicaSet, percentage",
            minAvailable:   intstr.FromString("90%"),
            maxUnavailable: intstr.FromString(""),
            replicaSetSize: 10,
            exclusive:      true,
            shouldDeny:     true,
            // This test assumes that there are fewer than replicaSetSize nodes in the cluster.
            skipForBigClusters: true,
        },
        {
            description:    "maxUnavailable allow single eviction, percentage",
            minAvailable:   intstr.FromString(""),
            maxUnavailable: intstr.FromString("10%"),
            replicaSetSize: 10,
            exclusive:      false,
            shouldDeny:     false,
        },
        {
            description:    "maxUnavailable deny evictions, integer",
            minAvailable:   intstr.FromString(""),
            maxUnavailable: intstr.FromInt(1),
            replicaSetSize: 10,
            exclusive:      true,
            shouldDeny:     true,
            // This test assumes that there are fewer than replicaSetSize nodes in the cluster.
            skipForBigClusters: true,
        },
    }
    for i := range evictionCases {
        c := evictionCases[i]
        expectation := "should allow an eviction"
        if c.shouldDeny {
            expectation = "should not allow an eviction"
        }
        It(fmt.Sprintf("evictions: %s => %s", c.description, expectation), func() {
            if c.skipForBigClusters {
                framework.SkipUnlessNodeCountIsAtMost(bigClusterSize - 1)
            }
            createPodsOrDie(cs, ns, c.podCount)
            if c.replicaSetSize > 0 {
                createReplicaSetOrDie(cs, ns, c.replicaSetSize, c.exclusive)
            }

            if c.minAvailable.String() != "" {
                createPDBMinAvailableOrDie(cs, ns, c.minAvailable)
            }

            if c.maxUnavailable.String() != "" {
                createPDBMaxUnavailableOrDie(cs, ns, c.maxUnavailable)
            }

            // Locate a running pod.
            var pod v1.Pod
            err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
                podList, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{})
                if err != nil {
                    return false, err
                }

                for i := range podList.Items {
                    if podList.Items[i].Status.Phase == v1.PodRunning {
                        pod = podList.Items[i]
                        return true, nil
                    }
                }

                return false, nil
            })
            Expect(err).NotTo(HaveOccurred())

            e := &policy.Eviction{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      pod.Name,
                    Namespace: ns,
                },
            }

            if c.shouldDeny {
                // Since disruptionAllowed starts out false, wait at least 60s hoping that
                // this gives the controller enough time to have truly set the status.
                time.Sleep(timeout)

                err = cs.CoreV1().Pods(ns).Evict(e)
                Expect(err).Should(MatchError("Cannot evict pod as it would violate the pod's disruption budget."))
            } else {
                // Only wait for running pods in the "allow" case
                // because one of the shouldDeny cases relies on the
                // replicaSet not fitting on the cluster.
                waitForPodsOrDie(cs, ns, c.podCount+int(c.replicaSetSize))

                // Since disruptionAllowed starts out false, if an eviction is ever allowed,
                // that means the controller is working.
                err = wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
                    err = cs.CoreV1().Pods(ns).Evict(e)
                    if err != nil {
                        return false, nil
                    }
                    return true, nil
                })
                Expect(err).NotTo(HaveOccurred())
            }
        })
    }
})

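// createPDBMinAvailableOrDie creates a PDB named "foo" selecting the foo=bar
// pods with the given minAvailable, failing the test on error.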
func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable intstr.IntOrString) {
    pdb := policy.PodDisruptionBudget{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "foo",
            Namespace: ns,
        },
        Spec: policy.PodDisruptionBudgetSpec{
            Selector:     &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
            MinAvailable: &minAvailable,
        },
    }
    _, err := cs.Policy().PodDisruptionBudgets(ns).Create(&pdb)
    Expect(err).NotTo(HaveOccurred())
}

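// createPDBMaxUnavailableOrDie is the maxUnavailable counterpart of
// createPDBMinAvailableOrDie.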
func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, maxUnavailable intstr.IntOrString) {
    pdb := policy.PodDisruptionBudget{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "foo",
            Namespace: ns,
        },
        Spec: policy.PodDisruptionBudgetSpec{
            Selector:       &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
            MaxUnavailable: &maxUnavailable,
        },
    }
    _, err := cs.Policy().PodDisruptionBudgets(ns).Create(&pdb)
    Expect(err).NotTo(HaveOccurred())
}

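// createPodsOrDie creates n pods labeled foo=bar running the echoserver image,
// failing the test on any create error.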
func createPodsOrDie(cs kubernetes.Interface, ns string, n int) {
    for i := 0; i < n; i++ {
        pod := &v1.Pod{
            ObjectMeta: metav1.ObjectMeta{
                Name:      fmt.Sprintf("pod-%d", i),
                Namespace: ns,
                Labels:    map[string]string{"foo": "bar"},
            },
            Spec: v1.PodSpec{
                Containers: []v1.Container{
                    {
                        Name:  "busybox",
                        Image: "gcr.io/google_containers/echoserver:1.6",
                    },
                },
                RestartPolicy: v1.RestartPolicyAlways,
            },
        }

        _, err := cs.CoreV1().Pods(ns).Create(pod)
        framework.ExpectNoError(err, "Creating pod %q in namespace %q", pod.Name, ns)
    }
}

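// waitForPodsOrDie polls until at least n foo=bar pods are running, failing
// the test if they do not all reach Running within schedulingTimeout.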
func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) {
    By("Waiting for all pods to be running")
    err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
        pods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: "foo=bar"})
        if err != nil {
            return false, err
        }
        if pods == nil {
            return false, fmt.Errorf("pods is nil")
        }
        if len(pods.Items) < n {
            framework.Logf("pods: %v < %v", len(pods.Items), n)
            return false, nil
        }
        ready := 0
        for i := 0; i < n; i++ {
            if pods.Items[i].Status.Phase == v1.PodRunning {
                ready++
            }
        }
        if ready < n {
            framework.Logf("running pods: %v < %v", ready, n)
            return false, nil
        }
        return true, nil
    })
    framework.ExpectNoError(err, "Waiting for pods in namespace %q to be ready", ns)
}

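// createReplicaSetOrDie creates a ReplicaSet of echoserver pods; with exclusive
// set, each pod requests host port 5555 so at most one replica fits per node.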
func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclusive bool) {
    container := v1.Container{
        Name:  "busybox",
        Image: "gcr.io/google_containers/echoserver:1.6",
    }
    if exclusive {
        container.Ports = []v1.ContainerPort{
            {HostPort: 5555, ContainerPort: 5555},
        }
    }

    rs := &extensions.ReplicaSet{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "rs",
            Namespace: ns,
        },
        Spec: extensions.ReplicaSetSpec{
            Replicas: &size,
            Selector: &metav1.LabelSelector{
                MatchLabels: map[string]string{"foo": "bar"},
            },
            Template: v1.PodTemplateSpec{
                ObjectMeta: metav1.ObjectMeta{
                    Labels: map[string]string{"foo": "bar"},
                },
                Spec: v1.PodSpec{
                    Containers: []v1.Container{container},
                },
            },
        },
    }

    _, err := cs.Extensions().ReplicaSets(ns).Create(rs)
    framework.ExpectNoError(err, "Creating replica set %q in namespace %q", rs.Name, ns)
}
23
vendor/k8s.io/kubernetes/test/e2e/apps/framework.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apps

import "github.com/onsi/ginkgo"

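// SIGDescribe wraps ginkgo.Describe, prefixing the spec text with the
// [sig-apps] label so these specs can be selected by SIG.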
func SIGDescribe(text string, body func()) bool {
    return ginkgo.Describe("[sig-apps] "+text, body)
}
194
vendor/k8s.io/kubernetes/test/e2e/apps/job.go
generated
vendored
Normal file
@ -0,0 +1,194 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apps

import (
    "fmt"
    "time"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    batchinternal "k8s.io/kubernetes/pkg/apis/batch"
    "k8s.io/kubernetes/pkg/kubectl"
    "k8s.io/kubernetes/test/e2e/framework"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
)

var _ = SIGDescribe("Job", func() {
    f := framework.NewDefaultFramework("job")
    parallelism := int32(2)
    completions := int32(4)
    backoffLimit := int32(6) // default value

    // Simplest case: all pods succeed promptly
    It("should run a job to completion when tasks succeed", func() {
        By("Creating a job")
        job := framework.NewTestJob("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
        job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
        Expect(err).NotTo(HaveOccurred())

        By("Ensuring job reaches completions")
        err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
        Expect(err).NotTo(HaveOccurred())
    })

    // Pods sometimes fail, but eventually succeed.
    It("should run a job to completion when tasks sometimes fail and are locally restarted", func() {
        By("Creating a job")
        // One failure, then a success, local restarts.
        // We can't use the random failure approach used by the
        // non-local test below, because kubelet will throttle
        // frequently failing containers in a given pod, ramping
        // up to 5 minutes between restarts, making test timeouts
        // due to successive failures too likely with a reasonable
        // test timeout.
        job := framework.NewTestJob("failOnce", "fail-once-local", v1.RestartPolicyOnFailure, parallelism, completions, nil, backoffLimit)
        job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
        Expect(err).NotTo(HaveOccurred())

        By("Ensuring job reaches completions")
        err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
        Expect(err).NotTo(HaveOccurred())
    })

    // Pods sometimes fail, but eventually succeed, after pod restarts
    It("should run a job to completion when tasks sometimes fail and are not locally restarted", func() {
        By("Creating a job")
        // 50% chance of container success, no local restarts.
        // Can't use the failOnce approach because that relies
        // on an emptyDir, which is not preserved across new pods.
        // Worst case analysis: 15 failures, each taking 1 minute to
        // run due to some slowness, 1 in 2^15 chance of happening,
        // causing test flake. Should be very rare.
        job := framework.NewTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, completions, nil, 999)
        job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
        Expect(err).NotTo(HaveOccurred())

        By("Ensuring job reaches completions")
        err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
        Expect(err).NotTo(HaveOccurred())
    })

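    // The job controller should mark the job Failed with reason
    // DeadlineExceeded once activeDeadlineSeconds has elapsed.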
It("should exceed active deadline", func() {
|
||||
By("Creating a job")
|
||||
var activeDeadlineSeconds int64 = 1
|
||||
job := framework.NewTestJob("notTerminate", "exceed-active-deadline", v1.RestartPolicyNever, parallelism, completions, &activeDeadlineSeconds, backoffLimit)
|
||||
job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
By("Ensuring job past active deadline")
|
||||
err = framework.WaitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, time.Duration(activeDeadlineSeconds+10)*time.Second, "DeadlineExceeded")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
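    // Stopping a job through the kubectl reaper should remove it; the test
    // verifies the Job object is gone afterwards.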
It("should delete a job", func() {
|
||||
By("Creating a job")
|
||||
job := framework.NewTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
|
||||
job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring active pods == parallelism")
|
||||
err = framework.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("delete a job")
|
||||
reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
timeout := 1 * time.Minute
|
||||
err = reaper.Stop(f.Namespace.Name, job.Name, timeout, metav1.NewDeleteOptions(0))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring job was deleted")
|
||||
_, err = framework.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(errors.IsNotFound(err)).To(BeTrue())
|
||||
})
|
||||
|
||||
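    // The job controller is expected to re-adopt a pod whose ownerReferences
    // were cleared, and to release a pod whose labels no longer match.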
It("should adopt matching orphans and release non-matching pods", func() {
|
||||
By("Creating a job")
|
||||
job := framework.NewTestJob("notTerminate", "adopt-release", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
|
||||
// Replace job with the one returned from Create() so it has the UID.
|
||||
// Save Kind since it won't be populated in the returned job.
|
||||
kind := job.Kind
|
||||
job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
job.Kind = kind
|
||||
|
||||
By("Ensuring active pods == parallelism")
|
||||
err = framework.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Orphaning one of the Job's Pods")
|
||||
pods, err := framework.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(pods.Items).To(HaveLen(int(parallelism)))
|
||||
pod := pods.Items[0]
|
||||
f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
|
||||
pod.OwnerReferences = nil
|
||||
})
|
||||
|
||||
By("Checking that the Job readopts the Pod")
|
||||
Expect(framework.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "adopted", framework.JobTimeout,
|
||||
func(pod *v1.Pod) (bool, error) {
|
||||
controllerRef := metav1.GetControllerOf(pod)
|
||||
if controllerRef == nil {
|
||||
return false, nil
|
||||
}
|
||||
if controllerRef.Kind != job.Kind || controllerRef.Name != job.Name || controllerRef.UID != job.UID {
|
||||
return false, fmt.Errorf("pod has wrong controllerRef: got %v, want %v", controllerRef, job)
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
)).To(Succeed(), "wait for pod %q to be readopted", pod.Name)
|
||||
|
||||
By("Removing the labels from the Job's Pod")
|
||||
f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
|
||||
pod.Labels = nil
|
||||
})
|
||||
|
||||
By("Checking that the Job releases the Pod")
|
||||
Expect(framework.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "released", framework.JobTimeout,
|
||||
func(pod *v1.Pod) (bool, error) {
|
||||
controllerRef := metav1.GetControllerOf(pod)
|
||||
if controllerRef != nil {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
)).To(Succeed(), "wait for pod %q to be released", pod.Name)
|
||||
})
|
||||
|
||||
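    // With backoffLimit set to 0, the first pod failure should fail the whole
    // job with reason BackoffLimitExceeded and no further pods created.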
It("should exceed backoffLimit", func() {
|
||||
By("Creating a job")
|
||||
job := framework.NewTestJob("fail", "backofflimit", v1.RestartPolicyNever, 1, 1, nil, 0)
|
||||
job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
By("Ensuring job exceed backofflimit")
|
||||
|
||||
err = framework.WaitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, framework.JobTimeout, "BackoffLimitExceeded")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Checking that only one pod created and status is failed")
|
||||
pods, err := framework.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(pods.Items).To(HaveLen(1))
|
||||
pod := pods.Items[0]
|
||||
Expect(pod.Status.Phase).To(Equal(v1.PodFailed))
|
||||
})
|
||||
})
|
639
vendor/k8s.io/kubernetes/test/e2e/apps/network_partition.go
generated
vendored
Normal file
@ -0,0 +1,639 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apps

import (
    "fmt"
    "strings"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/apimachinery/pkg/watch"

    "k8s.io/client-go/tools/cache"

    "k8s.io/api/core/v1"
    clientset "k8s.io/client-go/kubernetes"
    api "k8s.io/kubernetes/pkg/apis/core"
    nodepkg "k8s.io/kubernetes/pkg/controller/node"
    "k8s.io/kubernetes/test/e2e/common"
    "k8s.io/kubernetes/test/e2e/framework"
    testutils "k8s.io/kubernetes/test/utils"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
)

const (
    podReadyTimeout        = 2 * time.Minute
    podNotReadyTimeout     = 1 * time.Minute
    nodeReadinessTimeout   = 3 * time.Minute
    resizeNodeReadyTimeout = 2 * time.Minute
)

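// expectNodeReadiness consumes node updates from newNode and fails the test if
// the node's Ready condition does not reach the expected state within
// nodeReadinessTimeout.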
func expectNodeReadiness(isReady bool, newNode chan *v1.Node) {
    timeout := false
    expected := false
    timer := time.After(nodeReadinessTimeout)
    for !expected && !timeout {
        select {
        case n := <-newNode:
            if framework.IsNodeConditionSetAsExpected(n, v1.NodeReady, isReady) {
                expected = true
            } else {
                framework.Logf("Observed node ready status is NOT %v as expected", isReady)
            }
        case <-timer:
            timeout = true
        }
    }
    if !expected {
        framework.Failf("Failed to observe node ready status change to %v", isReady)
    }
}

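// podOnNode returns a pod spec pinned to the given node via spec.nodeName.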
func podOnNode(podName, nodeName string, image string) *v1.Pod {
    return &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name: podName,
            Labels: map[string]string{
                "name": podName,
            },
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name:  podName,
                    Image: image,
                    Ports: []v1.ContainerPort{{ContainerPort: 9376}},
                },
            },
            NodeName:      nodeName,
            RestartPolicy: v1.RestartPolicyNever,
        },
    }
}

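// newPodOnNode creates a serve-hostname pod on the named node and logs the
// outcome.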
func newPodOnNode(c clientset.Interface, namespace, podName, nodeName string) error {
    pod, err := c.CoreV1().Pods(namespace).Create(podOnNode(podName, nodeName, framework.ServeHostnameImage))
    if err == nil {
        framework.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName)
    } else {
        framework.Logf("Failed to create pod %s on node %s: %v", podName, nodeName, err)
    }
    return err
}

var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
||||
f := framework.NewDefaultFramework("network-partition")
|
||||
var systemPodsNo int32
|
||||
var c clientset.Interface
|
||||
var ns string
|
||||
ignoreLabels := framework.ImagePullerLabels
|
||||
var group string
|
||||
|
||||
BeforeEach(func() {
|
||||
c = f.ClientSet
|
||||
ns = f.Namespace.Name
|
||||
systemPods, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
systemPodsNo = int32(len(systemPods))
|
||||
|
||||
// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
|
||||
framework.SkipUnlessProviderIs("gke", "aws")
|
||||
if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
|
||||
framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
|
||||
} else {
|
||||
group = framework.TestContext.CloudConfig.NodeInstanceGroup
|
||||
}
|
||||
})
|
||||
|
||||
framework.KubeDescribe("Pods", func() {
|
||||
Context("should return to running and ready state after network partition is healed", func() {
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessNodeCountIsAtLeast(2)
|
||||
})
|
||||
|
||||
// What happens in this test:
|
||||
// Network traffic from a node to master is cut off to simulate network partition
|
||||
// Expect to observe:
|
||||
// 1. Node is marked NotReady after timeout by nodecontroller (40seconds)
|
||||
// 2. All pods on node are marked NotReady shortly after #1
|
||||
// 3. Node and pods return to Ready after connectivivty recovers
|
||||
It("All pods on the unreachable node should be marked as NotReady upon the node turn NotReady "+
|
||||
"AND all pods should be mark back to Ready when the node get back to Ready before pod eviction timeout", func() {
|
||||
By("choose a node - we will block all network traffic on this node")
|
||||
var podOpts metav1.ListOptions
|
||||
nodeOpts := metav1.ListOptions{}
|
||||
nodes, err := c.CoreV1().Nodes().List(nodeOpts)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.FilterNodes(nodes, func(node v1.Node) bool {
|
||||
if !framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
|
||||
return false
|
||||
}
|
||||
podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
|
||||
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
|
||||
if err != nil || len(pods.Items) <= 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
if len(nodes.Items) <= 0 {
|
||||
framework.Failf("No eligible node were found: %d", len(nodes.Items))
|
||||
}
|
||||
node := nodes.Items[0]
|
||||
podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
|
||||
if err = framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil {
|
||||
framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
|
||||
}
|
||||
|
||||
By("Set up watch on node status")
|
||||
nodeSelector := fields.OneTermEqualSelector("metadata.name", node.Name)
|
||||
stopCh := make(chan struct{})
|
||||
newNode := make(chan *v1.Node)
|
||||
var controller cache.Controller
|
||||
_, controller = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
options.FieldSelector = nodeSelector.String()
|
||||
obj, err := f.ClientSet.CoreV1().Nodes().List(options)
|
||||
return runtime.Object(obj), err
|
||||
},
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
options.FieldSelector = nodeSelector.String()
|
||||
return f.ClientSet.CoreV1().Nodes().Watch(options)
|
||||
},
|
||||
},
|
||||
&v1.Node{},
|
||||
0,
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
UpdateFunc: func(oldObj, newObj interface{}) {
|
||||
n, ok := newObj.(*v1.Node)
|
||||
Expect(ok).To(Equal(true))
|
||||
newNode <- n
|
||||
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
defer func() {
|
||||
// Will not explicitly close newNode channel here due to
|
||||
// race condition where stopCh and newNode are closed but informer onUpdate still executes.
|
||||
close(stopCh)
|
||||
}()
|
||||
go controller.Run(stopCh)
|
||||
|
||||
By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
|
||||
host := framework.GetNodeExternalIP(&node)
|
||||
master := framework.GetMasterAddress(c)
|
||||
defer func() {
|
||||
By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
|
||||
framework.UnblockNetwork(host, master)
|
||||
|
||||
if CurrentGinkgoTestDescription().Failed {
|
||||
return
|
||||
}
|
||||
|
||||
By("Expect to observe node and pod status change from NotReady to Ready after network connectivity recovers")
|
||||
expectNodeReadiness(true, newNode)
|
||||
if err = framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil {
|
||||
framework.Failf("Pods on node %s did not become ready and running within %v: %v", node.Name, podReadyTimeout, err)
|
||||
}
|
||||
}()
|
||||
|
||||
framework.BlockNetwork(host, master)
|
||||
|
||||
By("Expect to observe node and pod status change from Ready to NotReady after network partition")
|
||||
expectNodeReadiness(false, newNode)
|
||||
if err = framework.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil {
|
||||
framework.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err)
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
framework.KubeDescribe("[ReplicationController]", func() {
|
||||
It("should recreate pods scheduled on the unreachable node "+
|
||||
"AND allow scheduling of pods on a node after it rejoins the cluster", func() {
|
||||
|
||||
// Create a replication controller for a service that serves its hostname.
|
||||
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
|
||||
name := "my-hostname-net"
|
||||
common.NewSVCByName(c, ns, name)
|
||||
replicas := int32(framework.TestContext.CloudConfig.NumNodes)
|
||||
common.NewRCByName(c, ns, name, replicas, nil)
|
||||
err := framework.VerifyPods(c, ns, name, true, replicas)
|
||||
Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")
|
||||
|
||||
By("choose a node with at least one pod - we will block some network traffic on this node")
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
nodeName := pods.Items[0].Spec.NodeName
|
||||
|
||||
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// This creates a temporary network partition, verifies that 'podNameToDisappear',
|
||||
// that belongs to replication controller 'rcName', really disappeared (because its
|
||||
// grace period is set to 0).
|
||||
// Finally, it checks that the replication controller recreates the
|
||||
// pods on another node and that now the number of replicas is equal 'replicas'.
|
||||
By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
|
||||
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
|
||||
framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
|
||||
err := framework.WaitForRCPodToDisappear(c, ns, name, pods.Items[0].Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("verifying whether the pod from the unreachable node is recreated")
|
||||
err = framework.VerifyPods(c, ns, name, true, replicas)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
|
||||
if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
|
||||
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
|
||||
}
|
||||
|
||||
// sleep a bit, to allow Watch in NodeController to catch up.
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
By("verify whether new pods can be created on the re-attached node")
|
||||
// increasing the RC size is not a valid way to test this
|
||||
// since we have no guarantees the pod will be scheduled on our node.
|
||||
additionalPod := "additionalpod"
|
||||
err = newPodOnNode(c, ns, additionalPod, node.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = framework.VerifyPods(c, ns, additionalPod, true, 1)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// verify that it is really on the requested node
|
||||
{
|
||||
pod, err := c.CoreV1().Pods(ns).Get(additionalPod, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
if pod.Spec.NodeName != node.Name {
|
||||
framework.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
It("should eagerly create replacement pod during network partition when termination grace is non-zero", func() {
|
||||
// Create a replication controller for a service that serves its hostname.
|
||||
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
|
||||
name := "my-hostname-net"
|
||||
gracePeriod := int64(30)
|
||||
|
||||
common.NewSVCByName(c, ns, name)
|
||||
replicas := int32(framework.TestContext.CloudConfig.NumNodes)
|
||||
common.NewRCByName(c, ns, name, replicas, &gracePeriod)
|
||||
err := framework.VerifyPods(c, ns, name, true, replicas)
|
||||
Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")
|
||||
|
||||
By("choose a node with at least one pod - we will block some network traffic on this node")
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
nodeName := pods.Items[0].Spec.NodeName
|
||||
|
||||
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// This creates a temporary network partition, verifies that 'podNameToDisappear',
|
||||
// that belongs to replication controller 'rcName', did not disappear (because its
|
||||
// grace period is set to 30).
|
||||
// Finally, it checks that the replication controller recreates the
|
||||
// pods on another node and that now the number of replicas is equal 'replicas + 1'.
|
||||
By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
|
||||
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
|
||||
framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
|
||||
err := framework.WaitForRCPodToDisappear(c, ns, name, pods.Items[0].Name)
|
||||
Expect(err).To(Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")
|
||||
|
||||
By(fmt.Sprintf("verifying that there are %v running pods during partition", replicas))
|
||||
_, err = framework.PodsCreated(c, ns, name, replicas)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
|
||||
if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
|
||||
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
framework.KubeDescribe("[StatefulSet]", func() {
|
||||
psName := "ss"
|
||||
labels := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
headlessSvcName := "test"
|
||||
|
||||
BeforeEach(func() {
|
||||
// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
|
||||
framework.SkipUnlessProviderIs("gke")
|
||||
By("creating service " + headlessSvcName + " in namespace " + f.Namespace.Name)
|
||||
headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels)
|
||||
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
|
||||
framework.ExpectNoError(err)
|
||||
c = f.ClientSet
|
||||
ns = f.Namespace.Name
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
if CurrentGinkgoTestDescription().Failed {
|
||||
framework.DumpDebugInfo(c, ns)
|
||||
}
|
||||
framework.Logf("Deleting all stateful set in ns %v", ns)
|
||||
framework.DeleteAllStatefulSets(c, ns)
|
||||
})
|
||||
|
||||
It("should come back up if node goes down [Slow] [Disruptive]", func() {
|
||||
petMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
|
||||
podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
|
||||
ps := framework.NewStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
|
||||
_, err := c.AppsV1beta1().StatefulSets(ns).Create(ps)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
pst := framework.NewStatefulSetTester(c)
|
||||
|
||||
nn := framework.TestContext.CloudConfig.NumNodes
|
||||
nodeNames, err := framework.CheckNodesReady(f.ClientSet, framework.NodeReadyInitialTimeout, nn)
|
||||
framework.ExpectNoError(err)
|
||||
common.RestartNodes(f.ClientSet, nodeNames)
|
||||
|
||||
By("waiting for pods to be running again")
|
||||
pst.WaitForRunningAndReady(*ps.Spec.Replicas, ps)
|
||||
})
|
||||
|
||||
It("should not reschedule stateful pods if there is a network partition [Slow] [Disruptive]", func() {
|
||||
ps := framework.NewStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels)
|
||||
_, err := c.AppsV1beta1().StatefulSets(ns).Create(ps)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
pst := framework.NewStatefulSetTester(c)
|
||||
pst.WaitForRunningAndReady(*ps.Spec.Replicas, ps)
|
||||
|
||||
pod := pst.GetPodList(ps).Items[0]
|
||||
node, err := c.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Blocks outgoing network traffic on 'node'. Then verifies that 'podNameToDisappear',
|
||||
// that belongs to StatefulSet 'statefulSetName', **does not** disappear due to forced deletion from the apiserver.
|
||||
// The grace period on the stateful pods is set to a value > 0.
|
||||
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
|
||||
framework.Logf("Checking that the NodeController does not force delete stateful pods %v", pod.Name)
|
||||
err := framework.WaitTimeoutForPodNoLongerRunningInNamespace(c, pod.Name, ns, 10*time.Minute)
|
||||
Expect(err).To(Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")
|
||||
})
|
||||
|
||||
framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
|
||||
if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
|
||||
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
|
||||
}
|
||||
|
||||
By("waiting for pods to be running again")
|
||||
pst.WaitForRunningAndReady(*ps.Spec.Replicas, ps)
|
||||
})
|
||||
})
|
||||
|
||||
framework.KubeDescribe("[Job]", func() {
|
||||
It("should create new pods when node is partitioned", func() {
|
||||
parallelism := int32(2)
|
||||
completions := int32(4)
|
||||
backoffLimit := int32(6) // default value
|
||||
|
||||
job := framework.NewTestJob("notTerminate", "network-partition", v1.RestartPolicyNever,
|
||||
parallelism, completions, nil, backoffLimit)
|
||||
job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{framework.JobSelectorKey: job.Name}))
|
||||
|
||||
By(fmt.Sprintf("verifying that there are now %v running pods", parallelism))
|
||||
_, err = framework.PodsCreatedByLabel(c, ns, job.Name, parallelism, label)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("choose a node with at least one pod - we will block some network traffic on this node")
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
nodeName := pods.Items[0].Spec.NodeName
|
||||
|
||||
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// This creates a temporary network partition, verifies that the job has 'parallelism' number of
|
||||
// running pods after the node-controller detects node unreachable.
|
||||
By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
|
||||
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
|
||||
framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
|
||||
err := framework.WaitForPodToDisappear(c, ns, pods.Items[0].Name, label, 20*time.Second, 10*time.Minute)
|
||||
Expect(err).To(Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")
|
||||
|
||||
By(fmt.Sprintf("verifying that there are now %v running pods", parallelism))
|
||||
_, err = framework.PodsCreatedByLabel(c, ns, job.Name, parallelism, label)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
|
||||
if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
|
||||
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
framework.KubeDescribe("Pods", func() {
|
||||
Context("should be evicted from unready Node", func() {
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessNodeCountIsAtLeast(2)
|
||||
})
|
||||
|
||||
// What happens in this test:
|
||||
// Network traffic from a node to master is cut off to simulate network partition
|
||||
// Expect to observe:
|
||||
// 1. Node is marked NotReady after timeout by nodecontroller (40seconds)
|
||||
// 2. All pods on node are marked NotReady shortly after #1
|
||||
// 3. After enough time passess all Pods are evicted from the given Node
|
||||
It("[Feature:TaintEviction] All pods on the unreachable node should be marked as NotReady upon the node turn NotReady "+
|
||||
"AND all pods should be evicted after eviction timeout passes", func() {
|
||||
By("choose a node - we will block all network traffic on this node")
|
||||
var podOpts metav1.ListOptions
|
||||
nodes := framework.GetReadySchedulableNodesOrDie(c)
|
||||
framework.FilterNodes(nodes, func(node v1.Node) bool {
|
||||
if !framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
|
||||
return false
|
||||
}
|
||||
podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
|
||||
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
|
||||
if err != nil || len(pods.Items) <= 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
if len(nodes.Items) <= 0 {
|
||||
framework.Failf("No eligible node were found: %d", len(nodes.Items))
|
||||
}
|
||||
node := nodes.Items[0]
|
||||
podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
|
||||
if err := framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReadyOrSucceeded); err != nil {
|
||||
framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
|
||||
}
|
||||
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
|
||||
framework.ExpectNoError(err)
|
||||
podTolerationTimes := map[string]time.Duration{}
|
||||
// This test doesn't add tolerations by itself, but because they may be present in the cluster
|
||||
// it needs to account for that.
|
||||
for _, pod := range pods.Items {
|
||||
namespacedName := fmt.Sprintf("%v/%v", pod.Namespace, pod.Name)
|
||||
tolerations := pod.Spec.Tolerations
|
||||
framework.ExpectNoError(err)
|
||||
for _, toleration := range tolerations {
|
||||
if toleration.ToleratesTaint(nodepkg.UnreachableTaintTemplate) {
|
||||
if toleration.TolerationSeconds != nil {
|
||||
podTolerationTimes[namespacedName] = time.Duration(*toleration.TolerationSeconds) * time.Second
|
||||
break
|
||||
} else {
|
||||
podTolerationTimes[namespacedName] = -1
|
||||
}
|
||||
}
|
||||
}
|
||||
if _, ok := podTolerationTimes[namespacedName]; !ok {
|
||||
podTolerationTimes[namespacedName] = 0
|
||||
}
|
||||
}
|
||||
neverEvictedPods := []string{}
|
||||
maxTolerationTime := time.Duration(0)
|
||||
for podName, tolerationTime := range podTolerationTimes {
|
||||
if tolerationTime < 0 {
|
||||
neverEvictedPods = append(neverEvictedPods, podName)
|
||||
} else {
|
||||
if tolerationTime > maxTolerationTime {
|
||||
maxTolerationTime = tolerationTime
|
||||
}
|
||||
}
|
||||
}
|
||||
framework.Logf(
|
||||
"Only %v should be running after partition. Maximum TolerationSeconds among other Pods is %v",
|
||||
neverEvictedPods,
|
||||
maxTolerationTime,
|
||||
)
|
||||
|
||||
By("Set up watch on node status")
|
||||
nodeSelector := fields.OneTermEqualSelector("metadata.name", node.Name)
|
||||
stopCh := make(chan struct{})
|
||||
newNode := make(chan *v1.Node)
|
||||
var controller cache.Controller
|
||||
_, controller = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
options.FieldSelector = nodeSelector.String()
|
||||
obj, err := f.ClientSet.CoreV1().Nodes().List(options)
|
||||
return runtime.Object(obj), err
|
||||
},
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
options.FieldSelector = nodeSelector.String()
|
||||
return f.ClientSet.CoreV1().Nodes().Watch(options)
|
||||
},
|
||||
},
|
||||
&v1.Node{},
|
||||
0,
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
UpdateFunc: func(oldObj, newObj interface{}) {
|
||||
n, ok := newObj.(*v1.Node)
|
||||
Expect(ok).To(Equal(true))
|
||||
newNode <- n
|
||||
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
defer func() {
|
||||
// Will not explicitly close newNode channel here due to
|
||||
// race condition where stopCh and newNode are closed but informer onUpdate still executes.
|
||||
close(stopCh)
|
||||
}()
				go controller.Run(stopCh)

				By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
				host := framework.GetNodeExternalIP(&node)
				master := framework.GetMasterAddress(c)
				defer func() {
					By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
					framework.UnblockNetwork(host, master)

					if CurrentGinkgoTestDescription().Failed {
						return
					}

					By("Expect to observe node status change from NotReady to Ready after network connectivity recovers")
					expectNodeReadiness(true, newNode)
				}()

				framework.BlockNetwork(host, master)

				By("Expect to observe node and pod status change from Ready to NotReady after network partition")
				expectNodeReadiness(false, newNode)
				framework.ExpectNoError(wait.Poll(1*time.Second, timeout, func() (bool, error) {
					return framework.NodeHasTaint(c, node.Name, nodepkg.UnreachableTaintTemplate)
				}))
				if err = framework.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil {
					framework.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err)
				}
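				// maxTolerationTime is the longest finite TolerationSeconds seen above; the
				// extra 20 seconds below is a grace buffer for the node controller to actually
				// delete the pods once their tolerations expire.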
				sleepTime := maxTolerationTime + 20*time.Second
				By(fmt.Sprintf("Sleeping for %v and checking if all Pods were evicted", sleepTime))
				time.Sleep(sleepTime)
				pods, err = c.CoreV1().Pods(v1.NamespaceAll).List(podOpts)
				framework.ExpectNoError(err)
				seenRunning := []string{}
				for _, pod := range pods.Items {
					namespacedName := fmt.Sprintf("%v/%v", pod.Namespace, pod.Name)
					shouldBeTerminating := true
					for _, neverEvictedPod := range neverEvictedPods {
						if neverEvictedPod == namespacedName {
							shouldBeTerminating = false
						}
					}
					if pod.DeletionTimestamp == nil {
						seenRunning = append(seenRunning, namespacedName)
						if shouldBeTerminating {
							framework.Failf("Pod %v should have been deleted but was seen running", namespacedName)
						}
					}
				}

				for _, neverEvictedPod := range neverEvictedPods {
					running := false
					for _, runningPod := range seenRunning {
						if runningPod == neverEvictedPod {
							running = true
							break
						}
					}
					if !running {
						framework.Failf("Pod %v was evicted even though it shouldn't have been", neverEvictedPod)
					}
				}
			})
		})
	})
})
328
vendor/k8s.io/kubernetes/test/e2e/apps/rc.go
generated
vendored
Normal file
@ -0,0 +1,328 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apps

import (
	"fmt"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/pkg/controller/replication"
	"k8s.io/kubernetes/test/e2e/framework"
	imageutils "k8s.io/kubernetes/test/utils/image"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)
var _ = SIGDescribe("ReplicationController", func() {
	f := framework.NewDefaultFramework("replication-controller")

	framework.ConformanceIt("should serve a basic image on each replica with a public image ", func() {
		TestReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage)
	})

	It("should serve a basic image on each replica with a private image", func() {
		// requires private images
		framework.SkipUnlessProviderIs("gce", "gke")
		privateimage := imageutils.ServeHostname
		privateimage.SetRegistry(imageutils.PrivateRegistry)
		TestReplicationControllerServeImageOrFail(f, "private", imageutils.GetE2EImage(privateimage))
	})

	It("should surface a failure condition on a common issue like exceeded quota", func() {
		testReplicationControllerConditionCheck(f)
	})

	It("should adopt matching pods on creation", func() {
		testRCAdoptMatchingOrphans(f)
	})

	It("should release no longer matching pods", func() {
		testRCReleaseControlledNotMatching(f)
	})
})
func newRC(rsName string, replicas int32, rcPodLabels map[string]string, imageName string, image string) *v1.ReplicationController {
	zero := int64(0)
	return &v1.ReplicationController{
		ObjectMeta: metav1.ObjectMeta{
			Name: rsName,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: func(i int32) *int32 { return &i }(replicas),
			Template: &v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: rcPodLabels,
				},
				Spec: v1.PodSpec{
					TerminationGracePeriodSeconds: &zero,
					Containers: []v1.Container{
						{
							Name:  imageName,
							Image: image,
						},
					},
				},
			},
		},
	}
}
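// Typical use of newRC in this file (illustrative sketch): build the skeleton,
// adjust the pod template as needed, then create it through the client:
//
//	rc := newRC(name, 1, map[string]string{"name": name}, name, image)
//	rc.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
//	_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rc)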
// A basic test to check the deployment of an image using
// a replication controller. The image serves its hostname
// which is checked for each replica.
func TestReplicationControllerServeImageOrFail(f *framework.Framework, test string, image string) {
	name := "my-hostname-" + test + "-" + string(uuid.NewUUID())
	replicas := int32(1)

	// Create a replication controller for a service
	// that serves its hostname.
	// The source for the Docker container kubernetes/serve_hostname is
	// in contrib/for-demos/serve_hostname
	By(fmt.Sprintf("Creating replication controller %s", name))
	newRC := newRC(name, replicas, map[string]string{"name": name}, name, image)
	newRC.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
	_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(newRC)
	Expect(err).NotTo(HaveOccurred())
	// Check that pods for the new RC were created.
	// TODO: Maybe switch PodsCreated to just check owner references.
	pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
	Expect(err).NotTo(HaveOccurred())

	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	framework.Logf("Ensuring all pods for ReplicationController %q are running", name)
	running := int32(0)
	for _, pod := range pods.Items {
		if pod.DeletionTimestamp != nil {
			continue
		}
		err = f.WaitForPodRunning(pod.Name)
		if err != nil {
			updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
			if getErr == nil {
				err = fmt.Errorf("Pod %q never ran (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
			} else {
				err = fmt.Errorf("Pod %q never ran: %v", pod.Name, err)
			}
		}
		Expect(err).NotTo(HaveOccurred())
		framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
		running++
	}
	// Sanity check
	if running != replicas {
		Expect(fmt.Errorf("unexpected number of running pods: %+v", pods.Items)).NotTo(HaveOccurred())
	}

	// Verify that something is listening.
	framework.Logf("Trying to dial the pod")
	retryTimeout := 2 * time.Minute
	retryInterval := 5 * time.Second
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
	if err != nil {
		framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
	}
}
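// PodProxyResponseChecker (used above) dials each replica through the apiserver
// proxy; with its respondName argument set to true it expects each response body
// to be the pod's own name, which is how the serve_hostname image is verified
// per replica.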
// 1. Create a quota restricting pods in the current namespace to 2.
// 2. Create a replication controller that wants to run 3 pods.
// 3. Check replication controller conditions for a ReplicaFailure condition.
// 4. Scale down the controller to 2 replicas and observe the condition is gone.
func testReplicationControllerConditionCheck(f *framework.Framework) {
	c := f.ClientSet
	namespace := f.Namespace.Name
	name := "condition-test"

	framework.Logf("Creating quota %q that allows only two pods to run in the current namespace", name)
	quota := newPodQuota(name, "2")
	_, err := c.CoreV1().ResourceQuotas(namespace).Create(quota)
	Expect(err).NotTo(HaveOccurred())

	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
		quota, err = c.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		podQuota := quota.Status.Hard[v1.ResourcePods]
		quantity := resource.MustParse("2")
		return (&podQuota).Cmp(quantity) == 0, nil
	})
	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("resource quota %q never synced", name)
	}
	Expect(err).NotTo(HaveOccurred())
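	// The poll above waits until the quota controller has published "pods: 2" in
	// status.hard; only once the quota is synced can the oversized RC created next
	// be relied on to trip a ReplicaFailure condition rather than racing the sync.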
	By(fmt.Sprintf("Creating rc %q that asks for more than the allowed pod quota", name))
	rc := newRC(name, 3, map[string]string{"name": name}, NginxImageName, NginxImage)
	rc, err = c.CoreV1().ReplicationControllers(namespace).Create(rc)
	Expect(err).NotTo(HaveOccurred())

	By(fmt.Sprintf("Checking rc %q has the desired failure condition set", name))
	generation := rc.Generation
	conditions := rc.Status.Conditions
	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
		rc, err = c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}

		if generation > rc.Status.ObservedGeneration {
			return false, nil
		}
		conditions = rc.Status.Conditions

		cond := replication.GetCondition(rc.Status, v1.ReplicationControllerReplicaFailure)
		return cond != nil, nil
	})
	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("rc manager never added the failure condition for rc %q: %#v", name, conditions)
	}
	Expect(err).NotTo(HaveOccurred())
	By(fmt.Sprintf("Scaling down rc %q to satisfy pod quota", name))
	rc, err = framework.UpdateReplicationControllerWithRetries(c, namespace, name, func(update *v1.ReplicationController) {
		x := int32(2)
		update.Spec.Replicas = &x
	})
	Expect(err).NotTo(HaveOccurred())

	By(fmt.Sprintf("Checking rc %q has no failure condition set", name))
	generation = rc.Generation
	conditions = rc.Status.Conditions
	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
		rc, err = c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}

		if generation > rc.Status.ObservedGeneration {
			return false, nil
		}
		conditions = rc.Status.Conditions

		cond := replication.GetCondition(rc.Status, v1.ReplicationControllerReplicaFailure)
		return cond == nil, nil
	})
	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("rc manager never removed the failure condition for rc %q: %#v", name, conditions)
	}
	Expect(err).NotTo(HaveOccurred())
}
func testRCAdoptMatchingOrphans(f *framework.Framework) {
	name := "pod-adoption"
	By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name))
	p := f.PodClient().CreateSync(&v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Labels: map[string]string{
				"name": name,
			},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  name,
					Image: NginxImageName,
				},
			},
		},
	})

	By("When a replication controller with a matching selector is created")
	replicas := int32(1)
	rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImageName)
	rcSt.Spec.Selector = map[string]string{"name": name}
	rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt)
	Expect(err).NotTo(HaveOccurred())

	By("Then the orphan pod is adopted")
	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
		p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
		// The Pod p should either be adopted or deleted by the RC
		if errors.IsNotFound(err) {
			return true, nil
		}
		Expect(err).NotTo(HaveOccurred())
		for _, owner := range p2.OwnerReferences {
			if *owner.Controller && owner.UID == rc.UID {
				// pod adopted
				return true, nil
			}
		}
		// pod still not adopted
		return false, nil
	})
	Expect(err).NotTo(HaveOccurred())
}
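// Adoption is signalled by an OwnerReference with Controller=true pointing at the
// RC's UID, which is exactly what the poll above inspects; a released pod (see the
// next test) loses that reference instead.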
func testRCReleaseControlledNotMatching(f *framework.Framework) {
	name := "pod-release"
	By("Given a ReplicationController is created")
	replicas := int32(1)
	rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImageName)
	rcSt.Spec.Selector = map[string]string{"name": name}
	rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt)
	Expect(err).NotTo(HaveOccurred())

	By("When the matched label of one of its pods changes")
	pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rc.Name, replicas)
	Expect(err).NotTo(HaveOccurred())

	p := pods.Items[0]
	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
		Expect(err).NotTo(HaveOccurred())

		pod.Labels = map[string]string{"name": "not-matching-name"}
		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod)
		if err != nil && errors.IsConflict(err) {
			return false, nil
		}
		if err != nil {
			return false, err
		}
		return true, nil
	})
	Expect(err).NotTo(HaveOccurred())

	By("Then the pod is released")
	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
		p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
		Expect(err).NotTo(HaveOccurred())
		for _, owner := range p2.OwnerReferences {
			if *owner.Controller && owner.UID == rc.UID {
				// pod still belongs to the replication controller
				return false, nil
			}
		}
		// pod has been released
		return true, nil
	})
	Expect(err).NotTo(HaveOccurred())
}
327
vendor/k8s.io/kubernetes/test/e2e/apps/replica_set.go
generated
vendored
Normal file
@ -0,0 +1,327 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apps

import (
	"fmt"
	"time"

	"k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/pkg/controller/replicaset"
	"k8s.io/kubernetes/test/e2e/framework"
	imageutils "k8s.io/kubernetes/test/utils/image"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)
func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageName string, image string) *extensions.ReplicaSet {
	zero := int64(0)
	return &extensions.ReplicaSet{
		ObjectMeta: metav1.ObjectMeta{
			Name: rsName,
		},
		Spec: extensions.ReplicaSetSpec{
			Replicas: &replicas,
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: rsPodLabels,
				},
				Spec: v1.PodSpec{
					TerminationGracePeriodSeconds: &zero,
					Containers: []v1.Container{
						{
							Name:  imageName,
							Image: image,
						},
					},
				},
			},
		},
	}
}
func newPodQuota(name, number string) *v1.ResourceQuota {
	return &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: v1.ResourceQuotaSpec{
			Hard: v1.ResourceList{
				v1.ResourcePods: resource.MustParse(number),
			},
		},
	}
}
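// For reference, newPodQuota(name, "2") corresponds to this manifest
// (illustrative equivalent, not used by the tests directly):
//
//	apiVersion: v1
//	kind: ResourceQuota
//	metadata:
//	  name: <name>
//	spec:
//	  hard:
//	    pods: "2"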
var _ = SIGDescribe("ReplicaSet", func() {
	f := framework.NewDefaultFramework("replicaset")

	framework.ConformanceIt("should serve a basic image on each replica with a public image ", func() {
		testReplicaSetServeImageOrFail(f, "basic", framework.ServeHostnameImage)
	})

	It("should serve a basic image on each replica with a private image", func() {
		// requires private images
		framework.SkipUnlessProviderIs("gce", "gke")
		privateimage := imageutils.ServeHostname
		privateimage.SetRegistry(imageutils.PrivateRegistry)
		testReplicaSetServeImageOrFail(f, "private", imageutils.GetE2EImage(privateimage))
	})

	It("should surface a failure condition on a common issue like exceeded quota", func() {
		testReplicaSetConditionCheck(f)
	})

	It("should adopt matching pods on creation and release no longer matching pods", func() {
		testRSAdoptMatchingAndReleaseNotMatching(f)
	})
})
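// The ReplicaSet specs above mirror the ReplicationController suite in rc.go;
// the substantive differences are the extensions/v1beta1 ReplicaSets client and
// the explicit metav1.LabelSelector used for pod matching.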
// A basic test to check the deployment of an image using a ReplicaSet. The
// image serves its hostname which is checked for each replica.
func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image string) {
	name := "my-hostname-" + test + "-" + string(uuid.NewUUID())
	replicas := int32(1)

	// Create a ReplicaSet for a service that serves its hostname.
	// The source for the Docker container kubernetes/serve_hostname is
	// in contrib/for-demos/serve_hostname
	framework.Logf("Creating ReplicaSet %s", name)
	newRS := newRS(name, replicas, map[string]string{"name": name}, name, image)
	newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
	_, err := f.ClientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).Create(newRS)
	Expect(err).NotTo(HaveOccurred())
	// Check that pods for the new RS were created.
	// TODO: Maybe switch PodsCreated to just check owner references.
	pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
	Expect(err).NotTo(HaveOccurred())

	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	framework.Logf("Ensuring a pod for ReplicaSet %q is running", name)
	running := int32(0)
	for _, pod := range pods.Items {
		if pod.DeletionTimestamp != nil {
			continue
		}
		err = f.WaitForPodRunning(pod.Name)
		if err != nil {
			updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
			if getErr == nil {
				err = fmt.Errorf("Pod %q never ran (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
			} else {
				err = fmt.Errorf("Pod %q never ran: %v", pod.Name, err)
			}
		}
		Expect(err).NotTo(HaveOccurred())
		framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
		running++
	}
	// Sanity check
	if running != replicas {
		Expect(fmt.Errorf("unexpected number of running pods: %+v", pods.Items)).NotTo(HaveOccurred())
	}

	// Verify that something is listening.
	framework.Logf("Trying to dial the pod")
	retryTimeout := 2 * time.Minute
	retryInterval := 5 * time.Second
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
	if err != nil {
		framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
	}
}
// 1. Create a quota restricting pods in the current namespace to 2.
// 2. Create a replica set that wants to run 3 pods.
// 3. Check replica set conditions for a ReplicaFailure condition.
// 4. Scale down the replica set and observe the condition is gone.
func testReplicaSetConditionCheck(f *framework.Framework) {
	c := f.ClientSet
	namespace := f.Namespace.Name
	name := "condition-test"

	By(fmt.Sprintf("Creating quota %q that allows only two pods to run in the current namespace", name))
	quota := newPodQuota(name, "2")
	_, err := c.CoreV1().ResourceQuotas(namespace).Create(quota)
	Expect(err).NotTo(HaveOccurred())

	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
		quota, err = c.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		quantity := resource.MustParse("2")
		podQuota := quota.Status.Hard[v1.ResourcePods]
		return (&podQuota).Cmp(quantity) == 0, nil
	})
	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("resource quota %q never synced", name)
	}
	Expect(err).NotTo(HaveOccurred())
	By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", name))
	rs := newRS(name, 3, map[string]string{"name": name}, NginxImageName, NginxImage)
	rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Create(rs)
	Expect(err).NotTo(HaveOccurred())

	By(fmt.Sprintf("Checking replica set %q has the desired failure condition set", name))
	generation := rs.Generation
	conditions := rs.Status.Conditions
	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
		rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}

		if generation > rs.Status.ObservedGeneration {
			return false, nil
		}
		conditions = rs.Status.Conditions

		cond := replicaset.GetCondition(rs.Status, extensions.ReplicaSetReplicaFailure)
		return cond != nil, nil
	})
	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("rs controller never added the failure condition for replica set %q: %#v", name, conditions)
	}
	Expect(err).NotTo(HaveOccurred())
	By(fmt.Sprintf("Scaling down replica set %q to satisfy pod quota", name))
	rs, err = framework.UpdateReplicaSetWithRetries(c, namespace, name, func(update *extensions.ReplicaSet) {
		x := int32(2)
		update.Spec.Replicas = &x
	})
	Expect(err).NotTo(HaveOccurred())

	By(fmt.Sprintf("Checking replica set %q has no failure condition set", name))
	generation = rs.Generation
	conditions = rs.Status.Conditions
	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
		rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}

		if generation > rs.Status.ObservedGeneration {
			return false, nil
		}
		conditions = rs.Status.Conditions

		cond := replicaset.GetCondition(rs.Status, extensions.ReplicaSetReplicaFailure)
		return cond == nil, nil
	})
	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("rs controller never removed the failure condition for rs %q: %#v", name, conditions)
	}
	Expect(err).NotTo(HaveOccurred())
}
func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
	name := "pod-adoption-release"
	By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name))
	p := f.PodClient().CreateSync(&v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Labels: map[string]string{
				"name": name,
			},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  name,
					Image: NginxImageName,
				},
			},
		},
	})

	By("When a replicaset with a matching selector is created")
	replicas := int32(1)
	rsSt := newRS(name, replicas, map[string]string{"name": name}, name, NginxImageName)
	rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}}
	rs, err := f.ClientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).Create(rsSt)
	Expect(err).NotTo(HaveOccurred())

	By("Then the orphan pod is adopted")
	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
		p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
		// The Pod p should either be adopted or deleted by the ReplicaSet
		if errors.IsNotFound(err) {
			return true, nil
		}
		Expect(err).NotTo(HaveOccurred())
		for _, owner := range p2.OwnerReferences {
			if *owner.Controller && owner.UID == rs.UID {
				// pod adopted
				return true, nil
			}
		}
		// pod still not adopted
		return false, nil
	})
	Expect(err).NotTo(HaveOccurred())
By("When the matched label of one of its pods change")
|
||||
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rs.Name, replicas)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
p = &pods.Items[0]
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
pod.Labels = map[string]string{"name": "not-matching-name"}
|
||||
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod)
|
||||
if err != nil && errors.IsConflict(err) {
|
||||
return false, nil
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Then the pod is released")
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, owner := range p2.OwnerReferences {
|
||||
if *owner.Controller && owner.UID == rs.UID {
|
||||
// pod still belonging to the replicaset
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
// pod already released
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
1136
vendor/k8s.io/kubernetes/test/e2e/apps/statefulset.go
generated
vendored
Normal file
File diff suppressed because it is too large
37
vendor/k8s.io/kubernetes/test/e2e/apps/types.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apps

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

const (
	NginxImageName = "nginx"
	RedisImageName = "redis"
)
var (
	CronJobGroupVersionResourceAlpha = schema.GroupVersionResource{Group: "batch", Version: "v2alpha1", Resource: "cronjobs"}
	CronJobGroupVersionResourceBeta  = schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"}
	NautilusImage                    = imageutils.GetE2EImage(imageutils.Nautilus)
	KittenImage                      = imageutils.GetE2EImage(imageutils.Kitten)
	NginxImage                       = imageutils.GetE2EImage(imageutils.NginxSlim)
	NewNginxImage                    = imageutils.GetE2EImage(imageutils.NginxSlimNew)
	RedisImage                       = imageutils.GetE2EImage(imageutils.Redis)
)