mirror of https://github.com/ceph/ceph-csi.git
vendor update for CSI 0.3.0
vendor/k8s.io/kubernetes/test/e2e/apps/job.go (generated, vendored)
@@ -24,7 +24,6 @@ import (
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	batchinternal "k8s.io/kubernetes/pkg/apis/batch"
-	"k8s.io/kubernetes/pkg/kubectl"
 	"k8s.io/kubernetes/test/e2e/framework"
 
 	. "github.com/onsi/ginkgo"
@@ -111,11 +110,7 @@ var _ = SIGDescribe("Job", func() {
 		Expect(err).NotTo(HaveOccurred())
 
 		By("delete a job")
-		reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
-		Expect(err).NotTo(HaveOccurred())
-		timeout := 1 * time.Minute
-		err = reaper.Stop(f.Namespace.Name, job.Name, timeout, metav1.NewDeleteOptions(0))
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name))
 
 		By("Ensuring job was deleted")
 		_, err = framework.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
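Note on the hunk above: it swaps the kubectl reaper for the framework's DeleteResourceAndWaitForGC helper, which deletes the Job and then waits for the garbage collector to remove its Pods. For readers outside the e2e framework, the following is a minimal sketch of the same cascading delete with plain client-go; the package, function name, and clientset wiring are illustrative assumptions, and the signature shown is the current client-go one, not the 2018-era API in this vendor drop.

	package e2esketch

	import (
		"context"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
	)

	// deleteJobCascading issues a foreground cascading delete: the API server
	// keeps the Job (with a deletion timestamp) until the garbage collector
	// has removed its dependent Pods, then deletes the Job itself.
	// Sketch only; not code from the vendored file.
	func deleteJobCascading(cs kubernetes.Interface, namespace, name string) error {
		policy := metav1.DeletePropagationForeground
		return cs.BatchV1().Jobs(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{
			PropagationPolicy: &policy,
		})
	}

Unlike the framework helper, this request returns as soon as the API server accepts it; the Job object only disappears once its dependents are gone, so a test would still poll for removal, as the "Ensuring job was deleted" step does.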
@@ -179,7 +174,8 @@ var _ = SIGDescribe("Job", func() {
 
 	It("should exceed backoffLimit", func() {
 		By("Creating a job")
-		job := framework.NewTestJob("fail", "backofflimit", v1.RestartPolicyNever, 1, 1, nil, 0)
+		backoff := 1
+		job := framework.NewTestJob("fail", "backofflimit", v1.RestartPolicyNever, 1, 1, nil, int32(backoff))
 		job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
 		Expect(err).NotTo(HaveOccurred())
 		By("Ensuring job exceed backofflimit")
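The new backoff variable threads through NewTestJob's final argument to spec.backoffLimit on the created Job. For reference, this is the field being exercised; the sketch below is hand-written for illustration (the constructor name is an assumption, and the Pod template a real Job requires is omitted for brevity), not code from the vendored file.

	package e2esketch

	import (
		batchv1 "k8s.io/api/batch/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	)

	// jobWithBackoffLimit shows where the test's backoff value lands: after
	// backoffLimit failed retries, the Job controller marks the Job Failed
	// with reason BackoffLimitExceeded, which WaitForJobFailure watches for.
	func jobWithBackoffLimit(name string, backoff int32) *batchv1.Job {
		return &batchv1.Job{
			ObjectMeta: metav1.ObjectMeta{Name: name},
			Spec:       batchv1.JobSpec{BackoffLimit: &backoff},
		}
	}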
@@ -187,11 +183,18 @@ var _ = SIGDescribe("Job", func() {
 		err = framework.WaitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, framework.JobTimeout, "BackoffLimitExceeded")
 		Expect(err).NotTo(HaveOccurred())
 
-		By("Checking that only one pod created and status is failed")
+		By(fmt.Sprintf("Checking that %d pod created and status is failed", backoff+1))
 		pods, err := framework.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
 		Expect(err).NotTo(HaveOccurred())
-		Expect(pods.Items).To(HaveLen(1))
-		pod := pods.Items[0]
-		Expect(pod.Status.Phase).To(Equal(v1.PodFailed))
+		// Expect(pods.Items).To(HaveLen(backoff + 1))
+		// due to NumRequeus not being stable enough, especially with failed status
+		// updates we need to allow more than backoff+1
+		// TODO revert this back to above when https://github.com/kubernetes/kubernetes/issues/64787 gets fixed
+		if len(pods.Items) < backoff+1 {
+			framework.Failf("Not enough pod created expected at least %d, got %#v", backoff+1, pods.Items)
+		}
+		for _, pod := range pods.Items {
+			Expect(pod.Status.Phase).To(Equal(v1.PodFailed))
+		}
 	})
 })
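A note on the last hunk: the strict Expect(pods.Items).To(HaveLen(1)) becomes a lower-bound check because, per the TODO, requeue accounting can produce more than backoff+1 failed Pods until kubernetes/kubernetes#64787 is fixed. Had the author stayed in matcher style instead of framework.Failf, an equivalent tolerant assertion might look like the sketch below; this is not what the commit does, and the helper name is invented for illustration.

	package e2esketch

	import (
		v1 "k8s.io/api/core/v1"

		. "github.com/onsi/gomega"
	)

	// expectAtLeastFailedPods is a matcher-style equivalent of the commit's
	// check: at least backoff+1 Pods exist, and every one of them is Failed.
	// Sketch only; assumes a Gomega fail handler is registered, as it is in
	// Ginkgo-based e2e suites.
	func expectAtLeastFailedPods(pods []v1.Pod, backoff int) {
		Expect(len(pods)).To(BeNumerically(">=", backoff+1),
			"expected at least %d failed pods", backoff+1)
		for _, pod := range pods {
			Expect(pod.Status.Phase).To(Equal(v1.PodFailed))
		}
	}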