Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 02:43:36 +00:00)
Fresh dep ensure
vendor/k8s.io/kubernetes/pkg/controller/job/BUILD (generated, vendored): 71 lines changed
@@ -17,25 +17,25 @@ go_library(
     deps = [
         "//pkg/controller:go_default_library",
         "//pkg/util/metrics:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/k8s.io/api/batch/v1:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/client-go/informers/batch/v1:go_default_library",
-        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/listers/batch/v1:go_default_library",
-        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
-        "//vendor/k8s.io/client-go/tools/record:go_default_library",
-        "//vendor/k8s.io/client-go/util/integer:go_default_library",
-        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
+        "//staging/src/k8s.io/api/batch/v1:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/client-go/informers/batch/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/listers/batch/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
+        "//staging/src/k8s.io/client-go/util/integer:go_default_library",
+        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
@@ -49,22 +49,23 @@ go_test(
     deps = [
         "//pkg/apis/core/install:go_default_library",
        "//pkg/controller:go_default_library",
-        "//vendor/k8s.io/api/batch/v1:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
-        "//vendor/k8s.io/client-go/informers:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
-        "//vendor/k8s.io/client-go/rest:go_default_library",
-        "//vendor/k8s.io/client-go/testing:go_default_library",
-        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
-        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
+        "//pkg/controller/testutil:go_default_library",
+        "//staging/src/k8s.io/api/batch/v1:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
+        "//staging/src/k8s.io/client-go/informers:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
+        "//staging/src/k8s.io/client-go/rest:go_default_library",
+        "//staging/src/k8s.io/client-go/testing:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
+        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
     ],
 )
 
vendor/k8s.io/kubernetes/pkg/controller/job/OWNERS (generated, vendored): 2 lines changed
@@ -6,3 +6,5 @@ reviewers:
 - erictune
 - janetkuo
 - soltysh
+labels:
+- sig/apps
vendor/k8s.io/kubernetes/pkg/controller/job/job_controller.go (generated, vendored): 46 lines changed
@@ -45,7 +45,7 @@ import (
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/util/metrics"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )
 
 const statusUpdateRetries = 3
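Note: this hunk starts the glog to k8s.io/klog migration that the rest of the file's hunks apply mechanically. klog is a fork of glog that keeps the same call surface (Infof, V(n).Infof, Flush), so only the import path changes at each call site. A minimal standalone sketch of the shared API, assuming a stock k8s.io/klog dependency (illustrative, not part of the vendored code):

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil) // registers -v, -logtostderr, etc. on the default FlagSet
	flag.Set("v", "4")  // enable verbosity level 4, as the controller's V(4) sites use
	flag.Parse()
	defer klog.Flush()

	klog.Infof("Starting job controller")      // identical signature to glog.Infof
	klog.V(4).Infof("leveled message: %d", 42) // identical leveled logging
}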
@@ -91,7 +91,7 @@ type JobController struct {
 
 func NewJobController(podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface) *JobController {
 	eventBroadcaster := record.NewBroadcaster()
-	eventBroadcaster.StartLogging(glog.Infof)
+	eventBroadcaster.StartLogging(klog.Infof)
 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
 
 	if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
@@ -140,8 +140,8 @@ func (jm *JobController) Run(workers int, stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 	defer jm.queue.ShutDown()
 
-	glog.Infof("Starting job controller")
-	defer glog.Infof("Shutting down job controller")
+	klog.Infof("Starting job controller")
+	defer klog.Infof("Shutting down job controller")
 
 	if !controller.WaitForCacheSync("job", stopCh, jm.podStoreSynced, jm.jobStoreSynced) {
 		return
@@ -320,7 +320,7 @@ func (jm *JobController) deletePod(obj interface{}) {
 }
 
 func (jm *JobController) updateJob(old, cur interface{}) {
-	oldJob := cur.(*batch.Job)
+	oldJob := old.(*batch.Job)
 	curJob := cur.(*batch.Job)
 
 	// never return error
@@ -343,7 +343,7 @@ func (jm *JobController) updateJob(old, cur interface{}) {
 			total := time.Duration(*curADS) * time.Second
 			// AddAfter will handle total < passed
 			jm.queue.AddAfter(key, total-passed)
-			glog.V(4).Infof("job ActiveDeadlineSeconds updated, will rsync after %d seconds", total-passed)
+			klog.V(4).Infof("job ActiveDeadlineSeconds updated, will rsync after %d seconds", total-passed)
 		}
 	}
 }
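Note: the oldJob line in the hunk above is a behavioral bug fix riding along with the logging migration: the removed code cast cur for both variables, so oldJob and curJob always aliased the updated object and every old-versus-new comparison in updateJob (such as spotting an ActiveDeadlineSeconds change) was a no-op. A standalone sketch of the failure mode, using a hypothetical job type rather than batch.Job:

package main

import "fmt"

type job struct{ activeDeadlineSeconds *int64 }

// buggy mirrors the removed line: both sides come from cur.
func buggy(old, cur interface{}) bool {
	oldJob := cur.(*job) // wrong argument: aliases the new object
	curJob := cur.(*job)
	return oldJob.activeDeadlineSeconds != curJob.activeDeadlineSeconds
}

// fixed mirrors the added line: compare previous state against current.
func fixed(old, cur interface{}) bool {
	oldJob := old.(*job)
	curJob := cur.(*job)
	return oldJob.activeDeadlineSeconds != curJob.activeDeadlineSeconds
}

func main() {
	ten := int64(10)
	before, after := &job{}, &job{activeDeadlineSeconds: &ten}
	fmt.Println(buggy(before, after)) // false: the change is invisible
	fmt.Println(fixed(before, after)) // true: the change is detected
}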
@@ -436,7 +436,7 @@ func (jm *JobController) getPodsForJob(j *batch.Job) ([]*v1.Pod, error) {
 func (jm *JobController) syncJob(key string) (bool, error) {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Since(startTime))
+		klog.V(4).Infof("Finished syncing job %q (%v)", key, time.Since(startTime))
 	}()
 
 	ns, name, err := cache.SplitMetaNamespaceKey(key)
@@ -449,7 +449,7 @@ func (jm *JobController) syncJob(key string) (bool, error) {
 	sharedJob, err := jm.jobLister.Jobs(ns).Get(name)
 	if err != nil {
 		if errors.IsNotFound(err) {
-			glog.V(4).Infof("Job has been deleted: %v", key)
+			klog.V(4).Infof("Job has been deleted: %v", key)
 			jm.expectations.DeleteExpectations(key)
 			return true, nil
 		}
@@ -485,7 +485,7 @@ func (jm *JobController) syncJob(key string) (bool, error) {
 		job.Status.StartTime = &now
 		// enqueue a sync to check if job past ActiveDeadlineSeconds
 		if job.Spec.ActiveDeadlineSeconds != nil {
-			glog.V(4).Infof("Job %s have ActiveDeadlineSeconds will sync after %d seconds",
+			klog.V(4).Infof("Job %s have ActiveDeadlineSeconds will sync after %d seconds",
 				key, *job.Spec.ActiveDeadlineSeconds)
 			jm.queue.AddAfter(key, time.Duration(*job.Spec.ActiveDeadlineSeconds)*time.Second)
 		}
@@ -614,7 +614,7 @@ func (jm *JobController) deleteJobPods(job *batch.Job, pods []*v1.Pod, errCh cha
 			defer wait.Done()
 			if err := jm.podControl.DeletePod(job.Namespace, pods[ix].Name, job); err != nil {
 				defer utilruntime.HandleError(err)
-				glog.V(2).Infof("Failed to delete %v, job %q/%q deadline exceeded", pods[ix].Name, job.Namespace, job.Name)
+				klog.V(2).Infof("Failed to delete %v, job %q/%q deadline exceeded", pods[ix].Name, job.Namespace, job.Name)
 				errCh <- err
 			}
 		}(i)
@@ -643,6 +643,9 @@ func pastBackoffLimitOnFailure(job *batch.Job, pods []*v1.Pod) bool {
 			result += stat.RestartCount
 		}
 	}
+	if *job.Spec.BackoffLimit == 0 {
+		return result > 0
+	}
 	return result >= *job.Spec.BackoffLimit
 }
 
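Note: the three added lines are a behavior fix for backoffLimit: 0 with restartPolicy: OnFailure, where failures surface as container restarts counted into result. Since result >= 0 always holds, the old comparison would have declared the job past its backoff limit before any restart happened; the new branch fails it only once at least one restart has occurred. A standalone sketch of the decision (hypothetical helper, not the vendored function):

package main

import "fmt"

// pastBackoffLimit reports whether totalRestarts exhausts backoffLimit,
// with the special case this hunk introduces for a limit of zero.
func pastBackoffLimit(backoffLimit, totalRestarts int32) bool {
	if backoffLimit == 0 {
		return totalRestarts > 0 // without this, 0 >= 0 failed the job immediately
	}
	return totalRestarts >= backoffLimit
}

func main() {
	fmt.Println(pastBackoffLimit(0, 0)) // false: the pod may stay active
	fmt.Println(pastBackoffLimit(0, 1)) // true: first restart exhausts the limit
	fmt.Println(pastBackoffLimit(2, 1)) // false
	fmt.Println(pastBackoffLimit(2, 2)) // true
}

The new TestJobBackoffForOnFailure cases later in this diff ("backoffLimit 0 should have 1 pod active" and the restartCount 0/1 cases) exercise exactly this branch.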
@@ -694,7 +697,7 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b
 		diff := active - parallelism
 		errCh = make(chan error, diff)
 		jm.expectations.ExpectDeletions(jobKey, int(diff))
-		glog.V(4).Infof("Too many pods running job %q, need %d, deleting %d", jobKey, parallelism, diff)
+		klog.V(4).Infof("Too many pods running job %q, need %d, deleting %d", jobKey, parallelism, diff)
 		// Sort the pods in the order such that not-ready < ready, unscheduled
 		// < scheduled, and pending < running. This ensures that we delete pods
 		// in the earlier stages whenever possible.
@@ -709,7 +712,7 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b
 				if err := jm.podControl.DeletePod(job.Namespace, activePods[ix].Name, job); err != nil {
 					defer utilruntime.HandleError(err)
 					// Decrement the expected number of deletes because the informer won't observe this deletion
-					glog.V(2).Infof("Failed to delete %v, decrementing expectations for job %q/%q", activePods[ix].Name, job.Namespace, job.Name)
+					klog.V(2).Infof("Failed to delete %v, decrementing expectations for job %q/%q", activePods[ix].Name, job.Namespace, job.Name)
 					jm.expectations.DeletionObserved(jobKey)
 					activeLock.Lock()
 					active++
@@ -746,7 +749,7 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b
 		}
 		jm.expectations.ExpectCreations(jobKey, int(diff))
 		errCh = make(chan error, diff)
-		glog.V(4).Infof("Too few pods running job %q, need %d, creating %d", jobKey, wantActive, diff)
+		klog.V(4).Infof("Too few pods running job %q, need %d, creating %d", jobKey, wantActive, diff)
 
 		active += diff
 		wait := sync.WaitGroup{}
@@ -779,7 +782,7 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b
 				if err != nil {
 					defer utilruntime.HandleError(err)
 					// Decrement the expected number of creates because the informer won't observe this pod
-					glog.V(2).Infof("Failed creation, decrementing expectations for job %q/%q", job.Namespace, job.Name)
+					klog.V(2).Infof("Failed creation, decrementing expectations for job %q/%q", job.Namespace, job.Name)
 					jm.expectations.CreationObserved(jobKey)
 					activeLock.Lock()
 					active--
@@ -792,7 +795,7 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b
 		// any skipped pods that we never attempted to start shouldn't be expected.
 		skippedPods := diff - batchSize
 		if errorCount < len(errCh) && skippedPods > 0 {
-			glog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for job %q/%q", skippedPods, job.Namespace, job.Name)
+			klog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for job %q/%q", skippedPods, job.Namespace, job.Name)
 			active -= skippedPods
 			for i := int32(0); i < skippedPods; i++ {
 				// Decrement the expected number of creates because the informer won't observe this pod
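Note: the manageJob hunks touch the controller's slow-start creation path: pods are created in doubling batches, and once a batch reports errors the remaining skippedPods creations are never attempted, so their expectations are decremented up front. A simplified sequential sketch of the batching pattern (the vendored controller runs each batch concurrently and collects errors through errCh):

package main

import (
	"errors"
	"fmt"
)

// slowStart invokes create up to count times in batches of doubling size,
// stopping after the first failing call; the caller treats the
// never-attempted remainder as skipped.
func slowStart(count int, create func(i int) error) (succeeded int, err error) {
	for batch := 1; succeeded < count; batch *= 2 {
		if remaining := count - succeeded; batch > remaining {
			batch = remaining
		}
		for i := 0; i < batch; i++ {
			if e := create(succeeded + i); e != nil {
				return succeeded + i, e // count-(succeeded+i) creations are skipped
			}
		}
		succeeded += batch
	}
	return succeeded, nil
}

func main() {
	n, err := slowStart(10, func(i int) error {
		if i == 3 {
			return errors.New("create failed") // batches of size 1 and 2 succeed; the size-4 batch fails
		}
		return nil
	})
	fmt.Println("succeeded:", n, "err:", err) // succeeded: 3, err: create failed
}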
@@ -866,16 +869,3 @@ func filterPods(pods []*v1.Pod, phase v1.PodPhase) int {
 	}
 	return result
 }
-
-// byCreationTimestamp sorts a list by creation timestamp, using their names as a tie breaker.
-type byCreationTimestamp []batch.Job
-
-func (o byCreationTimestamp) Len() int      { return len(o) }
-func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
-
-func (o byCreationTimestamp) Less(i, j int) bool {
-	if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
-		return o[i].Name < o[j].Name
-	}
-	return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
-}
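Note: the deleted byCreationTimestamp type implemented sort.Interface over []batch.Job, ordering by creation time with name as a tie breaker. For reference, the same ordering can be written with sort.Slice; a minimal standalone sketch with a stand-in struct:

package main

import (
	"fmt"
	"sort"
	"time"
)

type jobMeta struct {
	name    string
	created time.Time
}

func main() {
	jobs := []jobMeta{
		{"b", time.Unix(100, 0)},
		{"a", time.Unix(100, 0)}, // same timestamp as "b": name breaks the tie
		{"c", time.Unix(50, 0)},
	}
	sort.Slice(jobs, func(i, j int) bool {
		if jobs[i].created.Equal(jobs[j].created) {
			return jobs[i].name < jobs[j].name
		}
		return jobs[i].created.Before(jobs[j].created)
	})
	for _, j := range jobs {
		fmt.Println(j.name) // c, a, b
	}
}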
vendor/k8s.io/kubernetes/pkg/controller/job/job_controller_test.go (generated, vendored): 54 lines changed
@@ -40,12 +40,14 @@ import (
 	"k8s.io/client-go/util/workqueue"
 	_ "k8s.io/kubernetes/pkg/apis/core/install"
 	"k8s.io/kubernetes/pkg/controller"
+	"k8s.io/kubernetes/pkg/controller/testutil"
 )
 
 var alwaysReady = func() bool { return true }
 
 func newJob(parallelism, completions, backoffLimit int32) *batch.Job {
 	j := &batch.Job{
+		TypeMeta: metav1.TypeMeta{Kind: "Job"},
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foobar",
 			UID:  uuid.NewUUID(),
@@ -86,15 +88,6 @@ func newJob(parallelism, completions, backoffLimit int32) *batch.Job {
 	return j
 }
 
-func getKey(job *batch.Job, t *testing.T) string {
-	if key, err := controller.KeyFunc(job); err != nil {
-		t.Errorf("Unexpected error getting key for job %v: %v", job.Name, err)
-		return ""
-	} else {
-		return key
-	}
-}
-
 func newJobControllerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) (*JobController, informers.SharedInformerFactory) {
 	sharedInformers := informers.NewSharedInformerFactory(kubeClient, resyncPeriod())
 	jm := NewJobController(sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), kubeClient)
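Note: the local getKey helper is deleted here in favor of the shared testutil.GetKey that the remaining hunks switch to. A hedged sketch of an equivalent helper; the real one lives in pkg/controller/testutil, and the exact key function used there is an assumption based on the deleted code (controller.KeyFunc wraps client-go's deletion-handling namespace/name key func):

package testutil

import (
	"testing"

	"k8s.io/client-go/tools/cache"
)

// GetKey returns the namespace/name key for obj, failing the calling test on
// error. Signature inferred from the testutil.GetKey(job, t) call sites in
// this diff.
func GetKey(obj interface{}, t *testing.T) string {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
	if err != nil {
		t.Errorf("unexpected error getting key for object: %v", err)
		return ""
	}
	return key
}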
@@ -301,7 +294,7 @@ func TestControllerSyncJob(t *testing.T) {
 			setPodsStatuses(podIndexer, job, tc.pendingPods, tc.activePods, tc.succeededPods, tc.failedPods)
 
 			// run
-			forget, err := manager.syncJob(getKey(job, t))
+			forget, err := manager.syncJob(testutil.GetKey(job, t))
 
 			// We need requeue syncJob task if podController error
 			if tc.podControllerError != nil {
@@ -388,7 +381,7 @@ func TestSyncJobPastDeadline(t *testing.T) {
 		failedPods int32
 
 		// expectations
-		expectedForgetKey bool
+		expectedForGetKey bool
 		expectedDeletions int32
 		expectedActive    int32
 		expectedSucceeded int32
@@ -441,12 +434,12 @@ func TestSyncJobPastDeadline(t *testing.T) {
 		setPodsStatuses(podIndexer, job, 0, tc.activePods, tc.succeededPods, tc.failedPods)
 
 		// run
-		forget, err := manager.syncJob(getKey(job, t))
+		forget, err := manager.syncJob(testutil.GetKey(job, t))
 		if err != nil {
 			t.Errorf("%s: unexpected error when syncing jobs %v", name, err)
 		}
-		if forget != tc.expectedForgetKey {
-			t.Errorf("%s: unexpected forget value. Expected %v, saw %v\n", name, tc.expectedForgetKey, forget)
+		if forget != tc.expectedForGetKey {
+			t.Errorf("%s: unexpected forget value. Expected %v, saw %v\n", name, tc.expectedForGetKey, forget)
 		}
 		// validate created/deleted pods
 		if int32(len(fakePodControl.Templates)) != 0 {
@@ -504,7 +497,7 @@ func TestSyncPastDeadlineJobFinished(t *testing.T) {
 	job.Status.StartTime = &start
 	job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline"))
 	sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
-	forget, err := manager.syncJob(getKey(job, t))
+	forget, err := manager.syncJob(testutil.GetKey(job, t))
 	if err != nil {
 		t.Errorf("Unexpected error when syncing jobs %v", err)
 	}
@@ -533,7 +526,7 @@ func TestSyncJobComplete(t *testing.T) {
 	job := newJob(1, 1, 6)
 	job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobComplete, "", ""))
 	sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
-	forget, err := manager.syncJob(getKey(job, t))
+	forget, err := manager.syncJob(testutil.GetKey(job, t))
 	if err != nil {
 		t.Fatalf("Unexpected error when syncing jobs %v", err)
 	}
@@ -559,7 +552,7 @@ func TestSyncJobDeleted(t *testing.T) {
 	manager.jobStoreSynced = alwaysReady
 	manager.updateHandler = func(job *batch.Job) error { return nil }
 	job := newJob(2, 2, 6)
-	forget, err := manager.syncJob(getKey(job, t))
+	forget, err := manager.syncJob(testutil.GetKey(job, t))
 	if err != nil {
 		t.Errorf("Unexpected error when syncing jobs %v", err)
 	}
@@ -584,12 +577,12 @@ func TestSyncJobUpdateRequeue(t *testing.T) {
 	manager.jobStoreSynced = alwaysReady
 	updateError := fmt.Errorf("Update error")
 	manager.updateHandler = func(job *batch.Job) error {
-		manager.queue.AddRateLimited(getKey(job, t))
+		manager.queue.AddRateLimited(testutil.GetKey(job, t))
 		return updateError
 	}
 	job := newJob(2, 2, 6)
 	sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
-	forget, err := manager.syncJob(getKey(job, t))
+	forget, err := manager.syncJob(testutil.GetKey(job, t))
 	if err == nil || err != updateError {
 		t.Errorf("Expected error %v when syncing jobs, got %v", updateError, err)
 	}
@@ -598,7 +591,7 @@ func TestSyncJobUpdateRequeue(t *testing.T) {
 	}
 	t.Log("Waiting for a job in the queue")
 	key, _ := manager.queue.Get()
-	expectedKey := getKey(job, t)
+	expectedKey := testutil.GetKey(job, t)
 	if key != expectedKey {
 		t.Errorf("Expected requeue of job with key %s got %s", expectedKey, key)
 	}
@@ -1160,7 +1153,7 @@ func TestSyncJobExpectations(t *testing.T) {
 			podIndexer.Add(&pods[1])
 		},
 	}
-	manager.syncJob(getKey(job, t))
+	manager.syncJob(testutil.GetKey(job, t))
 	if len(fakePodControl.Templates) != 0 {
 		t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", 0, len(fakePodControl.Templates))
 	}
@@ -1314,7 +1307,7 @@ func TestJobBackoffReset(t *testing.T) {
 
 		// job & pods setup
 		job := newJob(tc.parallelism, tc.completions, tc.backoffLimit)
-		key := getKey(job, t)
+		key := testutil.GetKey(job, t)
 		sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
 		podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer()
 
@@ -1429,6 +1422,21 @@ func TestJobBackoffForOnFailure(t *testing.T) {
 		expectedCondition       *batch.JobConditionType
 		expectedConditionReason string
 	}{
+		"backoffLimit 0 should have 1 pod active": {
+			1, 1, 0,
+			true, []int32{0},
+			1, 0, 0, nil, "",
+		},
+		"backoffLimit 1 with restartCount 0 should have 1 pod active": {
+			1, 1, 1,
+			true, []int32{0},
+			1, 0, 0, nil, "",
+		},
+		"backoffLimit 1 with restartCount 1 should have 0 pod active": {
+			1, 1, 1,
+			true, []int32{1},
+			0, 0, 1, &jobConditionFailed, "BackoffLimitExceeded",
+		},
 		"too many job failures - single pod": {
 			1, 5, 2,
 			true, []int32{2},
@@ -1472,7 +1480,7 @@ func TestJobBackoffForOnFailure(t *testing.T) {
 			}
 
 			// run
-			forget, err := manager.syncJob(getKey(job, t))
+			forget, err := manager.syncJob(testutil.GetKey(job, t))
 
 			if err != nil {
 				t.Errorf("unexpected error syncing job. Got %#v", err)