vendor files

Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

75
vendor/k8s.io/kubernetes/pkg/controller/cronjob/BUILD generated vendored Normal file

@@ -0,0 +1,75 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"cronjob_controller.go",
"doc.go",
"injection.go",
"utils.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/cronjob",
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/util/metrics:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/robfig/cron:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/tools/reference:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"cronjob_controller_test.go",
"utils_test.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/cronjob",
library = ":go_default_library",
deps = [
"//pkg/apis/batch/install:go_default_library",
"//pkg/apis/core/install:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

8
vendor/k8s.io/kubernetes/pkg/controller/cronjob/OWNERS generated vendored Executable file

@@ -0,0 +1,8 @@
approvers:
- erictune
- janetkuo
- soltysh
reviewers:
- erictune
- janetkuo
- soltysh

418
vendor/k8s.io/kubernetes/pkg/controller/cronjob/cronjob_controller.go generated vendored Normal file

@@ -0,0 +1,418 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cronjob
/*
I did not use watch or expectations. Those add a lot of corner cases, and we aren't
expecting a large volume of jobs or scheduledJobs. (We are favoring correctness
over scalability. If we find a single controller thread is too slow because
there are a lot of Jobs or CronJobs, we can parallelize by Namespace.
If we find the load on the API server is too high, we can use a watch and
UndeltaStore.)
Just periodically list jobs and SJs, and then reconcile them.
*/
import (
"fmt"
"sort"
"time"
"github.com/golang/glog"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
ref "k8s.io/client-go/tools/reference"
"k8s.io/kubernetes/pkg/util/metrics"
)
// Utilities for dealing with Jobs and CronJobs and time.
// controllerKind contains the schema.GroupVersionKind for this controller type.
var controllerKind = batchv1beta1.SchemeGroupVersion.WithKind("CronJob")
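// CronJobController is a controller for CronJobs. It periodically lists Jobs
// and CronJobs and reconciles them, starting and reaping Jobs as dictated by
// each CronJob's schedule and concurrency policy.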
type CronJobController struct {
kubeClient clientset.Interface
jobControl jobControlInterface
sjControl sjControlInterface
podControl podControlInterface
recorder record.EventRecorder
}
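// NewCronJobController creates and initializes a CronJobController using the
// given clientset, wiring up event recording and rate-limiter usage metrics.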
func NewCronJobController(kubeClient clientset.Interface) (*CronJobController, error) {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when every client has moved to use the clientset.
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
if err := metrics.RegisterMetricAndTrackRateLimiterUsage("cronjob_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()); err != nil {
return nil, err
}
}
jm := &CronJobController{
kubeClient: kubeClient,
jobControl: realJobControl{KubeClient: kubeClient},
sjControl: &realSJControl{KubeClient: kubeClient},
podControl: &realPodControl{KubeClient: kubeClient},
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cronjob-controller"}),
}
return jm, nil
}
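// NewCronJobControllerFromClient is kept for backwards compatibility; it
// simply delegates to NewCronJobController.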
func NewCronJobControllerFromClient(kubeClient clientset.Interface) (*CronJobController, error) {
jm, err := NewCronJobController(kubeClient)
if err != nil {
return nil, err
}
return jm, nil
}
// Run the main goroutine responsible for watching and syncing jobs.
func (jm *CronJobController) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
glog.Infof("Starting CronJob Manager")
// Check things every 10 seconds.
go wait.Until(jm.syncAll, 10*time.Second, stopCh)
<-stopCh
glog.Infof("Shutting down CronJob Manager")
}
// syncAll lists all the CronJobs and Jobs and reconciles them.
func (jm *CronJobController) syncAll() {
// List children (Jobs) before parents (CronJob).
// This guarantees that if we see any Job that got orphaned by the GC orphan finalizer,
// we must also see that the parent CronJob has non-nil DeletionTimestamp (see #42639).
// Note that this only works because we are NOT using any caches here.
jl, err := jm.kubeClient.BatchV1().Jobs(metav1.NamespaceAll).List(metav1.ListOptions{})
if err != nil {
utilruntime.HandleError(fmt.Errorf("can't list Jobs: %v", err))
return
}
js := jl.Items
glog.V(4).Infof("Found %d jobs", len(js))
sjl, err := jm.kubeClient.BatchV1beta1().CronJobs(metav1.NamespaceAll).List(metav1.ListOptions{})
if err != nil {
utilruntime.HandleError(fmt.Errorf("can't list CronJobs: %v", err))
return
}
sjs := sjl.Items
glog.V(4).Infof("Found %d cronjobs", len(sjs))
jobsBySj := groupJobsByParent(js)
glog.V(4).Infof("Found %d groups", len(jobsBySj))
for _, sj := range sjs {
syncOne(&sj, jobsBySj[sj.UID], time.Now(), jm.jobControl, jm.sjControl, jm.podControl, jm.recorder)
cleanupFinishedJobs(&sj, jobsBySj[sj.UID], jm.jobControl, jm.sjControl, jm.podControl, jm.recorder)
}
}
// cleanupFinishedJobs cleans up finished jobs created by a CronJob
func cleanupFinishedJobs(sj *batchv1beta1.CronJob, js []batchv1.Job, jc jobControlInterface,
sjc sjControlInterface, pc podControlInterface, recorder record.EventRecorder) {
// If neither limit is set, there is no need to do anything.
if sj.Spec.FailedJobsHistoryLimit == nil && sj.Spec.SuccessfulJobsHistoryLimit == nil {
return
}
failedJobs := []batchv1.Job{}
successfulJobs := []batchv1.Job{}
for _, job := range js {
isFinished, finishedStatus := getFinishedStatus(&job)
if isFinished && finishedStatus == batchv1.JobComplete {
successfulJobs = append(successfulJobs, job)
} else if isFinished && finishedStatus == batchv1.JobFailed {
failedJobs = append(failedJobs, job)
}
}
if sj.Spec.SuccessfulJobsHistoryLimit != nil {
removeOldestJobs(sj,
successfulJobs,
jc,
pc,
*sj.Spec.SuccessfulJobsHistoryLimit,
recorder)
}
if sj.Spec.FailedJobsHistoryLimit != nil {
removeOldestJobs(sj,
failedJobs,
jc,
pc,
*sj.Spec.FailedJobsHistoryLimit,
recorder)
}
// Update the CronJob, in case jobs were removed from the list.
if _, err := sjc.UpdateStatus(sj); err != nil {
nameForLog := fmt.Sprintf("%s/%s", sj.Namespace, sj.Name)
glog.Infof("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err)
}
}
// removeOldestJobs removes the oldest jobs from a list of jobs
func removeOldestJobs(sj *batchv1beta1.CronJob, js []batchv1.Job, jc jobControlInterface,
pc podControlInterface, maxJobs int32, recorder record.EventRecorder) {
numToDelete := len(js) - int(maxJobs)
if numToDelete <= 0 {
return
}
nameForLog := fmt.Sprintf("%s/%s", sj.Namespace, sj.Name)
glog.V(4).Infof("Cleaning up %d/%d jobs from %s", numToDelete, len(js), nameForLog)
sort.Sort(byJobStartTime(js))
for i := 0; i < numToDelete; i++ {
glog.V(4).Infof("Removing job %s from %s", js[i].Name, nameForLog)
deleteJob(sj, &js[i], jc, pc, recorder, "history limit reached")
}
}
// syncOne reconciles a CronJob with a list of any Jobs that it created.
// All known jobs created by "sj" should be included in "js".
// The current time is passed in to facilitate testing.
// It has no receiver, to facilitate testing.
func syncOne(sj *batchv1beta1.CronJob, js []batchv1.Job, now time.Time, jc jobControlInterface, sjc sjControlInterface, pc podControlInterface, recorder record.EventRecorder) {
nameForLog := fmt.Sprintf("%s/%s", sj.Namespace, sj.Name)
childrenJobs := make(map[types.UID]bool)
for _, j := range js {
childrenJobs[j.ObjectMeta.UID] = true
found := inActiveList(*sj, j.ObjectMeta.UID)
if !found && !IsJobFinished(&j) {
recorder.Eventf(sj, v1.EventTypeWarning, "UnexpectedJob", "Saw a job that the controller did not create or forgot: %v", j.Name)
// We found an unfinished job that has us as the parent, but it is not in our Active list.
// This could happen if we crashed right after creating the Job and before updating the status,
// or if our jobs list is newer than our sj status after a relist, or if someone intentionally created
// a job that they wanted us to adopt.
// TODO: maybe handle the adoption case? Concurrency/suspend rules will not apply in that case, obviously, since we can't
// stop users from creating jobs if they have permission. It is assumed that if a
// user has permission to create a job within a namespace, then they have permission to make any scheduledJob
// in the same namespace "adopt" that job. ReplicaSets and their Pods work the same way.
// TBS: how to update sj.Status.LastScheduleTime if the adopted job is newer than any we knew about?
} else if found && IsJobFinished(&j) {
deleteFromActiveList(sj, j.ObjectMeta.UID)
// TODO: event to call out failure vs success.
recorder.Eventf(sj, v1.EventTypeNormal, "SawCompletedJob", "Saw completed job: %v", j.Name)
}
}
// Remove any job reference from the active list if the corresponding job does not exist any more.
// Otherwise, the cronjob may be stuck in active mode forever even though there is no matching
// job running.
for _, j := range sj.Status.Active {
if found := childrenJobs[j.UID]; !found {
recorder.Eventf(sj, v1.EventTypeNormal, "MissingJob", "Active job went missing: %v", j.Name)
deleteFromActiveList(sj, j.UID)
}
}
updatedSJ, err := sjc.UpdateStatus(sj)
if err != nil {
glog.Errorf("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err)
return
}
*sj = *updatedSJ
if sj.DeletionTimestamp != nil {
// The CronJob is being deleted.
// Don't do anything other than updating status.
return
}
if sj.Spec.Suspend != nil && *sj.Spec.Suspend {
glog.V(4).Infof("Not starting job for %s because it is suspended", nameForLog)
return
}
times, err := getRecentUnmetScheduleTimes(*sj, now)
if err != nil {
recorder.Eventf(sj, v1.EventTypeWarning, "FailedNeedsStart", "Cannot determine if job needs to be started: %v", err)
glog.Errorf("Cannot determine if %s needs to be started: %v", nameForLog, err)
return
}
// TODO: handle multiple unmet start times, from oldest to newest, updating status as needed.
if len(times) == 0 {
glog.V(4).Infof("No unmet start times for %s", nameForLog)
return
}
if len(times) > 1 {
glog.V(4).Infof("Multiple unmet start times for %s so only starting last one", nameForLog)
}
scheduledTime := times[len(times)-1]
tooLate := false
if sj.Spec.StartingDeadlineSeconds != nil {
tooLate = scheduledTime.Add(time.Second * time.Duration(*sj.Spec.StartingDeadlineSeconds)).Before(now)
}
if tooLate {
glog.V(4).Infof("Missed starting window for %s", nameForLog)
// TODO: generate an event for a miss. Use a warning level event because it indicates a
// problem with the controller (restart or long queue), and is not expected by user either.
// Since we don't set LastScheduleTime when not scheduling, we are going to keep noticing
// the miss every cycle. In order to avoid sending multiple events, and to avoid processing
// the sj again and again, we could set a Status.LastMissedTime when we notice a miss.
// Then, when we call getRecentUnmetScheduleTimes, we can take max(creationTimestamp,
// Status.LastScheduleTime, Status.LastMissedTime), so that we won't generate
// an event the next time we process it, and so that a user looking at the status
// can easily see that there was a missed execution.
return
}
if sj.Spec.ConcurrencyPolicy == batchv1beta1.ForbidConcurrent && len(sj.Status.Active) > 0 {
// Regardless which source of information we use for the set of active jobs,
// there is some risk that we won't see an active job when there is one.
// (because we haven't seen the status update to the SJ or the created pod).
// So it is theoretically possible to have concurrency with Forbid.
// As long as the invocations are "far enough apart in time", this usually won't happen.
//
// TODO: for Forbid, we could use the same name for every execution, as a lock.
// With replace, we could use a name that is deterministic per execution time.
// But that would mean that you could not inspect prior successes or failures of Forbid jobs.
glog.V(4).Infof("Not starting job for %s because of prior execution still running and concurrency policy is Forbid", nameForLog)
return
}
if sj.Spec.ConcurrencyPolicy == batchv1beta1.ReplaceConcurrent {
for _, j := range sj.Status.Active {
// TODO: this should be replaced with server side job deletion
// currently this mimics JobReaper from pkg/kubectl/stop.go
glog.V(4).Infof("Deleting job %s of %s that was still running at next scheduled start time", j.Name, nameForLog)
job, err := jc.GetJob(j.Namespace, j.Name)
if err != nil {
recorder.Eventf(sj, v1.EventTypeWarning, "FailedGet", "Get job: %v", err)
return
}
if !deleteJob(sj, job, jc, pc, recorder, "") {
return
}
}
}
jobReq, err := getJobFromTemplate(sj, scheduledTime)
if err != nil {
glog.Errorf("Unable to make Job from template in %s: %v", nameForLog, err)
return
}
jobResp, err := jc.CreateJob(sj.Namespace, jobReq)
if err != nil {
recorder.Eventf(sj, v1.EventTypeWarning, "FailedCreate", "Error creating job: %v", err)
return
}
glog.V(4).Infof("Created Job %s for %s", jobResp.Name, nameForLog)
recorder.Eventf(sj, v1.EventTypeNormal, "SuccessfulCreate", "Created job %v", jobResp.Name)
// ------------------------------------------------------------------ //
// If this process restarts at this point (after posting a job, but
// before updating the status), then we might try to start the job again
// the next time. Actually, if we relist the SJs and Jobs on the next
// iteration of syncAll, we might not see our own status update, and
// then post one again. So, we need to use the job name as a lock to
// prevent us from making the job twice (name the job with hash of its
// scheduled time).
// Add the just-started job to the status list.
ref, err := getRef(jobResp)
if err != nil {
glog.V(2).Infof("Unable to make object reference for job for %s", nameForLog)
} else {
sj.Status.Active = append(sj.Status.Active, *ref)
}
sj.Status.LastScheduleTime = &metav1.Time{Time: scheduledTime}
if _, err := sjc.UpdateStatus(sj); err != nil {
glog.Infof("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err)
}
return
}
// deleteJob reaps a job, deleting the job, its pods, and its reference in the active list
func deleteJob(sj *batchv1beta1.CronJob, job *batchv1.Job, jc jobControlInterface,
pc podControlInterface, recorder record.EventRecorder, reason string) bool {
// TODO: this should be replaced with server side job deletion
// currently this mimics JobReaper from pkg/kubectl/stop.go
nameForLog := fmt.Sprintf("%s/%s", sj.Namespace, sj.Name)
// scale job down to 0
if *job.Spec.Parallelism != 0 {
zero := int32(0)
var err error
job.Spec.Parallelism = &zero
job, err = jc.UpdateJob(job.Namespace, job)
if err != nil {
recorder.Eventf(sj, v1.EventTypeWarning, "FailedUpdate", "Update job: %v", err)
return false
}
}
// remove all pods...
selector, _ := metav1.LabelSelectorAsSelector(job.Spec.Selector)
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := pc.ListPods(job.Namespace, options)
if err != nil {
recorder.Eventf(sj, v1.EventTypeWarning, "FailedList", "List job-pods: %v", err)
return false
}
errList := []error{}
for _, pod := range podList.Items {
glog.V(2).Infof("CronJob controller is deleting Pod %v/%v", pod.Namespace, pod.Name)
if err := pc.DeletePod(pod.Namespace, pod.Name); err != nil {
// ignores the error when the pod isn't found
if !errors.IsNotFound(err) {
errList = append(errList, err)
}
}
}
if len(errList) != 0 {
recorder.Eventf(sj, v1.EventTypeWarning, "FailedDelete", "Deleted job-pods: %v", utilerrors.NewAggregate(errList))
return false
}
// ... the job itself...
if err := jc.DeleteJob(job.Namespace, job.Name); err != nil {
recorder.Eventf(sj, v1.EventTypeWarning, "FailedDelete", "Deleted job: %v", err)
glog.Errorf("Error deleting job %s from %s: %v", job.Name, nameForLog, err)
return false
}
// ... and its reference from active list
deleteFromActiveList(sj, job.ObjectMeta.UID)
recorder.Eventf(sj, v1.EventTypeNormal, "SuccessfulDelete", "Deleted job %v", job.Name)
return true
}
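// getRef returns an ObjectReference to the given object, using the client-go scheme.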
func getRef(object runtime.Object) (*v1.ObjectReference, error) {
return ref.GetReference(scheme.Scheme, object)
}

782
vendor/k8s.io/kubernetes/pkg/controller/cronjob/cronjob_controller_test.go generated vendored Normal file

@@ -0,0 +1,782 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cronjob
import (
"errors"
"strconv"
"strings"
"testing"
"time"
batchv1 "k8s.io/api/batch/v1"
batchV1beta1 "k8s.io/api/batch/v1beta1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
// For the cronjob controller to do conversions.
_ "k8s.io/kubernetes/pkg/apis/batch/install"
_ "k8s.io/kubernetes/pkg/apis/core/install"
)
var (
// schedule is hourly on the hour
onTheHour string = "0 * * * ?"
errorSchedule string = "obvious error schedule"
)
func justBeforeTheHour() time.Time {
T1, err := time.Parse(time.RFC3339, "2016-05-19T09:59:00Z")
if err != nil {
panic("test setup error")
}
return T1
}
func topOfTheHour() time.Time {
T1, err := time.Parse(time.RFC3339, "2016-05-19T10:00:00Z")
if err != nil {
panic("test setup error")
}
return T1
}
func justAfterTheHour() time.Time {
T1, err := time.Parse(time.RFC3339, "2016-05-19T10:01:00Z")
if err != nil {
panic("test setup error")
}
return T1
}
func weekAfterTheHour() time.Time {
T1, err := time.Parse(time.RFC3339, "2016-05-26T10:00:00Z")
if err != nil {
panic("test setup error")
}
return T1
}
func justBeforeThePriorHour() time.Time {
T1, err := time.Parse(time.RFC3339, "2016-05-19T08:59:00Z")
if err != nil {
panic("test setup error")
}
return T1
}
func justAfterThePriorHour() time.Time {
T1, err := time.Parse(time.RFC3339, "2016-05-19T09:01:00Z")
if err != nil {
panic("test setup error")
}
return T1
}
func startTimeStringToTime(startTime string) time.Time {
T1, err := time.Parse(time.RFC3339, startTime)
if err != nil {
panic("test setup error")
}
return T1
}
// returns a cronJob with some fields filled in.
func cronJob() batchV1beta1.CronJob {
return batchV1beta1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: "mycronjob",
Namespace: "snazzycats",
UID: types.UID("1a2b3c"),
SelfLink: "/apis/batch/v1beta1/namespaces/snazzycats/cronjobs/mycronjob",
CreationTimestamp: metav1.Time{Time: justBeforeTheHour()},
},
Spec: batchV1beta1.CronJobSpec{
Schedule: "* * * * ?",
ConcurrencyPolicy: batchV1beta1.AllowConcurrent,
JobTemplate: batchV1beta1.JobTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"a": "b"},
Annotations: map[string]string{"x": "y"},
},
Spec: jobSpec(),
},
},
}
}
func jobSpec() batchv1.JobSpec {
one := int32(1)
return batchv1.JobSpec{
Parallelism: &one,
Completions: &one,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Image: "foo/bar"},
},
},
},
}
}
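// newJob returns a minimal Job with the given UID for use in tests.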
func newJob(UID string) batchv1.Job {
return batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
UID: types.UID(UID),
Name: "foobar",
Namespace: metav1.NamespaceDefault,
SelfLink: "/apis/batch/v1/namespaces/snazzycats/jobs/myjob",
},
Spec: jobSpec(),
}
}
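// Single-letter shorthands keep the table-driven test cases below compact:
// A/f/R are the concurrency policies, T/F are booleans.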
var (
shortDead int64 = 10
mediumDead int64 = 2 * 60 * 60
longDead int64 = 1000000
noDead int64 = -12345
A batchV1beta1.ConcurrencyPolicy = batchV1beta1.AllowConcurrent
f batchV1beta1.ConcurrencyPolicy = batchV1beta1.ForbidConcurrent
R batchV1beta1.ConcurrencyPolicy = batchV1beta1.ReplaceConcurrent
T bool = true
F bool = false
)
func TestSyncOne_RunOrNot(t *testing.T) {
// Check expectations on deadline parameters
if shortDead/60/60 >= 1 {
t.Errorf("shortDead should be less than one hour")
}
if mediumDead/60/60 < 1 || mediumDead/60/60 >= 24 {
t.Errorf("mediumDead should be between one hour and one day")
}
if longDead/60/60/24 < 10 {
t.Errorf("longDead should be at least ten days")
}
testCases := map[string]struct {
// sj spec
concurrencyPolicy batchV1beta1.ConcurrencyPolicy
suspend bool
schedule string
deadline int64
// sj status
ranPreviously bool
stillActive bool
// environment
now time.Time
// expectations
expectCreate bool
expectDelete bool
expectActive int
expectedWarnings int
}{
"never ran, not valid schedule, A": {A, F, errorSchedule, noDead, F, F, justBeforeTheHour(), F, F, 0, 1},
"never ran, not valid schedule, F": {f, F, errorSchedule, noDead, F, F, justBeforeTheHour(), F, F, 0, 1},
"never ran, not valid schedule, R": {f, F, errorSchedule, noDead, F, F, justBeforeTheHour(), F, F, 0, 1},
"never ran, not time, A": {A, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, 0, 0},
"never ran, not time, F": {f, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, 0, 0},
"never ran, not time, R": {R, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, 0, 0},
"never ran, is time, A": {A, F, onTheHour, noDead, F, F, justAfterTheHour(), T, F, 1, 0},
"never ran, is time, F": {f, F, onTheHour, noDead, F, F, justAfterTheHour(), T, F, 1, 0},
"never ran, is time, R": {R, F, onTheHour, noDead, F, F, justAfterTheHour(), T, F, 1, 0},
"never ran, is time, suspended": {A, T, onTheHour, noDead, F, F, justAfterTheHour(), F, F, 0, 0},
"never ran, is time, past deadline": {A, F, onTheHour, shortDead, F, F, justAfterTheHour(), F, F, 0, 0},
"never ran, is time, not past deadline": {A, F, onTheHour, longDead, F, F, justAfterTheHour(), T, F, 1, 0},
"prev ran but done, not time, A": {A, F, onTheHour, noDead, T, F, justBeforeTheHour(), F, F, 0, 0},
"prev ran but done, not time, F": {f, F, onTheHour, noDead, T, F, justBeforeTheHour(), F, F, 0, 0},
"prev ran but done, not time, R": {R, F, onTheHour, noDead, T, F, justBeforeTheHour(), F, F, 0, 0},
"prev ran but done, is time, A": {A, F, onTheHour, noDead, T, F, justAfterTheHour(), T, F, 1, 0},
"prev ran but done, is time, F": {f, F, onTheHour, noDead, T, F, justAfterTheHour(), T, F, 1, 0},
"prev ran but done, is time, R": {R, F, onTheHour, noDead, T, F, justAfterTheHour(), T, F, 1, 0},
"prev ran but done, is time, suspended": {A, T, onTheHour, noDead, T, F, justAfterTheHour(), F, F, 0, 0},
"prev ran but done, is time, past deadline": {A, F, onTheHour, shortDead, T, F, justAfterTheHour(), F, F, 0, 0},
"prev ran but done, is time, not past deadline": {A, F, onTheHour, longDead, T, F, justAfterTheHour(), T, F, 1, 0},
"still active, not time, A": {A, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, F, 1, 0},
"still active, not time, F": {f, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, F, 1, 0},
"still active, not time, R": {R, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, F, 1, 0},
"still active, is time, A": {A, F, onTheHour, noDead, T, T, justAfterTheHour(), T, F, 2, 0},
"still active, is time, F": {f, F, onTheHour, noDead, T, T, justAfterTheHour(), F, F, 1, 0},
"still active, is time, R": {R, F, onTheHour, noDead, T, T, justAfterTheHour(), T, T, 1, 0},
"still active, is time, suspended": {A, T, onTheHour, noDead, T, T, justAfterTheHour(), F, F, 1, 0},
"still active, is time, past deadline": {A, F, onTheHour, shortDead, T, T, justAfterTheHour(), F, F, 1, 0},
"still active, is time, not past deadline": {A, F, onTheHour, longDead, T, T, justAfterTheHour(), T, F, 2, 0},
// Controller should fail to schedule these, as there are too many missed starting times
// and either no deadline or too long a deadline.
"prev ran but done, long overdue, not past deadline, A": {A, F, onTheHour, longDead, T, F, weekAfterTheHour(), F, F, 0, 1},
"prev ran but done, long overdue, not past deadline, R": {R, F, onTheHour, longDead, T, F, weekAfterTheHour(), F, F, 0, 1},
"prev ran but done, long overdue, not past deadline, F": {f, F, onTheHour, longDead, T, F, weekAfterTheHour(), F, F, 0, 1},
"prev ran but done, long overdue, no deadline, A": {A, F, onTheHour, noDead, T, F, weekAfterTheHour(), F, F, 0, 1},
"prev ran but done, long overdue, no deadline, R": {R, F, onTheHour, noDead, T, F, weekAfterTheHour(), F, F, 0, 1},
"prev ran but done, long overdue, no deadline, F": {f, F, onTheHour, noDead, T, F, weekAfterTheHour(), F, F, 0, 1},
"prev ran but done, long overdue, past medium deadline, A": {A, F, onTheHour, mediumDead, T, F, weekAfterTheHour(), T, F, 1, 0},
"prev ran but done, long overdue, past short deadline, A": {A, F, onTheHour, shortDead, T, F, weekAfterTheHour(), T, F, 1, 0},
"prev ran but done, long overdue, past medium deadline, R": {R, F, onTheHour, mediumDead, T, F, weekAfterTheHour(), T, F, 1, 0},
"prev ran but done, long overdue, past short deadline, R": {R, F, onTheHour, shortDead, T, F, weekAfterTheHour(), T, F, 1, 0},
"prev ran but done, long overdue, past medium deadline, F": {f, F, onTheHour, mediumDead, T, F, weekAfterTheHour(), T, F, 1, 0},
"prev ran but done, long overdue, past short deadline, F": {f, F, onTheHour, shortDead, T, F, weekAfterTheHour(), T, F, 1, 0},
}
for name, tc := range testCases {
sj := cronJob()
sj.Spec.ConcurrencyPolicy = tc.concurrencyPolicy
sj.Spec.Suspend = &tc.suspend
sj.Spec.Schedule = tc.schedule
if tc.deadline != noDead {
sj.Spec.StartingDeadlineSeconds = &tc.deadline
}
var (
job *batchv1.Job
err error
)
js := []batchv1.Job{}
if tc.ranPreviously {
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: justBeforeThePriorHour()}
sj.Status.LastScheduleTime = &metav1.Time{Time: justAfterThePriorHour()}
job, err = getJobFromTemplate(&sj, sj.Status.LastScheduleTime.Time)
if err != nil {
t.Fatalf("%s: nexpected error creating a job from template: %v", name, err)
}
job.UID = "1234"
job.Namespace = ""
if tc.stillActive {
sj.Status.Active = []v1.ObjectReference{{UID: job.UID}}
js = append(js, *job)
}
} else {
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: justBeforeTheHour()}
if tc.stillActive {
t.Errorf("%s: test setup error: this case makes no sense", name)
}
}
jc := &fakeJobControl{Job: job}
sjc := &fakeSJControl{}
pc := &fakePodControl{}
recorder := record.NewFakeRecorder(10)
syncOne(&sj, js, tc.now, jc, sjc, pc, recorder)
expectedCreates := 0
if tc.expectCreate {
expectedCreates = 1
}
if len(jc.Jobs) != expectedCreates {
t.Errorf("%s: expected %d job started, actually %v", name, expectedCreates, len(jc.Jobs))
}
for i := range jc.Jobs {
job := &jc.Jobs[i]
controllerRef := metav1.GetControllerOf(job)
if controllerRef == nil {
t.Errorf("%s: expected job to have ControllerRef: %#v", name, job)
} else {
if got, want := controllerRef.APIVersion, "batch/v1beta1"; got != want {
t.Errorf("%s: controllerRef.APIVersion = %q, want %q", name, got, want)
}
if got, want := controllerRef.Kind, "CronJob"; got != want {
t.Errorf("%s: controllerRef.Kind = %q, want %q", name, got, want)
}
if got, want := controllerRef.Name, sj.Name; got != want {
t.Errorf("%s: controllerRef.Name = %q, want %q", name, got, want)
}
if got, want := controllerRef.UID, sj.UID; got != want {
t.Errorf("%s: controllerRef.UID = %q, want %q", name, got, want)
}
if controllerRef.Controller == nil || *controllerRef.Controller != true {
t.Errorf("%s: controllerRef.Controller is not set to true", name)
}
}
}
expectedDeletes := 0
if tc.expectDelete {
expectedDeletes = 1
}
if len(jc.DeleteJobName) != expectedDeletes {
t.Errorf("%s: expected %d job deleted, actually %v", name, expectedDeletes, len(jc.DeleteJobName))
}
// A status update happens once while ranging through the job list, and once more if a job is created.
expectUpdates := 1
expectedEvents := 0
if tc.expectCreate {
expectedEvents++
expectUpdates++
}
if tc.expectDelete {
expectedEvents++
}
expectedEvents += tc.expectedWarnings
if len(recorder.Events) != expectedEvents {
t.Errorf("%s: expected %d event, actually %v", name, expectedEvents, len(recorder.Events))
}
numWarnings := 0
for i := 1; i <= len(recorder.Events); i++ {
e := <-recorder.Events
if strings.HasPrefix(e, v1.EventTypeWarning) {
numWarnings += 1
}
}
if numWarnings != tc.expectedWarnings {
t.Errorf("%s: expected %d warnings, actually %v", name, tc.expectedWarnings, numWarnings)
}
if tc.expectActive != len(sjc.Updates[expectUpdates-1].Status.Active) {
t.Errorf("%s: expected Active size %d, got %d", name, tc.expectActive, len(sjc.Updates[expectUpdates-1].Status.Active))
}
}
}
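// CleanupJobSpec describes a Job, and the expected cleanup outcome for it, in TestCleanupFinishedJobs_DeleteOrNot.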
type CleanupJobSpec struct {
StartTime string
IsFinished bool
IsSuccessful bool
ExpectDelete bool
IsStillInActiveList bool // only when IsFinished is set
}
func TestCleanupFinishedJobs_DeleteOrNot(t *testing.T) {
limitThree := int32(3)
limitTwo := int32(2)
limitOne := int32(1)
limitZero := int32(0)
// Starting times are assumed to be sorted by increasing start time
// in all the test cases
testCases := map[string]struct {
jobSpecs []CleanupJobSpec
now time.Time
successfulJobsHistoryLimit *int32
failedJobsHistoryLimit *int32
expectActive int
}{
"success. job limit reached": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, T, F},
{"2016-05-19T05:00:00Z", T, T, T, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", F, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), &limitTwo, &limitOne, 1},
"success. jobs not processed by Sync yet": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, T, F},
{"2016-05-19T05:00:00Z", T, T, T, T},
{"2016-05-19T06:00:00Z", T, T, F, T},
{"2016-05-19T07:00:00Z", T, T, F, T},
{"2016-05-19T08:00:00Z", F, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, T},
}, justBeforeTheHour(), &limitTwo, &limitOne, 4},
"failed job limit reached": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, F, T, F},
{"2016-05-19T05:00:00Z", T, F, T, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", T, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), &limitTwo, &limitTwo, 0},
"success. job limit set to zero": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, T, F},
{"2016-05-19T05:00:00Z", T, F, T, F},
{"2016-05-19T06:00:00Z", T, T, T, F},
{"2016-05-19T07:00:00Z", T, T, T, F},
{"2016-05-19T08:00:00Z", F, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), &limitZero, &limitOne, 1},
"failed job limit set to zero": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, F, F},
{"2016-05-19T05:00:00Z", T, F, T, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", F, F, F, F},
{"2016-05-19T09:00:00Z", T, F, T, F},
}, justBeforeTheHour(), &limitThree, &limitZero, 1},
"no limits reached": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, F, F},
{"2016-05-19T05:00:00Z", T, F, F, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", T, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), &limitThree, &limitThree, 0},
// This test case should trigger the short-circuit
"limits disabled": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, F, F},
{"2016-05-19T05:00:00Z", T, F, F, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", T, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), nil, nil, 0},
"success limit disabled": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, F, F},
{"2016-05-19T05:00:00Z", T, F, F, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", T, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), nil, &limitThree, 0},
"failure limit disabled": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, F, F},
{"2016-05-19T05:00:00Z", T, F, F, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", T, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), &limitThree, nil, 0},
"no limits reached because still active": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", F, F, F, F},
{"2016-05-19T05:00:00Z", F, F, F, F},
{"2016-05-19T06:00:00Z", F, F, F, F},
{"2016-05-19T07:00:00Z", F, F, F, F},
{"2016-05-19T08:00:00Z", F, F, F, F},
{"2016-05-19T09:00:00Z", F, F, F, F},
}, justBeforeTheHour(), &limitZero, &limitZero, 6},
"failed list pod err": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, F, F, F},
{"2016-05-19T05:00:00Z", T, F, F, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", T, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), &limitZero, &limitZero, 0},
}
for name, tc := range testCases {
sj := cronJob()
suspend := false
sj.Spec.ConcurrencyPolicy = f
sj.Spec.Suspend = &suspend
sj.Spec.Schedule = onTheHour
sj.Spec.SuccessfulJobsHistoryLimit = tc.successfulJobsHistoryLimit
sj.Spec.FailedJobsHistoryLimit = tc.failedJobsHistoryLimit
var (
job *batchv1.Job
err error
)
// Set consistent timestamps for the CronJob
if len(tc.jobSpecs) != 0 {
firstTime := startTimeStringToTime(tc.jobSpecs[0].StartTime)
lastTime := startTimeStringToTime(tc.jobSpecs[len(tc.jobSpecs)-1].StartTime)
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: firstTime}
sj.Status.LastScheduleTime = &metav1.Time{Time: lastTime}
} else {
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: justBeforeTheHour()}
}
// Create jobs
js := []batchv1.Job{}
jobsToDelete := sets.NewString()
sj.Status.Active = []v1.ObjectReference{}
for i, spec := range tc.jobSpecs {
job, err = getJobFromTemplate(&sj, startTimeStringToTime(spec.StartTime))
if err != nil {
t.Fatalf("%s: unexpected error creating a job from template: %v", name, err)
}
job.UID = types.UID(strconv.Itoa(i))
job.Namespace = ""
if spec.IsFinished {
var conditionType batchv1.JobConditionType
if spec.IsSuccessful {
conditionType = batchv1.JobComplete
} else {
conditionType = batchv1.JobFailed
}
condition := batchv1.JobCondition{Type: conditionType, Status: v1.ConditionTrue}
job.Status.Conditions = append(job.Status.Conditions, condition)
if spec.IsStillInActiveList {
sj.Status.Active = append(sj.Status.Active, v1.ObjectReference{UID: job.UID})
}
} else {
if spec.IsSuccessful || spec.IsStillInActiveList {
t.Errorf("%s: test setup error: this case makes no sense", name)
}
sj.Status.Active = append(sj.Status.Active, v1.ObjectReference{UID: job.UID})
}
js = append(js, *job)
if spec.ExpectDelete {
jobsToDelete.Insert(job.Name)
}
}
jc := &fakeJobControl{Job: job}
pc := &fakePodControl{}
sjc := &fakeSJControl{}
recorder := record.NewFakeRecorder(10)
if name == "failed list pod err" {
pc.Err = errors.New("fakePodControl err")
}
cleanupFinishedJobs(&sj, js, jc, sjc, pc, recorder)
// Check we have actually deleted the correct jobs
if len(jc.DeleteJobName) != len(jobsToDelete) {
t.Errorf("%s: expected %d job deleted, actually %d", name, len(jobsToDelete), len(jc.DeleteJobName))
} else {
jcDeleteJobName := sets.NewString(jc.DeleteJobName...)
if !jcDeleteJobName.Equal(jobsToDelete) {
t.Errorf("%s: expected jobs: %v deleted, actually: %v deleted", name, jobsToDelete, jcDeleteJobName)
}
}
// Check for events
expectedEvents := len(jobsToDelete)
if name == "failed list pod err" {
expectedEvents = len(tc.jobSpecs)
}
if len(recorder.Events) != expectedEvents {
t.Errorf("%s: expected %d event, actually %v", name, expectedEvents, len(recorder.Events))
}
// Check for jobs still in active list
numActive := 0
if len(sjc.Updates) != 0 {
numActive = len(sjc.Updates[len(sjc.Updates)-1].Status.Active)
}
if tc.expectActive != numActive {
t.Errorf("%s: expected Active size %d, got %d", name, tc.expectActive, numActive)
}
}
}
// TODO: simulation where the controller randomly doesn't run, and randomly has errors starting jobs or deleting jobs,
// but over time, all jobs run as expected (assuming Allow and no deadline).
// TestSyncOne_Status tests sj.UpdateStatus in syncOne
func TestSyncOne_Status(t *testing.T) {
finishedJob := newJob("1")
finishedJob.Status.Conditions = append(finishedJob.Status.Conditions, batchv1.JobCondition{Type: batchv1.JobComplete, Status: v1.ConditionTrue})
unexpectedJob := newJob("2")
missingJob := newJob("3")
testCases := map[string]struct {
// sj spec
concurrencyPolicy batchV1beta1.ConcurrencyPolicy
suspend bool
schedule string
deadline int64
// sj status
ranPreviously bool
hasFinishedJob bool
// environment
now time.Time
hasUnexpectedJob bool
hasMissingJob bool
beingDeleted bool
// expectations
expectCreate bool
expectDelete bool
}{
"never ran, not time, A": {A, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, F, F, F},
"never ran, not time, F": {f, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, F, F, F},
"never ran, not time, R": {R, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, F, F, F},
"never ran, is time, A": {A, F, onTheHour, noDead, F, F, justAfterTheHour(), F, F, F, T, F},
"never ran, is time, F": {f, F, onTheHour, noDead, F, F, justAfterTheHour(), F, F, F, T, F},
"never ran, is time, R": {R, F, onTheHour, noDead, F, F, justAfterTheHour(), F, F, F, T, F},
"never ran, is time, deleting": {A, F, onTheHour, noDead, F, F, justAfterTheHour(), F, F, T, F, F},
"never ran, is time, suspended": {A, T, onTheHour, noDead, F, F, justAfterTheHour(), F, F, F, F, F},
"never ran, is time, past deadline": {A, F, onTheHour, shortDead, F, F, justAfterTheHour(), F, F, F, F, F},
"never ran, is time, not past deadline": {A, F, onTheHour, longDead, F, F, justAfterTheHour(), F, F, F, T, F},
"prev ran but done, not time, A": {A, F, onTheHour, noDead, T, F, justBeforeTheHour(), F, F, F, F, F},
"prev ran but done, not time, finished job, A": {A, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, F, F, F, F},
"prev ran but done, not time, unexpected job, A": {A, F, onTheHour, noDead, T, F, justBeforeTheHour(), T, F, F, F, F},
"prev ran but done, not time, missing job, A": {A, F, onTheHour, noDead, T, F, justBeforeTheHour(), F, T, F, F, F},
"prev ran but done, not time, missing job, unexpected job, A": {A, F, onTheHour, noDead, T, F, justBeforeTheHour(), T, T, F, F, F},
"prev ran but done, not time, finished job, unexpected job, A": {A, F, onTheHour, noDead, T, T, justBeforeTheHour(), T, F, F, F, F},
"prev ran but done, not time, finished job, missing job, A": {A, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, T, F, F, F},
"prev ran but done, not time, finished job, missing job, unexpected job, A": {A, F, onTheHour, noDead, T, T, justBeforeTheHour(), T, T, F, F, F},
"prev ran but done, not time, finished job, F": {f, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, F, F, F, F},
"prev ran but done, not time, missing job, F": {f, F, onTheHour, noDead, T, F, justBeforeTheHour(), F, T, F, F, F},
"prev ran but done, not time, finished job, missing job, F": {f, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, T, F, F, F},
"prev ran but done, not time, unexpected job, R": {R, F, onTheHour, noDead, T, F, justBeforeTheHour(), T, F, F, F, F},
"prev ran but done, is time, A": {A, F, onTheHour, noDead, T, F, justAfterTheHour(), F, F, F, T, F},
"prev ran but done, is time, finished job, A": {A, F, onTheHour, noDead, T, T, justAfterTheHour(), F, F, F, T, F},
"prev ran but done, is time, unexpected job, A": {A, F, onTheHour, noDead, T, F, justAfterTheHour(), T, F, F, T, F},
"prev ran but done, is time, finished job, unexpected job, A": {A, F, onTheHour, noDead, T, T, justAfterTheHour(), T, F, F, T, F},
"prev ran but done, is time, F": {f, F, onTheHour, noDead, T, F, justAfterTheHour(), F, F, F, T, F},
"prev ran but done, is time, finished job, F": {f, F, onTheHour, noDead, T, T, justAfterTheHour(), F, F, F, T, F},
"prev ran but done, is time, unexpected job, F": {f, F, onTheHour, noDead, T, F, justAfterTheHour(), T, F, F, T, F},
"prev ran but done, is time, finished job, unexpected job, F": {f, F, onTheHour, noDead, T, T, justAfterTheHour(), T, F, F, T, F},
"prev ran but done, is time, R": {R, F, onTheHour, noDead, T, F, justAfterTheHour(), F, F, F, T, F},
"prev ran but done, is time, finished job, R": {R, F, onTheHour, noDead, T, T, justAfterTheHour(), F, F, F, T, F},
"prev ran but done, is time, unexpected job, R": {R, F, onTheHour, noDead, T, F, justAfterTheHour(), T, F, F, T, F},
"prev ran but done, is time, finished job, unexpected job, R": {R, F, onTheHour, noDead, T, T, justAfterTheHour(), T, F, F, T, F},
"prev ran but done, is time, deleting": {A, F, onTheHour, noDead, T, F, justAfterTheHour(), F, F, T, F, F},
"prev ran but done, is time, suspended": {A, T, onTheHour, noDead, T, F, justAfterTheHour(), F, F, F, F, F},
"prev ran but done, is time, finished job, suspended": {A, T, onTheHour, noDead, T, T, justAfterTheHour(), F, F, F, F, F},
"prev ran but done, is time, unexpected job, suspended": {A, T, onTheHour, noDead, T, F, justAfterTheHour(), T, F, F, F, F},
"prev ran but done, is time, finished job, unexpected job, suspended": {A, T, onTheHour, noDead, T, T, justAfterTheHour(), T, F, F, F, F},
"prev ran but done, is time, past deadline": {A, F, onTheHour, shortDead, T, F, justAfterTheHour(), F, F, F, F, F},
"prev ran but done, is time, finished job, past deadline": {A, F, onTheHour, shortDead, T, T, justAfterTheHour(), F, F, F, F, F},
"prev ran but done, is time, unexpected job, past deadline": {A, F, onTheHour, shortDead, T, F, justAfterTheHour(), T, F, F, F, F},
"prev ran but done, is time, finished job, unexpected job, past deadline": {A, F, onTheHour, shortDead, T, T, justAfterTheHour(), T, F, F, F, F},
"prev ran but done, is time, not past deadline": {A, F, onTheHour, longDead, T, F, justAfterTheHour(), F, F, F, T, F},
"prev ran but done, is time, finished job, not past deadline": {A, F, onTheHour, longDead, T, T, justAfterTheHour(), F, F, F, T, F},
"prev ran but done, is time, unexpected job, not past deadline": {A, F, onTheHour, longDead, T, F, justAfterTheHour(), T, F, F, T, F},
"prev ran but done, is time, finished job, unexpected job, not past deadline": {A, F, onTheHour, longDead, T, T, justAfterTheHour(), T, F, F, T, F},
}
for name, tc := range testCases {
// Setup the test
sj := cronJob()
sj.Spec.ConcurrencyPolicy = tc.concurrencyPolicy
sj.Spec.Suspend = &tc.suspend
sj.Spec.Schedule = tc.schedule
if tc.deadline != noDead {
sj.Spec.StartingDeadlineSeconds = &tc.deadline
}
if tc.ranPreviously {
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: justBeforeThePriorHour()}
sj.Status.LastScheduleTime = &metav1.Time{Time: justAfterThePriorHour()}
} else {
if tc.hasFinishedJob || tc.hasUnexpectedJob || tc.hasMissingJob {
t.Errorf("%s: test setup error: this case makes no sense", name)
}
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: justBeforeTheHour()}
}
jobs := []batchv1.Job{}
if tc.hasFinishedJob {
ref, err := getRef(&finishedJob)
if err != nil {
t.Errorf("%s: test setup error: failed to get job's ref: %v.", name, err)
}
sj.Status.Active = []v1.ObjectReference{*ref}
jobs = append(jobs, finishedJob)
}
if tc.hasUnexpectedJob {
jobs = append(jobs, unexpectedJob)
}
if tc.hasMissingJob {
ref, err := getRef(&missingJob)
if err != nil {
t.Errorf("%s: test setup error: failed to get job's ref: %v.", name, err)
}
sj.Status.Active = append(sj.Status.Active, *ref)
}
if tc.beingDeleted {
timestamp := metav1.NewTime(tc.now)
sj.DeletionTimestamp = &timestamp
}
jc := &fakeJobControl{}
sjc := &fakeSJControl{}
pc := &fakePodControl{}
recorder := record.NewFakeRecorder(10)
// Run the code
syncOne(&sj, jobs, tc.now, jc, sjc, pc, recorder)
// A status update happens once while ranging through the job list, and once more if a job is created.
expectUpdates := 1
// Events happen when there are unexpected or finished jobs, and upon job creation or deletion.
expectedEvents := 0
if tc.expectCreate {
expectUpdates++
expectedEvents++
}
if tc.expectDelete {
expectedEvents++
}
if tc.hasFinishedJob {
expectedEvents++
}
if tc.hasUnexpectedJob {
expectedEvents++
}
if tc.hasMissingJob {
expectedEvents++
}
if len(recorder.Events) != expectedEvents {
t.Errorf("%s: expected %d event, actually %v: %#v", name, expectedEvents, len(recorder.Events), recorder.Events)
}
if expectUpdates != len(sjc.Updates) {
t.Errorf("%s: expected %d status updates, actually %d", name, expectUpdates, len(sjc.Updates))
}
if tc.hasFinishedJob && inActiveList(sjc.Updates[0], finishedJob.UID) {
t.Errorf("%s: expected finished job removed from active list, actually active list = %#v", name, sjc.Updates[0].Status.Active)
}
if tc.hasUnexpectedJob && inActiveList(sjc.Updates[0], unexpectedJob.UID) {
t.Errorf("%s: expected unexpected job not added to active list, actually active list = %#v", name, sjc.Updates[0].Status.Active)
}
if tc.hasMissingJob && inActiveList(sjc.Updates[0], missingJob.UID) {
t.Errorf("%s: expected missing job to be removed from active list, actually active list = %#v", name, sjc.Updates[0].Status.Active)
}
if tc.expectCreate && !sjc.Updates[1].Status.LastScheduleTime.Time.Equal(topOfTheHour()) {
t.Errorf("%s: expected LastScheduleTime updated to %s, got %s", name, topOfTheHour(), sjc.Updates[1].Status.LastScheduleTime)
}
}
}

18
vendor/k8s.io/kubernetes/pkg/controller/cronjob/doc.go generated vendored Normal file

@@ -0,0 +1,18 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cronjob contains the controller for CronJob objects.
package cronjob

252
vendor/k8s.io/kubernetes/pkg/controller/cronjob/injection.go generated vendored Normal file

@@ -0,0 +1,252 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cronjob
import (
"fmt"
"sync"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
)
// sjControlInterface is an interface that knows how to update CronJob status.
// It is created as an interface to allow testing.
type sjControlInterface interface {
UpdateStatus(sj *batchv1beta1.CronJob) (*batchv1beta1.CronJob, error)
}
// realSJControl is the default implementation of sjControlInterface.
type realSJControl struct {
KubeClient clientset.Interface
}
var _ sjControlInterface = &realSJControl{}
func (c *realSJControl) UpdateStatus(sj *batchv1beta1.CronJob) (*batchv1beta1.CronJob, error) {
return c.KubeClient.BatchV1beta1().CronJobs(sj.Namespace).UpdateStatus(sj)
}
// fakeSJControl is a fake implementation of sjControlInterface that records status updates for tests.
type fakeSJControl struct {
Updates []batchv1beta1.CronJob
}
var _ sjControlInterface = &fakeSJControl{}
func (c *fakeSJControl) UpdateStatus(sj *batchv1beta1.CronJob) (*batchv1beta1.CronJob, error) {
c.Updates = append(c.Updates, *sj)
return sj, nil
}
// ------------------------------------------------------------------ //
// jobControlInterface is an interface that knows how to add or delete jobs.
// It is created as an interface to allow testing.
type jobControlInterface interface {
// GetJob retrieves a Job.
GetJob(namespace, name string) (*batchv1.Job, error)
// CreateJob creates new Jobs according to the spec.
CreateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error)
// UpdateJob updates a Job.
UpdateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error)
// PatchJob patches a Job.
PatchJob(namespace string, name string, pt types.PatchType, data []byte, subresources ...string) (*batchv1.Job, error)
// DeleteJob deletes the Job identified by name.
// TODO: delete by UID?
DeleteJob(namespace string, name string) error
}
// realJobControl is the default implementation of jobControlInterface.
type realJobControl struct {
KubeClient clientset.Interface
Recorder record.EventRecorder
}
var _ jobControlInterface = &realJobControl{}
func copyLabels(template *batchv1beta1.JobTemplateSpec) labels.Set {
l := make(labels.Set)
for k, v := range template.Labels {
l[k] = v
}
return l
}
func copyAnnotations(template *batchv1beta1.JobTemplateSpec) labels.Set {
a := make(labels.Set)
for k, v := range template.Annotations {
a[k] = v
}
return a
}
func (r realJobControl) GetJob(namespace, name string) (*batchv1.Job, error) {
return r.KubeClient.BatchV1().Jobs(namespace).Get(name, metav1.GetOptions{})
}
func (r realJobControl) UpdateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) {
return r.KubeClient.BatchV1().Jobs(namespace).Update(job)
}
func (r realJobControl) PatchJob(namespace string, name string, pt types.PatchType, data []byte, subresources ...string) (*batchv1.Job, error) {
return r.KubeClient.BatchV1().Jobs(namespace).Patch(name, pt, data, subresources...)
}
func (r realJobControl) CreateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) {
return r.KubeClient.BatchV1().Jobs(namespace).Create(job)
}
func (r realJobControl) DeleteJob(namespace string, name string) error {
return r.KubeClient.BatchV1().Jobs(namespace).Delete(name, nil)
}
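// fakeJobControl is a fake jobControlInterface for tests; it records the names
// of the jobs it was asked to create, update, patch, and delete.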
type fakeJobControl struct {
sync.Mutex
Job *batchv1.Job
Jobs []batchv1.Job
DeleteJobName []string
Err error
UpdateJobName []string
PatchJobName []string
Patches [][]byte
}
var _ jobControlInterface = &fakeJobControl{}
func (f *fakeJobControl) CreateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) {
f.Lock()
defer f.Unlock()
if f.Err != nil {
return nil, f.Err
}
job.SelfLink = fmt.Sprintf("/api/batch/v1/namespaces/%s/jobs/%s", namespace, job.Name)
f.Jobs = append(f.Jobs, *job)
job.UID = "test-uid"
return job, nil
}
func (f *fakeJobControl) GetJob(namespace, name string) (*batchv1.Job, error) {
f.Lock()
defer f.Unlock()
if f.Err != nil {
return nil, f.Err
}
return f.Job, nil
}
func (f *fakeJobControl) UpdateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) {
f.Lock()
defer f.Unlock()
if f.Err != nil {
return nil, f.Err
}
f.UpdateJobName = append(f.UpdateJobName, job.Name)
return job, nil
}
func (f *fakeJobControl) PatchJob(namespace string, name string, pt types.PatchType, data []byte, subresources ...string) (*batchv1.Job, error) {
f.Lock()
defer f.Unlock()
if f.Err != nil {
return nil, f.Err
}
f.PatchJobName = append(f.PatchJobName, name)
f.Patches = append(f.Patches, data)
// We don't have anything to return. Just return something non-nil.
return &batchv1.Job{}, nil
}
func (f *fakeJobControl) DeleteJob(namespace string, name string) error {
f.Lock()
defer f.Unlock()
if f.Err != nil {
return f.Err
}
f.DeleteJobName = append(f.DeleteJobName, name)
return nil
}
func (f *fakeJobControl) Clear() {
f.Lock()
defer f.Unlock()
f.DeleteJobName = []string{}
f.Jobs = []batchv1.Job{}
f.Err = nil
}
// ------------------------------------------------------------------ //
// podControlInterface is an interface that knows how to list or delete pods.
// It is created as an interface to allow testing.
type podControlInterface interface {
// ListPods lists pods.
ListPods(namespace string, opts metav1.ListOptions) (*v1.PodList, error)
// DeletePod deletes the pod identified by name.
// TODO: delete by UID?
DeletePod(namespace string, name string) error
}
// realPodControl is the default implementation of podControlInterface.
type realPodControl struct {
KubeClient clientset.Interface
Recorder record.EventRecorder
}
var _ podControlInterface = &realPodControl{}
func (r realPodControl) ListPods(namespace string, opts metav1.ListOptions) (*v1.PodList, error) {
return r.KubeClient.CoreV1().Pods(namespace).List(opts)
}
func (r realPodControl) DeletePod(namespace string, name string) error {
return r.KubeClient.CoreV1().Pods(namespace).Delete(name, nil)
}
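// fakePodControl is a fake podControlInterface for tests; it serves a fixed
// pod list and records the names of deleted pods.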
type fakePodControl struct {
sync.Mutex
Pods []v1.Pod
DeletePodName []string
Err error
}
var _ podControlInterface = &fakePodControl{}
func (f *fakePodControl) ListPods(namespace string, opts metav1.ListOptions) (*v1.PodList, error) {
f.Lock()
defer f.Unlock()
if f.Err != nil {
return nil, f.Err
}
return &v1.PodList{Items: f.Pods}, nil
}
func (f *fakePodControl) DeletePod(namespace string, name string) error {
f.Lock()
defer f.Unlock()
if f.Err != nil {
return f.Err
}
f.DeletePodName = append(f.DeletePodName, name)
return nil
}

252
vendor/k8s.io/kubernetes/pkg/controller/cronjob/utils.go generated vendored Normal file

@@ -0,0 +1,252 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cronjob
import (
"fmt"
"time"
"github.com/golang/glog"
"github.com/robfig/cron"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
ref "k8s.io/client-go/tools/reference"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)
// Utilities for dealing with Jobs and CronJobs and time.
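// inActiveList reports whether a job with the given UID is referenced in the CronJob's Status.Active list.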
func inActiveList(sj batchv1beta1.CronJob, uid types.UID) bool {
for _, j := range sj.Status.Active {
if j.UID == uid {
return true
}
}
return false
}
func deleteFromActiveList(sj *batchv1beta1.CronJob, uid types.UID) {
if sj == nil {
return
}
newActive := []v1.ObjectReference{}
for _, j := range sj.Status.Active {
if j.UID != uid {
newActive = append(newActive, j)
}
}
sj.Status.Active = newActive
}
// getParentUIDFromJob extracts UID of job's parent and whether it was found
func getParentUIDFromJob(j batchv1.Job) (types.UID, bool) {
controllerRef := metav1.GetControllerOf(&j)
if controllerRef == nil {
return types.UID(""), false
}
if controllerRef.Kind != "CronJob" {
glog.V(4).Infof("Job with non-CronJob parent, name %s namespace %s", j.Name, j.Namespace)
return types.UID(""), false
}
return controllerRef.UID, true
}
// groupJobsByParent groups jobs into a map keyed by the job parent UID (e.g. scheduledJob).
// It has no receiver, to facilitate testing.
func groupJobsByParent(js []batchv1.Job) map[types.UID][]batchv1.Job {
jobsBySj := make(map[types.UID][]batchv1.Job)
for _, job := range js {
parentUID, found := getParentUIDFromJob(job)
if !found {
glog.V(4).Infof("Unable to get parent uid from job %s in namespace %s", job.Name, job.Namespace)
continue
}
jobsBySj[parentUID] = append(jobsBySj[parentUID], job)
}
return jobsBySj
}
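// Illustrative sketch (an assumption, not upstream code): given three Jobs,
// two controlled by a CronJob with UID "A" and one with no controller
// reference, the grouping keeps only the owned Jobs:
//
//	jobsBySj := groupJobsByParent(js)
//	// len(jobsBySj) == 1; jobsBySj["A"] holds the two owned Jobs, the orphan is skipped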
// getNextStartTimeAfter returns the next scheduled start time strictly after "now", or an error if the schedule cannot be parsed.
func getNextStartTimeAfter(schedule string, now time.Time) (time.Time, error) {
// Using robfig/cron for cron schedule parsing and next-runtime
// computation. Not using the entire library because:
// - I want to detect when we missed a runtime due to being down.
// - How do I set the time such that I can detect the last known runtime?
// - I guess the functions could launch a goroutine to start the job and
// then return.
// How to handle concurrency control.
// How to detect changes to schedules or deleted schedules and then
// update the jobs?
sched, err := cron.Parse(schedule)
if err != nil {
return time.Unix(0, 0), fmt.Errorf("Unparseable schedule: %s : %s", schedule, err)
}
return sched.Next(now), nil
}
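// Illustrative sketch (an assumption, not upstream code): for an
// hourly-on-the-hour schedule and now = 10:20, the helper above returns 11:00
// and a nil error; an unparseable schedule returns time.Unix(0, 0) and an error.
//
//	next, err := getNextStartTimeAfter(sj.Spec.Schedule, time.Now())
//	// next is the first scheduled time strictly after time.Now()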
// getRecentUnmetScheduleTimes returns a slice of times (oldest first) at which a Job should have started but did not.
//
// If there are too many (> 100) missed start times, it gives up and returns an empty slice and an error.
// Missed times prior to the last known start time are not returned.
func getRecentUnmetScheduleTimes(sj batchv1beta1.CronJob, now time.Time) ([]time.Time, error) {
starts := []time.Time{}
sched, err := cron.ParseStandard(sj.Spec.Schedule)
if err != nil {
return starts, fmt.Errorf("Unparseable schedule: %s : %s", sj.Spec.Schedule, err)
}
var earliestTime time.Time
if sj.Status.LastScheduleTime != nil {
earliestTime = sj.Status.LastScheduleTime.Time
} else {
// If none found, then this is either a recently created scheduledJob,
// or the active/completed info was somehow lost (contract for status
// in kubernetes says it may need to be recreated), or that we have
// started a job, but have not noticed it yet (distributed systems can
// have arbitrary delays). In any case, use the creation time of the
// CronJob as last known start time.
earliestTime = sj.ObjectMeta.CreationTimestamp.Time
}
if sj.Spec.StartingDeadlineSeconds != nil {
// Controller is not going to schedule anything below this point
schedulingDeadline := now.Add(-time.Second * time.Duration(*sj.Spec.StartingDeadlineSeconds))
if schedulingDeadline.After(earliestTime) {
earliestTime = schedulingDeadline
}
}
if earliestTime.After(now) {
return []time.Time{}, nil
}
for t := sched.Next(earliestTime); !t.After(now); t = sched.Next(t) {
starts = append(starts, t)
// An object might miss several starts. For example, if
// controller gets wedged on friday at 5:01pm when everyone has
// gone home, and someone comes in on tuesday AM and discovers
// the problem and restarts the controller, then all the hourly
// jobs, more than 80 of them for one hourly scheduledJob, should
// all start running with no further intervention (if the scheduledJob
// allows concurrency and late starts).
//
// However, if there is a bug somewhere, or incorrect clock
// on controller's server or apiservers (for setting creationTimestamp)
// then there could be so many missed start times (it could be off
// by decades or more), that it would eat up all the CPU and memory
// of this controller. In that case, we want to not try to list
// all the missed start times.
//
// I've somewhat arbitrarily picked 100, as more than 80,
// but less than "lots".
if len(starts) > 100 {
// We can't get the most recent times so just return an empty slice
return []time.Time{}, fmt.Errorf("Too many missed start times (> 100). Set or decrease .spec.startingDeadlineSeconds or check clock skew.")
}
}
return starts, nil
}
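// Illustrative sketch (an assumption, not upstream code): a worked example of
// the function above. With schedule "0 * * * *" (hourly on the hour),
// LastScheduleTime = 10:00 and now = 12:05, the loop yields [11:00, 12:00],
// oldest first. Adding StartingDeadlineSeconds = 3600 pushes earliestTime
// forward to 11:05, so only [12:00] remains.
//
//	times, err := getRecentUnmetScheduleTimes(sj, now)
//	// err == nil; times == [11:00, 12:00] in the first scenario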
// getJobFromTemplate makes a Job from a CronJob
func getJobFromTemplate(sj *batchv1beta1.CronJob, scheduledTime time.Time) (*batchv1.Job, error) {
// TODO: consider adding the following labels:
// nominal-start-time=$RFC_3339_DATE_OF_INTENDED_START -- for user convenience
// scheduled-job-name=$SJ_NAME -- for user convenience
labels := copyLabels(&sj.Spec.JobTemplate)
annotations := copyAnnotations(&sj.Spec.JobTemplate)
// We want the Job name for a given nominal start time to be deterministic, to avoid the same Job being created twice
name := fmt.Sprintf("%s-%d", sj.Name, getTimeHash(scheduledTime))
job := &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
Annotations: annotations,
Name: name,
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(sj, controllerKind)},
},
}
if err := legacyscheme.Scheme.Convert(&sj.Spec.JobTemplate.Spec, &job.Spec, nil); err != nil {
return nil, fmt.Errorf("unable to convert job template: %v", err)
}
return job, nil
}
// getTimeHash returns the Unix epoch time of the given scheduled start time
func getTimeHash(scheduledTime time.Time) int64 {
return scheduledTime.Unix()
}
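// Illustrative note (an assumption, not upstream code): because getTimeHash is
// just the Unix timestamp, a CronJob named "mycronjob" with a nominal start
// time of 2016-05-19T10:00:00Z produces a Job named "mycronjob-1463652000";
// re-running the controller for the same nominal start time reproduces the
// same name, which is how creating the same Job twice is avoided.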
// makeCreatedByRefJson makes a JSON string with an object reference for use as the "created-by" annotation value
func makeCreatedByRefJson(object runtime.Object) (string, error) {
createdByRef, err := ref.GetReference(legacyscheme.Scheme, object)
if err != nil {
return "", fmt.Errorf("unable to get controller reference: %v", err)
}
// TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients
// would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment.
// We need to consistently handle this case of annotation versioning.
codec := legacyscheme.Codecs.LegacyCodec(schema.GroupVersion{Group: v1.GroupName, Version: "v1"})
createdByRefJson, err := runtime.Encode(codec, &v1.SerializedReference{
Reference: *createdByRef,
})
if err != nil {
return "", fmt.Errorf("unable to serialize controller reference: %v", err)
}
return string(createdByRefJson), nil
}
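// getFinishedStatus returns whether the given Job has finished and, if so, the
// condition type (Complete or Failed) that marks it as finished.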
func getFinishedStatus(j *batchv1.Job) (bool, batchv1.JobConditionType) {
for _, c := range j.Status.Conditions {
if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == v1.ConditionTrue {
return true, c.Type
}
}
return false, ""
}
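// IsJobFinished returns whether or not a job has completed successfully or failed.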
func IsJobFinished(j *batchv1.Job) bool {
isFinished, _ := getFinishedStatus(j)
return isFinished
}
// byJobStartTime sorts a list of jobs by start timestamp, using their names as a tie breaker.
type byJobStartTime []batchv1.Job
func (o byJobStartTime) Len() int { return len(o) }
func (o byJobStartTime) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o byJobStartTime) Less(i, j int) bool {
if o[j].Status.StartTime == nil {
return o[i].Status.StartTime != nil
}
if o[i].Status.StartTime.Equal(o[j].Status.StartTime) {
return o[i].Name < o[j].Name
}
return o[i].Status.StartTime.Before(o[j].Status.StartTime)
}
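// Illustrative sketch (an assumption, not upstream code): callers would
// typically sort candidate Jobs oldest-first with the standard library before,
// e.g., trimming history; Jobs with a nil StartTime sort last.
//
//	sort.Sort(byJobStartTime(jobs))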

View File

@ -0,0 +1,378 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cronjob
import (
"strings"
"testing"
"time"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
func boolptr(b bool) *bool { return &b }
func TestGetJobFromTemplate(t *testing.T) {
// getJobFromTemplate() needs to take the job template and copy the labels and annotations
// and other fields, and add a created-by reference.
var one int64 = 1
var no bool = false
sj := batchv1beta1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: "mycronjob",
Namespace: "snazzycats",
UID: types.UID("1a2b3c"),
SelfLink: "/apis/batch/v1/namespaces/snazzycats/jobs/mycronjob",
},
Spec: batchv1beta1.CronJobSpec{
Schedule: "* * * * ?",
ConcurrencyPolicy: batchv1beta1.AllowConcurrent,
JobTemplate: batchv1beta1.JobTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"a": "b"},
Annotations: map[string]string{"x": "y"},
},
Spec: batchv1.JobSpec{
ActiveDeadlineSeconds: &one,
ManualSelector: &no,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Image: "foo/bar"},
},
},
},
},
},
},
}
var job *batchv1.Job
job, err := getJobFromTemplate(&sj, time.Time{})
if err != nil {
t.Errorf("Did not expect error: %s", err)
}
if !strings.HasPrefix(job.ObjectMeta.Name, "mycronjob-") {
t.Errorf("Wrong Name")
}
if len(job.ObjectMeta.Labels) != 1 {
t.Errorf("Wrong number of labels")
}
if len(job.ObjectMeta.Annotations) != 1 {
t.Errorf("Wrong number of annotations")
}
}
func TestGetParentUIDFromJob(t *testing.T) {
j := &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: "foobar",
Namespace: metav1.NamespaceDefault,
},
Spec: batchv1.JobSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Image: "foo/bar"},
},
},
},
},
Status: batchv1.JobStatus{
Conditions: []batchv1.JobCondition{{
Type: batchv1.JobComplete,
Status: v1.ConditionTrue,
}},
},
}
{
// Case 1: No ControllerRef
_, found := getParentUIDFromJob(*j)
if found {
t.Errorf("Unexpectedly found uid")
}
}
{
// Case 2: Has ControllerRef
j.ObjectMeta.SetOwnerReferences([]metav1.OwnerReference{
{
Kind: "CronJob",
UID: types.UID("5ef034e0-1890-11e6-8935-42010af0003e"),
Controller: boolptr(true),
},
})
expectedUID := types.UID("5ef034e0-1890-11e6-8935-42010af0003e")
uid, found := getParentUIDFromJob(*j)
if !found {
t.Errorf("Unexpectedly did not find uid")
} else if uid != expectedUID {
t.Errorf("Wrong UID: %v", uid)
}
}
}
func TestGroupJobsByParent(t *testing.T) {
uid1 := types.UID("11111111-1111-1111-1111-111111111111")
uid2 := types.UID("22222222-2222-2222-2222-222222222222")
uid3 := types.UID("33333333-3333-3333-3333-333333333333")
ownerReference1 := metav1.OwnerReference{
Kind: "CronJob",
UID: uid1,
Controller: boolptr(true),
}
ownerReference2 := metav1.OwnerReference{
Kind: "CronJob",
UID: uid2,
Controller: boolptr(true),
}
ownerReference3 := metav1.OwnerReference{
Kind: "CronJob",
UID: uid3,
Controller: boolptr(true),
}
{
// Case 1: There are no jobs and scheduledJobs
js := []batchv1.Job{}
jobsBySj := groupJobsByParent(js)
if len(jobsBySj) != 0 {
t.Errorf("Wrong number of items in map")
}
}
{
// Case 2: there is one controller with one job it created.
js := []batchv1.Job{
{ObjectMeta: metav1.ObjectMeta{Name: "a", Namespace: "x", OwnerReferences: []metav1.OwnerReference{ownerReference1}}},
}
jobsBySj := groupJobsByParent(js)
if len(jobsBySj) != 1 {
t.Errorf("Wrong number of items in map")
}
jobList1, found := jobsBySj[uid1]
if !found {
t.Errorf("Key not found")
}
if len(jobList1) != 1 {
t.Errorf("Wrong number of jobs in list")
}
}
{
// Case 3: Two namespaces, one has two jobs from one controller, the other has 3 jobs from two controllers.
// There are also two jobs with no controller owner reference.
js := []batchv1.Job{
{ObjectMeta: metav1.ObjectMeta{Name: "a", Namespace: "x", OwnerReferences: []metav1.OwnerReference{ownerReference1}}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", Namespace: "x", OwnerReferences: []metav1.OwnerReference{ownerReference2}}},
{ObjectMeta: metav1.ObjectMeta{Name: "c", Namespace: "x", OwnerReferences: []metav1.OwnerReference{ownerReference1}}},
{ObjectMeta: metav1.ObjectMeta{Name: "d", Namespace: "x", OwnerReferences: []metav1.OwnerReference{}}},
{ObjectMeta: metav1.ObjectMeta{Name: "a", Namespace: "y", OwnerReferences: []metav1.OwnerReference{ownerReference3}}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", Namespace: "y", OwnerReferences: []metav1.OwnerReference{ownerReference3}}},
{ObjectMeta: metav1.ObjectMeta{Name: "d", Namespace: "y", OwnerReferences: []metav1.OwnerReference{}}},
}
jobsBySj := groupJobsByParent(js)
if len(jobsBySj) != 3 {
t.Errorf("Wrong number of items in map")
}
jobList1, found := jobsBySj[uid1]
if !found {
t.Errorf("Key not found")
}
if len(jobList1) != 2 {
t.Errorf("Wrong number of jobs in list")
}
jobList2, found := jobsBySj[uid2]
if !found {
t.Errorf("Key not found")
}
if len(jobList2) != 1 {
t.Errorf("Wrong number of jobs in list")
}
jobList3, found := jobsBySj[uid3]
if !found {
t.Errorf("Key not found")
}
if len(jobList3) != 2 {
t.Errorf("Wrong number of jobs in list")
}
}
}
func TestGetRecentUnmetScheduleTimes(t *testing.T) {
// schedule is hourly on the hour
schedule := "0 * * * ?"
// T1 is a scheduled start time of that schedule
T1, err := time.Parse(time.RFC3339, "2016-05-19T10:00:00Z")
if err != nil {
t.Errorf("test setup error: %v", err)
}
// T2 is a scheduled start time of that schedule after T1
T2, err := time.Parse(time.RFC3339, "2016-05-19T11:00:00Z")
if err != nil {
t.Errorf("test setup error: %v", err)
}
sj := batchv1beta1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: "mycronjob",
Namespace: metav1.NamespaceDefault,
UID: types.UID("1a2b3c"),
},
Spec: batchv1beta1.CronJobSpec{
Schedule: schedule,
ConcurrencyPolicy: batchv1beta1.AllowConcurrent,
JobTemplate: batchv1beta1.JobTemplateSpec{},
},
}
{
// Case 1: no known start times, and none needed yet.
// Creation time is before T1.
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-10 * time.Minute)}
// Current time is more than creation time, but less than T1.
now := T1.Add(-7 * time.Minute)
times, err := getRecentUnmetScheduleTimes(sj, now)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(times) != 0 {
t.Errorf("expected no start times, got: %v", times)
}
}
{
// Case 2: no known start times, and one needed.
// Creation time is before T1.
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-10 * time.Minute)}
// Current time is after T1
now := T1.Add(2 * time.Second)
times, err := getRecentUnmetScheduleTimes(sj, now)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(times) != 1 {
t.Errorf("expected 1 start time, got: %v", times)
} else if !times[0].Equal(T1) {
t.Errorf("expected: %v, got: %v", T1, times[0])
}
}
{
// Case 3: known LastScheduleTime, no start needed.
// Creation time is before T1.
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-10 * time.Minute)}
// Status shows a start at the expected time.
sj.Status.LastScheduleTime = &metav1.Time{Time: T1}
// Current time is after T1
now := T1.Add(2 * time.Minute)
times, err := getRecentUnmetScheduleTimes(sj, now)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(times) != 0 {
t.Errorf("expected 0 start times, got: %v", times)
}
}
{
// Case 4: known LastScheduleTime, a start needed
// Creation time is before T1.
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-10 * time.Minute)}
// Status shows a start at the expected time.
sj.Status.LastScheduleTime = &metav1.Time{Time: T1}
// Current time is after T1 and after T2
now := T2.Add(5 * time.Minute)
times, err := getRecentUnmetScheduleTimes(sj, now)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(times) != 1 {
t.Errorf("expected 1 start time, got: %v", times)
} else if !times[0].Equal(T2) {
t.Errorf("expected: %v, got: %v", T2, times[0])
}
}
{
// Case 5: known LastScheduleTime, two starts needed
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-2 * time.Hour)}
sj.Status.LastScheduleTime = &metav1.Time{Time: T1.Add(-1 * time.Hour)}
// Current time is after T1 and after T2
now := T2.Add(5 * time.Minute)
times, err := getRecentUnmetScheduleTimes(sj, now)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(times) != 2 {
t.Errorf("expected 2 start times, got: %v", times)
} else {
if !times[0].Equal(T1) {
t.Errorf("expected: %v, got: %v", T1, times[0])
}
if !times[1].Equal(T2) {
t.Errorf("expected: %v, got: %v", T2, times[1])
}
}
}
{
// Case 6: now is way way ahead of last start time, and there is no deadline.
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-2 * time.Hour)}
sj.Status.LastScheduleTime = &metav1.Time{Time: T1.Add(-1 * time.Hour)}
now := T2.Add(10 * 24 * time.Hour)
_, err := getRecentUnmetScheduleTimes(sj, now)
if err == nil {
t.Errorf("expected an error")
}
}
{
// Case 7: now is way way ahead of last start time, but there is a short deadline.
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-2 * time.Hour)}
sj.Status.LastScheduleTime = &metav1.Time{Time: T1.Add(-1 * time.Hour)}
now := T2.Add(10 * 24 * time.Hour)
// Deadline is short
deadline := int64(2 * 60 * 60)
sj.Spec.StartingDeadlineSeconds = &deadline
_, err := getRecentUnmetScheduleTimes(sj, now)
if err != nil {
t.Errorf("unexpected error")
}
}
}