Fresh dep ensure

Author: Mike Cronce
Date: 2018-11-26 13:23:56 -05:00
Parent: 93cb8a04d7
Commit: 407478ab9a
9016 changed files with 551394 additions and 279685 deletions

60
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scale/BUILD generated vendored Normal file

@@ -0,0 +1,60 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"scale.go",
"scalejob.go",
],
importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/scale",
visibility = ["//visibility:public"],
deps = [
"//pkg/kubectl:go_default_library",
"//pkg/kubectl/cmd/util:go_default_library",
"//pkg/kubectl/util/i18n:go_default_library",
"//pkg/kubectl/util/templates:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library",
"//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library",
"//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/batch/v1:go_default_library",
"//vendor/github.com/spf13/cobra:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["scalejob_test.go"],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/batch/v1:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

294
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scale/scale.go generated vendored Normal file

@@ -0,0 +1,294 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scale
import (
"fmt"
"time"
"github.com/spf13/cobra"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/genericclioptions/printers"
"k8s.io/cli-runtime/pkg/genericclioptions/resource"
"k8s.io/client-go/kubernetes"
batchclient "k8s.io/client-go/kubernetes/typed/batch/v1"
"k8s.io/kubernetes/pkg/kubectl"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/util/i18n"
"k8s.io/kubernetes/pkg/kubectl/util/templates"
)
var (
scaleLong = templates.LongDesc(i18n.T(`
Set a new size for a Deployment, ReplicaSet, Replication Controller, or StatefulSet.
Scale also allows users to specify one or more preconditions for the scale action.
If --current-replicas or --resource-version is specified, it is validated before the
scale is attempted, and it is guaranteed that the precondition holds true when the
scale is sent to the server.`))
scaleExample = templates.Examples(i18n.T(`
# Scale a replicaset named 'foo' to 3.
kubectl scale --replicas=3 rs/foo
# Scale a resource identified by type and name specified in "foo.yaml" to 3.
kubectl scale --replicas=3 -f foo.yaml
# If the deployment named mysql's current size is 2, scale mysql to 3.
kubectl scale --current-replicas=2 --replicas=3 deployment/mysql
# Scale multiple replication controllers.
kubectl scale --replicas=5 rc/foo rc/bar rc/baz
# Scale statefulset named 'web' to 3.
kubectl scale --replicas=3 statefulset/web`))
)
const (
timeout = 5 * time.Minute
)
type ScaleOptions struct {
FilenameOptions resource.FilenameOptions
RecordFlags *genericclioptions.RecordFlags
PrintFlags *genericclioptions.PrintFlags
PrintObj printers.ResourcePrinterFunc
Selector string
All bool
Replicas int
ResourceVersion string
CurrentReplicas int
Timeout time.Duration
Recorder genericclioptions.Recorder
builder *resource.Builder
namespace string
enforceNamespace bool
args []string
shortOutput bool
clientSet kubernetes.Interface
scaler kubectl.Scaler
unstructuredClientForMapping func(mapping *meta.RESTMapping) (resource.RESTClient, error)
parent string
genericclioptions.IOStreams
}
func NewScaleOptions(ioStreams genericclioptions.IOStreams) *ScaleOptions {
return &ScaleOptions{
PrintFlags: genericclioptions.NewPrintFlags("scaled"),
RecordFlags: genericclioptions.NewRecordFlags(),
CurrentReplicas: -1,
Recorder: genericclioptions.NoopRecorder{},
IOStreams: ioStreams,
}
}
// NewCmdScale returns a cobra command with the appropriate configuration and flags to run scale
func NewCmdScale(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command {
o := NewScaleOptions(ioStreams)
validArgs := []string{"deployment", "replicaset", "replicationcontroller", "statefulset"}
cmd := &cobra.Command{
Use: "scale [--resource-version=version] [--current-replicas=count] --replicas=COUNT (-f FILENAME | TYPE NAME)",
DisableFlagsInUseLine: true,
Short: i18n.T("Set a new size for a Deployment, ReplicaSet, Replication Controller, or Job"),
Long: scaleLong,
Example: scaleExample,
Run: func(cmd *cobra.Command, args []string) {
cmdutil.CheckErr(o.Complete(f, cmd, args))
cmdutil.CheckErr(o.Validate(cmd))
cmdutil.CheckErr(o.RunScale())
},
ValidArgs: validArgs,
}
o.RecordFlags.AddFlags(cmd)
o.PrintFlags.AddFlags(cmd)
cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
cmd.Flags().BoolVar(&o.All, "all", o.All, "Select all resources in the namespace of the specified resource types")
cmd.Flags().StringVar(&o.ResourceVersion, "resource-version", o.ResourceVersion, i18n.T("Precondition for resource version. Requires that the current resource version match this value in order to scale."))
cmd.Flags().IntVar(&o.CurrentReplicas, "current-replicas", o.CurrentReplicas, "Precondition for current size. Requires that the current size of the resource match this value in order to scale.")
cmd.Flags().IntVar(&o.Replicas, "replicas", o.Replicas, "The new desired number of replicas. Required.")
cmd.MarkFlagRequired("replicas")
cmd.Flags().DurationVar(&o.Timeout, "timeout", 0, "The length of time to wait before giving up on a scale operation, zero means don't wait. Any other values should contain a corresponding time unit (e.g. 1s, 2m, 3h).")
cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, "identifying the resource to set a new size")
return cmd
}
func (o *ScaleOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error {
var err error
o.RecordFlags.Complete(cmd)
o.Recorder, err = o.RecordFlags.ToRecorder()
if err != nil {
return err
}
printer, err := o.PrintFlags.ToPrinter()
if err != nil {
return err
}
o.PrintObj = printer.PrintObj
o.namespace, o.enforceNamespace, err = f.ToRawKubeConfigLoader().Namespace()
if err != nil {
return err
}
o.builder = f.NewBuilder()
o.args = args
o.shortOutput = cmdutil.GetFlagString(cmd, "output") == "name"
o.clientSet, err = f.KubernetesClientSet()
if err != nil {
return err
}
o.scaler, err = scaler(f)
if err != nil {
return err
}
o.unstructuredClientForMapping = f.UnstructuredClientForMapping
o.parent = cmd.Parent().Name()
return nil
}
func (o *ScaleOptions) Validate(cmd *cobra.Command) error {
if o.Replicas < 0 {
return fmt.Errorf("The --replicas=COUNT flag is required, and COUNT must be greater than or equal to 0")
}
return nil
}
// RunScale executes the scaling
func (o *ScaleOptions) RunScale() error {
r := o.builder.
Unstructured().
ContinueOnError().
NamespaceParam(o.namespace).DefaultNamespace().
FilenameParam(o.enforceNamespace, &o.FilenameOptions).
ResourceTypeOrNameArgs(o.All, o.args...).
Flatten().
LabelSelectorParam(o.Selector).
Do()
err := r.Err()
if err != nil {
return err
}
infos := []*resource.Info{}
err = r.Visit(func(info *resource.Info, err error) error {
if err == nil {
infos = append(infos, info)
}
return nil
})
if len(o.ResourceVersion) != 0 && len(infos) > 1 {
return fmt.Errorf("cannot use --resource-version with multiple resources")
}
precondition := &kubectl.ScalePrecondition{Size: o.CurrentReplicas, ResourceVersion: o.ResourceVersion}
retry := kubectl.NewRetryParams(1*time.Second, 5*time.Minute)
var waitForReplicas *kubectl.RetryParams
if o.Timeout != 0 {
waitForReplicas = kubectl.NewRetryParams(1*time.Second, timeout)
}
counter := 0
err = r.Visit(func(info *resource.Info, err error) error {
if err != nil {
return err
}
mapping := info.ResourceMapping()
if mapping.Resource.GroupResource() == (schema.GroupResource{Group: "batch", Resource: "jobs"}) {
// go down the legacy jobs path. This can be removed in 3.14. For now, contain it.
fmt.Fprintf(o.ErrOut, "%s scale job is DEPRECATED and will be removed in a future version.\n", o.parent)
if err := ScaleJob(info, o.clientSet.Batch(), uint(o.Replicas), precondition, retry, waitForReplicas); err != nil {
return err
}
} else {
if err := o.scaler.Scale(info.Namespace, info.Name, uint(o.Replicas), precondition, retry, waitForReplicas, mapping.Resource.GroupResource()); err != nil {
return err
}
}
// if the recorder makes a change, compute and create another patch
if mergePatch, err := o.Recorder.MakeRecordMergePatch(info.Object); err != nil {
klog.V(4).Infof("error recording current command: %v", err)
} else if len(mergePatch) > 0 {
client, err := o.unstructuredClientForMapping(mapping)
if err != nil {
return err
}
helper := resource.NewHelper(client, mapping)
if _, err := helper.Patch(info.Namespace, info.Name, types.MergePatchType, mergePatch, nil); err != nil {
klog.V(4).Infof("error recording reason: %v", err)
}
}
counter++
return o.PrintObj(info.Object, o.Out)
})
if err != nil {
return err
}
if counter == 0 {
return fmt.Errorf("no objects passed to scale")
}
return nil
}
func ScaleJob(info *resource.Info, jobsClient batchclient.JobsGetter, count uint, preconditions *kubectl.ScalePrecondition, retry, waitForReplicas *kubectl.RetryParams) error {
scaler := JobPsuedoScaler{
JobsClient: jobsClient,
}
var jobPreconditions *ScalePrecondition
if preconditions != nil {
jobPreconditions = &ScalePrecondition{Size: preconditions.Size, ResourceVersion: preconditions.ResourceVersion}
}
var jobRetry *RetryParams
if retry != nil {
jobRetry = &RetryParams{Interval: retry.Interval, Timeout: retry.Timeout}
}
var jobWaitForReplicas *RetryParams
if waitForReplicas != nil {
jobWaitForReplicas = &RetryParams{Interval: waitForReplicas.Interval, Timeout: waitForReplicas.Timeout}
}
return scaler.Scale(info.Namespace, info.Name, count, jobPreconditions, jobRetry, jobWaitForReplicas)
}
func scaler(f cmdutil.Factory) (kubectl.Scaler, error) {
scalesGetter, err := cmdutil.ScaleClientFn(f)
if err != nil {
return nil, err
}
return kubectl.NewScaler(scalesGetter), nil
}
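
For context, below is a minimal sketch (not part of this diff) of how a command built from this package could be wired into a cobra root command. The factory construction via genericclioptions config flags is an assumption, based on how kubectl assembles its own factory in this release line; the "example" root command is hypothetical.

package main

import (
	"os"

	"github.com/spf13/cobra"
	"k8s.io/cli-runtime/pkg/genericclioptions"
	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
	"k8s.io/kubernetes/pkg/kubectl/cmd/scale"
)

func main() {
	// Streams the scale command writes its output and errors to.
	streams := genericclioptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr}

	// Assumption: build a Factory from the generic kubeconfig flags, as kubectl does.
	kubeConfigFlags := genericclioptions.NewConfigFlags()
	f := cmdutil.NewFactory(kubeConfigFlags)

	// Attach the scale subcommand to a hypothetical root command.
	root := &cobra.Command{Use: "example"}
	root.AddCommand(scale.NewCmdScale(f, streams))

	if err := root.Execute(); err != nil {
		os.Exit(1)
	}
}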

161
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scale/scalejob.go generated vendored Normal file

@@ -0,0 +1,161 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scale
import (
"fmt"
"strconv"
"time"
batch "k8s.io/api/batch/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
batchclient "k8s.io/client-go/kubernetes/typed/batch/v1"
)
// ScalePrecondition is a deprecated precondition
type ScalePrecondition struct {
Size int
ResourceVersion string
}
// RetryParams is a deprecated retry struct
type RetryParams struct {
Interval, Timeout time.Duration
}
// PreconditionError is a deprecated error
type PreconditionError struct {
Precondition string
ExpectedValue string
ActualValue string
}
func (pe PreconditionError) Error() string {
return fmt.Sprintf("Expected %s to be %s, was %s", pe.Precondition, pe.ExpectedValue, pe.ActualValue)
}
// scaleCondition is a closure around ScaleSimple that facilitates retries via util/wait
func scaleCondition(r *JobPsuedoScaler, precondition *ScalePrecondition, namespace, name string, count uint, updatedResourceVersion *string) wait.ConditionFunc {
return func() (bool, error) {
rv, err := r.ScaleSimple(namespace, name, precondition, count)
if updatedResourceVersion != nil {
*updatedResourceVersion = rv
}
// Retry only on update conflicts.
if errors.IsConflict(err) {
return false, nil
}
if err != nil {
return false, err
}
return true, nil
}
}
// JobPsuedoScaler is a deprecated scale-similar thing that doesn't obey scale semantics
type JobPsuedoScaler struct {
JobsClient batchclient.JobsGetter
}
// ScaleSimple is responsible for updating job's parallelism. It returns the
// resourceVersion of the job if the update is successful.
func (scaler *JobPsuedoScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) {
job, err := scaler.JobsClient.Jobs(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return "", err
}
if preconditions != nil {
if err := validateJob(job, preconditions); err != nil {
return "", err
}
}
parallelism := int32(newSize)
job.Spec.Parallelism = &parallelism
updatedJob, err := scaler.JobsClient.Jobs(namespace).Update(job)
if err != nil {
return "", err
}
return updatedJob.ObjectMeta.ResourceVersion, nil
}
// Scale updates a Job to a new size, with optional precondition check (if preconditions is not nil),
// optional retries (if retry is not nil), and then optionally waits for parallelism to reach desired
// number, which can be less than requested based on job's current progress.
func (scaler *JobPsuedoScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
if preconditions == nil {
preconditions = &ScalePrecondition{-1, ""}
}
if retry == nil {
// Make it try only once, immediately
retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
}
cond := scaleCondition(scaler, preconditions, namespace, name, newSize, nil)
if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
return err
}
if waitForReplicas != nil {
job, err := scaler.JobsClient.Jobs(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return err
}
err = wait.PollImmediate(waitForReplicas.Interval, waitForReplicas.Timeout, jobHasDesiredParallelism(scaler.JobsClient, job))
if err == wait.ErrWaitTimeout {
return fmt.Errorf("timed out waiting for %q to be synced", name)
}
return err
}
return nil
}
// jobHasDesiredParallelism returns a condition that will be true if the desired parallelism count
// for a job equals the current active counts or is less by an appropriate successful/unsuccessful count.
func jobHasDesiredParallelism(jobClient batchclient.JobsGetter, job *batch.Job) wait.ConditionFunc {
return func() (bool, error) {
job, err := jobClient.Jobs(job.Namespace).Get(job.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
// desired parallelism can be either the exact number, in which case return immediately
if job.Status.Active == *job.Spec.Parallelism {
return true, nil
}
if job.Spec.Completions == nil {
// A job without specified completions needs to wait for Active to reach Parallelism.
return false, nil
}
// otherwise count successful
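// e.g. Completions=10, Active=4, Succeeded=6 -> progress = 10-4-6 = 0, so the job counts as synced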
progress := *job.Spec.Completions - job.Status.Active - job.Status.Succeeded
return progress <= 0, nil
}
}
func validateJob(job *batch.Job, precondition *ScalePrecondition) error {
if precondition.Size != -1 && job.Spec.Parallelism == nil {
return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), "nil"}
}
if precondition.Size != -1 && int(*job.Spec.Parallelism) != precondition.Size {
return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), strconv.Itoa(int(*job.Spec.Parallelism))}
}
if len(precondition.ResourceVersion) != 0 && job.ResourceVersion != precondition.ResourceVersion {
return PreconditionError{"resource version", precondition.ResourceVersion, job.ResourceVersion}
}
return nil
}
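
A minimal sketch of driving the deprecated job scaler above directly, outside the cobra command. The kubeconfig loading, namespace, and job name are assumptions for illustration only; they are not part of this diff.

package main

import (
	"log"
	"time"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/kubernetes/pkg/kubectl/cmd/scale"
)

func main() {
	// Assumption: load client config from the default kubeconfig location.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	cs, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}

	scaler := &scale.JobPsuedoScaler{JobsClient: cs.BatchV1()}

	// No precondition (nil defaults to Size -1, which skips the check); retry every
	// second for up to a minute; do not wait for parallelism to be reached afterwards.
	retry := &scale.RetryParams{Interval: time.Second, Timeout: time.Minute}
	if err := scaler.Scale("default", "example-job", 3, nil, retry, nil); err != nil {
		log.Fatal(err)
	}
}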

292
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/scale/scalejob_test.go generated vendored Normal file

@@ -0,0 +1,292 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scale
import (
"errors"
"testing"
batch "k8s.io/api/batch/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
api "k8s.io/apimachinery/pkg/apis/testapigroup/v1"
"k8s.io/client-go/kubernetes/fake"
batchclient "k8s.io/client-go/kubernetes/typed/batch/v1"
testcore "k8s.io/client-go/testing"
)
type errorJobs struct {
batchclient.JobInterface
conflict bool
invalid bool
}
func (c *errorJobs) Update(job *batch.Job) (*batch.Job, error) {
switch {
case c.invalid:
return nil, kerrors.NewInvalid(api.Kind(job.Kind), job.Name, nil)
case c.conflict:
return nil, kerrors.NewConflict(api.Resource(job.Kind), job.Name, nil)
}
return nil, errors.New("Job update failure")
}
func (c *errorJobs) Get(name string, options metav1.GetOptions) (*batch.Job, error) {
zero := int32(0)
return &batch.Job{
Spec: batch.JobSpec{
Parallelism: &zero,
},
}, nil
}
type errorJobClient struct {
batchclient.JobsGetter
conflict bool
invalid bool
}
func (c *errorJobClient) Jobs(namespace string) batchclient.JobInterface {
return &errorJobs{
JobInterface: c.JobsGetter.Jobs(namespace),
conflict: c.conflict,
invalid: c.invalid,
}
}
func TestJobScaleRetry(t *testing.T) {
fake := &errorJobClient{JobsGetter: fake.NewSimpleClientset().Batch(), conflict: true}
scaler := &JobPsuedoScaler{JobsClient: fake}
preconditions := ScalePrecondition{-1, ""}
count := uint(3)
name := "foo"
namespace := "default"
scaleFunc := scaleCondition(scaler, &preconditions, namespace, name, count, nil)
pass, err := scaleFunc()
if pass != false {
t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
}
if err != nil {
t.Errorf("Did not expect an error on update failure, got %v", err)
}
preconditions = ScalePrecondition{3, ""}
scaleFunc = scaleCondition(scaler, &preconditions, namespace, name, count, nil)
pass, err = scaleFunc()
if err == nil {
t.Error("Expected error on precondition failure")
}
}
func job() *batch.Job {
return &batch.Job{
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
Name: "foo",
},
}
}
func TestJobScale(t *testing.T) {
fakeClientset := fake.NewSimpleClientset(job())
scaler := &JobPsuedoScaler{JobsClient: fakeClientset.Batch()}
preconditions := ScalePrecondition{-1, ""}
count := uint(3)
name := "foo"
scaler.Scale("default", name, count, &preconditions, nil, nil)
actions := fakeClientset.Actions()
if len(actions) != 2 {
t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions)
}
if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != batch.Resource("jobs") || action.GetName() != name {
t.Errorf("unexpected action: %v, expected get-job %s", actions[0], name)
}
if action, ok := actions[1].(testcore.UpdateAction); !ok || action.GetResource().GroupResource() != batch.Resource("jobs") || *action.GetObject().(*batch.Job).Spec.Parallelism != int32(count) {
t.Errorf("unexpected action %v, expected update-job with parallelism = %d", actions[1], count)
}
}
func TestJobScaleInvalid(t *testing.T) {
fake := &errorJobClient{JobsGetter: fake.NewSimpleClientset().Batch(), invalid: true}
scaler := &JobPsuedoScaler{JobsClient: fake}
preconditions := ScalePrecondition{-1, ""}
count := uint(3)
name := "foo"
namespace := "default"
scaleFunc := scaleCondition(scaler, &preconditions, namespace, name, count, nil)
pass, err := scaleFunc()
if pass {
t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
}
if err == nil {
t.Errorf("Expected error on invalid update failure, got %v", err)
}
}
func TestJobScaleFailsPreconditions(t *testing.T) {
ten := int32(10)
fake := fake.NewSimpleClientset(&batch.Job{
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
Name: "foo",
},
Spec: batch.JobSpec{
Parallelism: &ten,
},
})
scaler := &JobPsuedoScaler{JobsClient: fake.Batch()}
preconditions := ScalePrecondition{2, ""}
count := uint(3)
name := "foo"
scaler.Scale("default", name, count, &preconditions, nil, nil)
actions := fake.Actions()
if len(actions) != 1 {
t.Errorf("unexpected actions: %v, expected 1 actions (get)", actions)
}
if action, ok := actions[0].(testcore.GetAction); !ok || action.GetResource().GroupResource() != batch.Resource("jobs") || action.GetName() != name {
t.Errorf("unexpected action: %v, expected get-job %s", actions[0], name)
}
}
func TestValidateJob(t *testing.T) {
zero, ten, twenty := int32(0), int32(10), int32(20)
tests := []struct {
preconditions ScalePrecondition
job batch.Job
expectError bool
test string
}{
{
preconditions: ScalePrecondition{-1, ""},
expectError: false,
test: "defaults",
},
{
preconditions: ScalePrecondition{-1, ""},
job: batch.Job{
ObjectMeta: metav1.ObjectMeta{
ResourceVersion: "foo",
},
Spec: batch.JobSpec{
Parallelism: &ten,
},
},
expectError: false,
test: "defaults 2",
},
{
preconditions: ScalePrecondition{0, ""},
job: batch.Job{
ObjectMeta: metav1.ObjectMeta{
ResourceVersion: "foo",
},
Spec: batch.JobSpec{
Parallelism: &zero,
},
},
expectError: false,
test: "size matches",
},
{
preconditions: ScalePrecondition{-1, "foo"},
job: batch.Job{
ObjectMeta: metav1.ObjectMeta{
ResourceVersion: "foo",
},
Spec: batch.JobSpec{
Parallelism: &ten,
},
},
expectError: false,
test: "resource version matches",
},
{
preconditions: ScalePrecondition{10, "foo"},
job: batch.Job{
ObjectMeta: metav1.ObjectMeta{
ResourceVersion: "foo",
},
Spec: batch.JobSpec{
Parallelism: &ten,
},
},
expectError: false,
test: "both match",
},
{
preconditions: ScalePrecondition{10, "foo"},
job: batch.Job{
ObjectMeta: metav1.ObjectMeta{
ResourceVersion: "foo",
},
Spec: batch.JobSpec{
Parallelism: &twenty,
},
},
expectError: true,
test: "size different",
},
{
preconditions: ScalePrecondition{10, "foo"},
job: batch.Job{
ObjectMeta: metav1.ObjectMeta{
ResourceVersion: "foo",
},
},
expectError: true,
test: "parallelism nil",
},
{
preconditions: ScalePrecondition{10, "foo"},
job: batch.Job{
ObjectMeta: metav1.ObjectMeta{
ResourceVersion: "bar",
},
Spec: batch.JobSpec{
Parallelism: &ten,
},
},
expectError: true,
test: "version different",
},
{
preconditions: ScalePrecondition{10, "foo"},
job: batch.Job{
ObjectMeta: metav1.ObjectMeta{
ResourceVersion: "bar",
},
Spec: batch.JobSpec{
Parallelism: &twenty,
},
},
expectError: true,
test: "both different",
},
}
for _, test := range tests {
err := validateJob(&test.job, &test.preconditions)
if err != nil && !test.expectError {
t.Errorf("unexpected error: %v (%s)", err, test.test)
}
if err == nil && test.expectError {
t.Errorf("expected an error: %v (%s)", err, test.test)
}
}
}