vendor updates

Serguei Bezverkhi
2018-03-06 17:33:18 -05:00
parent 4b3ebc171b
commit e9033989a0
5854 changed files with 248382 additions and 119809 deletions


@ -21,49 +21,48 @@ import (
"strconv"
"time"
autoscalingapi "k8s.io/api/autoscaling/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/batch"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
scaleclient "k8s.io/client-go/scale"
appsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion"
batchclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion"
coreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion"
client "k8s.io/kubernetes/pkg/client/unversioned"
)
// TODO: Figure out if we should be waiting on initializers in the Scale() functions below.
// Scaler provides an interface for resources that can be scaled.
type Scaler interface {
// Scale scales the named resource after checking preconditions. It optionally
// retries in the event of resource version mismatch (if retry is not nil),
// and optionally waits until the status of the resource matches newSize (if wait is not nil)
// TODO: Make the implementation of this watch-based (#56075) once #31345 is fixed.
Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, wait *RetryParams) error
// ScaleSimple does a simple one-shot attempt at scaling - not useful on its own, but
// a necessary building block for Scale
ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (updatedResourceVersion string, err error)
}
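// For orientation: every implementation below builds Scale on top of
// ScaleSimple by wrapping it in ScaleCondition (the wait.ConditionFunc
// adapter used throughout this file) and polling. A minimal sketch of that
// retry loop, with illustrative interval/timeout values and a hypothetical
// scaler variable:
//
//	retry := &RetryParams{Interval: time.Second, Timeout: time.Minute}
//	cond := ScaleCondition(scaler, &ScalePrecondition{-1, ""}, namespace, name, newSize, nil)
//	if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
//		return err // precondition failure, unresolved conflicts, or timeout
//	}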
func ScalerFor(kind schema.GroupKind, c internalclientset.Interface) (Scaler, error) {
// ScalerFor gets a scaler for a given resource
func ScalerFor(kind schema.GroupKind, jobsClient batchclient.JobsGetter, scalesGetter scaleclient.ScalesGetter, gr schema.GroupResource) Scaler {
// it seems like jobs don't follow "normal" scale semantics.
// For example, it is not clear whether HPA could make use of it or not.
// For more details see: https://github.com/kubernetes/kubernetes/pull/58468
switch kind {
case api.Kind("ReplicationController"):
return &ReplicationControllerScaler{c.Core()}, nil
case extensions.Kind("ReplicaSet"), apps.Kind("ReplicaSet"):
return &ReplicaSetScaler{c.Extensions()}, nil
case batch.Kind("Job"):
return &JobScaler{c.Batch()}, nil // Either kind of job can be scaled with Batch interface.
case apps.Kind("StatefulSet"):
return &StatefulSetScaler{c.Apps()}, nil
case extensions.Kind("Deployment"), apps.Kind("Deployment"):
return &DeploymentScaler{c.Extensions()}, nil
return &jobScaler{jobsClient} // Either kind of job can be scaled with Batch interface.
default:
return &genericScaler{scalesGetter, gr}
}
return nil, fmt.Errorf("no scaler has been implemented for %q", kind)
}
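// A hedged usage sketch of the new factory: callers now supply a jobs client
// and a generic scale client instead of the whole internal clientset. The
// jobsClient and scalesGetter values are assumptions, not part of this diff.
//
//	func scaleDeploymentExample(jobsClient batchclient.JobsGetter, scalesGetter scaleclient.ScalesGetter) error {
//		scaler := ScalerFor(apps.Kind("Deployment"), jobsClient, scalesGetter,
//			schema.GroupResource{Group: "apps", Resource: "deployments"})
//		// Non-Job kinds take the genericScaler path, backed by the scale subresource.
//		return scaler.Scale("default", "frontend", 5, nil, nil, nil)
//	}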
// ScalePrecondition describes a condition that must be true for the scale to take place
@ -162,6 +161,7 @@ func (precondition *ScalePrecondition) ValidateReplicationController(controller
return nil
}
// TODO(p0lyn0mial): remove ReplicationControllerScaler
type ReplicationControllerScaler struct {
c coreclient.ReplicationControllersGetter
}
@ -200,51 +200,23 @@ func (scaler *ReplicationControllerScaler) Scale(namespace, name string, newSize
// Make it try only once, immediately
retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
}
var updatedResourceVersion string
cond := ScaleCondition(scaler, preconditions, namespace, name, newSize, &updatedResourceVersion)
cond := ScaleCondition(scaler, preconditions, namespace, name, newSize, nil)
if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
return err
}
if waitForReplicas != nil {
checkRC := func(rc *api.ReplicationController) bool {
if uint(rc.Spec.Replicas) != newSize {
// the size was changed by another party; no need to wait for the new change to complete.
return true
}
return rc.Status.ObservedGeneration >= rc.Generation && rc.Status.Replicas == rc.Spec.Replicas
}
// If the number of replicas doesn't change, then the update may not even
// be sent to the underlying database (we don't send no-op changes).
// In such a case, <updatedResourceVersion> will have the value of the most
// recent update (which may be far in the past), so we may get a "too old
// RV" error from the watch, or potentially no ReplicationController events
// will be delivered, since it may already be in the expected state.
// To protect against these two cases, we first issue a Get() to ensure that
// we are not already in the expected state.
currentRC, err := scaler.c.ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
rc, err := scaler.c.ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return err
}
if !checkRC(currentRC) {
watchOptions := metav1.ListOptions{
FieldSelector: fields.OneTermEqualSelector("metadata.name", name).String(),
ResourceVersion: updatedResourceVersion,
}
watcher, err := scaler.c.ReplicationControllers(namespace).Watch(watchOptions)
if err != nil {
return err
}
_, err = watch.Until(waitForReplicas.Timeout, watcher, func(event watch.Event) (bool, error) {
if event.Type != watch.Added && event.Type != watch.Modified {
return false, nil
}
return checkRC(event.Object.(*api.ReplicationController)), nil
})
if err == wait.ErrWaitTimeout {
return fmt.Errorf("timed out waiting for %q to be synced", name)
}
return err
if rc.Initializers != nil {
return nil
}
err = wait.PollImmediate(waitForReplicas.Interval, waitForReplicas.Timeout, ControllerHasDesiredReplicas(scaler.c, rc))
if err == wait.ErrWaitTimeout {
return fmt.Errorf("timed out waiting for %q to be synced", name)
}
return err
}
return nil
}
@ -260,6 +232,7 @@ func (precondition *ScalePrecondition) ValidateReplicaSet(replicaSet *extensions
return nil
}
// TODO(p0lyn0mial): remove ReplicaSetScaler
type ReplicaSetScaler struct {
c extensionsclient.ReplicaSetsGetter
}
@ -299,7 +272,7 @@ func (scaler *ReplicaSetScaler) Scale(namespace, name string, newSize uint, prec
retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
}
cond := ScaleCondition(scaler, preconditions, namespace, name, newSize, nil)
if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil {
if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
return err
}
if waitForReplicas != nil {
@ -310,7 +283,7 @@ func (scaler *ReplicaSetScaler) Scale(namespace, name string, newSize uint, prec
if rs.Initializers != nil {
return nil
}
err = wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, client.ReplicaSetHasDesiredReplicas(scaler.c, rs))
err = wait.PollImmediate(waitForReplicas.Interval, waitForReplicas.Timeout, ReplicaSetHasDesiredReplicas(scaler.c, rs))
if err == wait.ErrWaitTimeout {
return fmt.Errorf("timed out waiting for %q to be synced", name)
@ -334,6 +307,7 @@ func (precondition *ScalePrecondition) ValidateJob(job *batch.Job) error {
return nil
}
// TODO(p0lyn0mial): remove StatefulSetsGetter
type StatefulSetScaler struct {
c appsclient.StatefulSetsGetter
}
@ -370,7 +344,7 @@ func (scaler *StatefulSetScaler) Scale(namespace, name string, newSize uint, pre
retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
}
cond := ScaleCondition(scaler, preconditions, namespace, name, newSize, nil)
if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil {
if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
return err
}
if waitForReplicas != nil {
@ -381,7 +355,7 @@ func (scaler *StatefulSetScaler) Scale(namespace, name string, newSize uint, pre
if job.Initializers != nil {
return nil
}
err = wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, client.StatefulSetHasDesiredReplicas(scaler.c, job))
err = wait.PollImmediate(waitForReplicas.Interval, waitForReplicas.Timeout, StatefulSetHasDesiredReplicas(scaler.c, job))
if err == wait.ErrWaitTimeout {
return fmt.Errorf("timed out waiting for %q to be synced", name)
}
@ -390,13 +364,13 @@ func (scaler *StatefulSetScaler) Scale(namespace, name string, newSize uint, pre
return nil
}
type JobScaler struct {
type jobScaler struct {
c batchclient.JobsGetter
}
// ScaleSimple is responsible for updating a job's parallelism. It returns the
// resourceVersion of the job if the update is successful.
func (scaler *JobScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) {
func (scaler *jobScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) {
job, err := scaler.c.Jobs(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return "", ScaleError{ScaleGetFailure, "", err}
@ -421,7 +395,7 @@ func (scaler *JobScaler) ScaleSimple(namespace, name string, preconditions *Scal
// Scale updates a Job to a new size, with an optional precondition check (if preconditions is not nil),
// optional retries (if retry is not nil), and then optionally waits for parallelism to reach the desired
// number, which can be less than requested based on the job's current progress.
func (scaler *JobScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
func (scaler *jobScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
if preconditions == nil {
preconditions = &ScalePrecondition{-1, ""}
}
@ -430,7 +404,7 @@ func (scaler *JobScaler) Scale(namespace, name string, newSize uint, preconditio
retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
}
cond := ScaleCondition(scaler, preconditions, namespace, name, newSize, nil)
if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil {
if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
return err
}
if waitForReplicas != nil {
@ -438,7 +412,7 @@ func (scaler *JobScaler) Scale(namespace, name string, newSize uint, preconditio
if err != nil {
return err
}
err = wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, client.JobHasDesiredParallelism(scaler.c, job))
err = wait.PollImmediate(waitForReplicas.Interval, waitForReplicas.Timeout, JobHasDesiredParallelism(scaler.c, job))
if err == wait.ErrWaitTimeout {
return fmt.Errorf("timed out waiting for %q to be synced", name)
}
@ -458,6 +432,7 @@ func (precondition *ScalePrecondition) ValidateDeployment(deployment *extensions
return nil
}
// TODO(p0lyn0mial): remove DeploymentScaler
type DeploymentScaler struct {
c extensionsclient.DeploymentsGetter
}
@ -500,7 +475,7 @@ func (scaler *DeploymentScaler) Scale(namespace, name string, newSize uint, prec
retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
}
cond := ScaleCondition(scaler, preconditions, namespace, name, newSize, nil)
if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil {
if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
return err
}
if waitForReplicas != nil {
@ -508,7 +483,7 @@ func (scaler *DeploymentScaler) Scale(namespace, name string, newSize uint, prec
if err != nil {
return err
}
err = wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, client.DeploymentHasDesiredReplicas(scaler.c, deployment))
err = wait.PollImmediate(waitForReplicas.Interval, waitForReplicas.Timeout, DeploymentHasDesiredReplicas(scaler.c, deployment))
if err == wait.ErrWaitTimeout {
return fmt.Errorf("timed out waiting for %q to be synced", name)
}
@ -516,3 +491,90 @@ func (scaler *DeploymentScaler) Scale(namespace, name string, newSize uint, prec
}
return nil
}
// validateGeneric ensures that the preconditions match. Returns nil if they are valid, otherwise an error
// TODO(p0lyn0mial): when the work on GenericScaler is done, rename validateGeneric to validate
func (precondition *ScalePrecondition) validateGeneric(scale *autoscalingapi.Scale) error {
if precondition.Size != -1 && int(scale.Spec.Replicas) != precondition.Size {
return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(scale.Spec.Replicas))}
}
if len(precondition.ResourceVersion) > 0 && scale.ResourceVersion != precondition.ResourceVersion {
return PreconditionError{"resource version", precondition.ResourceVersion, scale.ResourceVersion}
}
return nil
}
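// Per the checks above, a Size of -1 skips the replica-count check and an
// empty ResourceVersion skips the version check, so &ScalePrecondition{-1, ""}
// (the default used by Scale below) validates any scale. An illustrative
// sketch; the scale value is assumed to come from a prior Scales().Get call:
//
//	pre := &ScalePrecondition{Size: 3, ResourceVersion: ""}
//	if err := pre.validateGeneric(scale); err != nil {
//		// scale.Spec.Replicas was not 3; err is a PreconditionError
//	}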
// genericScaler can update scales for resources in a particular namespace
type genericScaler struct {
scaleNamespacer scaleclient.ScalesGetter
targetGR schema.GroupResource
}
var _ Scaler = &genericScaler{}
// ScaleSimple updates the scale of a given resource. It returns the resourceVersion of the scale if the update was successful.
func (s *genericScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (updatedResourceVersion string, err error) {
scale, err := s.scaleNamespacer.Scales(namespace).Get(s.targetGR, name)
if err != nil {
return "", ScaleError{ScaleGetFailure, "", err}
}
if preconditions != nil {
if err := preconditions.validateGeneric(scale); err != nil {
return "", err
}
}
scale.Spec.Replicas = int32(newSize)
updatedScale, err := s.scaleNamespacer.Scales(namespace).Update(s.targetGR, scale)
if err != nil {
if errors.IsConflict(err) {
return "", ScaleError{ScaleUpdateConflictFailure, scale.ResourceVersion, err}
}
return "", ScaleError{ScaleUpdateFailure, scale.ResourceVersion, err}
}
return updatedScale.ResourceVersion, nil
}
// Scale updates the scale of a given resource to a new size, with an optional precondition check (if preconditions is not nil),
// optional retries (if retry is not nil), and then optionally waits for the status to reach the desired count.
func (s *genericScaler) Scale(namespace, resourceName string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
if preconditions == nil {
preconditions = &ScalePrecondition{-1, ""}
}
if retry == nil {
// make it try only once, immediately
retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
}
cond := ScaleCondition(s, preconditions, namespace, resourceName, newSize, nil)
if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
return err
}
if waitForReplicas != nil {
err := wait.PollImmediate(
waitForReplicas.Interval,
waitForReplicas.Timeout,
scaleHasDesiredReplicas(s.scaleNamespacer, s.targetGR, resourceName, namespace, int32(newSize)))
if err == wait.ErrWaitTimeout {
return fmt.Errorf("timed out waiting for %q to be synced", resourceName)
}
return err
}
return nil
}
// scaleHasDesiredReplicas returns a condition that will be true if and only if the desired replica
// count for a scale (Spec) equals its updated replica count (Status)
func scaleHasDesiredReplicas(sClient scaleclient.ScalesGetter, gr schema.GroupResource, resourceName string, namespace string, desiredReplicas int32) wait.ConditionFunc {
return func() (bool, error) {
actualScale, err := sClient.Scales(namespace).Get(gr, resourceName)
if err != nil {
return false, err
}
// this means the desired scale target has been reset by something else
if actualScale.Spec.Replicas != desiredReplicas {
return true, nil
}
return actualScale.Spec.Replicas == actualScale.Status.Replicas &&
desiredReplicas == actualScale.Status.Replicas, nil
}
}
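// A usage sketch of the condition above, mirroring the call in
// genericScaler.Scale; the interval, timeout, and arguments are illustrative.
// Note the early true return when Spec.Replicas no longer matches
// desiredReplicas: an external resize ends the wait instead of letting it
// stall until the timeout.
//
//	cond := scaleHasDesiredReplicas(scalesGetter, gr, "frontend", "default", 5)
//	err := wait.PollImmediate(time.Second, 2*time.Minute, cond)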