mirror of https://github.com/ceph/ceph-csi.git

Fresh dep ensure
vendor/k8s.io/kubernetes/pkg/controller/deployment/BUILD (generated, vendored; 72 lines changed)

@@ -22,28 +22,27 @@ go_library(
         "//pkg/controller/deployment/util:go_default_library",
         "//pkg/util/labels:go_default_library",
         "//pkg/util/metrics:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/k8s.io/api/apps/v1:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/client-go/informers/apps/v1:go_default_library",
-        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/listers/apps/v1:go_default_library",
-        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
-        "//vendor/k8s.io/client-go/tools/record:go_default_library",
-        "//vendor/k8s.io/client-go/util/integer:go_default_library",
-        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
+        "//staging/src/k8s.io/api/apps/v1:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/client-go/informers/apps/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
+        "//staging/src/k8s.io/client-go/util/integer:go_default_library",
+        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

@@ -71,20 +70,21 @@ go_test(
         "//pkg/apis/storage/install:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/controller/deployment/util:go_default_library",
-        "//vendor/k8s.io/api/apps/v1:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
-        "//vendor/k8s.io/client-go/informers:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
-        "//vendor/k8s.io/client-go/testing:go_default_library",
-        "//vendor/k8s.io/client-go/tools/record:go_default_library",
-        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
+        "//pkg/controller/testutil:go_default_library",
+        "//staging/src/k8s.io/api/apps/v1:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
+        "//staging/src/k8s.io/client-go/informers:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
+        "//staging/src/k8s.io/client-go/testing:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
+        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
     ],
 )

vendor/k8s.io/kubernetes/pkg/controller/deployment/OWNERS (generated, vendored; 3 lines changed)

@@ -5,7 +5,8 @@ approvers:
 - mfojtik
 reviewers:
 - janetkuo
 - nikhiljindal
 - kargakis
 - mfojtik
+- tnozicka
 labels:
 - sig/apps

vendor/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller.go (generated, vendored; 52 lines changed)

@@ -25,7 +25,7 @@ import (
     "reflect"
     "time"

-    "github.com/golang/glog"
+    "k8s.io/klog"

     apps "k8s.io/api/apps/v1"
     "k8s.io/api/core/v1"
@@ -99,7 +99,7 @@ type DeploymentController struct {
 // NewDeploymentController creates a new DeploymentController.
 func NewDeploymentController(dInformer appsinformers.DeploymentInformer, rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) (*DeploymentController, error) {
     eventBroadcaster := record.NewBroadcaster()
-    eventBroadcaster.StartLogging(glog.Infof)
+    eventBroadcaster.StartLogging(klog.Infof)
     eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})

     if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil {
@@ -149,8 +149,8 @@ func (dc *DeploymentController) Run(workers int, stopCh <-chan struct{}) {
     defer utilruntime.HandleCrash()
     defer dc.queue.ShutDown()

-    glog.Infof("Starting deployment controller")
-    defer glog.Infof("Shutting down deployment controller")
+    klog.Infof("Starting deployment controller")
+    defer klog.Infof("Shutting down deployment controller")

     if !controller.WaitForCacheSync("deployment", stopCh, dc.dListerSynced, dc.rsListerSynced, dc.podListerSynced) {
         return
@@ -165,14 +165,14 @@ func (dc *DeploymentController) Run(workers int, stopCh <-chan struct{}) {

 func (dc *DeploymentController) addDeployment(obj interface{}) {
     d := obj.(*apps.Deployment)
-    glog.V(4).Infof("Adding deployment %s", d.Name)
+    klog.V(4).Infof("Adding deployment %s", d.Name)
     dc.enqueueDeployment(d)
 }

 func (dc *DeploymentController) updateDeployment(old, cur interface{}) {
     oldD := old.(*apps.Deployment)
     curD := cur.(*apps.Deployment)
-    glog.V(4).Infof("Updating deployment %s", oldD.Name)
+    klog.V(4).Infof("Updating deployment %s", oldD.Name)
     dc.enqueueDeployment(curD)
 }

@@ -190,7 +190,7 @@ func (dc *DeploymentController) deleteDeployment(obj interface{}) {
             return
         }
     }
-    glog.V(4).Infof("Deleting deployment %s", d.Name)
+    klog.V(4).Infof("Deleting deployment %s", d.Name)
     dc.enqueueDeployment(d)
 }

@@ -211,7 +211,7 @@ func (dc *DeploymentController) addReplicaSet(obj interface{}) {
         if d == nil {
             return
         }
-        glog.V(4).Infof("ReplicaSet %s added.", rs.Name)
+        klog.V(4).Infof("ReplicaSet %s added.", rs.Name)
         dc.enqueueDeployment(d)
         return
     }
@@ -222,7 +222,7 @@ func (dc *DeploymentController) addReplicaSet(obj interface{}) {
     if len(ds) == 0 {
         return
     }
-    glog.V(4).Infof("Orphan ReplicaSet %s added.", rs.Name)
+    klog.V(4).Infof("Orphan ReplicaSet %s added.", rs.Name)
     for _, d := range ds {
         dc.enqueueDeployment(d)
     }
@@ -242,7 +242,7 @@ func (dc *DeploymentController) getDeploymentsForReplicaSet(rs *apps.ReplicaSet)
     if len(deployments) > 1 {
         // ControllerRef will ensure we don't do anything crazy, but more than one
         // item in this list nevertheless constitutes user error.
-        glog.V(4).Infof("user error! more than one deployment is selecting replica set %s/%s with labels: %#v, returning %s/%s",
+        klog.V(4).Infof("user error! more than one deployment is selecting replica set %s/%s with labels: %#v, returning %s/%s",
             rs.Namespace, rs.Name, rs.Labels, deployments[0].Namespace, deployments[0].Name)
     }
     return deployments
@@ -277,7 +277,7 @@ func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) {
         if d == nil {
             return
         }
-        glog.V(4).Infof("ReplicaSet %s updated.", curRS.Name)
+        klog.V(4).Infof("ReplicaSet %s updated.", curRS.Name)
         dc.enqueueDeployment(d)
         return
     }
@@ -290,7 +290,7 @@ func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) {
     if len(ds) == 0 {
         return
     }
-    glog.V(4).Infof("Orphan ReplicaSet %s updated.", curRS.Name)
+    klog.V(4).Infof("Orphan ReplicaSet %s updated.", curRS.Name)
     for _, d := range ds {
         dc.enqueueDeployment(d)
     }
@@ -329,7 +329,7 @@ func (dc *DeploymentController) deleteReplicaSet(obj interface{}) {
     if d == nil {
         return
     }
-    glog.V(4).Infof("ReplicaSet %s deleted.", rs.Name)
+    klog.V(4).Infof("ReplicaSet %s deleted.", rs.Name)
     dc.enqueueDeployment(d)
 }

@@ -353,7 +353,7 @@ func (dc *DeploymentController) deletePod(obj interface{}) {
             return
         }
     }
-    glog.V(4).Infof("Pod %s deleted.", pod.Name)
+    klog.V(4).Infof("Pod %s deleted.", pod.Name)
     if d := dc.getDeploymentForPod(pod); d != nil && d.Spec.Strategy.Type == apps.RecreateDeploymentStrategyType {
         // Sync if this Deployment now has no more Pods.
         rsList, err := util.ListReplicaSets(d, util.RsListFromClient(dc.client.AppsV1()))
@@ -421,7 +421,7 @@ func (dc *DeploymentController) getDeploymentForPod(pod *v1.Pod) *apps.Deploymen
     }
     rs, err = dc.rsLister.ReplicaSets(pod.Namespace).Get(controllerRef.Name)
     if err != nil || rs.UID != controllerRef.UID {
-        glog.V(4).Infof("Cannot get replicaset %q for pod %q: %v", controllerRef.Name, pod.Name, err)
+        klog.V(4).Infof("Cannot get replicaset %q for pod %q: %v", controllerRef.Name, pod.Name, err)
         return nil
     }

@@ -481,13 +481,13 @@ func (dc *DeploymentController) handleErr(err error, key interface{}) {
     }

     if dc.queue.NumRequeues(key) < maxRetries {
-        glog.V(2).Infof("Error syncing deployment %v: %v", key, err)
+        klog.V(2).Infof("Error syncing deployment %v: %v", key, err)
         dc.queue.AddRateLimited(key)
         return
     }

     utilruntime.HandleError(err)
-    glog.V(2).Infof("Dropping deployment %q out of the queue: %v", key, err)
+    klog.V(2).Infof("Dropping deployment %q out of the queue: %v", key, err)
     dc.queue.Forget(key)
 }

@@ -559,9 +559,9 @@ func (dc *DeploymentController) getPodMapForDeployment(d *apps.Deployment, rsLis
 // This function is not meant to be invoked concurrently with the same key.
 func (dc *DeploymentController) syncDeployment(key string) error {
     startTime := time.Now()
-    glog.V(4).Infof("Started syncing deployment %q (%v)", key, startTime)
+    klog.V(4).Infof("Started syncing deployment %q (%v)", key, startTime)
     defer func() {
-        glog.V(4).Infof("Finished syncing deployment %q (%v)", key, time.Since(startTime))
+        klog.V(4).Infof("Finished syncing deployment %q (%v)", key, time.Since(startTime))
     }()

     namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -570,7 +570,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {
     }
     deployment, err := dc.dLister.Deployments(namespace).Get(name)
     if errors.IsNotFound(err) {
-        glog.V(2).Infof("Deployment %v has been deleted", key)
+        klog.V(2).Infof("Deployment %v has been deleted", key)
         return nil
     }
     if err != nil {
@@ -608,7 +608,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {
     }

     if d.DeletionTimestamp != nil {
-        return dc.syncStatusOnly(d, rsList, podMap)
+        return dc.syncStatusOnly(d, rsList)
     }

     // Update deployment conditions with an Unknown condition when pausing/resuming
@@ -619,29 +619,29 @@ func (dc *DeploymentController) syncDeployment(key string) error {
     }

     if d.Spec.Paused {
-        return dc.sync(d, rsList, podMap)
+        return dc.sync(d, rsList)
     }

     // rollback is not re-entrant in case the underlying replica sets are updated with a new
     // revision so we should ensure that we won't proceed to update replica sets until we
     // make sure that the deployment has cleaned up its rollback spec in subsequent enqueues.
     if getRollbackTo(d) != nil {
-        return dc.rollback(d, rsList, podMap)
+        return dc.rollback(d, rsList)
     }

-    scalingEvent, err := dc.isScalingEvent(d, rsList, podMap)
+    scalingEvent, err := dc.isScalingEvent(d, rsList)
     if err != nil {
         return err
     }
     if scalingEvent {
-        return dc.sync(d, rsList, podMap)
+        return dc.sync(d, rsList)
     }

     switch d.Spec.Strategy.Type {
     case apps.RecreateDeploymentStrategyType:
         return dc.rolloutRecreate(d, rsList, podMap)
     case apps.RollingUpdateDeploymentStrategyType:
-        return dc.rolloutRolling(d, rsList, podMap)
+        return dc.rolloutRolling(d, rsList)
     }
     return fmt.Errorf("unexpected deployment strategy type: %s", d.Spec.Strategy.Type)
 }
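
Note on the pattern above: the dominant change in this file, and across the rest of this commit, is upstream Kubernetes swapping github.com/golang/glog for its fork k8s.io/klog. A minimal, self-contained sketch of that migration (not part of the vendored diff; it assumes k8s.io/klog is resolvable as a dependency):

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// Unlike glog, klog asks the caller to register its flags explicitly;
	// passing nil registers them on the global flag.CommandLine set.
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()

	// The call surface matches glog, so vendored call sites only change the
	// package name: glog.V(4).Infof(...) becomes klog.V(4).Infof(...).
	klog.Infof("Starting deployment controller")
	klog.V(4).Infof("Adding deployment %s", "example")
}

The other recurring edit in the hunks above is the removal of the podMap argument from sync, rollback, isScalingEvent, and rolloutRolling; per the diff, only the Recreate strategy still consumes the per-ReplicaSet pod map, so rolloutRecreate keeps the parameter.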
vendor/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller_test.go (generated, vendored; 23 lines changed)

@@ -45,6 +45,7 @@ import (
     _ "k8s.io/kubernetes/pkg/apis/storage/install"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/deployment/util"
+    "k8s.io/kubernetes/pkg/controller/testutil"
 )

 var (
@@ -77,7 +78,7 @@ func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map
 func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSurge, maxUnavailable *intstr.IntOrString, selector map[string]string) *apps.Deployment {
     d := apps.Deployment{
-        TypeMeta:   metav1.TypeMeta{APIVersion: "apps/v1"},
+        TypeMeta:   metav1.TypeMeta{APIVersion: "apps/v1", Kind: "Deployment"},
         ObjectMeta: metav1.ObjectMeta{
             UID:  uuid.NewUUID(),
             Name: name,
@@ -120,6 +121,7 @@ func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSu
 func newReplicaSet(d *apps.Deployment, name string, replicas int) *apps.ReplicaSet {
     return &apps.ReplicaSet{
+        TypeMeta: metav1.TypeMeta{Kind: "ReplicaSet"},
         ObjectMeta: metav1.ObjectMeta{
             Name: name,
             UID:  uuid.NewUUID(),
@@ -135,15 +137,6 @@ func newReplicaSet(d *apps.Deployment, name string, replicas int) *apps.ReplicaS
     }
 }

-func getKey(d *apps.Deployment, t *testing.T) string {
-    if key, err := controller.KeyFunc(d); err != nil {
-        t.Errorf("Unexpected error getting key for deployment %v: %v", d.Name, err)
-        return ""
-    } else {
-        return key
-    }
-}
-
 type fixture struct {
     t *testing.T

@@ -285,7 +278,7 @@ func TestSyncDeploymentCreatesReplicaSet(t *testing.T) {
     f.expectUpdateDeploymentStatusAction(d)
     f.expectUpdateDeploymentStatusAction(d)

-    f.run(getKey(d, t))
+    f.run(testutil.GetKey(d, t))
 }

 func TestSyncDeploymentDontDoAnythingDuringDeletion(t *testing.T) {
@@ -298,7 +291,7 @@ func TestSyncDeploymentDontDoAnythingDuringDeletion(t *testing.T) {
     f.objects = append(f.objects, d)

     f.expectUpdateDeploymentStatusAction(d)
-    f.run(getKey(d, t))
+    f.run(testutil.GetKey(d, t))
 }

 func TestSyncDeploymentDeletionRace(t *testing.T) {
@@ -323,7 +316,7 @@ func TestSyncDeploymentDeletionRace(t *testing.T) {
     f.expectGetDeploymentAction(d)
     // Sync should fail and requeue to let cache catch up.
     // Don't start informers, since we don't want cache to catch up for this test.
-    f.runExpectError(getKey(d, t), false)
+    f.runExpectError(testutil.GetKey(d, t), false)
 }

 // issue: https://github.com/kubernetes/kubernetes/issues/23218
@@ -337,7 +330,7 @@ func TestDontSyncDeploymentsWithEmptyPodSelector(t *testing.T) {

     // Normally there should be a status update to sync observedGeneration but the fake
     // deployment has no generation set so there is no action happening here.
-    f.run(getKey(d, t))
+    f.run(testutil.GetKey(d, t))
 }

 func TestReentrantRollback(t *testing.T) {
@@ -364,7 +357,7 @@ func TestReentrantRollback(t *testing.T) {
     // Rollback is done here
     f.expectUpdateDeploymentAction(d)
     // Expect no update on replica sets though
-    f.run(getKey(d, t))
+    f.run(testutil.GetKey(d, t))
 }

 // TestPodDeletionEnqueuesRecreateDeployment ensures that the deletion of a pod

vendor/k8s.io/kubernetes/pkg/controller/deployment/progress.go (generated, vendored; 12 lines changed)

@@ -21,7 +21,7 @@ import (
     "reflect"
     "time"

-    "github.com/golang/glog"
+    "k8s.io/klog"

     apps "k8s.io/api/apps/v1"
     "k8s.io/api/core/v1"
@@ -36,7 +36,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*apps.ReplicaSet, new
     newStatus := calculateStatus(allRSs, newRS, d)

     // If there is no progressDeadlineSeconds set, remove any Progressing condition.
-    if d.Spec.ProgressDeadlineSeconds == nil {
+    if !util.HasProgressDeadline(d) {
         util.RemoveDeploymentCondition(&newStatus, apps.DeploymentProgressing)
     }

@@ -47,7 +47,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*apps.ReplicaSet, new
     isCompleteDeployment := newStatus.Replicas == newStatus.UpdatedReplicas && currentCond != nil && currentCond.Reason == util.NewRSAvailableReason
     // Check for progress only if there is a progress deadline set and the latest rollout
     // hasn't completed yet.
-    if d.Spec.ProgressDeadlineSeconds != nil && !isCompleteDeployment {
+    if util.HasProgressDeadline(d) && !isCompleteDeployment {
         switch {
         case util.DeploymentComplete(d, &newStatus):
             // Update the deployment conditions with a message for the new replica set that
@@ -159,7 +159,7 @@ var nowFn = func() time.Time { return time.Now() }
 func (dc *DeploymentController) requeueStuckDeployment(d *apps.Deployment, newStatus apps.DeploymentStatus) time.Duration {
     currentCond := util.GetDeploymentCondition(d.Status, apps.DeploymentProgressing)
     // Can't estimate progress if there is no deadline in the spec or progressing condition in the current status.
-    if d.Spec.ProgressDeadlineSeconds == nil || currentCond == nil {
+    if !util.HasProgressDeadline(d) || currentCond == nil {
         return time.Duration(-1)
     }
     // No need to estimate progress if the rollout is complete or already timed out.
@@ -186,11 +186,11 @@ func (dc *DeploymentController) requeueStuckDeployment(d *apps.Deployment, newSt
     // Make it ratelimited so we stay on the safe side, eventually the Deployment should
     // transition either to a Complete or to a TimedOut condition.
     if after < time.Second {
-        glog.V(4).Infof("Queueing up deployment %q for a progress check now", d.Name)
+        klog.V(4).Infof("Queueing up deployment %q for a progress check now", d.Name)
         dc.enqueueRateLimited(d)
         return time.Duration(0)
     }
-    glog.V(4).Infof("Queueing up deployment %q for a progress check after %ds", d.Name, int(after.Seconds()))
+    klog.V(4).Infof("Queueing up deployment %q for a progress check after %ds", d.Name, int(after.Seconds()))
     // Add a second to avoid milliseconds skew in AddAfter.
     // See https://github.com/kubernetes/kubernetes/issues/39785#issuecomment-279959133 for more info.
     dc.enqueueAfter(d, after+time.Second)

vendor/k8s.io/kubernetes/pkg/controller/deployment/progress_test.go (generated, vendored; 12 lines changed)

@@ -17,6 +17,7 @@ limitations under the License.
 package deployment

 import (
+    "math"
     "testing"
     "time"

@@ -67,6 +68,7 @@ func newRSWithAvailable(name string, specReplicas, statusReplicas, availableRepl
 func TestRequeueStuckDeployment(t *testing.T) {
     pds := int32(60)
+    infinite := int32(math.MaxInt32)
     failed := []apps.DeploymentCondition{
         {
             Type: apps.DeploymentProgressing,
@@ -90,11 +92,17 @@ func TestRequeueStuckDeployment(t *testing.T) {
         expected time.Duration
     }{
         {
-            name:     "no progressDeadlineSeconds specified",
+            name:     "nil progressDeadlineSeconds specified",
             d:        currentDeployment(nil, 4, 3, 3, 2, nil),
             status:   newDeploymentStatus(3, 3, 2),
             expected: time.Duration(-1),
         },
+        {
+            name:     "infinite progressDeadlineSeconds specified",
+            d:        currentDeployment(&infinite, 4, 3, 3, 2, nil),
+            status:   newDeploymentStatus(3, 3, 2),
+            expected: time.Duration(-1),
+        },
         {
             name: "no progressing condition found",
             d:    currentDeployment(&pds, 4, 3, 3, 2, nil),
@@ -330,7 +338,7 @@ func TestSyncRolloutStatus(t *testing.T) {
     newCond := util.GetDeploymentCondition(test.d.Status, test.conditionType)
     switch {
     case newCond == nil:
-        if test.d.Spec.ProgressDeadlineSeconds != nil {
+        if test.d.Spec.ProgressDeadlineSeconds != nil && *test.d.Spec.ProgressDeadlineSeconds != math.MaxInt32 {
             t.Errorf("%s: expected deployment condition: %s", test.name, test.conditionType)
         }
     case newCond.Status != test.conditionStatus || newCond.Reason != test.conditionReason:

vendor/k8s.io/kubernetes/pkg/controller/deployment/recreate.go (generated, vendored; 4 lines changed)

@@ -27,7 +27,7 @@ import (
 // rolloutRecreate implements the logic for recreating a replica set.
 func (dc *DeploymentController) rolloutRecreate(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
     // Don't create a new RS if not already existed, so that we avoid scaling up before scaling down.
-    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
+    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, false)
     if err != nil {
         return err
     }
@@ -51,7 +51,7 @@ func (dc *DeploymentController) rolloutRecreate(d *apps.Deployment, rsList []*ap

     // If we need to create a new RS, create it now.
     if newRS == nil {
-        newRS, oldRSs, err = dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, true)
+        newRS, oldRSs, err = dc.getAllReplicaSetsAndSyncRevision(d, rsList, true)
         if err != nil {
             return err
         }

vendor/k8s.io/kubernetes/pkg/controller/deployment/rollback.go (generated, vendored; 17 lines changed)

@@ -20,18 +20,17 @@ import (
     "fmt"
     "strconv"

-    "github.com/golang/glog"
+    "k8s.io/klog"

     apps "k8s.io/api/apps/v1"
     "k8s.io/api/core/v1"
     extensions "k8s.io/api/extensions/v1beta1"
-    "k8s.io/apimachinery/pkg/types"
     deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 )

 // rollback the deployment to the specified revision. In any case cleanup the rollback spec.
-func (dc *DeploymentController) rollback(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
-    newRS, allOldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, true)
+func (dc *DeploymentController) rollback(d *apps.Deployment, rsList []*apps.ReplicaSet) error {
+    newRS, allOldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, true)
     if err != nil {
         return err
     }
@@ -50,11 +49,11 @@ func (dc *DeploymentController) rollback(d *apps.Deployment, rsList []*apps.Repl
     for _, rs := range allRSs {
         v, err := deploymentutil.Revision(rs)
         if err != nil {
-            glog.V(4).Infof("Unable to extract revision from deployment's replica set %q: %v", rs.Name, err)
+            klog.V(4).Infof("Unable to extract revision from deployment's replica set %q: %v", rs.Name, err)
             continue
         }
         if v == rollbackTo.Revision {
-            glog.V(4).Infof("Found replica set %q with desired revision %d", rs.Name, v)
+            klog.V(4).Infof("Found replica set %q with desired revision %d", rs.Name, v)
             // rollback by copying podTemplate.Spec from the replica set
             // revision number will be incremented during the next getAllReplicaSetsAndSyncRevision call
             // no-op if the spec matches current deployment's podTemplate.Spec
@@ -76,7 +75,7 @@ func (dc *DeploymentController) rollback(d *apps.Deployment, rsList []*apps.Repl
 func (dc *DeploymentController) rollbackToTemplate(d *apps.Deployment, rs *apps.ReplicaSet) (bool, error) {
     performedRollback := false
     if !deploymentutil.EqualIgnoreHash(&d.Spec.Template, &rs.Spec.Template) {
-        glog.V(4).Infof("Rolling back deployment %q to template spec %+v", d.Name, rs.Spec.Template.Spec)
+        klog.V(4).Infof("Rolling back deployment %q to template spec %+v", d.Name, rs.Spec.Template.Spec)
         deploymentutil.SetFromReplicaSetTemplate(d, rs.Spec.Template)
         // set RS (the old RS we'll rolling back to) annotations back to the deployment;
         // otherwise, the deployment's current annotations (should be the same as current new RS) will be copied to the RS after the rollback.
@@ -92,7 +91,7 @@ func (dc *DeploymentController) rollbackToTemplate(d *apps.Deployment, rs *apps.
         deploymentutil.SetDeploymentAnnotationsTo(d, rs)
         performedRollback = true
     } else {
-        glog.V(4).Infof("Rolling back to a revision that contains the same template as current deployment %q, skipping rollback...", d.Name)
+        klog.V(4).Infof("Rolling back to a revision that contains the same template as current deployment %q, skipping rollback...", d.Name)
         eventMsg := fmt.Sprintf("The rollback revision contains the same template as current deployment %q", d.Name)
         dc.emitRollbackWarningEvent(d, deploymentutil.RollbackTemplateUnchanged, eventMsg)
     }
@@ -112,7 +111,7 @@ func (dc *DeploymentController) emitRollbackNormalEvent(d *apps.Deployment, mess
 // It is assumed that the caller will have updated the deployment template appropriately (in case
 // we want to rollback).
 func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *apps.Deployment) error {
-    glog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name)
+    klog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name)
     setRollbackTo(d, nil)
     _, err := dc.client.AppsV1().Deployments(d.Namespace).Update(d)
     return err

vendor/k8s.io/kubernetes/pkg/controller/deployment/rolling.go (generated, vendored; 18 lines changed)

@@ -20,18 +20,16 @@ import (
     "fmt"
     "sort"

-    "github.com/golang/glog"
     apps "k8s.io/api/apps/v1"
     "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/types"
     "k8s.io/client-go/util/integer"
+    "k8s.io/klog"
     "k8s.io/kubernetes/pkg/controller"
     deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 )

 // rolloutRolling implements the logic for rolling a new replica set.
-func (dc *DeploymentController) rolloutRolling(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
-    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, true)
+func (dc *DeploymentController) rolloutRolling(d *apps.Deployment, rsList []*apps.ReplicaSet) error {
+    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, true)
     if err != nil {
         return err
     }
@@ -93,7 +91,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*apps.ReplicaSe
     }

     allPodsCount := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
-    glog.V(4).Infof("New replica set %s/%s has %d available pods.", newRS.Namespace, newRS.Name, newRS.Status.AvailableReplicas)
+    klog.V(4).Infof("New replica set %s/%s has %d available pods.", newRS.Namespace, newRS.Name, newRS.Status.AvailableReplicas)
     maxUnavailable := deploymentutil.MaxUnavailable(*deployment)

     // Check if we can scale down. We can scale down in the following 2 cases:
@@ -139,7 +137,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*apps.ReplicaSe
     if err != nil {
         return false, nil
     }
-    glog.V(4).Infof("Cleaned up unhealthy replicas from old RSes by %d", cleanupCount)
+    klog.V(4).Infof("Cleaned up unhealthy replicas from old RSes by %d", cleanupCount)

     // Scale down old replica sets, need check maxUnavailable to ensure we can scale down
     allRSs = append(oldRSs, newRS)
@@ -147,7 +145,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*apps.ReplicaSe
     if err != nil {
         return false, nil
     }
-    glog.V(4).Infof("Scaled down old RSes of deployment %s by %d", deployment.Name, scaledDownCount)
+    klog.V(4).Infof("Scaled down old RSes of deployment %s by %d", deployment.Name, scaledDownCount)

     totalScaledDown := cleanupCount + scaledDownCount
     return totalScaledDown > 0, nil
@@ -168,7 +166,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*apps.ReplicaS
         // cannot scale down this replica set.
         continue
     }
-    glog.V(4).Infof("Found %d available pods in old RS %s/%s", targetRS.Status.AvailableReplicas, targetRS.Namespace, targetRS.Name)
+    klog.V(4).Infof("Found %d available pods in old RS %s/%s", targetRS.Status.AvailableReplicas, targetRS.Namespace, targetRS.Name)
     if *(targetRS.Spec.Replicas) == targetRS.Status.AvailableReplicas {
         // no unhealthy replicas found, no scaling required.
         continue
@@ -202,7 +200,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs [
         // Cannot scale down.
         return 0, nil
     }
-    glog.V(4).Infof("Found %d available pods in deployment %s, scaling down old RSes", availablePodCount, deployment.Name)
+    klog.V(4).Infof("Found %d available pods in deployment %s, scaling down old RSes", availablePodCount, deployment.Name)

     sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))

vendor/k8s.io/kubernetes/pkg/controller/deployment/sync.go (generated, vendored; 39 lines changed)

@@ -22,21 +22,19 @@ import (
     "sort"
     "strconv"

-    "github.com/golang/glog"
     apps "k8s.io/api/apps/v1"
     "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/types"
-    "k8s.io/apimachinery/pkg/util/rand"
+    "k8s.io/klog"
     "k8s.io/kubernetes/pkg/controller"
     deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
     labelsutil "k8s.io/kubernetes/pkg/util/labels"
 )

 // syncStatusOnly only updates Deployments Status and doesn't take any mutating actions.
-func (dc *DeploymentController) syncStatusOnly(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
-    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
+func (dc *DeploymentController) syncStatusOnly(d *apps.Deployment, rsList []*apps.ReplicaSet) error {
+    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, false)
     if err != nil {
         return err
     }
@@ -47,8 +45,8 @@ func (dc *DeploymentController) syncStatusOnly(d *apps.Deployment, rsList []*app

 // sync is responsible for reconciling deployments on scaling events or when they
 // are paused.
-func (dc *DeploymentController) sync(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
-    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
+func (dc *DeploymentController) sync(d *apps.Deployment, rsList []*apps.ReplicaSet) error {
+    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, false)
     if err != nil {
         return err
     }
@@ -73,7 +71,7 @@ func (dc *DeploymentController) sync(d *apps.Deployment, rsList []*apps.ReplicaS
     // These conditions are needed so that we won't accidentally report lack of progress for resumed deployments
     // that were paused for longer than progressDeadlineSeconds.
     func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error {
-    if d.Spec.ProgressDeadlineSeconds == nil {
+    if !deploymentutil.HasProgressDeadline(d) {
         return nil
     }
     cond := deploymentutil.GetDeploymentCondition(d.Status, apps.DeploymentProgressing)
@@ -106,7 +104,6 @@ func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error
 // getAllReplicaSetsAndSyncRevision returns all the replica sets for the provided deployment (new and all old), with new RS's and deployment's revision updated.
 //
 // rsList should come from getReplicaSetsForDeployment(d).
-// podMap should come from getPodMapForDeployment(d, rsList).
 //
 // 1. Get all old RSes this deployment targets, and calculate the max revision number among them (maxOldV).
 // 2. Get new RS this deployment targets (whose pod template matches deployment's), and update new RS's revision number to (maxOldV + 1),
@@ -115,7 +112,7 @@ func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error
 //
 // Note that currently the deployment controller is using caches to avoid querying the server for reads.
 // This may lead to stale reads of replica sets, thus incorrect deployment status.
-func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList, createIfNotExisted bool) (*apps.ReplicaSet, []*apps.ReplicaSet, error) {
+func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *apps.Deployment, rsList []*apps.ReplicaSet, createIfNotExisted bool) (*apps.ReplicaSet, []*apps.ReplicaSet, error) {
     _, allOldRSs := deploymentutil.FindOldReplicaSets(d, rsList)

     // Get new replica set with the updated revision number
@@ -161,7 +158,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
     // of this deployment then it is likely that old users started caring about progress. In that
     // case we need to take into account the first time we noticed their new replica set.
     cond := deploymentutil.GetDeploymentCondition(d.Status, apps.DeploymentProgressing)
-    if d.Spec.ProgressDeadlineSeconds != nil && cond == nil {
+    if deploymentutil.HasProgressDeadline(d) && cond == nil {
         msg := fmt.Sprintf("Found new replica set %q", rsCopy.Name)
         condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, deploymentutil.FoundNewRSReason, msg)
         deploymentutil.SetDeploymentCondition(&d.Status, *condition)
@@ -183,7 +180,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old

     // new ReplicaSet does not exist, create one.
     newRSTemplate := *d.Spec.Template.DeepCopy()
-    podTemplateSpecHash := fmt.Sprintf("%d", controller.ComputeHash(&newRSTemplate, d.Status.CollisionCount))
+    podTemplateSpecHash := controller.ComputeHash(&newRSTemplate, d.Status.CollisionCount)
     newRSTemplate.Labels = labelsutil.CloneAndAddLabel(d.Spec.Template.Labels, apps.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash)
     // Add podTemplateHash label to selector.
     newRSSelector := labelsutil.CloneSelectorAndAddLabel(d.Spec.Selector, apps.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash)
@@ -192,7 +189,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
     newRS := apps.ReplicaSet{
         ObjectMeta: metav1.ObjectMeta{
             // Make the name deterministic, to ensure idempotence
-            Name:            d.Name + "-" + rand.SafeEncodeString(podTemplateSpecHash),
+            Name:            d.Name + "-" + podTemplateSpecHash,
             Namespace:       d.Namespace,
             OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(d, controllerKind)},
             Labels:          newRSTemplate.Labels,
@@ -251,12 +248,12 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
         // error.
         _, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
         if dErr == nil {
-            glog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
+            klog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
         }
         return nil, err
     case err != nil:
         msg := fmt.Sprintf("Failed to create new replica set %q: %v", newRS.Name, err)
-        if d.Spec.ProgressDeadlineSeconds != nil {
+        if deploymentutil.HasProgressDeadline(d) {
             cond := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionFalse, deploymentutil.FailedRSCreateReason, msg)
             deploymentutil.SetDeploymentCondition(&d.Status, *cond)
             // We don't really care about this error at this point, since we have a bigger issue to report.
@@ -272,7 +269,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
     }

     needsUpdate := deploymentutil.SetDeploymentRevision(d, newRevision)
-    if !alreadyExists && d.Spec.ProgressDeadlineSeconds != nil {
+    if !alreadyExists && deploymentutil.HasProgressDeadline(d) {
         msg := fmt.Sprintf("Created new replica set %q", createdRS.Name)
         condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, deploymentutil.NewReplicaSetReason, msg)
         deploymentutil.SetDeploymentCondition(&d.Status, *condition)
@@ -427,7 +424,7 @@ func (dc *DeploymentController) scaleReplicaSet(rs *apps.ReplicaSet, newScale in
 // where N=d.Spec.RevisionHistoryLimit. Old replica sets are older versions of the podtemplate of a deployment kept
 // around by default 1) for historical reasons and 2) for the ability to rollback a deployment.
 func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) error {
-    if deployment.Spec.RevisionHistoryLimit == nil {
+    if !deploymentutil.HasRevisionHistoryLimit(deployment) {
         return nil
     }

@@ -443,7 +440,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, dep
     }

     sort.Sort(controller.ReplicaSetsByCreationTimestamp(cleanableRSes))
-    glog.V(4).Infof("Looking to cleanup old replica sets for deployment %q", deployment.Name)
+    klog.V(4).Infof("Looking to cleanup old replica sets for deployment %q", deployment.Name)

     for i := int32(0); i < diff; i++ {
         rs := cleanableRSes[i]
@@ -451,7 +448,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, dep
         if rs.Status.Replicas != 0 || *(rs.Spec.Replicas) != 0 || rs.Generation > rs.Status.ObservedGeneration || rs.DeletionTimestamp != nil {
             continue
         }
-        glog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name)
+        klog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name)
         if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
             // Return error instead of aggregating and continuing DELETEs on the theory
             // that we may be overloading the api server.
@@ -520,8 +517,8 @@ func calculateStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployme
 //
 // rsList should come from getReplicaSetsForDeployment(d).
-// podMap should come from getPodMapForDeployment(d, rsList).
-func (dc *DeploymentController) isScalingEvent(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) (bool, error) {
-    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
+func (dc *DeploymentController) isScalingEvent(d *apps.Deployment, rsList []*apps.ReplicaSet) (bool, error) {
+    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, false)
     if err != nil {
         return false, err
     }
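
Two details in sync.go are worth calling out beyond the glog/klog rename: the nil checks on ProgressDeadlineSeconds and RevisionHistoryLimit move behind the new deploymentutil helpers, and controller.ComputeHash now returns an already safe-encoded string, which is why getNewReplicaSet drops its own fmt.Sprintf and rand.SafeEncodeString wrapping (and why the util/rand import disappears). A sketch of the naming change under that reading; safeEncodeString below is a stand-in, not the real k8s.io/apimachinery helper:

package main

import "fmt"

// Stand-in for rand.SafeEncodeString, which remaps bytes into an alphabet
// that is safe to embed in Kubernetes object names.
func safeEncodeString(s string) string { return s }

// Before this bump: ComputeHash returned a numeric hash, and the caller both
// formatted and safe-encoded it when deriving the deterministic RS name.
func rsNameOld(deployment string, hash uint32) string {
	return deployment + "-" + safeEncodeString(fmt.Sprintf("%d", hash))
}

// After: ComputeHash hands back the encoded string, so the caller just
// concatenates it. Same deterministic name, one fewer conversion.
func rsNameNew(deployment, podTemplateSpecHash string) string {
	return deployment + "-" + podTemplateSpecHash
}

func main() {
	fmt.Println(rsNameOld("nginx", 3452217680))   // numeric hash is illustrative
	fmt.Println(rsNameNew("nginx", "5b8c7f9d4c")) // encoded hash is illustrative
}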
vendor/k8s.io/kubernetes/pkg/controller/deployment/sync_test.go (generated, vendored; 12 lines changed)

@@ -17,6 +17,7 @@ limitations under the License.
 package deployment

 import (
+    "math"
     "testing"
     "time"

@@ -393,6 +394,16 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
         revisionHistoryLimit: 0,
         expectedDeletions:    0,
     },
+    {
+        // with unlimited revisionHistoryLimit
+        oldRSs: []*apps.ReplicaSet{
+            newRSWithStatus("foo-1", 0, 0, selector),
+            newRSWithStatus("foo-2", 0, 0, selector),
+            newRSWithStatus("foo-3", 0, 0, selector),
+        },
+        revisionHistoryLimit: math.MaxInt32,
+        expectedDeletions:    0,
+    },
 }

 for i := range tests {
@@ -418,6 +429,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
     defer close(stopCh)
     informers.Start(stopCh)

+    t.Logf(" &test.revisionHistoryLimit: %d", test.revisionHistoryLimit)
     d := newDeployment("foo", 1, &test.revisionHistoryLimit, nil, nil, map[string]string{"foo": "bar"})
     controller.cleanupDeployment(test.oldRSs, d)

vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD (generated, vendored; 56 lines changed)

@@ -8,33 +8,23 @@ load(

 go_library(
     name = "go_default_library",
-    srcs = [
-        "deployment_util.go",
-        "pod_util.go",
-        "replicaset_util.go",
-    ],
+    srcs = ["deployment_util.go"],
     importpath = "k8s.io/kubernetes/pkg/controller/deployment/util",
     deps = [
         "//pkg/apis/extensions:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/util/labels:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/k8s.io/api/apps/v1:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/listers/apps/v1:go_default_library",
-        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/util/integer:go_default_library",
-        "//vendor/k8s.io/client-go/util/retry:go_default_library",
+        "//staging/src/k8s.io/api/apps/v1:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/util/integer:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

@@ -48,16 +38,16 @@ go_test(
     deps = [
         "//pkg/controller:go_default_library",
         "//pkg/util/hash:go_default_library",
-        "//vendor/k8s.io/api/apps/v1:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
-        "//vendor/k8s.io/client-go/testing:go_default_library",
+        "//staging/src/k8s.io/api/apps/v1:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
+        "//staging/src/k8s.io/client-go/testing:go_default_library",
     ],
 )

67
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go
generated
vendored
67
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go
generated
vendored
@ -18,12 +18,13 @@ package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
@ -36,7 +37,6 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
|
||||
"k8s.io/client-go/util/integer"
|
||||
internalextensions "k8s.io/kubernetes/pkg/apis/extensions"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
labelsutil "k8s.io/kubernetes/pkg/util/labels"
|
||||
)
|
||||
@ -186,7 +186,7 @@ func MaxRevision(allRSs []*apps.ReplicaSet) int64 {
|
||||
for _, rs := range allRSs {
|
||||
if v, err := Revision(rs); err != nil {
|
||||
// Skip the replica sets when it failed to parse their revision information
|
||||
glog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
|
||||
klog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
|
||||
} else if v > max {
|
||||
max = v
|
||||
}
|
||||
@ -200,7 +200,7 @@ func LastRevision(allRSs []*apps.ReplicaSet) int64 {
|
||||
for _, rs := range allRSs {
|
||||
if v, err := Revision(rs); err != nil {
|
||||
// Skip the replica sets when it failed to parse their revision information
|
||||
glog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
|
||||
klog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
|
||||
} else if v >= max {
|
||||
secMax = max
|
||||
max = v
|
||||
@ -241,7 +241,7 @@ func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.Replic
|
||||
oldRevisionInt, err := strconv.ParseInt(oldRevision, 10, 64)
|
||||
if err != nil {
|
||||
if oldRevision != "" {
|
||||
glog.Warningf("Updating replica set revision OldRevision not int %s", err)
|
||||
klog.Warningf("Updating replica set revision OldRevision not int %s", err)
|
||||
return false
|
||||
}
|
||||
//If the RS annotation is empty then initialise it to 0
|
||||
@ -249,13 +249,13 @@ func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.Replic
|
||||
}
|
||||
newRevisionInt, err := strconv.ParseInt(newRevision, 10, 64)
|
||||
if err != nil {
|
||||
glog.Warningf("Updating replica set revision NewRevision not int %s", err)
|
||||
klog.Warningf("Updating replica set revision NewRevision not int %s", err)
|
||||
return false
|
||||
}
|
||||
if oldRevisionInt < newRevisionInt {
|
||||
newRS.Annotations[RevisionAnnotation] = newRevision
|
||||
annotationChanged = true
|
||||
glog.V(4).Infof("Updating replica set %q revision to %s", newRS.Name, newRevision)
|
||||
klog.V(4).Infof("Updating replica set %q revision to %s", newRS.Name, newRevision)
|
||||
}
|
||||
// If a revision annotation already existed and this replica set was updated with a new revision
|
||||
// then that means we are rolling back to this replica set. We need to preserve the old revisions
|
||||
@ -376,7 +376,7 @@ func getIntFromAnnotation(rs *apps.ReplicaSet, annotationKey string) (int32, boo
|
||||
}
|
||||
intValue, err := strconv.Atoi(annotationValue)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("Cannot convert the value %q with annotation key %q for the replica set %q", annotationValue, annotationKey, rs.Name)
|
||||
klog.V(2).Infof("Cannot convert the value %q with annotation key %q for the replica set %q", annotationValue, annotationKey, rs.Name)
|
||||
return int32(0), false
|
||||
}
|
||||
return int32(intValue), true
|
||||
@ -575,29 +575,6 @@ func ListReplicaSets(deployment *apps.Deployment, getRSList RsListFunc) ([]*apps
|
||||
return owned, nil
|
||||
}
|
||||
|
||||
// ListReplicaSetsInternal is ListReplicaSets for internalextensions.
|
||||
// TODO: Remove the duplicate when call sites are updated to ListReplicaSets.
|
||||
func ListReplicaSetsInternal(deployment *internalextensions.Deployment, getRSList func(string, metav1.ListOptions) ([]*internalextensions.ReplicaSet, error)) ([]*internalextensions.ReplicaSet, error) {
|
||||
namespace := deployment.Namespace
|
||||
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
all, err := getRSList(namespace, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Only include those whose ControllerRef matches the Deployment.
|
||||
filtered := make([]*internalextensions.ReplicaSet, 0, len(all))
|
||||
for _, rs := range all {
|
||||
if metav1.IsControlledBy(rs, deployment) {
|
||||
filtered = append(filtered, rs)
|
||||
}
|
||||
}
|
||||
return filtered, nil
|
||||
}
|
||||
|
||||
// ListPods returns a list of pods the given deployment targets.
|
||||
// This needs a list of ReplicaSets for the Deployment,
|
||||
// which can be found with ListReplicaSets().
|
||||
@ -773,7 +750,7 @@ var nowFn = func() time.Time { return time.Now() }
|
||||
// is older than progressDeadlineSeconds or a Progressing condition with a TimedOutReason reason already
|
||||
// exists.
|
||||
func DeploymentTimedOut(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool {
|
||||
if deployment.Spec.ProgressDeadlineSeconds == nil {
|
||||
if !HasProgressDeadline(deployment) {
|
||||
return false
|
||||
}
|
||||
|
||||
@ -810,7 +787,7 @@ func DeploymentTimedOut(deployment *apps.Deployment, newStatus *apps.DeploymentS
|
||||
delta := time.Duration(*deployment.Spec.ProgressDeadlineSeconds) * time.Second
|
||||
timedOut := from.Add(delta).Before(now)
|
||||
|
||||
glog.V(4).Infof("Deployment %q timed out (%t) [last progress check: %v - now: %v]", deployment.Name, timedOut, from, now)
|
||||
klog.V(4).Infof("Deployment %q timed out (%t) [last progress check: %v - now: %v]", deployment.Name, timedOut, from, now)
|
||||
return timedOut
|
||||
}
|
||||
|
||||
@ -876,19 +853,6 @@ func WaitForObservedDeployment(getDeploymentFunc func() (*apps.Deployment, error
|
||||
})
|
||||
}
|
||||
|
||||
// TODO: remove the duplicate
|
||||
// WaitForObservedInternalDeployment polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration.
|
||||
// Returns error if polling timesout.
|
||||
func WaitForObservedDeploymentInternal(getDeploymentFunc func() (*internalextensions.Deployment, error), desiredGeneration int64, interval, timeout time.Duration) error {
|
||||
return wait.Poll(interval, timeout, func() (bool, error) {
|
||||
deployment, err := getDeploymentFunc()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return deployment.Status.ObservedGeneration >= desiredGeneration, nil
|
||||
})
|
||||
}
|
||||
|
||||
// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one
// step. For example:
//
@ -899,11 +863,11 @@ func WaitForObservedDeploymentInternal(getDeploymentFunc func() (*internalextens
// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1)
func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) {
	surge, err := intstrutil.GetValueFromIntOrPercent(maxSurge, int(desired), true)
	surge, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), int(desired), true)
	if err != nil {
		return 0, 0, err
	}
	unavailable, err := intstrutil.GetValueFromIntOrPercent(maxUnavailable, int(desired), false)
	unavailable, err := intstrutil.GetValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), int(desired), false)
	if err != nil {
		return 0, 0, err
	}
@ -918,3 +882,10 @@ func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired

	return int32(surge), int32(unavailable), nil
}

func HasProgressDeadline(d *apps.Deployment) bool {
	return d.Spec.ProgressDeadlineSeconds != nil && *d.Spec.ProgressDeadlineSeconds != math.MaxInt32
}
func HasRevisionHistoryLimit(d *apps.Deployment) bool {
	return d.Spec.RevisionHistoryLimit != nil && *d.Spec.RevisionHistoryLimit != math.MaxInt32
}
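The asymmetric rounding is the point of the "fencepost" name: surge rounds up, unavailability rounds down, and (per the elided tail of the function, whose behavior is visible in the test table further below) a 0/0 resolution is bumped to one unavailable pod so a rollout can always make progress. A small worked check of the rounding, using the same intstr utility:

package main

import (
	"fmt"

	intstrutil "k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	maxSurge := intstrutil.FromString("39%")
	maxUnavailable := intstrutil.FromString("39%")

	// Surge rounds up: ceil(0.39 * 10) = 4.
	surge, _ := intstrutil.GetValueFromIntOrPercent(&maxSurge, 10, true)
	// Unavailability rounds down: floor(0.39 * 10) = 3.
	unavailable, _ := intstrutil.GetValueFromIntOrPercent(&maxUnavailable, 10, false)

	fmt.Println(surge, unavailable) // 4 3
}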
81
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util_test.go
generated
vendored
@ -18,6 +18,7 @@ package util

import (
	"fmt"
	"math"
	"math/rand"
	"reflect"
	"sort"
@ -150,6 +151,7 @@ func randomUID() types.UID {
func generateDeployment(image string) apps.Deployment {
	podLabels := map[string]string{"name": image}
	terminationSec := int64(30)
	enableServiceLinks := v1.DefaultEnableServiceLinks
	return apps.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name: image,
@ -175,6 +177,7 @@ func generateDeployment(image string) apps.Deployment {
					TerminationGracePeriodSeconds: &terminationSec,
					RestartPolicy:                 v1.RestartPolicyAlways,
					SecurityContext:               &v1.PodSecurityContext{},
					EnableServiceLinks:            &enableServiceLinks,
				},
			},
		},
@ -503,7 +506,6 @@ func TestFindOldReplicaSets(t *testing.T) {
		Name            string
		deployment      apps.Deployment
		rsList          []*apps.ReplicaSet
		podList         *v1.PodList
		expected        []*apps.ReplicaSet
		expectedRequire []*apps.ReplicaSet
	}{
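A side note on the enableServiceLinks lines added above: EnableServiceLinks is a *bool field, and Go cannot take the address of a constant, so the default is first copied into an addressable local. The same idiom in isolation, with the field and constant names as in the diff:

enableServiceLinks := v1.DefaultEnableServiceLinks // copy the constant...
spec := v1.PodSpec{
	EnableServiceLinks: &enableServiceLinks, // ...so its address can be taken
}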
@ -617,52 +619,83 @@ func TestGetReplicaCountForReplicaSets(t *testing.T) {

func TestResolveFenceposts(t *testing.T) {
	tests := []struct {
		maxSurge          string
		maxUnavailable    string
		maxSurge          *string
		maxUnavailable    *string
		desired           int32
		expectSurge       int32
		expectUnavailable int32
		expectError       bool
	}{
		{
			maxSurge:          "0%",
			maxUnavailable:    "0%",
			maxSurge:          newString("0%"),
			maxUnavailable:    newString("0%"),
			desired:           0,
			expectSurge:       0,
			expectUnavailable: 1,
			expectError:       false,
		},
		{
			maxSurge:          "39%",
			maxUnavailable:    "39%",
			maxSurge:          newString("39%"),
			maxUnavailable:    newString("39%"),
			desired:           10,
			expectSurge:       4,
			expectUnavailable: 3,
			expectError:       false,
		},
		{
			maxSurge:          "oops",
			maxUnavailable:    "39%",
			maxSurge:          newString("oops"),
			maxUnavailable:    newString("39%"),
			desired:           10,
			expectSurge:       0,
			expectUnavailable: 0,
			expectError:       true,
		},
		{
			maxSurge:          "55%",
			maxUnavailable:    "urg",
			maxSurge:          newString("55%"),
			maxUnavailable:    newString("urg"),
			desired:           10,
			expectSurge:       0,
			expectUnavailable: 0,
			expectError:       true,
		},
		{
			maxSurge:          nil,
			maxUnavailable:    newString("39%"),
			desired:           10,
			expectSurge:       0,
			expectUnavailable: 3,
			expectError:       false,
		},
		{
			maxSurge:          newString("39%"),
			maxUnavailable:    nil,
			desired:           10,
			expectSurge:       4,
			expectUnavailable: 0,
			expectError:       false,
		},
		{
			maxSurge:          nil,
			maxUnavailable:    nil,
			desired:           10,
			expectSurge:       0,
			expectUnavailable: 1,
			expectError:       false,
		},
	}

	for num, test := range tests {
		t.Run("maxSurge="+test.maxSurge, func(t *testing.T) {
			maxSurge := intstr.FromString(test.maxSurge)
			maxUnavail := intstr.FromString(test.maxUnavailable)
			surge, unavail, err := ResolveFenceposts(&maxSurge, &maxUnavail, test.desired)
		t.Run(fmt.Sprintf("%d", num), func(t *testing.T) {
			var maxSurge, maxUnavail *intstr.IntOrString
			if test.maxSurge != nil {
				surge := intstr.FromString(*test.maxSurge)
				maxSurge = &surge
			}
			if test.maxUnavailable != nil {
				unavail := intstr.FromString(*test.maxUnavailable)
				maxUnavail = &unavail
			}
			surge, unavail, err := ResolveFenceposts(maxSurge, maxUnavail, test.desired)
			if err != nil && !test.expectError {
				t.Errorf("unexpected error %v", err)
			}
@ -676,6 +709,10 @@ func TestResolveFenceposts(t *testing.T) {
	}
}

func newString(s string) *string {
	return &s
}

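newString exists because string literals are not addressable, so the test needs a helper to build *string values for the table. On Go 1.18+ the same helper can be written once, generically; a sketch, not part of this vendored code:

// ptr returns a pointer to a copy of v; works for any type.
func ptr[T any](v T) *T { return &v }

// e.g. maxSurge: ptr("39%"), maxUnavailable: nil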
func TestNewRSNewReplicas(t *testing.T) {
	tests := []struct {
		Name string
@ -1068,8 +1105,9 @@ func TestDeploymentProgressing(t *testing.T) {

func TestDeploymentTimedOut(t *testing.T) {
	var (
		null *int32
		ten  = int32(10)
		null     *int32
		ten      = int32(10)
		infinite = int32(math.MaxInt32)
	)

	timeFn := func(min, sec int) time.Time {
@ -1102,12 +1140,19 @@ func TestDeploymentTimedOut(t *testing.T) {
		expected bool
	}{
		{
			name: "no progressDeadlineSeconds specified - no timeout",
			name: "nil progressDeadlineSeconds specified - no timeout",

			d:        deployment(apps.DeploymentProgressing, v1.ConditionTrue, "", null, timeFn(1, 9)),
			nowFn:    func() time.Time { return timeFn(1, 20) },
			expected: false,
		},
		{
			name: "infinite progressDeadlineSeconds specified - no timeout",

			d:        deployment(apps.DeploymentProgressing, v1.ConditionTrue, "", &infinite, timeFn(1, 9)),
			nowFn:    func() time.Time { return timeFn(1, 20) },
			expected: false,
		},
		{
			name: "progressDeadlineSeconds: 10s, now - started => 00:01:20 - 00:01:09 => 11s",

2
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/hash_test.go
generated
vendored
@ -105,7 +105,7 @@ var podSpec string = `
`

func TestPodTemplateSpecHash(t *testing.T) {
	seenHashes := make(map[uint32]int)
	seenHashes := make(map[string]int)

	for i := 0; i < 1000; i++ {
		specJson := strings.Replace(podSpec, "@@VERSION@@", strconv.Itoa(i), 1)
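The map key changes from uint32 to string because at this revision the controller's pod-template hash is surfaced as an encoded string rather than a raw FNV sum. A minimal sketch of the underlying hashing, assuming FNV-1a as upstream uses (the additional safe-alphabet encoding step is omitted for brevity):

package main

import (
	"fmt"
	"hash/fnv"
)

// hashOf condenses arbitrary bytes to the string form of an FNV-1a 32-bit
// sum, the shape the test's seenHashes map now keys on.
func hashOf(data []byte) string {
	h := fnv.New32a()
	h.Write(data) // hash.Hash.Write never returns an error
	return fmt.Sprint(h.Sum32())
}

func main() {
	fmt.Println(hashOf([]byte(`{"containers":[{"image":"foo:v1"}]}`)))
}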
60
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/pod_util.go
generated
vendored
@ -1,60 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"github.com/golang/glog"

	"k8s.io/api/core/v1"
	errorsutil "k8s.io/apimachinery/pkg/util/errors"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/util/retry"
)

// TODO: use client library instead when it starts to support update retries
// see https://github.com/kubernetes/kubernetes/issues/21479
type updatePodFunc func(pod *v1.Pod) error

// UpdatePodWithRetries updates a pod with given applyUpdate function.
func UpdatePodWithRetries(podClient v1core.PodInterface, podLister corelisters.PodLister, namespace, name string, applyUpdate updatePodFunc) (*v1.Pod, error) {
	var pod *v1.Pod

	retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		var err error
		pod, err = podLister.Pods(namespace).Get(name)
		if err != nil {
			return err
		}
		pod = pod.DeepCopy()
		// Apply the update, then attempt to push it to the apiserver.
		if applyErr := applyUpdate(pod); applyErr != nil {
			return applyErr
		}
		pod, err = podClient.Update(pod)
		return err
	})

	// Ignore the precondition violated error, this pod is already updated
	// with the desired label.
	if retryErr == errorsutil.ErrPreconditionViolated {
		glog.V(4).Infof("Pod %s/%s precondition doesn't hold, skip updating it.", namespace, name)
		retryErr = nil
	}

	return pod, retryErr
}
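Although UpdatePodWithRetries is deleted here (as is UpdateRSWithRetries below, which followed the identical pattern), the get/mutate/update loop it wrapped is still the standard client-go recipe for surviving optimistic-concurrency conflicts. A standalone sketch against a typed client; client, namespace, and the label mutation are illustrative assumptions:

// relabelPod retries the update until it lands on a fresh resourceVersion;
// retry.RetryOnConflict re-runs the closure only on 409 Conflict errors.
func relabelPod(client kubernetes.Interface, namespace, name, key, value string) (*v1.Pod, error) {
	var pod *v1.Pod
	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		var err error
		pod, err = client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		pod = pod.DeepCopy() // defensive copy, mirroring the deleted helper
		if pod.Labels == nil {
			pod.Labels = map[string]string{}
		}
		pod.Labels[key] = value
		pod, err = client.CoreV1().Pods(namespace).Update(pod)
		return err
	})
	return pod, err
}

Reading from the client rather than a lister (as the deleted helper did) means each retry sees a fresh resourceVersion at the cost of an extra API round-trip.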
60
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/replicaset_util.go
generated
vendored
@ -1,60 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"github.com/golang/glog"

	apps "k8s.io/api/apps/v1"
	errorsutil "k8s.io/apimachinery/pkg/util/errors"
	appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
	appslisters "k8s.io/client-go/listers/apps/v1"
	"k8s.io/client-go/util/retry"
)

// TODO: use client library instead when it starts to support update retries
// see https://github.com/kubernetes/kubernetes/issues/21479
type updateRSFunc func(rs *apps.ReplicaSet) error

// UpdateRSWithRetries updates a RS with given applyUpdate function. Note that RS not found error is ignored.
// The returned bool value can be used to tell if the RS is actually updated.
func UpdateRSWithRetries(rsClient appsclient.ReplicaSetInterface, rsLister appslisters.ReplicaSetLister, namespace, name string, applyUpdate updateRSFunc) (*apps.ReplicaSet, error) {
	var rs *apps.ReplicaSet

	retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		var err error
		rs, err = rsLister.ReplicaSets(namespace).Get(name)
		if err != nil {
			return err
		}
		rs = rs.DeepCopy()
		// Apply the update, then attempt to push it to the apiserver.
		if applyErr := applyUpdate(rs); applyErr != nil {
			return applyErr
		}
		rs, err = rsClient.Update(rs)
		return err
	})

	// Ignore the precondition violated error, but the RS isn't updated.
	if retryErr == errorsutil.ErrPreconditionViolated {
		glog.V(4).Infof("Replica set %s/%s precondition doesn't hold, skip updating it.", namespace, name)
		retryErr = nil
	}

	return rs, retryErr
}