Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions


@@ -18,27 +18,27 @@ go_library(
"//pkg/api/v1/pod:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/util/metrics:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/integer:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/informers/apps/v1:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/integer:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@@ -51,25 +51,26 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/controller:go_default_library",
"//pkg/controller/testutil:go_default_library",
"//pkg/securitycontext:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
],
)
@@ -82,9 +83,6 @@ filegroup(
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/controller/replicaset/options:all-srcs",
],
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@@ -7,3 +7,5 @@ reviewers:
- lavalamp
- tnozicka
- enisoc
labels:
- sig/apps


@@ -1,26 +0,0 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["options.go"],
    importpath = "k8s.io/kubernetes/pkg/controller/replicaset/options",
    deps = ["//vendor/github.com/spf13/pflag:go_default_library"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)


@@ -1,35 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package options

import (
    "github.com/spf13/pflag"
)

type ReplicasetControllerOptions struct {
    ConcurrentRSSyncs int
}

func NewReplicasetControllerOptions() ReplicasetControllerOptions {
    return ReplicasetControllerOptions{
        ConcurrentRSSyncs: 5,
    }
}

func (o *ReplicasetControllerOptions) AddFlags(fs *pflag.FlagSet) {
    fs.IntVar(&o.ConcurrentRSSyncs, "concurrent-replicaset-syncs", o.ConcurrentRSSyncs, "The number of replicasets that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load")
}


@@ -35,7 +35,6 @@ import (
"sync"
"time"
"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
@@ -55,6 +54,7 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/integer"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/util/metrics"
@@ -108,7 +108,7 @@ type ReplicaSetController struct {
// NewReplicaSetController configures a replica set controller with the specified event recorder
func NewReplicaSetController(rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicaSetController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
return NewBaseController(rsInformer, podInformer, kubeClient, burstReplicas,
apps.SchemeGroupVersion.WithKind("ReplicaSet"),
@@ -179,8 +179,8 @@ func (rsc *ReplicaSetController) Run(workers int, stopCh <-chan struct{}) {
defer rsc.queue.ShutDown()
controllerName := strings.ToLower(rsc.Kind)
glog.Infof("Starting %v controller", controllerName)
defer glog.Infof("Shutting down %v controller", controllerName)
klog.Infof("Starting %v controller", controllerName)
defer klog.Infof("Shutting down %v controller", controllerName)
if !controller.WaitForCacheSync(rsc.Kind, stopCh, rsc.podListerSynced, rsc.rsListerSynced) {
return
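Taken together, NewReplicaSetController and Run show how the controller is wired up. A minimal usage sketch (the informer-factory setup and worker count here are assumptions for illustration, not part of this commit; BurstReplicas is the package's exported default):

    package sketch

    import (
        "k8s.io/client-go/informers"
        "k8s.io/client-go/kubernetes"
        "k8s.io/kubernetes/pkg/controller/replicaset"
    )

    // startReplicaSetController wires shared informers into the controller
    // and runs its sync workers until stopCh closes.
    func startReplicaSetController(client kubernetes.Interface, stopCh <-chan struct{}) {
        factory := informers.NewSharedInformerFactory(client, 0)
        rsc := replicaset.NewReplicaSetController(
            factory.Apps().V1().ReplicaSets(), // ReplicaSet informer
            factory.Core().V1().Pods(),        // Pod informer
            client,
            replicaset.BurstReplicas, // exported package default
        )
        factory.Start(stopCh)
        go rsc.Run(5, stopCh) // worker count is illustrative
    }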
@@ -246,7 +246,7 @@ func (rsc *ReplicaSetController) updateRS(old, cur interface{}) {
// that bad as ReplicaSets that haven't met expectations yet won't
// sync, and all the listing is done using local stores.
if *(oldRS.Spec.Replicas) != *(curRS.Spec.Replicas) {
glog.V(4).Infof("%v %v updated. Desired pod count change: %d->%d", rsc.Kind, curRS.Name, *(oldRS.Spec.Replicas), *(curRS.Spec.Replicas))
klog.V(4).Infof("%v %v updated. Desired pod count change: %d->%d", rsc.Kind, curRS.Name, *(oldRS.Spec.Replicas), *(curRS.Spec.Replicas))
}
rsc.enqueueReplicaSet(cur)
}
@@ -272,7 +272,7 @@ func (rsc *ReplicaSetController) addPod(obj interface{}) {
if err != nil {
return
}
glog.V(4).Infof("Pod %s created: %#v.", pod.Name, pod)
klog.V(4).Infof("Pod %s created: %#v.", pod.Name, pod)
rsc.expectations.CreationObserved(rsKey)
rsc.enqueueReplicaSet(rs)
return
@@ -286,7 +286,7 @@ func (rsc *ReplicaSetController) addPod(obj interface{}) {
if len(rss) == 0 {
return
}
glog.V(4).Infof("Orphan Pod %s created: %#v.", pod.Name, pod)
klog.V(4).Infof("Orphan Pod %s created: %#v.", pod.Name, pod)
for _, rs := range rss {
rsc.enqueueReplicaSet(rs)
}
@@ -335,7 +335,7 @@ func (rsc *ReplicaSetController) updatePod(old, cur interface{}) {
if rs == nil {
return
}
glog.V(4).Infof("Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
klog.V(4).Infof("Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
rsc.enqueueReplicaSet(rs)
// TODO: MinReadySeconds in the Pod will generate an Available condition to be added in
// the Pod status which in turn will trigger a requeue of the owning replica set thus
@@ -345,7 +345,7 @@ func (rsc *ReplicaSetController) updatePod(old, cur interface{}) {
// Note that this still suffers from #29229, we are just moving the problem one level
// "closer" to kubelet (from the deployment to the replica set controller).
if !podutil.IsPodReady(oldPod) && podutil.IsPodReady(curPod) && rs.Spec.MinReadySeconds > 0 {
glog.V(2).Infof("%v %q will be enqueued after %ds for availability check", rsc.Kind, rs.Name, rs.Spec.MinReadySeconds)
klog.V(2).Infof("%v %q will be enqueued after %ds for availability check", rsc.Kind, rs.Name, rs.Spec.MinReadySeconds)
// Add a second to avoid milliseconds skew in AddAfter.
// See https://github.com/kubernetes/kubernetes/issues/39785#issuecomment-279959133 for more info.
rsc.enqueueReplicaSetAfter(rs, (time.Duration(rs.Spec.MinReadySeconds)*time.Second)+time.Second)
@@ -360,7 +360,7 @@ func (rsc *ReplicaSetController) updatePod(old, cur interface{}) {
if len(rss) == 0 {
return
}
glog.V(4).Infof("Orphan Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
klog.V(4).Infof("Orphan Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
for _, rs := range rss {
rsc.enqueueReplicaSet(rs)
}
@@ -402,7 +402,7 @@ func (rsc *ReplicaSetController) deletePod(obj interface{}) {
if err != nil {
return
}
glog.V(4).Infof("Pod %s/%s deleted through %v, timestamp %+v: %#v.", pod.Namespace, pod.Name, utilruntime.GetCaller(), pod.DeletionTimestamp, pod)
klog.V(4).Infof("Pod %s/%s deleted through %v, timestamp %+v: %#v.", pod.Namespace, pod.Name, utilruntime.GetCaller(), pod.DeletionTimestamp, pod)
rsc.expectations.DeletionObserved(rsKey, controller.PodKey(pod))
rsc.enqueueReplicaSet(rs)
}
@@ -474,7 +474,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps
// into a performance bottleneck. We should generate a UID for the pod
// beforehand and store it via ExpectCreations.
rsc.expectations.ExpectCreations(rsKey, diff)
glog.V(2).Infof("Too few replicas for %v %s/%s, need %d, creating %d", rsc.Kind, rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff)
klog.V(2).Infof("Too few replicas for %v %s/%s, need %d, creating %d", rsc.Kind, rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff)
// Batch the pod creates. Batch sizes start at SlowStartInitialBatchSize
// and double with each successful iteration in a kind of "slow start".
// This handles attempts to start large numbers of pods that would
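The "slow start" described in the comment above is worth seeing in isolation. A simplified, self-contained sketch (the helper names slowStartBatch and intMin are assumptions; the controller's actual helper may differ in detail):

    package sketch

    import "sync"

    func intMin(a, b int) int {
        if a < b {
            return a
        }
        return b
    }

    // slowStartBatch calls fn up to count times, in batches that start at
    // initialBatchSize and double after each fully successful batch. A
    // systemic failure (e.g. a quota or admission error) therefore wastes
    // at most one small batch of calls instead of count of them.
    func slowStartBatch(count, initialBatchSize int, fn func() error) (int, error) {
        remaining := count
        successes := 0
        for batch := intMin(initialBatchSize, remaining); batch > 0; batch = intMin(2*batch, remaining) {
            errCh := make(chan error, batch)
            var wg sync.WaitGroup
            wg.Add(batch)
            for i := 0; i < batch; i++ {
                go func() {
                    defer wg.Done()
                    if err := fn(); err != nil {
                        errCh <- err
                    }
                }()
            }
            wg.Wait()
            successes += batch - len(errCh)
            if len(errCh) > 0 {
                return successes, <-errCh // stop doubling after a failed batch
            }
            remaining -= batch
        }
        return successes, nil
    }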
@@ -511,7 +511,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps
// The skipped pods will be retried later. The next controller resync will
// retry the slow start process.
if skippedPods := diff - successfulCreations; skippedPods > 0 {
glog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for %v %v/%v", skippedPods, rsc.Kind, rs.Namespace, rs.Name)
klog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for %v %v/%v", skippedPods, rsc.Kind, rs.Namespace, rs.Name)
for i := 0; i < skippedPods; i++ {
// Decrement the expected number of creates because the informer won't observe this pod
rsc.expectations.CreationObserved(rsKey)
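The ExpectCreations/CreationObserved pair above is the controller's expectations bookkeeping: record how many creates were issued, then count them back down as each one is observed (or, as here, known to have been skipped). A toy model of the mechanism (illustrative only; the real ControllerExpectations also tracks deletes and expires stale entries):

    package sketch

    import "sync"

    // expectations tracks, per ReplicaSet key, how many pod creations the
    // controller still expects to see reported by its informer.
    type expectations struct {
        mu   sync.Mutex
        adds map[string]int
    }

    func newExpectations() *expectations {
        return &expectations{adds: make(map[string]int)}
    }

    // ExpectCreations is called before issuing n pod creates.
    func (e *expectations) ExpectCreations(rsKey string, n int) {
        e.mu.Lock()
        defer e.mu.Unlock()
        e.adds[rsKey] += n
    }

    // CreationObserved is called once per observed (or skipped) create.
    func (e *expectations) CreationObserved(rsKey string) {
        e.mu.Lock()
        defer e.mu.Unlock()
        if e.adds[rsKey] > 0 {
            e.adds[rsKey]--
        }
    }

    // SatisfiedExpectations reports whether the ReplicaSet may sync again.
    func (e *expectations) SatisfiedExpectations(rsKey string) bool {
        e.mu.Lock()
        defer e.mu.Unlock()
        return e.adds[rsKey] == 0
    }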
@@ -522,7 +522,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps
if diff > rsc.burstReplicas {
diff = rsc.burstReplicas
}
glog.V(2).Infof("Too many replicas for %v %s/%s, need %d, deleting %d", rsc.Kind, rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff)
klog.V(2).Infof("Too many replicas for %v %s/%s, need %d, deleting %d", rsc.Kind, rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff)
// Choose which Pods to delete, preferring those in earlier phases of startup.
podsToDelete := getPodsToDelete(filteredPods, diff)
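The "earlier phases of startup" preference can be pictured as a rank over pods (a simplified stand-in; the real getPodsToDelete ordering also breaks ties on readiness duration, restart count, and creation time):

    package sketch

    import (
        "k8s.io/api/core/v1"
        podutil "k8s.io/kubernetes/pkg/api/v1/pod"
    )

    // podDeletionRank orders pods so the least-established ones sort first
    // and are deleted first.
    func podDeletionRank(pod *v1.Pod) int {
        switch {
        case pod.Spec.NodeName == "":
            return 0 // not yet scheduled
        case pod.Status.Phase == v1.PodPending:
            return 1 // scheduled but not running
        case !podutil.IsPodReady(pod):
            return 2 // running but not ready
        default:
            return 3 // running and ready
        }
    }

Sorting filteredPods ascending by this rank and taking the first diff entries yields a podsToDelete in the spirit of the call above.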
@@ -544,7 +544,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps
if err := rsc.podControl.DeletePod(rs.Namespace, targetPod.Name, rs); err != nil {
// Decrement the expected number of deletes because the informer won't observe this deletion
podKey := controller.PodKey(targetPod)
glog.V(2).Infof("Failed to delete %v, decrementing expectations for %v %s/%s", podKey, rsc.Kind, rs.Namespace, rs.Name)
klog.V(2).Infof("Failed to delete %v, decrementing expectations for %v %s/%s", podKey, rsc.Kind, rs.Namespace, rs.Name)
rsc.expectations.DeletionObserved(rsKey, podKey)
errCh <- err
}
@@ -572,7 +572,7 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing %v %q (%v)", rsc.Kind, key, time.Since(startTime))
klog.V(4).Infof("Finished syncing %v %q (%v)", rsc.Kind, key, time.Since(startTime))
}()
namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -581,7 +581,7 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
}
rs, err := rsc.rsLister.ReplicaSets(namespace).Get(name)
if errors.IsNotFound(err) {
glog.V(4).Infof("%v %v has been deleted", rsc.Kind, key)
klog.V(4).Infof("%v %v has been deleted", rsc.Kind, key)
rsc.expectations.DeleteExpectations(key)
return nil
}


@@ -47,6 +47,7 @@ import (
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/controller"
. "k8s.io/kubernetes/pkg/controller/testutil"
"k8s.io/kubernetes/pkg/securitycontext"
)
@@ -78,18 +79,9 @@ func skipListerFunc(verb string, url url.URL) bool {
var alwaysReady = func() bool { return true }
func getKey(rs *apps.ReplicaSet, t *testing.T) string {
if key, err := controller.KeyFunc(rs); err != nil {
t.Errorf("Unexpected error getting key for ReplicaSet %v: %v", rs.Name, err)
return ""
} else {
return key
}
}
func newReplicaSet(replicas int, selectorMap map[string]string) *apps.ReplicaSet {
rs := &apps.ReplicaSet{
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ReplicaSet"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
@@ -109,7 +101,7 @@ func newReplicaSet(replicas int, selectorMap map[string]string) *apps.ReplicaSet
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "foo/bar",
Image: "foo/bar",
TerminationMessagePath: v1.TerminationMessagePathDefault,
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
@@ -216,7 +208,7 @@ func TestSyncReplicaSetDoesNothing(t *testing.T) {
newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 2, v1.PodRunning, labelMap, rsSpec, "pod")
manager.podControl = &fakePodControl
manager.syncReplicaSet(getKey(rsSpec, t))
manager.syncReplicaSet(GetKey(rsSpec, t))
validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
}
@@ -244,7 +236,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
go manager.worker()
expected := getKey(rsSpec, t)
expected := GetKey(rsSpec, t)
select {
case key := <-received:
if key != expected {
@@ -271,7 +263,7 @@ func TestSyncReplicaSetCreateFailures(t *testing.T) {
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
manager.podControl = &fakePodControl
manager.syncReplicaSet(getKey(rs, t))
manager.syncReplicaSet(GetKey(rs, t))
validateSyncReplicaSet(t, &fakePodControl, fakePodControl.CreateLimit, 0, 0)
expectedLimit := 0
for pass := uint8(0); expectedLimit <= fakePodControl.CreateLimit; pass++ {
@@ -310,7 +302,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
rsSpec.Status.Replicas = 1
rsSpec.Status.ReadyReplicas = 1
rsSpec.Status.AvailableReplicas = 1
manager.syncReplicaSet(getKey(rsSpec, t))
manager.syncReplicaSet(GetKey(rsSpec, t))
validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)
// Expectations prevents replicas but not an update on status
@@ -318,7 +310,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
rsSpec.Status.ReadyReplicas = 0
rsSpec.Status.AvailableReplicas = 0
fakePodControl.Clear()
manager.syncReplicaSet(getKey(rsSpec, t))
manager.syncReplicaSet(GetKey(rsSpec, t))
validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
// Get the key for the controller
@@ -336,13 +328,13 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
fakePodControl.Clear()
fakePodControl.Err = fmt.Errorf("Fake Error")
manager.syncReplicaSet(getKey(rsSpec, t))
manager.syncReplicaSet(GetKey(rsSpec, t))
validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)
// This replica should not need a Lowering of expectations, since the previous create failed
fakePodControl.Clear()
fakePodControl.Err = nil
manager.syncReplicaSet(getKey(rsSpec, t))
manager.syncReplicaSet(GetKey(rsSpec, t))
validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)
// 2 PUT for the ReplicaSet status during dormancy window.
@@ -746,7 +738,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
for i := 0; i < numReplicas; i += burstReplicas {
manager.syncReplicaSet(getKey(rsSpec, t))
manager.syncReplicaSet(GetKey(rsSpec, t))
// The store accrues active pods. It's also used by the ReplicaSet to determine how many
// replicas to create.
@@ -785,7 +777,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
// To accurately simulate a watch we must delete the exact pods
// the rs is waiting for.
expectedDels := manager.expectations.GetUIDs(getKey(rsSpec, t))
expectedDels := manager.expectations.GetUIDs(GetKey(rsSpec, t))
podsToDelete := []*v1.Pod{}
isController := true
for _, key := range expectedDels.List() {
@@ -819,7 +811,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
// Check that the ReplicaSet didn't take any action for all the above pods
fakePodControl.Clear()
manager.syncReplicaSet(getKey(rsSpec, t))
manager.syncReplicaSet(GetKey(rsSpec, t))
validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
// Create/Delete the last pod
@ -829,7 +821,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
informers.Core().V1().Pods().Informer().GetIndexer().Add(&pods.Items[expectedPods-1])
manager.addPod(&pods.Items[expectedPods-1])
} else {
expectedDel := manager.expectations.GetUIDs(getKey(rsSpec, t))
expectedDel := manager.expectations.GetUIDs(GetKey(rsSpec, t))
if expectedDel.Len() != 1 {
t.Fatalf("Waiting on unexpected number of deletes.")
}
@@ -903,7 +895,7 @@ func TestRSSyncExpectations(t *testing.T) {
informers.Core().V1().Pods().Informer().GetIndexer().Add(&postExpectationsPod)
},
})
manager.syncReplicaSet(getKey(rsSpec, t))
manager.syncReplicaSet(GetKey(rsSpec, t))
validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
}
@@ -920,7 +912,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
manager.podControl = &fakePodControl
// This should set expectations for the ReplicaSet
manager.syncReplicaSet(getKey(rs, t))
manager.syncReplicaSet(GetKey(rs, t))
validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)
fakePodControl.Clear()
@@ -937,7 +929,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
t.Errorf("No expectations found for ReplicaSet")
}
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Delete(rs)
manager.syncReplicaSet(getKey(rs, t))
manager.syncReplicaSet(GetKey(rs, t))
if _, exists, err = manager.expectations.GetExpectations(rsKey); exists {
t.Errorf("Found expectaions, expected none since the ReplicaSet has been deleted.")
@@ -946,7 +938,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
// This should have no effect, since we've deleted the ReplicaSet.
podExp.Add(-1, 0)
informers.Core().V1().Pods().Informer().GetIndexer().Replace(make([]interface{}, 0), "0")
manager.syncReplicaSet(getKey(rs, t))
manager.syncReplicaSet(GetKey(rs, t))
validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)
}
@@ -995,7 +987,7 @@ func TestOverlappingRSs(t *testing.T) {
pod.OwnerReferences = []metav1.OwnerReference{
{UID: rs.UID, APIVersion: "v1", Kind: "ReplicaSet", Name: rs.Name, Controller: &isController},
}
rsKey := getKey(rs, t)
rsKey := GetKey(rs, t)
manager.addPod(pod)
queueRS, _ := manager.queue.Get()
@@ -1123,7 +1115,7 @@ func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
pod := newPod("pod", rs, v1.PodRunning, nil, true)
pod.OwnerReferences = []metav1.OwnerReference{otherControllerReference}
informers.Core().V1().Pods().Informer().GetIndexer().Add(pod)
err := manager.syncReplicaSet(getKey(rs, t))
err := manager.syncReplicaSet(GetKey(rs, t))
if err != nil {
t.Fatal(err)
}
@@ -1145,7 +1137,7 @@ func TestPatchPodFails(t *testing.T) {
// let both patches fail. The rs controller will assume it fails to take
// control of the pods and requeue to try again.
fakePodControl.Err = fmt.Errorf("Fake Error")
rsKey := getKey(rs, t)
rsKey := GetKey(rs, t)
err := processSync(manager, rsKey)
if err == nil || !strings.Contains(err.Error(), "Fake Error") {
t.Errorf("expected Fake Error, got %+v", err)
@@ -1174,7 +1166,7 @@ func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) {
informers.Core().V1().Pods().Informer().GetIndexer().Add(pod1)
// no patch, no create
err := manager.syncReplicaSet(getKey(rs, t))
err := manager.syncReplicaSet(GetKey(rs, t))
if err != nil {
t.Fatal(err)
}
@@ -1200,7 +1192,7 @@ func TestDoNotAdoptOrCreateIfBeingDeletedRace(t *testing.T) {
informers.Core().V1().Pods().Informer().GetIndexer().Add(pod1)
// sync should abort.
err := manager.syncReplicaSet(getKey(rs, t))
err := manager.syncReplicaSet(GetKey(rs, t))
if err == nil {
t.Error("syncReplicaSet() err = nil, expected non-nil")
}
@@ -1248,10 +1240,8 @@ func TestGetCondition(t *testing.T) {
tests := []struct {
name string
status apps.ReplicaSetStatus
condType apps.ReplicaSetConditionType
condStatus v1.ConditionStatus
condReason string
status apps.ReplicaSetStatus
condType apps.ReplicaSetConditionType
expected bool
}{


@@ -22,7 +22,7 @@ import (
"fmt"
"reflect"
"github.com/golang/glog"
"k8s.io/klog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
@@ -55,7 +55,7 @@ func updateReplicaSetStatus(c appsclient.ReplicaSetInterface, rs *apps.ReplicaSe
var getErr, updateErr error
var updatedRS *apps.ReplicaSet
for i, rs := 0, rs; ; i++ {
glog.V(4).Infof(fmt.Sprintf("Updating status for %v: %s/%s, ", rs.Kind, rs.Namespace, rs.Name) +
klog.V(4).Infof(fmt.Sprintf("Updating status for %v: %s/%s, ", rs.Kind, rs.Namespace, rs.Name) +
fmt.Sprintf("replicas %d->%d (need %d), ", rs.Status.Replicas, newStatus.Replicas, *(rs.Spec.Replicas)) +
fmt.Sprintf("fullyLabeledReplicas %d->%d, ", rs.Status.FullyLabeledReplicas, newStatus.FullyLabeledReplicas) +
fmt.Sprintf("readyReplicas %d->%d, ", rs.Status.ReadyReplicas, newStatus.ReadyReplicas) +