vendor update for CSI 0.3.0

gman
2018-07-18 16:47:22 +02:00
parent 6f484f92fc
commit 8ea659f0d5
6810 changed files with 438061 additions and 193861 deletions

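Across the file diffs below, the ReplicaSet controller moves from the extensions/v1beta1 API group to apps/v1: types, informers, listers, and client calls all switch together. A minimal sketch of the type-level change, assuming only a vendored k8s.io/api matching this commit:

package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1" // replaces: extensions "k8s.io/api/extensions/v1beta1"
)

func main() {
	// ReplicaSet is an apps/v1 type end to end after this change; the
	// GroupVersionKind the controller registers under moves with it.
	replicas := int32(2)
	rs := &apps.ReplicaSet{Spec: apps.ReplicaSetSpec{Replicas: &replicas}}
	fmt.Println(apps.SchemeGroupVersion.WithKind("ReplicaSet"), *rs.Spec.Replicas)
}
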
View File

@@ -19,22 +19,22 @@ go_library(
"//pkg/controller:go_default_library",
"//pkg/util/metrics:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/listers/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/integer:go_default_library",
@@ -50,14 +50,14 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/securitycontext:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",

View File

@@ -36,21 +36,21 @@ import (
"time"
"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
appsinformers "k8s.io/client-go/informers/apps/v1"
coreinformers "k8s.io/client-go/informers/core/v1"
extensionsinformers "k8s.io/client-go/informers/extensions/v1beta1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
appslisters "k8s.io/client-go/listers/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
extensionslisters "k8s.io/client-go/listers/extensions/v1beta1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/integer"
@@ -90,7 +90,7 @@ type ReplicaSetController struct {
expectations *controller.UIDTrackingControllerExpectations
// A store of ReplicaSets, populated by the shared informer passed to NewReplicaSetController
rsLister extensionslisters.ReplicaSetLister
rsLister appslisters.ReplicaSetLister
// rsListerSynced returns true if the pod store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
rsListerSynced cache.InformerSynced
@@ -106,12 +106,12 @@ type ReplicaSetController struct {
}
// NewReplicaSetController configures a replica set controller with the specified event recorder
func NewReplicaSetController(rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicaSetController {
func NewReplicaSetController(rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicaSetController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
return NewBaseController(rsInformer, podInformer, kubeClient, burstReplicas,
extensions.SchemeGroupVersion.WithKind("ReplicaSet"),
apps.SchemeGroupVersion.WithKind("ReplicaSet"),
"replicaset_controller",
"replicaset",
controller.RealPodControl{
@@ -123,7 +123,7 @@ func NewReplicaSetController(rsInformer extensionsinformers.ReplicaSetInformer,
// NewBaseController is the implementation of NewReplicaSetController with additional injected
// parameters so that it can also serve as the implementation of NewReplicationController.
func NewBaseController(rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int,
func NewBaseController(rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int,
gvk schema.GroupVersionKind, metricOwnerName, queueName string, podControl controller.PodControlInterface) *ReplicaSetController {
if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage(metricOwnerName, kubeClient.CoreV1().RESTClient().GetRateLimiter())
@@ -194,7 +194,7 @@ func (rsc *ReplicaSetController) Run(workers int, stopCh <-chan struct{}) {
}
// getPodReplicaSets returns a list of ReplicaSets matching the given pod.
func (rsc *ReplicaSetController) getPodReplicaSets(pod *v1.Pod) []*extensions.ReplicaSet {
func (rsc *ReplicaSetController) getPodReplicaSets(pod *v1.Pod) []*apps.ReplicaSet {
rss, err := rsc.rsLister.GetPodReplicaSets(pod)
if err != nil {
return nil
@@ -210,7 +210,7 @@ func (rsc *ReplicaSetController) getPodReplicaSets(pod *v1.Pod) []*extensions.Re
// resolveControllerRef returns the controller referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching controller
// of the correct Kind.
func (rsc *ReplicaSetController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *extensions.ReplicaSet {
func (rsc *ReplicaSetController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *apps.ReplicaSet {
// We can't look up by UID, so look up by Name and then verify UID.
// Don't even try to look up by Name if it's the wrong Kind.
if controllerRef.Kind != rsc.Kind {
@@ -230,8 +230,8 @@ func (rsc *ReplicaSetController) resolveControllerRef(namespace string, controll
// callback when RS is updated
func (rsc *ReplicaSetController) updateRS(old, cur interface{}) {
oldRS := old.(*extensions.ReplicaSet)
curRS := cur.(*extensions.ReplicaSet)
oldRS := old.(*apps.ReplicaSet)
curRS := cur.(*apps.ReplicaSet)
// You might imagine that we only really need to enqueue the
// replica set when Spec changes, but it is safer to sync any
@@ -407,7 +407,7 @@ func (rsc *ReplicaSetController) deletePod(obj interface{}) {
rsc.enqueueReplicaSet(rs)
}
// obj could be an *extensions.ReplicaSet, or a DeletionFinalStateUnknown marker item.
// obj could be an *apps.ReplicaSet, or a DeletionFinalStateUnknown marker item.
func (rsc *ReplicaSetController) enqueueReplicaSet(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
@@ -417,7 +417,7 @@ func (rsc *ReplicaSetController) enqueueReplicaSet(obj interface{}) {
rsc.queue.Add(key)
}
// obj could be an *extensions.ReplicaSet, or a DeletionFinalStateUnknown marker item.
// obj could be an *apps.ReplicaSet, or a DeletionFinalStateUnknown marker item.
func (rsc *ReplicaSetController) enqueueReplicaSetAfter(obj interface{}, after time.Duration) {
key, err := controller.KeyFunc(obj)
if err != nil {
@@ -456,7 +456,7 @@ func (rsc *ReplicaSetController) processNextWorkItem() bool {
// manageReplicas checks and updates replicas for the given ReplicaSet.
// Does NOT modify <filteredPods>.
// It will requeue the replica set in case of an error while creating/deleting pods.
func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *extensions.ReplicaSet) error {
func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps.ReplicaSet) error {
diff := len(filteredPods) - int(*(rs.Spec.Replicas))
rsKey, err := controller.KeyFunc(rs)
if err != nil {
@@ -626,7 +626,7 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
newStatus := calculateStatus(rs, filteredPods, manageReplicasErr)
// Always updates status as pods come up or die.
updatedRS, err := updateReplicaSetStatus(rsc.kubeClient.ExtensionsV1beta1().ReplicaSets(rs.Namespace), rs, newStatus)
updatedRS, err := updateReplicaSetStatus(rsc.kubeClient.AppsV1().ReplicaSets(rs.Namespace), rs, newStatus)
if err != nil {
// Multiple things could lead to this update failing. Requeuing the replica set ensures
// Returning an error causes a requeue without forcing a hotloop
@@ -641,11 +641,11 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
return manageReplicasErr
}
func (rsc *ReplicaSetController) claimPods(rs *extensions.ReplicaSet, selector labels.Selector, filteredPods []*v1.Pod) ([]*v1.Pod, error) {
func (rsc *ReplicaSetController) claimPods(rs *apps.ReplicaSet, selector labels.Selector, filteredPods []*v1.Pod) ([]*v1.Pod, error) {
// If any adoptions are attempted, we should first recheck for deletion with
// an uncached quorum read sometime after listing Pods (see #42639).
canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
fresh, err := rsc.kubeClient.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{})
fresh, err := rsc.kubeClient.AppsV1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}

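With the constructor change above, callers hand NewReplicaSetController an apps/v1 ReplicaSet informer instead of the extensions/v1beta1 one. A minimal wiring sketch: the kubeconfig path, resync period, and worker count are illustrative assumptions, and the import path for this package is assumed; the constructor and Run signatures mirror the hunks above.

package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/kubernetes/pkg/controller/replicaset"
)

func main() {
	// Hypothetical bootstrap; error handling reduced to a panic for brevity.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)

	rsc := replicaset.NewReplicaSetController(
		factory.Apps().V1().ReplicaSets(), // was: factory.Extensions().V1beta1().ReplicaSets()
		factory.Core().V1().Pods(),
		client,
		replicaset.BurstReplicas,
	)

	stopCh := make(chan struct{})
	factory.Start(stopCh)
	rsc.Run(5, stopCh) // blocks until stopCh is closed
}
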
View File

@@ -28,11 +28,12 @@ import (
"testing"
"time"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
@@ -45,7 +46,6 @@ import (
"k8s.io/client-go/tools/cache"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/securitycontext"
)
@@ -54,7 +54,7 @@ func testNewReplicaSetControllerFromClient(client clientset.Interface, stopCh ch
informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
ret := NewReplicaSetController(
informers.Extensions().V1beta1().ReplicaSets(),
informers.Apps().V1().ReplicaSets(),
informers.Core().V1().Pods(),
client,
burstReplicas,
@@ -78,7 +78,7 @@ func skipListerFunc(verb string, url url.URL) bool {
var alwaysReady = func() bool { return true }
func getKey(rs *extensions.ReplicaSet, t *testing.T) string {
func getKey(rs *apps.ReplicaSet, t *testing.T) string {
if key, err := controller.KeyFunc(rs); err != nil {
t.Errorf("Unexpected error getting key for ReplicaSet %v: %v", rs.Name, err)
return ""
@@ -87,16 +87,16 @@ func getKey(rs *extensions.ReplicaSet, t *testing.T) string {
}
}
func newReplicaSet(replicas int, selectorMap map[string]string) *extensions.ReplicaSet {
rs := &extensions.ReplicaSet{
TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
func newReplicaSet(replicas int, selectorMap map[string]string) *apps.ReplicaSet {
rs := &apps.ReplicaSet{
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "18",
},
Spec: extensions.ReplicaSetSpec{
Spec: apps.ReplicaSetSpec{
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: &metav1.LabelSelector{MatchLabels: selectorMap},
Template: v1.PodTemplateSpec{
@@ -128,7 +128,7 @@ func newReplicaSet(replicas int, selectorMap map[string]string) *extensions.Repl
}
// create a pod with the given phase for the given rs (same selectors and namespace)
func newPod(name string, rs *extensions.ReplicaSet, status v1.PodPhase, lastTransitionTime *metav1.Time, properlyOwned bool) *v1.Pod {
func newPod(name string, rs *apps.ReplicaSet, status v1.PodPhase, lastTransitionTime *metav1.Time, properlyOwned bool) *v1.Pod {
var conditions []v1.PodCondition
if status == v1.PodRunning {
condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
@@ -154,7 +154,7 @@ func newPod(name string, rs *extensions.ReplicaSet, status v1.PodPhase, lastTran
}
// create count pods with the given phase for the given ReplicaSet (same selectors and namespace), and add them to the store.
func newPodList(store cache.Store, count int, status v1.PodPhase, labelMap map[string]string, rs *extensions.ReplicaSet, name string) *v1.PodList {
func newPodList(store cache.Store, count int, status v1.PodPhase, labelMap map[string]string, rs *apps.ReplicaSet, name string) *v1.PodList {
pods := []v1.Pod{}
var trueVar = true
controllerReference := metav1.OwnerReference{UID: rs.UID, APIVersion: "v1beta1", Kind: "ReplicaSet", Name: rs.Name, Controller: &trueVar}
@@ -203,7 +203,7 @@ func validateSyncReplicaSet(t *testing.T, fakePodControl *controller.FakePodCont
}
func TestSyncReplicaSetDoesNothing(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
fakePodControl := controller.FakePodControl{}
stopCh := make(chan struct{})
defer close(stopCh)
@@ -212,7 +212,7 @@ func TestSyncReplicaSetDoesNothing(t *testing.T) {
// 2 running pods, a controller with 2 replicas, sync is a no-op
labelMap := map[string]string{"foo": "bar"}
rsSpec := newReplicaSet(2, labelMap)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 2, v1.PodRunning, labelMap, rsSpec, "pod")
manager.podControl = &fakePodControl
@@ -221,7 +221,7 @@ }
}
func TestDeleteFinalStateUnknown(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
fakePodControl := controller.FakePodControl{}
stopCh := make(chan struct{})
defer close(stopCh)
@@ -238,7 +238,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
// the controller matching the selectors of the deleted pod into the work queue.
labelMap := map[string]string{"foo": "bar"}
rsSpec := newReplicaSet(1, labelMap)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
pods := newPodList(nil, 1, v1.PodRunning, labelMap, rsSpec, "pod")
manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]})
@@ -268,7 +268,7 @@ func TestSyncReplicaSetCreateFailures(t *testing.T) {
defer close(stopCh)
manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
manager.podControl = &fakePodControl
manager.syncReplicaSet(getKey(rs, t))
@@ -292,7 +292,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
}
testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
fakePodControl := controller.FakePodControl{}
stopCh := make(chan struct{})
@@ -303,7 +303,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
labelMap := map[string]string{"foo": "bar"}
rsSpec := newReplicaSet(2, labelMap)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 1, v1.PodRunning, labelMap, rsSpec, "pod")
// Creates a replica and sets expectations
@@ -353,25 +353,25 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
func TestPodControllerLookup(t *testing.T) {
stopCh := make(chan struct{})
defer close(stopCh)
manager, informers := testNewReplicaSetControllerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}}), stopCh, BurstReplicas)
manager, informers := testNewReplicaSetControllerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}), stopCh, BurstReplicas)
testCases := []struct {
inRSs []*extensions.ReplicaSet
inRSs []*apps.ReplicaSet
pod *v1.Pod
outRSName string
}{
// pods without labels don't match any ReplicaSets
{
inRSs: []*extensions.ReplicaSet{
inRSs: []*apps.ReplicaSet{
{ObjectMeta: metav1.ObjectMeta{Name: "basic"}}},
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo1", Namespace: metav1.NamespaceAll}},
outRSName: "",
},
// Matching labels, not namespace
{
inRSs: []*extensions.ReplicaSet{
inRSs: []*apps.ReplicaSet{
{
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
Spec: extensions.ReplicaSetSpec{
Spec: apps.ReplicaSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
@@ -383,10 +383,10 @@ func TestPodControllerLookup(t *testing.T) {
},
// Matching ns and labels returns the key to the ReplicaSet, not the ReplicaSet name
{
inRSs: []*extensions.ReplicaSet{
inRSs: []*apps.ReplicaSet{
{
ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "ns"},
Spec: extensions.ReplicaSetSpec{
Spec: apps.ReplicaSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
@@ -399,7 +399,7 @@ func TestPodControllerLookup(t *testing.T) {
}
for _, c := range testCases {
for _, r := range c.inRSs {
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(r)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(r)
}
if rss := manager.getPodReplicaSets(c.pod); rss != nil {
if len(rss) != 1 {
@@ -424,25 +424,25 @@ func TestWatchControllers(t *testing.T) {
defer close(stopCh)
informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
manager := NewReplicaSetController(
informers.Extensions().V1beta1().ReplicaSets(),
informers.Apps().V1().ReplicaSets(),
informers.Core().V1().Pods(),
client,
BurstReplicas,
)
informers.Start(stopCh)
var testRSSpec extensions.ReplicaSet
var testRSSpec apps.ReplicaSet
received := make(chan string)
// The update sent through the fakeWatcher should make its way into the workqueue,
// and eventually into the syncHandler. The handler validates the received controller
// and closes the received channel to indicate that the test can finish.
manager.syncHandler = func(key string) error {
obj, exists, err := informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().GetByKey(key)
obj, exists, err := informers.Apps().V1().ReplicaSets().Informer().GetIndexer().GetByKey(key)
if !exists || err != nil {
t.Errorf("Expected to find replica set under key %v", key)
}
rsSpec := *obj.(*extensions.ReplicaSet)
rsSpec := *obj.(*apps.ReplicaSet)
if !apiequality.Semantic.DeepDerivative(rsSpec, testRSSpec) {
t.Errorf("Expected %#v, but got %#v", testRSSpec, rsSpec)
}
@@ -477,7 +477,7 @@ func TestWatchPods(t *testing.T) {
// Put one ReplicaSet into the shared informer
labelMap := map[string]string{"foo": "bar"}
testRSSpec := newReplicaSet(1, labelMap)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(testRSSpec)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(testRSSpec)
received := make(chan string)
// The pod update sent through the fakeWatcher should figure out the managing ReplicaSet and
@@ -540,12 +540,12 @@ func TestUpdatePods(t *testing.T) {
// Put 2 ReplicaSets and one pod into the informers
labelMap1 := map[string]string{"foo": "bar"}
testRSSpec1 := newReplicaSet(1, labelMap1)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(testRSSpec1)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(testRSSpec1)
testRSSpec2 := *testRSSpec1
labelMap2 := map[string]string{"bar": "foo"}
testRSSpec2.Spec.Selector = &metav1.LabelSelector{MatchLabels: labelMap2}
testRSSpec2.Name = "barfoo"
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(&testRSSpec2)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(&testRSSpec2)
isController := true
controllerRef1 := metav1.OwnerReference{UID: testRSSpec1.UID, APIVersion: "v1", Kind: "ReplicaSet", Name: testRSSpec1.Name, Controller: &isController}
@@ -656,8 +656,8 @@ func TestControllerUpdateRequeue(t *testing.T) {
defer close(stopCh)
manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
rs.Status = extensions.ReplicaSetStatus{Replicas: 2}
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
rs.Status = apps.ReplicaSetStatus{Replicas: 2}
newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 1, v1.PodRunning, labelMap, rs, "pod")
fakePodControl := controller.FakePodControl{}
@@ -678,11 +678,11 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
fakeClient := &fake.Clientset{}
fakeClient.AddReactor("get", "replicasets", func(action core.Action) (bool, runtime.Object, error) { return true, rs, nil })
fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
return true, &extensions.ReplicaSet{}, fmt.Errorf("Fake error")
return true, &apps.ReplicaSet{}, fmt.Errorf("Fake error")
})
fakeRSClient := fakeClient.Extensions().ReplicaSets("default")
fakeRSClient := fakeClient.Apps().ReplicaSets("default")
numReplicas := int32(10)
newStatus := extensions.ReplicaSetStatus{Replicas: numReplicas}
newStatus := apps.ReplicaSetStatus{Replicas: numReplicas}
updateReplicaSetStatus(fakeRSClient, rs, newStatus)
updates, gets := 0, 0
for _, a := range fakeClient.Actions() {
@@ -702,7 +702,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
updates++
// Confirm that the update has the right status.Replicas even though the Get
// returned a ReplicaSet with replicas=1.
if c, ok := action.GetObject().(*extensions.ReplicaSet); !ok {
if c, ok := action.GetObject().(*apps.ReplicaSet); !ok {
t.Errorf("Expected a ReplicaSet as the argument to update, got %T", c)
} else if c.Status.Replicas != numReplicas {
t.Errorf("Expected update for ReplicaSet to contain replicas %v, got %v instead",
@@ -729,7 +729,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, burstReplicas)
manager.podControl = &fakePodControl
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
expectedPods := int32(0)
pods := newPodList(nil, numReplicas, v1.PodPending, labelMap, rsSpec, "pod")
@@ -743,7 +743,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
for _, replicas := range []int32{int32(numReplicas), 0} {
*(rsSpec.Spec.Replicas) = replicas
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
for i := 0; i < numReplicas; i += burstReplicas {
manager.syncReplicaSet(getKey(rsSpec, t))
@@ -881,7 +881,7 @@ func (fe FakeRSExpectations) SatisfiedExpectations(controllerKey string) bool {
// TestRSSyncExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations.
func TestRSSyncExpectations(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
fakePodControl := controller.FakePodControl{}
stopCh := make(chan struct{})
defer close(stopCh)
@@ -890,7 +890,7 @@ func TestRSSyncExpectations(t *testing.T) {
labelMap := map[string]string{"foo": "bar"}
rsSpec := newReplicaSet(2, labelMap)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
pods := newPodList(nil, 2, v1.PodPending, labelMap, rsSpec, "pod")
informers.Core().V1().Pods().Informer().GetIndexer().Add(&pods.Items[0])
postExpectationsPod := pods.Items[1]
@@ -914,7 +914,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
defer close(stopCh)
manager, informers := testNewReplicaSetControllerFromClient(client, stopCh, 10)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
@@ -936,7 +936,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
if !exists || err != nil {
t.Errorf("No expectations found for ReplicaSet")
}
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Delete(rs)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Delete(rs)
manager.syncReplicaSet(getKey(rs, t))
if _, exists, err = manager.expectations.GetExpectations(rsKey); exists {
@@ -951,10 +951,10 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
}
// shuffle returns a new shuffled list of container controllers.
func shuffle(controllers []*extensions.ReplicaSet) []*extensions.ReplicaSet {
func shuffle(controllers []*apps.ReplicaSet) []*apps.ReplicaSet {
numControllers := len(controllers)
randIndexes := rand.Perm(numControllers)
shuffled := make([]*extensions.ReplicaSet, numControllers)
shuffled := make([]*apps.ReplicaSet, numControllers)
for i := 0; i < numControllers; i++ {
shuffled[i] = controllers[randIndexes[i]]
}
@@ -962,7 +962,7 @@ func shuffle(controllers []*extensions.ReplicaSet) []*extensions.ReplicaSet {
}
func TestOverlappingRSs(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
labelMap := map[string]string{"foo": "bar"}
stopCh := make(chan struct{})
@@ -974,7 +974,7 @@ func TestOverlappingRSs(t *testing.T) {
// All use the same CreationTimestamp since ControllerRef should be able
// to handle that.
timestamp := metav1.Date(2014, time.December, 0, 0, 0, 0, 0, time.Local)
var controllers []*extensions.ReplicaSet
var controllers []*apps.ReplicaSet
for j := 1; j < 10; j++ {
rsSpec := newReplicaSet(1, labelMap)
rsSpec.CreationTimestamp = timestamp
@@ -983,7 +983,7 @@ func TestOverlappingRSs(t *testing.T) {
}
shuffledControllers := shuffle(controllers)
for j := range shuffledControllers {
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(shuffledControllers[j])
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(shuffledControllers[j])
}
// Add a pod with a ControllerRef and make sure only the corresponding
// ReplicaSet is synced. Pick a RS in the middle since the old code used to
@@ -1005,14 +1005,14 @@ func TestOverlappingRSs(t *testing.T) {
}
func TestDeletionTimestamp(t *testing.T) {
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
labelMap := map[string]string{"foo": "bar"}
stopCh := make(chan struct{})
defer close(stopCh)
manager, informers := testNewReplicaSetControllerFromClient(c, stopCh, 10)
rs := newReplicaSet(1, labelMap)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
rsKey, err := controller.KeyFunc(rs)
if err != nil {
t.Errorf("Couldn't get key for object %#v: %v", rs, err)
@@ -1116,7 +1116,7 @@ func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
stopCh := make(chan struct{})
defer close(stopCh)
manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
var trueVar = true
otherControllerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "v1beta1", Kind: "ReplicaSet", Name: "AnotherRS", Controller: &trueVar}
// add to podLister a matching Pod controlled by another controller. Expect no patch.
@@ -1137,7 +1137,7 @@ func TestPatchPodFails(t *testing.T) {
stopCh := make(chan struct{})
defer close(stopCh)
manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
// add to podLister two matching pods. Expect two patches to take control
// them.
informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod1", rs, v1.PodRunning, nil, false))
@@ -1169,7 +1169,7 @@ func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) {
stopCh := make(chan struct{})
defer close(stopCh)
manager, fakePodControl, informers := setupManagerWithGCEnabled(stopCh, rs)
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(rs)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
pod1 := newPod("pod1", rs, v1.PodRunning, nil, false)
informers.Core().V1().Pods().Informer().GetIndexer().Add(pod1)
@@ -1193,7 +1193,7 @@ func TestDoNotAdoptOrCreateIfBeingDeletedRace(t *testing.T) {
// Lister (cache) says it's NOT deleted.
rs2 := *rs
rs2.DeletionTimestamp = nil
informers.Extensions().V1beta1().ReplicaSets().Informer().GetIndexer().Add(&rs2)
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(&rs2)
// Recheck occurs if a matching orphan is present.
pod1 := newPod("pod1", rs, v1.PodRunning, nil, false)
@@ -1209,35 +1209,35 @@ func TestDoNotAdoptOrCreateIfBeingDeletedRace(t *testing.T) {
}
var (
imagePullBackOff extensions.ReplicaSetConditionType = "ImagePullBackOff"
imagePullBackOff apps.ReplicaSetConditionType = "ImagePullBackOff"
condImagePullBackOff = func() extensions.ReplicaSetCondition {
return extensions.ReplicaSetCondition{
condImagePullBackOff = func() apps.ReplicaSetCondition {
return apps.ReplicaSetCondition{
Type: imagePullBackOff,
Status: v1.ConditionTrue,
Reason: "NonExistentImage",
}
}
condReplicaFailure = func() extensions.ReplicaSetCondition {
return extensions.ReplicaSetCondition{
Type: extensions.ReplicaSetReplicaFailure,
condReplicaFailure = func() apps.ReplicaSetCondition {
return apps.ReplicaSetCondition{
Type: apps.ReplicaSetReplicaFailure,
Status: v1.ConditionTrue,
Reason: "OtherFailure",
}
}
condReplicaFailure2 = func() extensions.ReplicaSetCondition {
return extensions.ReplicaSetCondition{
Type: extensions.ReplicaSetReplicaFailure,
condReplicaFailure2 = func() apps.ReplicaSetCondition {
return apps.ReplicaSetCondition{
Type: apps.ReplicaSetReplicaFailure,
Status: v1.ConditionTrue,
Reason: "AnotherFailure",
}
}
status = func() *extensions.ReplicaSetStatus {
return &extensions.ReplicaSetStatus{
Conditions: []extensions.ReplicaSetCondition{condReplicaFailure()},
status = func() *apps.ReplicaSetStatus {
return &apps.ReplicaSetStatus{
Conditions: []apps.ReplicaSetCondition{condReplicaFailure()},
}
}
)
@@ -1248,8 +1248,8 @@ func TestGetCondition(t *testing.T) {
tests := []struct {
name string
status extensions.ReplicaSetStatus
condType extensions.ReplicaSetConditionType
status apps.ReplicaSetStatus
condType apps.ReplicaSetConditionType
condStatus v1.ConditionStatus
condReason string
@@ -1259,7 +1259,7 @@ func TestGetCondition(t *testing.T) {
name: "condition exists",
status: *exampleStatus,
condType: extensions.ReplicaSetReplicaFailure,
condType: apps.ReplicaSetReplicaFailure,
expected: true,
},
@@ -1286,34 +1286,34 @@ func TestSetCondition(t *testing.T) {
tests := []struct {
name string
status *extensions.ReplicaSetStatus
cond extensions.ReplicaSetCondition
status *apps.ReplicaSetStatus
cond apps.ReplicaSetCondition
expectedStatus *extensions.ReplicaSetStatus
expectedStatus *apps.ReplicaSetStatus
}{
{
name: "set for the first time",
status: &extensions.ReplicaSetStatus{},
status: &apps.ReplicaSetStatus{},
cond: condReplicaFailure(),
expectedStatus: &extensions.ReplicaSetStatus{Conditions: []extensions.ReplicaSetCondition{condReplicaFailure()}},
expectedStatus: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condReplicaFailure()}},
},
{
name: "simple set",
status: &extensions.ReplicaSetStatus{Conditions: []extensions.ReplicaSetCondition{condImagePullBackOff()}},
status: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condImagePullBackOff()}},
cond: condReplicaFailure(),
expectedStatus: &extensions.ReplicaSetStatus{Conditions: []extensions.ReplicaSetCondition{condImagePullBackOff(), condReplicaFailure()}},
expectedStatus: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condImagePullBackOff(), condReplicaFailure()}},
},
{
name: "overwrite",
status: &extensions.ReplicaSetStatus{Conditions: []extensions.ReplicaSetCondition{condReplicaFailure()}},
status: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condReplicaFailure()}},
cond: condReplicaFailure2(),
expectedStatus: &extensions.ReplicaSetStatus{Conditions: []extensions.ReplicaSetCondition{condReplicaFailure2()}},
expectedStatus: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condReplicaFailure2()}},
},
}
@@ -1329,26 +1329,26 @@ func TestRemoveCondition(t *testing.T) {
tests := []struct {
name string
status *extensions.ReplicaSetStatus
condType extensions.ReplicaSetConditionType
status *apps.ReplicaSetStatus
condType apps.ReplicaSetConditionType
expectedStatus *extensions.ReplicaSetStatus
expectedStatus *apps.ReplicaSetStatus
}{
{
name: "remove from empty status",
status: &extensions.ReplicaSetStatus{},
condType: extensions.ReplicaSetReplicaFailure,
status: &apps.ReplicaSetStatus{},
condType: apps.ReplicaSetReplicaFailure,
expectedStatus: &extensions.ReplicaSetStatus{},
expectedStatus: &apps.ReplicaSetStatus{},
},
{
name: "simple remove",
status: &extensions.ReplicaSetStatus{Conditions: []extensions.ReplicaSetCondition{condReplicaFailure()}},
condType: extensions.ReplicaSetReplicaFailure,
status: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condReplicaFailure()}},
condType: apps.ReplicaSetReplicaFailure,
expectedStatus: &extensions.ReplicaSetStatus{},
expectedStatus: &apps.ReplicaSetStatus{},
},
{
name: "doesn't remove anything",

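The fake-clientset pattern in these tests survives the migration unchanged apart from the group switch (fakeClient.Extensions() becomes fakeClient.Apps()). A condensed, self-contained sketch of the reactor setup from TestControllerUpdateStatusWithFailure; the namespace, object name, and error text are illustrative:

package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

func main() {
	fakeClient := &fake.Clientset{}
	// Answer every request with an empty ReplicaSet and an error, so the
	// caller's failure paths can be observed.
	fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
		return true, &apps.ReplicaSet{}, fmt.Errorf("fake error")
	})
	_, err := fakeClient.Apps().ReplicaSets("default").Get("foobar", metav1.GetOptions{})
	fmt.Println(err) // fake error
}
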
View File

@@ -24,16 +24,16 @@ import (
"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
unversionedextensions "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)
// updateReplicaSetStatus attempts to update the Status.Replicas of the given ReplicaSet, with a single GET/PUT retry.
func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs *extensions.ReplicaSet, newStatus extensions.ReplicaSetStatus) (*extensions.ReplicaSet, error) {
func updateReplicaSetStatus(c appsclient.ReplicaSetInterface, rs *apps.ReplicaSet, newStatus apps.ReplicaSetStatus) (*apps.ReplicaSet, error) {
// This is the steady state. It happens when the ReplicaSet doesn't have any expectations, since
// we do a periodic relist every 30s. If the generations differ but the replicas are
// the same, a caller might've resized to the same replica count.
@@ -53,7 +53,7 @@ func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs *ext
newStatus.ObservedGeneration = rs.Generation
var getErr, updateErr error
var updatedRS *extensions.ReplicaSet
var updatedRS *apps.ReplicaSet
for i, rs := 0, rs; ; i++ {
glog.V(4).Infof(fmt.Sprintf("Updating status for %v: %s/%s, ", rs.Kind, rs.Namespace, rs.Name) +
fmt.Sprintf("replicas %d->%d (need %d), ", rs.Status.Replicas, newStatus.Replicas, *(rs.Spec.Replicas)) +
@@ -82,7 +82,7 @@ func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs *ext
return nil, updateErr
}
func calculateStatus(rs *extensions.ReplicaSet, filteredPods []*v1.Pod, manageReplicasErr error) extensions.ReplicaSetStatus {
func calculateStatus(rs *apps.ReplicaSet, filteredPods []*v1.Pod, manageReplicasErr error) apps.ReplicaSetStatus {
newStatus := rs.Status
// Count the number of pods that have labels matching the labels of the pod
// template of the replica set, the matching pods may have more
@@ -105,7 +105,7 @@ func calculateStatus(rs *extensions.ReplicaSet, filteredPods []*v1.Pod, manageRe
}
}
failureCond := GetCondition(rs.Status, extensions.ReplicaSetReplicaFailure)
failureCond := GetCondition(rs.Status, apps.ReplicaSetReplicaFailure)
if manageReplicasErr != nil && failureCond == nil {
var reason string
if diff := len(filteredPods) - int(*(rs.Spec.Replicas)); diff < 0 {
@@ -113,10 +113,10 @@ func calculateStatus(rs *extensions.ReplicaSet, filteredPods []*v1.Pod, manageRe
} else if diff > 0 {
reason = "FailedDelete"
}
cond := NewReplicaSetCondition(extensions.ReplicaSetReplicaFailure, v1.ConditionTrue, reason, manageReplicasErr.Error())
cond := NewReplicaSetCondition(apps.ReplicaSetReplicaFailure, v1.ConditionTrue, reason, manageReplicasErr.Error())
SetCondition(&newStatus, cond)
} else if manageReplicasErr == nil && failureCond != nil {
RemoveCondition(&newStatus, extensions.ReplicaSetReplicaFailure)
RemoveCondition(&newStatus, apps.ReplicaSetReplicaFailure)
}
newStatus.Replicas = int32(len(filteredPods))
@@ -127,8 +127,8 @@ func calculateStatus(rs *extensions.ReplicaSet, filteredPods []*v1.Pod, manageRe
}
// NewReplicaSetCondition creates a new replicaset condition.
func NewReplicaSetCondition(condType extensions.ReplicaSetConditionType, status v1.ConditionStatus, reason, msg string) extensions.ReplicaSetCondition {
return extensions.ReplicaSetCondition{
func NewReplicaSetCondition(condType apps.ReplicaSetConditionType, status v1.ConditionStatus, reason, msg string) apps.ReplicaSetCondition {
return apps.ReplicaSetCondition{
Type: condType,
Status: status,
LastTransitionTime: metav1.Now(),
@@ -138,7 +138,7 @@ func NewReplicaSetCondition(condType extensions.ReplicaSetConditionType, status
}
// GetCondition returns a replicaset condition with the provided type if it exists.
func GetCondition(status extensions.ReplicaSetStatus, condType extensions.ReplicaSetConditionType) *extensions.ReplicaSetCondition {
func GetCondition(status apps.ReplicaSetStatus, condType apps.ReplicaSetConditionType) *apps.ReplicaSetCondition {
for _, c := range status.Conditions {
if c.Type == condType {
return &c
@@ -149,7 +149,7 @@ func GetCondition(status extensions.ReplicaSetStatus, condType extensions.Replic
// SetCondition adds/replaces the given condition in the replicaset status. If the condition that we
// are about to add already exists and has the same status and reason then we are not going to update.
func SetCondition(status *extensions.ReplicaSetStatus, condition extensions.ReplicaSetCondition) {
func SetCondition(status *apps.ReplicaSetStatus, condition apps.ReplicaSetCondition) {
currentCond := GetCondition(*status, condition.Type)
if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason {
return
@@ -159,13 +159,13 @@ func SetCondition(status *extensions.ReplicaSetStatus, condition extensions.Repl
}
// RemoveCondition removes the condition with the provided type from the replicaset status.
func RemoveCondition(status *extensions.ReplicaSetStatus, condType extensions.ReplicaSetConditionType) {
func RemoveCondition(status *apps.ReplicaSetStatus, condType apps.ReplicaSetConditionType) {
status.Conditions = filterOutCondition(status.Conditions, condType)
}
// filterOutCondition returns a new slice of replicaset conditions without conditions with the provided type.
func filterOutCondition(conditions []extensions.ReplicaSetCondition, condType extensions.ReplicaSetConditionType) []extensions.ReplicaSetCondition {
var newConditions []extensions.ReplicaSetCondition
func filterOutCondition(conditions []apps.ReplicaSetCondition, condType apps.ReplicaSetConditionType) []apps.ReplicaSetCondition {
var newConditions []apps.ReplicaSetCondition
for _, c := range conditions {
if c.Type == condType {
continue

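The condition helpers above keep their shapes, trading extensions types for apps types throughout. A short usage sketch assuming only the exported signatures visible in this file and this package's import path; the reason and message strings are illustrative:

package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/controller/replicaset"
)

func main() {
	status := &apps.ReplicaSetStatus{}

	// Record a replica failure, read it back, then clear it again.
	cond := replicaset.NewReplicaSetCondition(
		apps.ReplicaSetReplicaFailure, v1.ConditionTrue, "FailedCreate", "quota exceeded")
	replicaset.SetCondition(status, cond)

	if got := replicaset.GetCondition(*status, apps.ReplicaSetReplicaFailure); got != nil {
		fmt.Println(got.Reason) // FailedCreate
	}

	replicaset.RemoveCondition(status, apps.ReplicaSetReplicaFailure)
	fmt.Println(len(status.Conditions)) // 0
}
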
View File

@@ -23,8 +23,8 @@ import (
"reflect"
"testing"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
)
func TestCalculateStatus(t *testing.T) {
@@ -38,9 +38,9 @@ func TestCalculateStatus(t *testing.T) {
rsStatusTests := []struct {
name string
replicaset *extensions.ReplicaSet
replicaset *apps.ReplicaSet
filteredPods []*v1.Pod
expectedReplicaSetStatus extensions.ReplicaSetStatus
expectedReplicaSetStatus apps.ReplicaSetStatus
}{
{
"1 fully labelled pod",
@@ -48,7 +48,7 @@ func TestCalculateStatus(t *testing.T) {
[]*v1.Pod{
newPod("pod1", fullyLabelledRS, v1.PodRunning, nil, true),
},
extensions.ReplicaSetStatus{
apps.ReplicaSetStatus{
Replicas: 1,
FullyLabeledReplicas: 1,
ReadyReplicas: 1,
@@ -61,7 +61,7 @@ func TestCalculateStatus(t *testing.T) {
[]*v1.Pod{
newPod("pod1", notFullyLabelledRS, v1.PodRunning, nil, true),
},
extensions.ReplicaSetStatus{
apps.ReplicaSetStatus{
Replicas: 1,
FullyLabeledReplicas: 0,
ReadyReplicas: 1,
@@ -75,7 +75,7 @@ func TestCalculateStatus(t *testing.T) {
newPod("pod1", fullyLabelledRS, v1.PodRunning, nil, true),
newPod("pod2", fullyLabelledRS, v1.PodRunning, nil, true),
},
extensions.ReplicaSetStatus{
apps.ReplicaSetStatus{
Replicas: 2,
FullyLabeledReplicas: 2,
ReadyReplicas: 2,
@@ -89,7 +89,7 @@ func TestCalculateStatus(t *testing.T) {
newPod("pod1", notFullyLabelledRS, v1.PodRunning, nil, true),
newPod("pod2", notFullyLabelledRS, v1.PodRunning, nil, true),
},
extensions.ReplicaSetStatus{
apps.ReplicaSetStatus{
Replicas: 2,
FullyLabeledReplicas: 0,
ReadyReplicas: 2,
@@ -103,7 +103,7 @@ func TestCalculateStatus(t *testing.T) {
newPod("pod1", notFullyLabelledRS, v1.PodRunning, nil, true),
newPod("pod2", fullyLabelledRS, v1.PodRunning, nil, true),
},
extensions.ReplicaSetStatus{
apps.ReplicaSetStatus{
Replicas: 2,
FullyLabeledReplicas: 1,
ReadyReplicas: 2,
@@ -116,7 +116,7 @@ func TestCalculateStatus(t *testing.T) {
[]*v1.Pod{
newPod("pod1", fullyLabelledRS, v1.PodPending, nil, true),
},
extensions.ReplicaSetStatus{
apps.ReplicaSetStatus{
Replicas: 1,
FullyLabeledReplicas: 1,
ReadyReplicas: 0,
@@ -129,7 +129,7 @@ func TestCalculateStatus(t *testing.T) {
[]*v1.Pod{
newPod("pod1", longMinReadySecondsRS, v1.PodRunning, nil, true),
},
extensions.ReplicaSetStatus{
apps.ReplicaSetStatus{
Replicas: 1,
FullyLabeledReplicas: 1,
ReadyReplicas: 1,
@@ -150,19 +150,19 @@ func TestCalculateStatusConditions(t *testing.T) {
labelMap := map[string]string{"name": "foo"}
rs := newReplicaSet(2, labelMap)
replicaFailureRS := newReplicaSet(10, labelMap)
replicaFailureRS.Status.Conditions = []extensions.ReplicaSetCondition{
replicaFailureRS.Status.Conditions = []apps.ReplicaSetCondition{
{
Type: extensions.ReplicaSetReplicaFailure,
Type: apps.ReplicaSetReplicaFailure,
Status: v1.ConditionTrue,
},
}
rsStatusConditionTests := []struct {
name string
replicaset *extensions.ReplicaSet
replicaset *apps.ReplicaSet
filteredPods []*v1.Pod
manageReplicasErr error
expectedReplicaSetConditions []extensions.ReplicaSetCondition
expectedReplicaSetConditions []apps.ReplicaSetCondition
}{
{
@@ -172,9 +172,9 @@ func TestCalculateStatusConditions(t *testing.T) {
newPod("pod1", rs, v1.PodRunning, nil, true),
},
fmt.Errorf("fake manageReplicasErr"),
[]extensions.ReplicaSetCondition{
[]apps.ReplicaSetCondition{
{
Type: extensions.ReplicaSetReplicaFailure,
Type: apps.ReplicaSetReplicaFailure,
Status: v1.ConditionTrue,
Reason: "FailedCreate",
Message: "fake manageReplicasErr",
@@ -190,9 +190,9 @@ func TestCalculateStatusConditions(t *testing.T) {
newPod("pod3", rs, v1.PodRunning, nil, true),
},
fmt.Errorf("fake manageReplicasErr"),
[]extensions.ReplicaSetCondition{
[]apps.ReplicaSetCondition{
{
Type: extensions.ReplicaSetReplicaFailure,
Type: apps.ReplicaSetReplicaFailure,
Status: v1.ConditionTrue,
Reason: "FailedDelete",
Message: "fake manageReplicasErr",
@@ -215,9 +215,9 @@ func TestCalculateStatusConditions(t *testing.T) {
newPod("pod1", replicaFailureRS, v1.PodRunning, nil, true),
},
fmt.Errorf("fake manageReplicasErr"),
[]extensions.ReplicaSetCondition{
[]apps.ReplicaSetCondition{
{
Type: extensions.ReplicaSetReplicaFailure,
Type: apps.ReplicaSetReplicaFailure,
Status: v1.ConditionTrue,
},
},