Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 10:33:35 +00:00)

Commit: vendor updates
vendor/k8s.io/kubernetes/pkg/controller/BUILD (generated, vendored; 10 changes)
@@ -12,8 +12,7 @@ go_test(
"controller_ref_manager_test.go",
"controller_utils_test.go",
],
importpath = "k8s.io/kubernetes/pkg/controller",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/testapi:go_default_library",
@@ -62,7 +61,7 @@ go_library(
"//pkg/util/taints:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/golang/groupcache/lru:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
@@ -117,7 +116,8 @@ filegroup(
"//pkg/controller/history:all-srcs",
"//pkg/controller/job:all-srcs",
"//pkg/controller/namespace:all-srcs",
"//pkg/controller/node:all-srcs",
"//pkg/controller/nodeipam:all-srcs",
"//pkg/controller/nodelifecycle:all-srcs",
"//pkg/controller/podautoscaler:all-srcs",
"//pkg/controller/podgc:all-srcs",
"//pkg/controller/replicaset:all-srcs",
@@ -129,11 +129,13 @@ filegroup(
"//pkg/controller/statefulset:all-srcs",
"//pkg/controller/testutil:all-srcs",
"//pkg/controller/ttl:all-srcs",
"//pkg/controller/util/node:all-srcs",
"//pkg/controller/volume/attachdetach:all-srcs",
"//pkg/controller/volume/events:all-srcs",
"//pkg/controller/volume/expand:all-srcs",
"//pkg/controller/volume/persistentvolume:all-srcs",
"//pkg/controller/volume/pvcprotection:all-srcs",
"//pkg/controller/volume/pvprotection:all-srcs",
],
tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/pkg/controller/bootstrap/BUILD (generated, vendored; 19 changes)
@ -15,19 +15,21 @@ go_test(
|
||||
"tokencleaner_test.go",
|
||||
"util_test.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/bootstrap",
|
||||
library = ":go_default_library",
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/apis/core/helper:go_default_library",
|
||||
"//pkg/bootstrap/api:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
|
||||
"//vendor/k8s.io/client-go/testing:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/bootstrap/token/api:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@ -43,19 +45,20 @@ go_library(
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/bootstrap",
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/bootstrap/api:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/util/metrics:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/square/go-jose:go_default_library",
|
||||
"//vendor/gopkg.in/square/go-jose.v2:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/bootstrap/token/api:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
|
||||
],
|
||||
|
vendor/k8s.io/kubernetes/pkg/controller/bootstrap/bootstrapsigner.go (generated, vendored; 150 changes)
@ -22,25 +22,25 @@ import (
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"fmt"
|
||||
"k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
informers "k8s.io/client-go/informers/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/util/metrics"
|
||||
)
|
||||
|
||||
// BootstrapSignerOptions contains options for the BootstrapSigner
|
||||
type BootstrapSignerOptions struct {
|
||||
|
||||
// ConfigMapNamespace is the namespace of the ConfigMap
|
||||
ConfigMapNamespace string
|
||||
|
||||
@ -71,88 +71,101 @@ func DefaultBootstrapSignerOptions() BootstrapSignerOptions {
|
||||
|
||||
// BootstrapSigner is a controller that signs a ConfigMap with a set of tokens.
|
||||
type BootstrapSigner struct {
|
||||
client clientset.Interface
|
||||
configMapKey string
|
||||
secretNamespace string
|
||||
|
||||
configMaps cache.Store
|
||||
secrets cache.Store
|
||||
client clientset.Interface
|
||||
configMapKey string
|
||||
configMapName string
|
||||
configMapNamespace string
|
||||
secretNamespace string
|
||||
|
||||
// syncQueue handles synchronizing updates to the ConfigMap. We'll only ever
|
||||
// have one item (Named <ConfigMapName>) in this queue. We are using it
|
||||
// serializes and collapses updates as they can come from both the ConfigMap
|
||||
// and Secrets controllers.
|
||||
syncQueue workqueue.Interface
|
||||
syncQueue workqueue.RateLimitingInterface
|
||||
|
||||
// Since we join two objects, we'll watch both of them with controllers.
|
||||
configMapsController cache.Controller
|
||||
secretsController cache.Controller
|
||||
secretLister corelisters.SecretLister
|
||||
secretSynced cache.InformerSynced
|
||||
|
||||
configMapLister corelisters.ConfigMapLister
|
||||
configMapSynced cache.InformerSynced
|
||||
}
|
||||
|
||||
// NewBootstrapSigner returns a new *BootstrapSigner.
|
||||
//
|
||||
// TODO: Switch to shared informers
|
||||
func NewBootstrapSigner(cl clientset.Interface, options BootstrapSignerOptions) (*BootstrapSigner, error) {
|
||||
func NewBootstrapSigner(cl clientset.Interface, secrets informers.SecretInformer, configMaps informers.ConfigMapInformer, options BootstrapSignerOptions) (*BootstrapSigner, error) {
|
||||
e := &BootstrapSigner{
|
||||
client: cl,
|
||||
configMapKey: options.ConfigMapNamespace + "/" + options.ConfigMapName,
|
||||
secretNamespace: options.TokenSecretNamespace,
|
||||
syncQueue: workqueue.NewNamed("bootstrap_signer_queue"),
|
||||
client: cl,
|
||||
configMapKey: options.ConfigMapNamespace + "/" + options.ConfigMapName,
|
||||
configMapName: options.ConfigMapName,
|
||||
configMapNamespace: options.ConfigMapNamespace,
|
||||
secretNamespace: options.TokenSecretNamespace,
|
||||
secretLister: secrets.Lister(),
|
||||
secretSynced: secrets.Informer().HasSynced,
|
||||
configMapLister: configMaps.Lister(),
|
||||
configMapSynced: configMaps.Informer().HasSynced,
|
||||
syncQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "bootstrap_signer_queue"),
|
||||
}
|
||||
if cl.CoreV1().RESTClient().GetRateLimiter() != nil {
|
||||
if err := metrics.RegisterMetricAndTrackRateLimiterUsage("bootstrap_signer", cl.CoreV1().RESTClient().GetRateLimiter()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
configMapSelector := fields.SelectorFromSet(map[string]string{api.ObjectNameField: options.ConfigMapName})
|
||||
e.configMaps, e.configMapsController = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) {
|
||||
lo.FieldSelector = configMapSelector.String()
|
||||
return e.client.CoreV1().ConfigMaps(options.ConfigMapNamespace).List(lo)
|
||||
|
||||
configMaps.Informer().AddEventHandlerWithResyncPeriod(
|
||||
cache.FilteringResourceEventHandler{
|
||||
FilterFunc: func(obj interface{}) bool {
|
||||
switch t := obj.(type) {
|
||||
case *v1.ConfigMap:
|
||||
return t.Name == options.ConfigMapName && t.Namespace == options.ConfigMapNamespace
|
||||
default:
|
||||
utilruntime.HandleError(fmt.Errorf("object passed to %T that is not expected: %T", e, obj))
|
||||
return false
|
||||
}
|
||||
},
|
||||
WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) {
|
||||
lo.FieldSelector = configMapSelector.String()
|
||||
return e.client.CoreV1().ConfigMaps(options.ConfigMapNamespace).Watch(lo)
|
||||
Handler: cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(_ interface{}) { e.pokeConfigMapSync() },
|
||||
UpdateFunc: func(_, _ interface{}) { e.pokeConfigMapSync() },
|
||||
},
|
||||
},
|
||||
&v1.ConfigMap{},
|
||||
options.ConfigMapResync,
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(_ interface{}) { e.pokeConfigMapSync() },
|
||||
UpdateFunc: func(_, _ interface{}) { e.pokeConfigMapSync() },
|
||||
},
|
||||
)
|
||||
|
||||
secretSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(bootstrapapi.SecretTypeBootstrapToken)})
|
||||
e.secrets, e.secretsController = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) {
|
||||
lo.FieldSelector = secretSelector.String()
|
||||
return e.client.CoreV1().Secrets(e.secretNamespace).List(lo)
|
||||
secrets.Informer().AddEventHandlerWithResyncPeriod(
|
||||
cache.FilteringResourceEventHandler{
|
||||
FilterFunc: func(obj interface{}) bool {
|
||||
switch t := obj.(type) {
|
||||
case *v1.Secret:
|
||||
return t.Type == bootstrapapi.SecretTypeBootstrapToken && t.Namespace == e.secretNamespace
|
||||
default:
|
||||
utilruntime.HandleError(fmt.Errorf("object passed to %T that is not expected: %T", e, obj))
|
||||
return false
|
||||
}
|
||||
},
|
||||
WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) {
|
||||
lo.FieldSelector = secretSelector.String()
|
||||
return e.client.CoreV1().Secrets(e.secretNamespace).Watch(lo)
|
||||
Handler: cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(_ interface{}) { e.pokeConfigMapSync() },
|
||||
UpdateFunc: func(_, _ interface{}) { e.pokeConfigMapSync() },
|
||||
DeleteFunc: func(_ interface{}) { e.pokeConfigMapSync() },
|
||||
},
|
||||
},
|
||||
&v1.Secret{},
|
||||
options.SecretResync,
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(_ interface{}) { e.pokeConfigMapSync() },
|
||||
UpdateFunc: func(_, _ interface{}) { e.pokeConfigMapSync() },
|
||||
DeleteFunc: func(_ interface{}) { e.pokeConfigMapSync() },
|
||||
},
|
||||
)
|
||||
|
||||
return e, nil
|
||||
}
|
||||
|
||||
// Run runs controller loops and returns when they are done
|
||||
func (e *BootstrapSigner) Run(stopCh <-chan struct{}) {
|
||||
go e.configMapsController.Run(stopCh)
|
||||
go e.secretsController.Run(stopCh)
|
||||
// Shut down queues
|
||||
defer utilruntime.HandleCrash()
|
||||
defer e.syncQueue.ShutDown()
|
||||
|
||||
if !controller.WaitForCacheSync("bootstrap_signer", stopCh, e.configMapSynced, e.secretSynced) {
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(5).Infof("Starting workers")
|
||||
go wait.Until(e.serviceConfigMapQueue, 0, stopCh)
|
||||
<-stopCh
|
||||
glog.V(1).Infof("Shutting down")
|
||||
}
|
||||
|
||||
func (e *BootstrapSigner) pokeConfigMapSync() {
|
||||
@ -237,27 +250,32 @@ func (e *BootstrapSigner) updateConfigMap(cm *v1.ConfigMap) {
|
||||
|
||||
// getConfigMap gets the ConfigMap we are interested in
|
||||
func (e *BootstrapSigner) getConfigMap() *v1.ConfigMap {
|
||||
configMap, exists, err := e.configMaps.GetByKey(e.configMapKey)
|
||||
configMap, err := e.configMapLister.ConfigMaps(e.configMapNamespace).Get(e.configMapName)
|
||||
|
||||
// If we can't get the configmap just return nil. The resync will eventually
|
||||
// sync things up.
|
||||
if err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
utilruntime.HandleError(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
return configMap
|
||||
}
|
||||
|
||||
func (e *BootstrapSigner) listSecrets() []*v1.Secret {
|
||||
secrets, err := e.secretLister.Secrets(e.secretNamespace).List(labels.Everything())
|
||||
if err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
return nil
|
||||
}
|
||||
|
||||
if exists {
|
||||
return configMap.(*v1.ConfigMap)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *BootstrapSigner) listSecrets() []*v1.Secret {
|
||||
secrets := e.secrets.List()
|
||||
|
||||
items := []*v1.Secret{}
|
||||
for _, obj := range secrets {
|
||||
items = append(items, obj.(*v1.Secret))
|
||||
for _, secret := range secrets {
|
||||
if secret.Type == bootstrapapi.SecretTypeBootstrapToken {
|
||||
items = append(items, secret)
|
||||
}
|
||||
}
|
||||
return items
|
||||
}
|
||||
|
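The bootstrapsigner.go changes above replace the controller's private cache.NewInformer list/watch (driven by field selectors) with shared informers, a FilteringResourceEventHandler, and a named rate-limiting work queue. Below is a minimal sketch of that wiring, assuming client-go's informer and workqueue packages; the package name, watchBootstrapSecrets, poke, and the single "configmap" queue key are illustrative, not upstream identifiers.

package example

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
)

// watchBootstrapSecrets shows the pattern the diff moves to: one shared Secret
// informer, filtered down to bootstrap-token Secrets in a namespace, feeding a
// rate-limited queue instead of a private list/watch.
func watchBootstrapSecrets(client kubernetes.Interface, namespace string) (informers.SharedInformerFactory, workqueue.RateLimitingInterface) {
	factory := informers.NewSharedInformerFactory(client, 0)
	queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "bootstrap_signer_queue")

	// Collapse every trigger onto a single key, mirroring the one-item sync queue.
	poke := func(_ interface{}) { queue.Add("configmap") }

	factory.Core().V1().Secrets().Informer().AddEventHandler(cache.FilteringResourceEventHandler{
		FilterFunc: func(obj interface{}) bool {
			secret, ok := obj.(*v1.Secret)
			if !ok {
				utilruntime.HandleError(fmt.Errorf("unexpected object type: %T", obj))
				return false
			}
			// Equivalent of the old field selector: only bootstrap-token
			// Secrets in the configured namespace reach the handler.
			return secret.Type == "bootstrap.kubernetes.io/token" && secret.Namespace == namespace
		},
		Handler: cache.ResourceEventHandlerFuncs{
			AddFunc:    poke,
			UpdateFunc: func(_, newObj interface{}) { poke(newObj) },
			DeleteFunc: poke,
		},
	})
	return factory, queue
}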
vendor/k8s.io/kubernetes/pkg/controller/bootstrap/bootstrapsigner_test.go (generated, vendored; 40 changes)
@ -24,10 +24,13 @@ import (
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/informers"
|
||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -36,14 +39,17 @@ func init() {
|
||||
|
||||
const testTokenID = "abc123"
|
||||
|
||||
func newBootstrapSigner() (*BootstrapSigner, *fake.Clientset, error) {
|
||||
func newBootstrapSigner() (*BootstrapSigner, *fake.Clientset, coreinformers.SecretInformer, coreinformers.ConfigMapInformer, error) {
|
||||
options := DefaultBootstrapSignerOptions()
|
||||
cl := fake.NewSimpleClientset()
|
||||
bsc, err := NewBootstrapSigner(cl, options)
|
||||
informers := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), controller.NoResyncPeriodFunc())
|
||||
secrets := informers.Core().V1().Secrets()
|
||||
configMaps := informers.Core().V1().ConfigMaps()
|
||||
bsc, err := NewBootstrapSigner(cl, secrets, configMaps, options)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, nil, nil, err
|
||||
}
|
||||
return bsc, cl, nil
|
||||
return bsc, cl, secrets, configMaps, nil
|
||||
}
|
||||
|
||||
func newConfigMap(tokenID, signature string) *v1.ConfigMap {
|
||||
@ -64,7 +70,7 @@ func newConfigMap(tokenID, signature string) *v1.ConfigMap {
|
||||
}
|
||||
|
||||
func TestNoConfigMap(t *testing.T) {
|
||||
signer, cl, err := newBootstrapSigner()
|
||||
signer, cl, _, _, err := newBootstrapSigner()
|
||||
if err != nil {
|
||||
t.Fatalf("error creating BootstrapSigner: %v", err)
|
||||
}
|
||||
@ -73,17 +79,17 @@ func TestNoConfigMap(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSimpleSign(t *testing.T) {
|
||||
signer, cl, err := newBootstrapSigner()
|
||||
signer, cl, secrets, configMaps, err := newBootstrapSigner()
|
||||
if err != nil {
|
||||
t.Fatalf("error creating BootstrapSigner: %v", err)
|
||||
}
|
||||
|
||||
cm := newConfigMap("", "")
|
||||
signer.configMaps.Add(cm)
|
||||
configMaps.Informer().GetIndexer().Add(cm)
|
||||
|
||||
secret := newTokenSecret(testTokenID, "tokenSecret")
|
||||
addSecretSigningUsage(secret, "true")
|
||||
signer.secrets.Add(secret)
|
||||
secrets.Informer().GetIndexer().Add(secret)
|
||||
|
||||
signer.signConfigMap()
|
||||
|
||||
@ -97,17 +103,17 @@ func TestSimpleSign(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNoSignNeeded(t *testing.T) {
|
||||
signer, cl, err := newBootstrapSigner()
|
||||
signer, cl, secrets, configMaps, err := newBootstrapSigner()
|
||||
if err != nil {
|
||||
t.Fatalf("error creating BootstrapSigner: %v", err)
|
||||
}
|
||||
|
||||
cm := newConfigMap(testTokenID, "eyJhbGciOiJIUzI1NiIsImtpZCI6ImFiYzEyMyJ9..QSxpUG7Q542CirTI2ECPSZjvBOJURUW5a7XqFpNI958")
|
||||
signer.configMaps.Add(cm)
|
||||
configMaps.Informer().GetIndexer().Add(cm)
|
||||
|
||||
secret := newTokenSecret(testTokenID, "tokenSecret")
|
||||
addSecretSigningUsage(secret, "true")
|
||||
signer.secrets.Add(secret)
|
||||
secrets.Informer().GetIndexer().Add(secret)
|
||||
|
||||
signer.signConfigMap()
|
||||
|
||||
@ -115,17 +121,17 @@ func TestNoSignNeeded(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestUpdateSignature(t *testing.T) {
|
||||
signer, cl, err := newBootstrapSigner()
|
||||
signer, cl, secrets, configMaps, err := newBootstrapSigner()
|
||||
if err != nil {
|
||||
t.Fatalf("error creating BootstrapSigner: %v", err)
|
||||
}
|
||||
|
||||
cm := newConfigMap(testTokenID, "old signature")
|
||||
signer.configMaps.Add(cm)
|
||||
configMaps.Informer().GetIndexer().Add(cm)
|
||||
|
||||
secret := newTokenSecret(testTokenID, "tokenSecret")
|
||||
addSecretSigningUsage(secret, "true")
|
||||
signer.secrets.Add(secret)
|
||||
secrets.Informer().GetIndexer().Add(secret)
|
||||
|
||||
signer.signConfigMap()
|
||||
|
||||
@ -139,13 +145,13 @@ func TestUpdateSignature(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRemoveSignature(t *testing.T) {
|
||||
signer, cl, err := newBootstrapSigner()
|
||||
signer, cl, _, configMaps, err := newBootstrapSigner()
|
||||
if err != nil {
|
||||
t.Fatalf("error creating BootstrapSigner: %v", err)
|
||||
}
|
||||
|
||||
cm := newConfigMap(testTokenID, "old signature")
|
||||
signer.configMaps.Add(cm)
|
||||
configMaps.Informer().GetIndexer().Add(cm)
|
||||
|
||||
signer.signConfigMap()
|
||||
|
||||
|
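The test changes above stop reaching into the controller's private cache.Store fields and instead seed the shared informer's indexer directly. The following is a hedged, self-contained illustration of that pattern with a fake clientset; the test name and object values are invented for the example.

package example

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

// TestSeedInformerCache builds a fake clientset plus a shared informer factory
// and seeds the informer's local store, so the code under test can be
// exercised without starting the informer's watch loop.
func TestSeedInformerCache(t *testing.T) {
	client := fake.NewSimpleClientset()
	factory := informers.NewSharedInformerFactory(client, 0)

	secrets := factory.Core().V1().Secrets()
	secret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{Namespace: "kube-system", Name: "bootstrap-token-abc123"},
		Type:       "bootstrap.kubernetes.io/token",
	}

	// Objects added to the indexer are immediately visible to listers.
	if err := secrets.Informer().GetIndexer().Add(secret); err != nil {
		t.Fatalf("failed to seed informer cache: %v", err)
	}

	got, err := secrets.Lister().Secrets("kube-system").Get("bootstrap-token-abc123")
	if err != nil || got == nil {
		t.Fatalf("expected seeded secret to be listable, got err=%v", err)
	}
}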
vendor/k8s.io/kubernetes/pkg/controller/bootstrap/common_test.go (generated, vendored; 2 changes)
@@ -24,8 +24,8 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
core "k8s.io/client-go/testing"
bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
"k8s.io/kubernetes/pkg/apis/core/helper"
bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api"
)

func newTokenSecret(tokenID, tokenSecret string) *v1.Secret {
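common_test.go only swaps the bootstrapapi import from k8s.io/kubernetes/pkg/bootstrap/api to k8s.io/client-go/tools/bootstrap/token/api. As a sketch of what that relocated package provides, the helper below builds a bootstrap-token Secret from its well-known constants; newBootstrapTokenSecret itself is illustrative, and the constant names are assumed to carry their documented values.

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
)

// newBootstrapTokenSecret mirrors what the test helper constructs: a Secret of
// the bootstrap-token type whose name is the well-known prefix plus the token
// ID, with the token parts stored under their standard data keys.
func newBootstrapTokenSecret(tokenID, tokenSecret string) *v1.Secret {
	return &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: metav1.NamespaceSystem,
			Name:      bootstrapapi.BootstrapTokenSecretPrefix + tokenID,
		},
		Type: bootstrapapi.SecretTypeBootstrapToken,
		Data: map[string][]byte{
			bootstrapapi.BootstrapTokenIDKey:     []byte(tokenID),
			bootstrapapi.BootstrapTokenSecretKey: []byte(tokenSecret),
		},
	}
}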
vendor/k8s.io/kubernetes/pkg/controller/bootstrap/jws.go (generated, vendored; 17 changes)
@@ -20,19 +20,28 @@ import (
"fmt"
"strings"

jose "github.com/square/go-jose"
jose "gopkg.in/square/go-jose.v2"
)

// computeDetachedSig takes content and token details and computes a detached
// JWS signature. This is described in Appendix F of RFC 7515. Basically, this
// is a regular JWS with the content part of the signature elided.
func computeDetachedSig(content, tokenID, tokenSecret string) (string, error) {
jwk := &jose.JsonWebKey{
jwk := &jose.JSONWebKey{
Key: []byte(tokenSecret),
KeyID: tokenID,
}

signer, err := jose.NewSigner(jose.HS256, jwk)
opts := &jose.SignerOptions{
// Since this is a symmetric key, go-jose doesn't automatically include
// the KeyID as part of the protected header. We have to pass it here
// explicitly.
ExtraHeaders: map[jose.HeaderKey]interface{}{
"kid": tokenID,
},
}

signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.HS256, Key: jwk}, opts)
if err != nil {
return "", fmt.Errorf("can't make a HS256 signer from the given token: %v", err)
}
@@ -51,7 +60,7 @@ func computeDetachedSig(content, tokenID, tokenSecret string) (string, error) {

// stripContent will remove the content part of a compact JWS
//
// The `go-jose` library doesn't support generating signatures with "detatched"
// The `go-jose` library doesn't support generating signatures with "detached"
// content. To make up for this we take the full compact signature, break it
// apart and put it back together without the content section.
func stripContent(fullSig string) (string, error) {
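The jws.go hunk migrates from go-jose v1 to gopkg.in/square/go-jose.v2: the key type becomes JSONWebKey, the signer takes a SigningKey plus SignerOptions, and the kid header has to be supplied through ExtraHeaders. A compact sketch of the whole detached-signature flow under those assumptions follows; detachedJWS is an illustrative name, not the vendored function.

package example

import (
	"fmt"
	"strings"

	jose "gopkg.in/square/go-jose.v2"
)

// detachedJWS signs content with an HMAC key and then blanks the payload
// segment of the compact serialization, yielding a detached signature in the
// sense of RFC 7515, Appendix F.
func detachedJWS(content, tokenID, tokenSecret string) (string, error) {
	jwk := &jose.JSONWebKey{Key: []byte(tokenSecret), KeyID: tokenID}

	opts := &jose.SignerOptions{
		// With a symmetric key go-jose will not emit the kid header on its
		// own, so it is passed explicitly.
		ExtraHeaders: map[jose.HeaderKey]interface{}{"kid": tokenID},
	}
	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.HS256, Key: jwk}, opts)
	if err != nil {
		return "", fmt.Errorf("can't make a HS256 signer: %v", err)
	}

	sig, err := signer.Sign([]byte(content))
	if err != nil {
		return "", err
	}
	compact, err := sig.CompactSerialize()
	if err != nil {
		return "", err
	}

	// header.payload.signature -> header..signature
	parts := strings.Split(compact, ".")
	if len(parts) != 3 {
		return "", fmt.Errorf("unexpected compact JWS format")
	}
	return parts[0] + ".." + parts[2], nil
}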
vendor/k8s.io/kubernetes/pkg/controller/bootstrap/tokencleaner.go (generated, vendored; 131 changes)
@ -17,21 +17,23 @@ limitations under the License.
|
||||
package bootstrap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/util/metrics"
|
||||
)
|
||||
|
||||
@ -59,57 +61,128 @@ type TokenCleaner struct {
|
||||
|
||||
client clientset.Interface
|
||||
|
||||
secrets cache.Store
|
||||
secretsController cache.Controller
|
||||
// secretLister is able to list/get secrets and is populated by the shared informer passed to NewTokenCleaner.
|
||||
secretLister corelisters.SecretLister
|
||||
|
||||
// secretSynced returns true if the secret shared informer has been synced at least once.
|
||||
secretSynced cache.InformerSynced
|
||||
|
||||
queue workqueue.RateLimitingInterface
|
||||
}
|
||||
|
||||
// NewTokenCleaner returns a new *NewTokenCleaner.
|
||||
//
|
||||
// TODO: Switch to shared informers
|
||||
func NewTokenCleaner(cl clientset.Interface, options TokenCleanerOptions) (*TokenCleaner, error) {
|
||||
func NewTokenCleaner(cl clientset.Interface, secrets coreinformers.SecretInformer, options TokenCleanerOptions) (*TokenCleaner, error) {
|
||||
e := &TokenCleaner{
|
||||
client: cl,
|
||||
secretLister: secrets.Lister(),
|
||||
secretSynced: secrets.Informer().HasSynced,
|
||||
tokenSecretNamespace: options.TokenSecretNamespace,
|
||||
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "token_cleaner"),
|
||||
}
|
||||
|
||||
if cl.CoreV1().RESTClient().GetRateLimiter() != nil {
|
||||
if err := metrics.RegisterMetricAndTrackRateLimiterUsage("token_cleaner", cl.CoreV1().RESTClient().GetRateLimiter()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
secretSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(bootstrapapi.SecretTypeBootstrapToken)})
|
||||
e.secrets, e.secretsController = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) {
|
||||
lo.FieldSelector = secretSelector.String()
|
||||
return e.client.CoreV1().Secrets(e.tokenSecretNamespace).List(lo)
|
||||
secrets.Informer().AddEventHandlerWithResyncPeriod(
|
||||
cache.FilteringResourceEventHandler{
|
||||
FilterFunc: func(obj interface{}) bool {
|
||||
switch t := obj.(type) {
|
||||
case *v1.Secret:
|
||||
return t.Type == bootstrapapi.SecretTypeBootstrapToken && t.Namespace == e.tokenSecretNamespace
|
||||
default:
|
||||
utilruntime.HandleError(fmt.Errorf("object passed to %T that is not expected: %T", e, obj))
|
||||
return false
|
||||
}
|
||||
},
|
||||
WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) {
|
||||
lo.FieldSelector = secretSelector.String()
|
||||
return e.client.CoreV1().Secrets(e.tokenSecretNamespace).Watch(lo)
|
||||
Handler: cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: e.enqueueSecrets,
|
||||
UpdateFunc: func(oldSecret, newSecret interface{}) { e.enqueueSecrets(newSecret) },
|
||||
},
|
||||
},
|
||||
&v1.Secret{},
|
||||
options.SecretResync,
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: e.evalSecret,
|
||||
UpdateFunc: func(oldSecret, newSecret interface{}) { e.evalSecret(newSecret) },
|
||||
},
|
||||
)
|
||||
|
||||
return e, nil
|
||||
}
|
||||
|
||||
// Run runs controller loops and returns when they are done
|
||||
func (tc *TokenCleaner) Run(stopCh <-chan struct{}) {
|
||||
go tc.secretsController.Run(stopCh)
|
||||
go wait.Until(tc.evalSecrets, 10*time.Second, stopCh)
|
||||
defer utilruntime.HandleCrash()
|
||||
defer tc.queue.ShutDown()
|
||||
|
||||
glog.Infof("Starting token cleaner controller")
|
||||
defer glog.Infof("Shutting down token cleaner controller")
|
||||
|
||||
if !controller.WaitForCacheSync("token_cleaner", stopCh, tc.secretSynced) {
|
||||
return
|
||||
}
|
||||
|
||||
go wait.Until(tc.worker, 10*time.Second, stopCh)
|
||||
|
||||
<-stopCh
|
||||
}
|
||||
|
||||
func (tc *TokenCleaner) evalSecrets() {
|
||||
for _, obj := range tc.secrets.List() {
|
||||
tc.evalSecret(obj)
|
||||
func (tc *TokenCleaner) enqueueSecrets(obj interface{}) {
|
||||
key, err := controller.KeyFunc(obj)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
return
|
||||
}
|
||||
tc.queue.Add(key)
|
||||
}
|
||||
|
||||
// worker runs a thread that dequeues secrets, handles them, and marks them done.
|
||||
func (tc *TokenCleaner) worker() {
|
||||
for tc.processNextWorkItem() {
|
||||
}
|
||||
}
|
||||
|
||||
// processNextWorkItem deals with one key off the queue. It returns false when it's time to quit.
|
||||
func (tc *TokenCleaner) processNextWorkItem() bool {
|
||||
key, quit := tc.queue.Get()
|
||||
if quit {
|
||||
return false
|
||||
}
|
||||
defer tc.queue.Done(key)
|
||||
|
||||
if err := tc.syncFunc(key.(string)); err != nil {
|
||||
tc.queue.AddRateLimited(key)
|
||||
utilruntime.HandleError(fmt.Errorf("Sync %v failed with : %v", key, err))
|
||||
return true
|
||||
}
|
||||
|
||||
tc.queue.Forget(key)
|
||||
return true
|
||||
}
|
||||
|
||||
func (tc *TokenCleaner) syncFunc(key string) error {
|
||||
startTime := time.Now()
|
||||
defer func() {
|
||||
glog.V(4).Infof("Finished syncing secret %q (%v)", key, time.Now().Sub(startTime))
|
||||
}()
|
||||
|
||||
namespace, name, err := cache.SplitMetaNamespaceKey(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ret, err := tc.secretLister.Secrets(namespace).Get(name)
|
||||
if apierrors.IsNotFound(err) {
|
||||
glog.V(3).Infof("secret has been deleted: %v", key)
|
||||
return nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ret.Type == bootstrapapi.SecretTypeBootstrapToken {
|
||||
tc.evalSecret(ret)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tc *TokenCleaner) evalSecret(o interface{}) {
|
||||
|
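tokencleaner.go above moves from a periodic evalSecrets sweep over a private cache to the standard queue-driven controller loop (worker, processNextWorkItem, syncFunc). The sketch below isolates that loop; runWorker and the sync callback are illustrative stand-ins for the vendored methods.

package example

import (
	"fmt"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/util/workqueue"
)

// runWorker drains a rate-limiting queue: take a key, sync it, re-queue with
// backoff on failure, and Forget the key once a sync succeeds.
func runWorker(queue workqueue.RateLimitingInterface, sync func(key string) error) {
	for {
		key, quit := queue.Get()
		if quit {
			return
		}
		func() {
			defer queue.Done(key)
			if err := sync(key.(string)); err != nil {
				// AddRateLimited backs off per key on repeated failures.
				queue.AddRateLimited(key)
				utilruntime.HandleError(fmt.Errorf("sync %v failed: %v", key, err))
				return
			}
			// Forget clears the per-key failure history after success.
			queue.Forget(key)
		}()
	}
}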
vendor/k8s.io/kubernetes/pkg/controller/bootstrap/tokencleaner_test.go (generated, vendored; 24 changes)
@ -23,6 +23,8 @@ import (
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/informers"
|
||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
@ -32,24 +34,26 @@ func init() {
|
||||
spew.Config.DisableMethods = true
|
||||
}
|
||||
|
||||
func newTokenCleaner() (*TokenCleaner, *fake.Clientset, error) {
|
||||
func newTokenCleaner() (*TokenCleaner, *fake.Clientset, coreinformers.SecretInformer, error) {
|
||||
options := DefaultTokenCleanerOptions()
|
||||
cl := fake.NewSimpleClientset()
|
||||
tcc, err := NewTokenCleaner(cl, options)
|
||||
informerFactory := informers.NewSharedInformerFactory(cl, options.SecretResync)
|
||||
secrets := informerFactory.Core().V1().Secrets()
|
||||
tcc, err := NewTokenCleaner(cl, secrets, options)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
return tcc, cl, nil
|
||||
return tcc, cl, secrets, nil
|
||||
}
|
||||
|
||||
func TestCleanerNoExpiration(t *testing.T) {
|
||||
cleaner, cl, err := newTokenCleaner()
|
||||
cleaner, cl, secrets, err := newTokenCleaner()
|
||||
if err != nil {
|
||||
t.Fatalf("error creating TokenCleaner: %v", err)
|
||||
}
|
||||
|
||||
secret := newTokenSecret("tokenID", "tokenSecret")
|
||||
cleaner.secrets.Add(secret)
|
||||
secrets.Informer().GetIndexer().Add(secret)
|
||||
|
||||
cleaner.evalSecret(secret)
|
||||
|
||||
@ -59,14 +63,14 @@ func TestCleanerNoExpiration(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCleanerExpired(t *testing.T) {
|
||||
cleaner, cl, err := newTokenCleaner()
|
||||
cleaner, cl, secrets, err := newTokenCleaner()
|
||||
if err != nil {
|
||||
t.Fatalf("error creating TokenCleaner: %v", err)
|
||||
}
|
||||
|
||||
secret := newTokenSecret("tokenID", "tokenSecret")
|
||||
addSecretExpiration(secret, timeString(-time.Hour))
|
||||
cleaner.secrets.Add(secret)
|
||||
secrets.Informer().GetIndexer().Add(secret)
|
||||
|
||||
cleaner.evalSecret(secret)
|
||||
|
||||
@ -81,14 +85,14 @@ func TestCleanerExpired(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCleanerNotExpired(t *testing.T) {
|
||||
cleaner, cl, err := newTokenCleaner()
|
||||
cleaner, cl, secrets, err := newTokenCleaner()
|
||||
if err != nil {
|
||||
t.Fatalf("error creating TokenCleaner: %v", err)
|
||||
}
|
||||
|
||||
secret := newTokenSecret("tokenID", "tokenSecret")
|
||||
addSecretExpiration(secret, timeString(time.Hour))
|
||||
cleaner.secrets.Add(secret)
|
||||
secrets.Informer().GetIndexer().Add(secret)
|
||||
|
||||
cleaner.evalSecret(secret)
|
||||
|
||||
|
vendor/k8s.io/kubernetes/pkg/controller/bootstrap/util.go (generated, vendored; 2 changes)
@@ -23,7 +23,7 @@ import (
"github.com/golang/glog"

"k8s.io/api/core/v1"
bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api"
bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
)

var namePattern = `^` + regexp.QuoteMeta(bootstrapapi.BootstrapTokenSecretPrefix) + `([a-z0-9]{6})$`
vendor/k8s.io/kubernetes/pkg/controller/bootstrap/util_test.go (generated, vendored; 2 changes)
@@ -24,7 +24,7 @@ import (

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api"
bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
)

const (
vendor/k8s.io/kubernetes/pkg/controller/certificates/BUILD (generated, vendored; 5 changes)
@@ -15,7 +15,7 @@ go_library(
deps = [
"//pkg/controller:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/juju/ratelimit:go_default_library",
"//vendor/golang.org/x/time/rate:go_default_library",
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
@@ -54,8 +54,7 @@ filegroup(
go_test(
name = "go_default_test",
srcs = ["certificate_controller_test.go"],
importpath = "k8s.io/kubernetes/pkg/controller/certificates",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/controller:go_default_library",
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
vendor/k8s.io/kubernetes/pkg/controller/certificates/approver/BUILD (generated, vendored; 3 changes)
@@ -9,8 +9,7 @@ load(
go_test(
name = "go_default_test",
srcs = ["sarapprove_test.go"],
importpath = "k8s.io/kubernetes/pkg/controller/certificates/approver",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/apis/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/api/authorization/v1beta1:go_default_library",
vendor/k8s.io/kubernetes/pkg/controller/certificates/certificate_controller.go (generated, vendored; 8 changes)
@@ -22,6 +22,9 @@ import (
"fmt"
"time"

"github.com/golang/glog"
"golang.org/x/time/rate"

certificates "k8s.io/api/certificates/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -34,9 +37,6 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/controller"

"github.com/golang/glog"
"github.com/juju/ratelimit"
)

type CertificateController struct {
@@ -65,7 +65,7 @@ func NewCertificateController(
queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewMaxOfRateLimiter(
workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 1000*time.Second),
// 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item)
&workqueue.BucketRateLimiter{Bucket: ratelimit.NewBucketWithRate(float64(10), int64(100))},
&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
), "certificate"),
handler: handler,
}
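The certificate controller swaps its overall retry limiter from github.com/juju/ratelimit to golang.org/x/time/rate while keeping the per-item exponential backoff. A small sketch of the resulting queue construction, assuming the workqueue helpers shown in the hunk; newCSRQueue is an illustrative wrapper name.

package example

import (
	"time"

	"golang.org/x/time/rate"
	"k8s.io/client-go/util/workqueue"
)

// newCSRQueue combines a per-item exponential backoff with an overall
// token-bucket cap of 10 qps and burst 100, now built from x/time/rate.
func newCSRQueue() workqueue.RateLimitingInterface {
	return workqueue.NewNamedRateLimitingQueue(workqueue.NewMaxOfRateLimiter(
		// Per-item exponential backoff: 200ms doubling up to 1000s.
		workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 1000*time.Second),
		// Overall (not per-item) retry cap: 10 qps with a burst of 100.
		&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
	), "certificate")
}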
vendor/k8s.io/kubernetes/pkg/controller/certificates/cleaner/BUILD (generated, vendored; 3 changes)
@@ -35,8 +35,7 @@ filegroup(
go_test(
name = "go_default_test",
srcs = ["cleaner_test.go"],
importpath = "k8s.io/kubernetes/pkg/controller/certificates/cleaner",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
vendor/k8s.io/kubernetes/pkg/controller/certificates/cleaner/cleaner.go (generated, vendored; 2 changes)
@@ -124,7 +124,7 @@ func isIssuedExpired(csr *capi.CertificateSigningRequest) (bool, error) {
return false, err
}
if c.Type == capi.CertificateApproved && isIssued(csr) && isExpired {
glog.Infof("Cleaning CSR %q as the associated certificate is expired.", csr.Name, approvedExpiration)
glog.Infof("Cleaning CSR %q as the associated certificate is expired.", csr.Name)
return true, nil
}
}
vendor/k8s.io/kubernetes/pkg/controller/certificates/signer/BUILD (generated, vendored; 3 changes)
@@ -14,8 +14,7 @@ go_test(
"testdata/ca.key",
"testdata/kubelet.csr",
],
importpath = "k8s.io/kubernetes/pkg/controller/certificates/signer",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/util/cert:go_default_library",
vendor/k8s.io/kubernetes/pkg/controller/cloud/BUILD (generated, vendored; 7 changes)
@ -18,8 +18,8 @@ go_library(
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/kubelet/apis:go_default_library",
|
||||
"//pkg/scheduler/algorithm:go_default_library",
|
||||
"//pkg/util/node:go_default_library",
|
||||
"//plugin/pkg/scheduler/algorithm:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
@ -48,15 +48,14 @@ go_test(
|
||||
"node_controller_test.go",
|
||||
"pvlcontroller_test.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/cloud",
|
||||
library = ":go_default_library",
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/cloudprovider/providers/fake:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/controller/testutil:go_default_library",
|
||||
"//pkg/kubelet/apis:go_default_library",
|
||||
"//plugin/pkg/scheduler/algorithm:go_default_library",
|
||||
"//pkg/scheduler/algorithm:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
|
vendor/k8s.io/kubernetes/pkg/controller/cloud/node_controller.go (generated, vendored; 50 changes)
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package cloud
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
@ -37,8 +38,8 @@ import (
|
||||
nodeutilv1 "k8s.io/kubernetes/pkg/api/v1/node"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
nodeutil "k8s.io/kubernetes/pkg/util/node"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||
)
|
||||
|
||||
var UpdateNodeSpecBackoff = wait.Backoff{
|
||||
@ -97,8 +98,11 @@ func NewCloudNodeController(
|
||||
nodeStatusUpdateFrequency: nodeStatusUpdateFrequency,
|
||||
}
|
||||
|
||||
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: cnc.AddCloudNode,
|
||||
// Use shared informer to listen to add/update of nodes. Note that any nodes
|
||||
// that exist before node controller starts will show up in the update method
|
||||
cnc.nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: cnc.AddCloudNode,
|
||||
UpdateFunc: cnc.UpdateCloudNode,
|
||||
})
|
||||
|
||||
return cnc
|
||||
@ -192,7 +196,7 @@ func (cnc *CloudNodeController) updateNodeAddress(node *v1.Node, instances cloud
|
||||
if !nodeAddressesChangeDetected(node.Status.Addresses, newNode.Status.Addresses) {
|
||||
return
|
||||
}
|
||||
_, err = nodeutil.PatchNodeStatus(cnc.kubeClient.CoreV1(), types.NodeName(node.Name), node, newNode)
|
||||
_, _, err = nodeutil.PatchNodeStatus(cnc.kubeClient.CoreV1(), types.NodeName(node.Name), node, newNode)
|
||||
if err != nil {
|
||||
glog.Errorf("Error patching node with cloud ip addresses = [%v]", err)
|
||||
}
|
||||
@ -276,22 +280,30 @@ func (cnc *CloudNodeController) MonitorNode() {
|
||||
}
|
||||
}
|
||||
|
||||
func (cnc *CloudNodeController) UpdateCloudNode(_, newObj interface{}) {
|
||||
if _, ok := newObj.(*v1.Node); !ok {
|
||||
utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", newObj))
|
||||
return
|
||||
}
|
||||
cnc.AddCloudNode(newObj)
|
||||
}
|
||||
|
||||
// This processes nodes that were added into the cluster, and cloud initialize them if appropriate
|
||||
func (cnc *CloudNodeController) AddCloudNode(obj interface{}) {
|
||||
node := obj.(*v1.Node)
|
||||
|
||||
instances, ok := cnc.cloud.Instances()
|
||||
if !ok {
|
||||
utilruntime.HandleError(fmt.Errorf("failed to get instances from cloud provider"))
|
||||
return
|
||||
}
|
||||
|
||||
cloudTaint := getCloudTaint(node.Spec.Taints)
|
||||
if cloudTaint == nil {
|
||||
glog.V(2).Infof("This node %s is registered without the cloud taint. Will not process.", node.Name)
|
||||
return
|
||||
}
|
||||
|
||||
instances, ok := cnc.cloud.Instances()
|
||||
if !ok {
|
||||
utilruntime.HandleError(fmt.Errorf("failed to get instances from cloud provider"))
|
||||
return
|
||||
}
|
||||
|
||||
err := clientretry.RetryOnConflict(UpdateNodeSpecBackoff, func() error {
|
||||
curNode, err := cnc.kubeClient.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
@ -299,7 +311,7 @@ func (cnc *CloudNodeController) AddCloudNode(obj interface{}) {
|
||||
}
|
||||
|
||||
if curNode.Spec.ProviderID == "" {
|
||||
providerID, err := cloudprovider.GetInstanceProviderID(cnc.cloud, types.NodeName(curNode.Name))
|
||||
providerID, err := cloudprovider.GetInstanceProviderID(context.TODO(), cnc.cloud, types.NodeName(curNode.Name))
|
||||
if err == nil {
|
||||
curNode.Spec.ProviderID = providerID
|
||||
} else {
|
||||
@ -402,10 +414,10 @@ func excludeTaintFromList(taints []v1.Taint, toExclude v1.Taint) []v1.Taint {
|
||||
|
||||
// ensureNodeExistsByProviderIDOrExternalID first checks if the instance exists by the provider id and then by calling external id with node name
|
||||
func ensureNodeExistsByProviderIDOrExternalID(instances cloudprovider.Instances, node *v1.Node) (bool, error) {
|
||||
exists, err := instances.InstanceExistsByProviderID(node.Spec.ProviderID)
|
||||
exists, err := instances.InstanceExistsByProviderID(context.TODO(), node.Spec.ProviderID)
|
||||
if err != nil {
|
||||
providerIDErr := err
|
||||
_, err = instances.ExternalID(types.NodeName(node.Name))
|
||||
_, err = instances.ExternalID(context.TODO(), types.NodeName(node.Name))
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
@ -420,10 +432,10 @@ func ensureNodeExistsByProviderIDOrExternalID(instances cloudprovider.Instances,
|
||||
}
|
||||
|
||||
func getNodeAddressesByProviderIDOrName(instances cloudprovider.Instances, node *v1.Node) ([]v1.NodeAddress, error) {
|
||||
nodeAddresses, err := instances.NodeAddressesByProviderID(node.Spec.ProviderID)
|
||||
nodeAddresses, err := instances.NodeAddressesByProviderID(context.TODO(), node.Spec.ProviderID)
|
||||
if err != nil {
|
||||
providerIDErr := err
|
||||
nodeAddresses, err = instances.NodeAddresses(types.NodeName(node.Name))
|
||||
nodeAddresses, err = instances.NodeAddresses(context.TODO(), types.NodeName(node.Name))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("NodeAddress: Error fetching by providerID: %v Error fetching by NodeName: %v", providerIDErr, err)
|
||||
}
|
||||
@ -467,10 +479,10 @@ func ensureNodeProvidedIPExists(node *v1.Node, nodeAddresses []v1.NodeAddress) (
|
||||
}
|
||||
|
||||
func getInstanceTypeByProviderIDOrName(instances cloudprovider.Instances, node *v1.Node) (string, error) {
|
||||
instanceType, err := instances.InstanceTypeByProviderID(node.Spec.ProviderID)
|
||||
instanceType, err := instances.InstanceTypeByProviderID(context.TODO(), node.Spec.ProviderID)
|
||||
if err != nil {
|
||||
providerIDErr := err
|
||||
instanceType, err = instances.InstanceType(types.NodeName(node.Name))
|
||||
instanceType, err = instances.InstanceType(context.TODO(), types.NodeName(node.Name))
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("InstanceType: Error fetching by providerID: %v Error fetching by NodeName: %v", providerIDErr, err)
|
||||
}
|
||||
@ -481,10 +493,10 @@ func getInstanceTypeByProviderIDOrName(instances cloudprovider.Instances, node *
|
||||
// getZoneByProviderIDorName will attempt to get the zone of node using its providerID
|
||||
// then it's name. If both attempts fail, an error is returned
|
||||
func getZoneByProviderIDOrName(zones cloudprovider.Zones, node *v1.Node) (cloudprovider.Zone, error) {
|
||||
zone, err := zones.GetZoneByProviderID(node.Spec.ProviderID)
|
||||
zone, err := zones.GetZoneByProviderID(context.TODO(), node.Spec.ProviderID)
|
||||
if err != nil {
|
||||
providerIDErr := err
|
||||
zone, err = zones.GetZoneByNodeName(types.NodeName(node.Name))
|
||||
zone, err = zones.GetZoneByNodeName(context.TODO(), types.NodeName(node.Name))
|
||||
if err != nil {
|
||||
return cloudprovider.Zone{}, fmt.Errorf("Zone: Error fetching by providerID: %v Error fetching by NodeName: %v", providerIDErr, err)
|
||||
}
|
||||
|
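node_controller.go threads context.Context through every cloudprovider.Instances call and keeps the provider-ID-then-node-name fallback. Below is a simplified sketch of that fallback, with a trimmed-down Addresser interface standing in for cloudprovider.Instances (the real NodeAddresses method takes a types.NodeName rather than a string).

package example

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// Addresser captures just the two lookups used below; in the vendored code
// these are methods on cloudprovider.Instances, which now take a Context.
type Addresser interface {
	NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error)
	NodeAddresses(ctx context.Context, name string) ([]v1.NodeAddress, error)
}

// nodeAddresses tries the provider ID first, falls back to the node name, and
// reports both errors if neither lookup works. context.TODO() stands in for a
// real request context, as it does throughout the diff.
func nodeAddresses(instances Addresser, node *v1.Node) ([]v1.NodeAddress, error) {
	addrs, err := instances.NodeAddressesByProviderID(context.TODO(), node.Spec.ProviderID)
	if err == nil {
		return addrs, nil
	}
	providerIDErr := err

	addrs, err = instances.NodeAddresses(context.TODO(), node.Name)
	if err != nil {
		return nil, fmt.Errorf("error fetching by providerID: %v; error fetching by name: %v", providerIDErr, err)
	}
	return addrs, nil
}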
vendor/k8s.io/kubernetes/pkg/controller/cloud/node_controller_test.go (generated, vendored; 2 changes)
@@ -35,7 +35,7 @@ import (
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/testutil"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithm"

"github.com/golang/glog"
"github.com/stretchr/testify/assert"
vendor/k8s.io/kubernetes/pkg/controller/cloud/pvlcontroller.go (generated, vendored; 26 changes)
@@ -17,6 +17,7 @@ limitations under the License.
package cloud

import (
"context"
"encoding/json"
"fmt"
"time"
@@ -182,10 +183,10 @@ func (pvlc *PersistentVolumeLabelController) addLabels(key string) error {

func (pvlc *PersistentVolumeLabelController) addLabelsToVolume(vol *v1.PersistentVolume) error {
var volumeLabels map[string]string
// Only add labels if in the list of initializers
// Only add labels if the next pending initializer.
if needsInitialization(vol.Initializers, initializerName) {
if labeler, ok := (pvlc.cloud).(cloudprovider.PVLabeler); ok {
labels, err := labeler.GetLabelsForVolume(vol)
labels, err := labeler.GetLabelsForVolume(context.TODO(), vol)
if err != nil {
return fmt.Errorf("error querying volume %v: %v", vol.Spec, err)
}
@@ -265,16 +266,17 @@ func removeInitializer(initializers *metav1.Initializers, name string) *metav1.I
return &metav1.Initializers{Pending: updated}
}

// needsInitialization checks whether or not the PVL is the next pending initializer.
func needsInitialization(initializers *metav1.Initializers, name string) bool {
hasInitializer := false

if initializers != nil {
for _, pending := range initializers.Pending {
if pending.Name == name {
hasInitializer = true
break
}
}
if initializers == nil {
return false
}
return hasInitializer

if len(initializers.Pending) == 0 {
return false
}

// There is at least one initializer still pending so check to
// see if the PVL is the next in line.
return initializers.Pending[0].Name == name
}
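The needsInitialization rewrite narrows the check from "our initializer appears anywhere in the pending list" to "our initializer is first in line". Restated as a stand-alone helper, assuming the metav1.Initializers type used by this vendored release; isNextPendingInitializer is an illustrative name.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// isNextPendingInitializer returns true only when the named initializer heads
// the pending list: initializers are processed in order, so anything other
// than the head means it is not yet this controller's turn.
func isNextPendingInitializer(initializers *metav1.Initializers, name string) bool {
	if initializers == nil || len(initializers.Pending) == 0 {
		return false
	}
	return initializers.Pending[0].Name == name
}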
|
vendor/k8s.io/kubernetes/pkg/controller/cloud/pvlcontroller_test.go (generated, vendored; 7 changes)
@@ -146,11 +146,16 @@ func TestAddLabelsToVolume(t *testing.T) {
initializers: &metav1.Initializers{Pending: []metav1.Initializer{{Name: initializerName}}},
shouldLabel: true,
},
"PV with other initializers": {
"PV with other initializers only": {
vol: pv,
initializers: &metav1.Initializers{Pending: []metav1.Initializer{{Name: "OtherInit"}}},
shouldLabel: false,
},
"PV with other initializers first": {
vol: pv,
initializers: &metav1.Initializers{Pending: []metav1.Initializer{{Name: "OtherInit"}, {Name: initializerName}}},
shouldLabel: false,
},
}

for d, tc := range testCases {
vendor/k8s.io/kubernetes/pkg/controller/clusterroleaggregation/BUILD (generated, vendored; 3 changes)
@@ -40,8 +40,7 @@ filegroup(
go_test(
name = "go_default_test",
srcs = ["clusterroleaggregation_controller_test.go"],
importpath = "k8s.io/kubernetes/pkg/controller/clusterroleaggregation",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/controller:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go (generated, vendored; 16 changes)
@@ -21,7 +21,7 @@ import (
"sync"

"github.com/golang/glog"
appsv1beta1 "k8s.io/api/apps/v1beta1"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
@@ -429,25 +429,25 @@ func NewControllerRevisionControllerRefManager(
// * Adopt orphans if the selector matches.
// * Release owned objects if the selector no longer matches.
//
// A non-nil error is returned if some form of reconciliation was attemped and
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The list of ControllerRevisions that you now own is
// returned.
func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histories []*appsv1beta1.ControllerRevision) ([]*appsv1beta1.ControllerRevision, error) {
var claimed []*appsv1beta1.ControllerRevision
func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histories []*apps.ControllerRevision) ([]*apps.ControllerRevision, error) {
var claimed []*apps.ControllerRevision
var errlist []error

match := func(obj metav1.Object) bool {
return m.Selector.Matches(labels.Set(obj.GetLabels()))
}
adopt := func(obj metav1.Object) error {
return m.AdoptControllerRevision(obj.(*appsv1beta1.ControllerRevision))
return m.AdoptControllerRevision(obj.(*apps.ControllerRevision))
}
release := func(obj metav1.Object) error {
return m.ReleaseControllerRevision(obj.(*appsv1beta1.ControllerRevision))
return m.ReleaseControllerRevision(obj.(*apps.ControllerRevision))
}

for _, h := range histories {
@@ -465,7 +465,7 @@ func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histor

// AdoptControllerRevision sends a patch to take control of the ControllerRevision. It returns the error if
// the patching fails.
func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history *appsv1beta1.ControllerRevision) error {
func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history *apps.ControllerRevision) error {
if err := m.CanAdopt(); err != nil {
return fmt.Errorf("can't adopt ControllerRevision %v/%v (%v): %v", history.Namespace, history.Name, history.UID, err)
}
@@ -480,7 +480,7 @@ func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history

// ReleaseControllerRevision sends a patch to free the ControllerRevision from the control of its controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *appsv1beta1.ControllerRevision) error {
func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *apps.ControllerRevision) error {
glog.V(2).Infof("patching ControllerRevision %s_%s to remove its controllerRef to %s/%s:%s",
history.Namespace, history.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), history.UID)
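controller_ref_manager.go retypes ClaimControllerRevisions against apps/v1, but the claim policy itself is unchanged: adopt matching orphans, release owned objects that stop matching. Below is a condensed sketch of that per-object decision, with adopt and release callbacks standing in for the patch-sending methods and without the deletion-timestamp handling the full implementation performs.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// claimObject reports whether the controller should consider obj owned after
// reconciliation, adopting matching orphans and releasing owned objects whose
// labels no longer match the selector.
func claimObject(obj metav1.Object, controllerUID string, selector labels.Selector,
	adopt, release func(metav1.Object) error) (bool, error) {

	matches := selector.Matches(labels.Set(obj.GetLabels()))
	owner := metav1.GetControllerOf(obj)

	switch {
	case owner == nil && matches:
		// Orphan that matches: take ownership.
		if err := adopt(obj); err != nil {
			return false, err
		}
		return true, nil
	case owner != nil && string(owner.UID) == controllerUID && !matches:
		// Owned but no longer matching: free it for another controller.
		return false, release(obj)
	case owner != nil && string(owner.UID) == controllerUID:
		// Already owned and still matching.
		return true, nil
	default:
		// Owned by someone else, or an orphan that does not match.
		return false, nil
	}
}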
vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go (generated, vendored; 2 changes)
@@ -64,7 +64,7 @@ const (
// latency/pod at the scale of 3000 pods over 100 nodes.
ExpectationsTimeout = 5 * time.Minute
// When batching pod creates, SlowStartInitialBatchSize is the size of the
// inital batch. The size of each successive batch is twice the size of
// initial batch. The size of each successive batch is twice the size of
// the previous batch. For example, for a value of 1, batch sizes would be
// 1, 2, 4, 8, ... and for a value of 10, batch sizes would be
// 10, 20, 40, 80, ... Setting the value higher means that quota denials
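The comment above describes slow-start batching: successive batches double from SlowStartInitialBatchSize, so a persistently failing create burns little quota before the controller gives up. A sequential sketch of that rule follows (the real helper runs each batch concurrently); slowStartBatch here is illustrative, not the vendored function.

package example

// slowStartBatch calls fn up to count times in batches that start at
// initialBatchSize and double each round (1, 2, 4, 8, ... or 10, 20, 40, ...),
// stopping at the first error and returning how many calls succeeded.
func slowStartBatch(count, initialBatchSize int, fn func() error) (int, error) {
	remaining := count
	successes := 0
	for batch := initialBatchSize; batch > 0; batch = 2 * batch {
		if batch > remaining {
			batch = remaining
		}
		for i := 0; i < batch; i++ {
			if err := fn(); err != nil {
				return successes, err
			}
			successes++
		}
		remaining -= batch
		if remaining == 0 {
			return successes, nil
		}
	}
	return successes, nil
}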
vendor/k8s.io/kubernetes/pkg/controller/cronjob/BUILD (generated, vendored; 3 changes)
@@ -46,8 +46,7 @@ go_test(
"cronjob_controller_test.go",
"utils_test.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/cronjob",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/apis/batch/install:go_default_library",
"//pkg/apis/core/install:go_default_library",
vendor/k8s.io/kubernetes/pkg/controller/cronjob/cronjob_controller.go (generated, vendored; 2 changes)
@@ -297,7 +297,7 @@ func syncOne(sj *batchv1beta1.CronJob, js []batchv1.Job, now time.Time, jc jobCo
// there is some risk that we won't see an active job when there is one.
// (because we haven't seen the status update to the SJ or the created pod).
// So it is theoretically possible to have concurrency with Forbid.
// As long the as the invokations are "far enough apart in time", this usually won't happen.
// As long the as the invocations are "far enough apart in time", this usually won't happen.
//
// TODO: for Forbid, we could use the same name for every execution, as a lock.
// With replace, we could use a name that is deterministic per execution time.
vendor/k8s.io/kubernetes/pkg/controller/daemon/BUILD (generated, vendored; 25 changes)
@@ -21,15 +21,14 @@ go_library(
"//pkg/controller/daemon/util:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/schedulercache:go_default_library",
"//pkg/util/labels:go_default_library",
"//pkg/util/metrics:go_default_library",
"//plugin/pkg/scheduler/algorithm:go_default_library",
"//plugin/pkg/scheduler/algorithm/predicates:go_default_library",
"//plugin/pkg/scheduler/schedulercache:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
@@ -42,16 +41,14 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/informers/apps/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/informers/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/listers/apps/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/listers/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/integer:go_default_library",
@@ -65,20 +62,18 @@ go_test(
"daemon_controller_test.go",
"update_test.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/daemon",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/testapi:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/securitycontext:go_default_library",
"//pkg/util/labels:go_default_library",
"//plugin/pkg/scheduler/algorithm:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
104
vendor/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller.go
generated
vendored
@ -23,9 +23,8 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
apps "k8s.io/api/apps/v1beta1"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
@ -34,16 +33,14 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
appsinformers "k8s.io/client-go/informers/apps/v1beta1"
|
||||
appsinformers "k8s.io/client-go/informers/apps/v1"
|
||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||
extensionsinformers "k8s.io/client-go/informers/extensions/v1beta1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
unversionedapps "k8s.io/client-go/kubernetes/typed/apps/v1"
|
||||
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
unversionedextensions "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
|
||||
appslisters "k8s.io/client-go/listers/apps/v1beta1"
|
||||
appslisters "k8s.io/client-go/listers/apps/v1"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
extensionslisters "k8s.io/client-go/listers/extensions/v1beta1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/integer"
|
||||
@ -54,22 +51,25 @@ import (
|
||||
"k8s.io/kubernetes/pkg/controller/daemon/util"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
|
||||
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
|
||||
"k8s.io/kubernetes/pkg/util/metrics"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
const (
|
||||
// The value of 250 is chosen b/c values that are too high can cause registry DoS issues
|
||||
// BurstReplicas is a rate limiter for booting pods on a lot of pods.
|
||||
// The value of 250 is chosen b/c values that are too high can cause registry DoS issues.
|
||||
BurstReplicas = 250
|
||||
|
||||
// If sending a status update to API server fails, we retry a finite number of times.
|
||||
// StatusUpdateRetries limits the number of retries if sending a status update to API server fails.
|
||||
StatusUpdateRetries = 1
|
||||
)
|
||||
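A minimal sketch of how a bounded retry such as StatusUpdateRetries is typically used: a fixed number of attempts around a status update call. The updateStatusWithRetries helper is illustrative, and it assumes the pre-1.18 typed client signature without a context argument.

package example

import (
	apps "k8s.io/api/apps/v1"
	appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
)

const statusUpdateRetries = 1

func updateStatusWithRetries(dsClient appsclient.DaemonSetInterface, ds *apps.DaemonSet) error {
	var err error
	// One initial attempt plus statusUpdateRetries retries; the last error is returned.
	for i := 0; i <= statusUpdateRetries; i++ {
		if _, err = dsClient.UpdateStatus(ds); err == nil {
			return nil
		}
	}
	return err
}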
|
||||
// Reasons for DaemonSet events
|
||||
// Reasons for DaemonSet events
|
||||
const (
|
||||
// SelectingAllReason is added to an event when a DaemonSet selects all Pods.
|
||||
SelectingAllReason = "SelectingAll"
|
||||
// FailedPlacementReason is added to an event when a DaemonSet can't schedule a Pod to a specified node.
|
||||
@ -79,7 +79,7 @@ const (
|
||||
)
|
||||
|
||||
// controllerKind contains the schema.GroupVersionKind for this controller type.
|
||||
var controllerKind = extensions.SchemeGroupVersion.WithKind("DaemonSet")
|
||||
var controllerKind = apps.SchemeGroupVersion.WithKind("DaemonSet")
|
||||
|
||||
// DaemonSetsController is responsible for synchronizing DaemonSet objects stored
|
||||
// in the system with actual running pods.
|
||||
@ -96,12 +96,12 @@ type DaemonSetsController struct {
|
||||
// To allow injection of syncDaemonSet for testing.
|
||||
syncHandler func(dsKey string) error
|
||||
// used for unit testing
|
||||
enqueueDaemonSet func(ds *extensions.DaemonSet)
|
||||
enqueueDaemonSetRateLimited func(ds *extensions.DaemonSet)
|
||||
enqueueDaemonSet func(ds *apps.DaemonSet)
|
||||
enqueueDaemonSetRateLimited func(ds *apps.DaemonSet)
|
||||
// A TTLCache of pod creates/deletes each ds expects to see
|
||||
expectations controller.ControllerExpectationsInterface
|
||||
// dsLister can list/get daemonsets from the shared informer's store
|
||||
dsLister extensionslisters.DaemonSetLister
|
||||
dsLister appslisters.DaemonSetLister
|
||||
// dsStoreSynced returns true if the daemonset store has been synced at least once.
|
||||
// Added as a member to the struct to allow injection for testing.
|
||||
dsStoreSynced cache.InformerSynced
|
||||
@ -130,7 +130,8 @@ type DaemonSetsController struct {
|
||||
suspendedDaemonPods map[string]sets.String
|
||||
}
|
||||
|
||||
func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInformer, historyInformer appsinformers.ControllerRevisionInformer, podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, kubeClient clientset.Interface) (*DaemonSetsController, error) {
|
||||
// NewDaemonSetsController creates a new DaemonSetsController
|
||||
func NewDaemonSetsController(daemonSetInformer appsinformers.DaemonSetInformer, historyInformer appsinformers.ControllerRevisionInformer, podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, kubeClient clientset.Interface) (*DaemonSetsController, error) {
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
// TODO: remove the wrapper when every clients have moved to use the clientset.
|
||||
@ -159,13 +160,13 @@ func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInfo
|
||||
|
||||
daemonSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
ds := obj.(*extensions.DaemonSet)
|
||||
ds := obj.(*apps.DaemonSet)
|
||||
glog.V(4).Infof("Adding daemon set %s", ds.Name)
|
||||
dsc.enqueueDaemonSet(ds)
|
||||
},
|
||||
UpdateFunc: func(old, cur interface{}) {
|
||||
oldDS := old.(*extensions.DaemonSet)
|
||||
curDS := cur.(*extensions.DaemonSet)
|
||||
oldDS := old.(*apps.DaemonSet)
|
||||
curDS := cur.(*apps.DaemonSet)
|
||||
glog.V(4).Infof("Updating daemon set %s", oldDS.Name)
|
||||
dsc.enqueueDaemonSet(curDS)
|
||||
},
|
||||
@ -207,14 +208,14 @@ func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInfo
|
||||
}
|
||||
|
||||
func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) {
|
||||
ds, ok := obj.(*extensions.DaemonSet)
|
||||
ds, ok := obj.(*apps.DaemonSet)
|
||||
if !ok {
|
||||
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
|
||||
if !ok {
|
||||
utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
|
||||
return
|
||||
}
|
||||
ds, ok = tombstone.Obj.(*extensions.DaemonSet)
|
||||
ds, ok = tombstone.Obj.(*apps.DaemonSet)
|
||||
if !ok {
|
||||
utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a DaemonSet %#v", obj))
|
||||
return
|
||||
@ -268,7 +269,7 @@ func (dsc *DaemonSetsController) processNextWorkItem() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (dsc *DaemonSetsController) enqueue(ds *extensions.DaemonSet) {
|
||||
func (dsc *DaemonSetsController) enqueue(ds *apps.DaemonSet) {
|
||||
key, err := controller.KeyFunc(ds)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", ds, err))
|
||||
@ -279,7 +280,7 @@ func (dsc *DaemonSetsController) enqueue(ds *extensions.DaemonSet) {
|
||||
dsc.queue.Add(key)
|
||||
}
|
||||
|
||||
func (dsc *DaemonSetsController) enqueueRateLimited(ds *extensions.DaemonSet) {
|
||||
func (dsc *DaemonSetsController) enqueueRateLimited(ds *apps.DaemonSet) {
|
||||
key, err := controller.KeyFunc(ds)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", ds, err))
|
||||
@ -301,7 +302,7 @@ func (dsc *DaemonSetsController) enqueueDaemonSetAfter(obj interface{}, after ti
|
||||
}
|
||||
|
||||
// getDaemonSetsForPod returns a list of DaemonSets that potentially match the pod.
|
||||
func (dsc *DaemonSetsController) getDaemonSetsForPod(pod *v1.Pod) []*extensions.DaemonSet {
|
||||
func (dsc *DaemonSetsController) getDaemonSetsForPod(pod *v1.Pod) []*apps.DaemonSet {
|
||||
sets, err := dsc.dsLister.GetPodDaemonSets(pod)
|
||||
if err != nil {
|
||||
return nil
|
||||
@ -316,7 +317,7 @@ func (dsc *DaemonSetsController) getDaemonSetsForPod(pod *v1.Pod) []*extensions.
|
||||
|
||||
// getDaemonSetsForHistory returns a list of DaemonSets that potentially
|
||||
// match a ControllerRevision.
|
||||
func (dsc *DaemonSetsController) getDaemonSetsForHistory(history *apps.ControllerRevision) []*extensions.DaemonSet {
|
||||
func (dsc *DaemonSetsController) getDaemonSetsForHistory(history *apps.ControllerRevision) []*apps.DaemonSet {
|
||||
daemonSets, err := dsc.dsLister.GetHistoryDaemonSets(history)
|
||||
if err != nil || len(daemonSets) == 0 {
|
||||
return nil
|
||||
@ -732,7 +733,7 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
|
||||
// This also reconciles ControllerRef by adopting/orphaning.
|
||||
// Note that returned Pods are pointers to objects in the cache.
|
||||
// If you want to modify one, you need to deep-copy it first.
|
||||
func (dsc *DaemonSetsController) getDaemonPods(ds *extensions.DaemonSet) ([]*v1.Pod, error) {
|
||||
func (dsc *DaemonSetsController) getDaemonPods(ds *apps.DaemonSet) ([]*v1.Pod, error) {
|
||||
selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -747,7 +748,7 @@ func (dsc *DaemonSetsController) getDaemonPods(ds *extensions.DaemonSet) ([]*v1.
|
||||
// If any adoptions are attempted, we should first recheck for deletion with
|
||||
// an uncached quorum read sometime after listing Pods (see #42639).
|
||||
dsNotDeleted := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
|
||||
fresh, err := dsc.kubeClient.ExtensionsV1beta1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
|
||||
fresh, err := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
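A hedged sketch of the "recheck deletion with an uncached quorum read" pattern used above: before adopting orphans, do a live GET that bypasses the informer cache and refuse to adopt if the object was recreated with a different UID or is being deleted. The canAdoptFunc name is illustrative, and the Get call assumes the pre-1.18 client signature.

package example

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

func canAdoptFunc(kubeClient clientset.Interface, ds *apps.DaemonSet) func() error {
	return func() error {
		// Live read, not the cached lister, so we see a deletion the cache has not yet observed.
		fresh, err := kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if fresh.UID != ds.UID {
			return fmt.Errorf("original DaemonSet %v/%v is gone: got uid %v, wanted %v", ds.Namespace, ds.Name, fresh.UID, ds.UID)
		}
		if fresh.DeletionTimestamp != nil {
			return fmt.Errorf("%v/%v is being deleted (deletionTimestamp %v), refusing to adopt", ds.Namespace, ds.Name, fresh.DeletionTimestamp)
		}
		return nil
	}
}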
@ -766,7 +767,7 @@ func (dsc *DaemonSetsController) getDaemonPods(ds *extensions.DaemonSet) ([]*v1.
|
||||
// This also reconciles ControllerRef by adopting/orphaning.
|
||||
// Note that returned Pods are pointers to objects in the cache.
|
||||
// If you want to modify one, you need to deep-copy it first.
|
||||
func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet) (map[string][]*v1.Pod, error) {
|
||||
func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *apps.DaemonSet) (map[string][]*v1.Pod, error) {
|
||||
claimedPods, err := dsc.getDaemonPods(ds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -783,7 +784,7 @@ func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet)
|
||||
// resolveControllerRef returns the controller referenced by a ControllerRef,
|
||||
// or nil if the ControllerRef could not be resolved to a matching controller
|
||||
// of the correct Kind.
|
||||
func (dsc *DaemonSetsController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *extensions.DaemonSet {
|
||||
func (dsc *DaemonSetsController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *apps.DaemonSet {
|
||||
// We can't look up by UID, so look up by Name and then verify UID.
|
||||
// Don't even try to look up by Name if it's the wrong Kind.
|
||||
if controllerRef.Kind != controllerKind.Kind {
|
||||
@ -805,7 +806,7 @@ func (dsc *DaemonSetsController) resolveControllerRef(namespace string, controll
|
||||
// After figuring out which nodes should run a Pod of ds but not yet running one and
|
||||
// which nodes should not run a Pod of ds but currently running one, it calls function
|
||||
// syncNodes with a list of pods to remove and a list of nodes to run a Pod of ds.
|
||||
func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet, hash string) error {
|
||||
func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, hash string) error {
|
||||
// Find out which nodes are running the daemon pods controlled by ds.
|
||||
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
|
||||
if err != nil {
|
||||
@ -887,7 +888,7 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet, hash string) e
|
||||
|
||||
// syncNodes deletes given pods and creates new daemon set pods on the given nodes
|
||||
// returns slice with erros if any
|
||||
func (dsc *DaemonSetsController) syncNodes(ds *extensions.DaemonSet, podsToDelete, nodesNeedingDaemonPods []string, hash string) error {
|
||||
func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nodesNeedingDaemonPods []string, hash string) error {
|
||||
// We need to set expectations before creating/deleting pods to avoid race conditions.
|
||||
dsKey, err := controller.KeyFunc(ds)
|
||||
if err != nil {
|
||||
@ -911,7 +912,13 @@ func (dsc *DaemonSetsController) syncNodes(ds *extensions.DaemonSet, podsToDelet
|
||||
|
||||
glog.V(4).Infof("Nodes needing daemon pods for daemon set %s: %+v, creating %d", ds.Name, nodesNeedingDaemonPods, createDiff)
|
||||
createWait := sync.WaitGroup{}
|
||||
template := util.CreatePodTemplate(ds.Spec.Template, ds.Spec.TemplateGeneration, hash)
|
||||
// If the returned error is not nil we have a parse error.
|
||||
// The controller handles this via the hash.
|
||||
generation, err := util.GetTemplateGeneration(ds)
|
||||
if err != nil {
|
||||
generation = nil
|
||||
}
|
||||
template := util.CreatePodTemplate(ds.Spec.Template, generation, hash)
|
||||
// Batch the pod creates. Batch sizes start at SlowStartInitialBatchSize
|
||||
// and double with each successful iteration in a kind of "slow start".
|
||||
// This handles attempts to start large numbers of pods that would
|
||||
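A hedged sketch of what a GetTemplateGeneration helper can look like after the move to apps/v1, where TemplateGeneration is no longer a spec field: the value is read from an annotation, and a parse failure yields a nil pointer, which is exactly the fallback the caller above uses before relying on the hash label. The annotation key is an assumption, not taken from this diff.

package example

import (
	"strconv"

	apps "k8s.io/api/apps/v1"
)

const deprecatedTemplateGeneration = "deprecated.daemonset.template.generation" // assumed key

func getTemplateGeneration(ds *apps.DaemonSet) (*int64, error) {
	annotation, found := ds.Annotations[deprecatedTemplateGeneration]
	if !found {
		return nil, nil
	}
	generation, err := strconv.ParseInt(annotation, 10, 64)
	if err != nil {
		// Caller treats a parse error as "no generation" and identifies pods by hash instead.
		return nil, err
	}
	return &generation, nil
}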
@ -985,7 +992,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *extensions.DaemonSet, podsToDelet
|
||||
return utilerrors.NewAggregate(errors)
|
||||
}
|
||||
|
||||
func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds *extensions.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable int) error {
|
||||
func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable int) error {
|
||||
if int(ds.Status.DesiredNumberScheduled) == desiredNumberScheduled &&
|
||||
int(ds.Status.CurrentNumberScheduled) == currentNumberScheduled &&
|
||||
int(ds.Status.NumberMisscheduled) == numberMisscheduled &&
|
||||
@ -1024,7 +1031,7 @@ func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds
|
||||
return updateErr
|
||||
}
|
||||
|
||||
func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet, hash string) error {
|
||||
func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash string) error {
|
||||
glog.V(4).Infof("Updating daemon set status")
|
||||
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
|
||||
if err != nil {
|
||||
@ -1059,7 +1066,13 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet,
|
||||
numberAvailable++
|
||||
}
|
||||
}
|
||||
if util.IsPodUpdated(ds.Spec.TemplateGeneration, pod, hash) {
|
||||
// If the returned error is not nil we have a parse error.
|
||||
// The controller handles this via the hash.
|
||||
generation, err := util.GetTemplateGeneration(ds)
|
||||
if err != nil {
|
||||
generation = nil
|
||||
}
|
||||
if util.IsPodUpdated(pod, hash, generation) {
|
||||
updatedNumberScheduled++
|
||||
}
|
||||
}
|
||||
@ -1071,7 +1084,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet,
|
||||
}
|
||||
numberUnavailable := desiredNumberScheduled - numberAvailable
|
||||
|
||||
err = storeDaemonSetStatus(dsc.kubeClient.ExtensionsV1beta1().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable)
|
||||
err = storeDaemonSetStatus(dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error storing status for daemon set %#v: %v", ds, err)
|
||||
}
|
||||
@ -1118,7 +1131,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to construct revisions of DaemonSet: %v", err)
|
||||
}
|
||||
hash := cur.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
|
||||
hash := cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
|
||||
|
||||
if ds.DeletionTimestamp != nil || !dsc.expectations.SatisfiedExpectations(dsKey) {
|
||||
// Only update status.
|
||||
@ -1133,8 +1146,8 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
|
||||
// Process rolling updates if we're ready.
|
||||
if dsc.expectations.SatisfiedExpectations(dsKey) {
|
||||
switch ds.Spec.UpdateStrategy.Type {
|
||||
case extensions.OnDeleteDaemonSetStrategyType:
|
||||
case extensions.RollingUpdateDaemonSetStrategyType:
|
||||
case apps.OnDeleteDaemonSetStrategyType:
|
||||
case apps.RollingUpdateDaemonSetStrategyType:
|
||||
err = dsc.rollingUpdate(ds, hash)
|
||||
}
|
||||
if err != nil {
|
||||
@ -1150,7 +1163,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
|
||||
return dsc.updateDaemonSetStatus(ds, hash)
|
||||
}
|
||||
|
||||
func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *extensions.DaemonSet) ([]algorithm.PredicateFailureReason, *schedulercache.NodeInfo, error) {
|
||||
func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *apps.DaemonSet) ([]algorithm.PredicateFailureReason, *schedulercache.NodeInfo, error) {
|
||||
// DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
|
||||
// Add infinite toleration for taint notReady:NoExecute here
|
||||
// to survive taint-based eviction enforced by NodeController
|
||||
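A small sketch of what "add infinite toleration for taint notReady:NoExecute" means in practice: append a NoExecute toleration with no TolerationSeconds, so the daemon pod is never evicted for that taint. The taint key below is an assumption about the key in use; only the Toleration shape is the point.

package example

import v1 "k8s.io/api/core/v1"

func tolerateNotReadyForever(pod *v1.Pod) {
	pod.Spec.Tolerations = append(pod.Spec.Tolerations, v1.Toleration{
		Key:      "node.kubernetes.io/not-ready", // assumed taint key
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoExecute,
		// TolerationSeconds left nil: tolerate the taint forever.
	})
}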
@ -1236,7 +1249,7 @@ func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *ext
|
||||
// * shouldContinueRunning:
|
||||
// Returns true when a daemonset should continue running on a node if a daemonset pod is already
|
||||
// running on that node.
|
||||
func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *extensions.DaemonSet) (wantToRun, shouldSchedule, shouldContinueRunning bool, err error) {
|
||||
func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps.DaemonSet) (wantToRun, shouldSchedule, shouldContinueRunning bool, err error) {
|
||||
newPod := NewPod(ds, node.Name)
|
||||
|
||||
// Because these bools require an && of all their required conditions, we start
|
||||
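A hedged sketch of how the three booleans returned by nodeShouldRunDaemonPod are typically combined by the manage loop: shouldSchedule drives creations on nodes that lack a daemon pod, shouldContinueRunning drives deletions on nodes that must not keep one. The helper and its names are illustrative only.

package example

func classifyNode(hasDaemonPod, shouldSchedule, shouldContinueRunning bool) (createPod, deletePods bool) {
	switch {
	case shouldSchedule && !hasDaemonPod:
		createPod = true // node should run a daemon pod but is not running one
	case !shouldContinueRunning && hasDaemonPod:
		deletePods = true // node is running a daemon pod it should no longer have
	}
	return createPod, deletePods
}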
@ -1320,7 +1333,8 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten
|
||||
return
|
||||
}
|
||||
|
||||
func NewPod(ds *extensions.DaemonSet, nodeName string) *v1.Pod {
|
||||
// NewPod creates a new pod
|
||||
func NewPod(ds *apps.DaemonSet, nodeName string) *v1.Pod {
|
||||
newPod := &v1.Pod{Spec: ds.Spec.Template.Spec, ObjectMeta: ds.Spec.Template.ObjectMeta}
|
||||
newPod.Namespace = ds.Namespace
|
||||
newPod.Spec.NodeName = nodeName
|
||||
@ -1358,7 +1372,7 @@ func Predicates(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo) (bool, []algorit
|
||||
}
|
||||
|
||||
// byCreationTimestamp sorts a list by creation timestamp, using their names as a tie breaker.
|
||||
type byCreationTimestamp []*extensions.DaemonSet
|
||||
type byCreationTimestamp []*apps.DaemonSet
|
||||
|
||||
func (o byCreationTimestamp) Len() int { return len(o) }
|
||||
func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
|
||||
|
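The hunk above shows Len and Swap for byCreationTimestamp; a sort.Interface also needs Less. A sketch of the missing method and its use, matching the comment about breaking ties by name, assuming the pointer-argument Equal/Before of the vendored apimachinery metav1.Time.

package example

import (
	"sort"

	apps "k8s.io/api/apps/v1"
)

type byCreationTimestamp []*apps.DaemonSet

func (o byCreationTimestamp) Len() int      { return len(o) }
func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

// Less orders by creation timestamp, oldest first, with the name as a tie breaker.
func (o byCreationTimestamp) Less(i, j int) bool {
	if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
		return o[i].Name < o[j].Name
	}
	return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
}

func sortDaemonSets(sets []*apps.DaemonSet) {
	sort.Sort(byCreationTimestamp(sets))
}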
325
vendor/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller_test.go
generated
vendored
@ -24,8 +24,8 @@ import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@ -40,14 +40,13 @@ import (
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
"k8s.io/kubernetes/pkg/securitycontext"
|
||||
labelsutil "k8s.io/kubernetes/pkg/util/labels"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -83,28 +82,27 @@ var (
|
||||
}}
|
||||
)
|
||||
|
||||
func getKey(ds *extensions.DaemonSet, t *testing.T) string {
|
||||
if key, err := controller.KeyFunc(ds); err != nil {
|
||||
func getKey(ds *apps.DaemonSet, t *testing.T) string {
|
||||
key, err := controller.KeyFunc(ds)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error getting key for ds %v: %v", ds.Name, err)
|
||||
return ""
|
||||
} else {
|
||||
return key
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
func newDaemonSet(name string) *extensions.DaemonSet {
|
||||
func newDaemonSet(name string) *apps.DaemonSet {
|
||||
two := int32(2)
|
||||
return &extensions.DaemonSet{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: testapi.Extensions.GroupVersion().String()},
|
||||
return &apps.DaemonSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: uuid.NewUUID(),
|
||||
Name: name,
|
||||
Namespace: metav1.NamespaceDefault,
|
||||
},
|
||||
Spec: extensions.DaemonSetSpec{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
RevisionHistoryLimit: &two,
|
||||
UpdateStrategy: extensions.DaemonSetUpdateStrategy{
|
||||
Type: extensions.OnDeleteDaemonSetStrategyType,
|
||||
UpdateStrategy: apps.DaemonSetUpdateStrategy{
|
||||
Type: apps.OnDeleteDaemonSetStrategyType,
|
||||
},
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
@ -127,22 +125,22 @@ func newDaemonSet(name string) *extensions.DaemonSet {
|
||||
}
|
||||
}
|
||||
|
||||
func newRollbackStrategy() *extensions.DaemonSetUpdateStrategy {
|
||||
func newRollbackStrategy() *apps.DaemonSetUpdateStrategy {
|
||||
one := intstr.FromInt(1)
|
||||
return &extensions.DaemonSetUpdateStrategy{
|
||||
Type: extensions.RollingUpdateDaemonSetStrategyType,
|
||||
RollingUpdate: &extensions.RollingUpdateDaemonSet{MaxUnavailable: &one},
|
||||
return &apps.DaemonSetUpdateStrategy{
|
||||
Type: apps.RollingUpdateDaemonSetStrategyType,
|
||||
RollingUpdate: &apps.RollingUpdateDaemonSet{MaxUnavailable: &one},
|
||||
}
|
||||
}
|
||||
|
||||
func newOnDeleteStrategy() *extensions.DaemonSetUpdateStrategy {
|
||||
return &extensions.DaemonSetUpdateStrategy{
|
||||
Type: extensions.OnDeleteDaemonSetStrategyType,
|
||||
func newOnDeleteStrategy() *apps.DaemonSetUpdateStrategy {
|
||||
return &apps.DaemonSetUpdateStrategy{
|
||||
Type: apps.OnDeleteDaemonSetStrategyType,
|
||||
}
|
||||
}
|
||||
|
||||
func updateStrategies() []*extensions.DaemonSetUpdateStrategy {
|
||||
return []*extensions.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollbackStrategy()}
|
||||
func updateStrategies() []*apps.DaemonSetUpdateStrategy {
|
||||
return []*apps.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollbackStrategy()}
|
||||
}
|
||||
|
||||
func newNode(name string, label map[string]string) *v1.Node {
|
||||
@ -170,14 +168,14 @@ func addNodes(nodeStore cache.Store, startIndex, numNodes int, label map[string]
|
||||
}
|
||||
}
|
||||
|
||||
func newPod(podName string, nodeName string, label map[string]string, ds *extensions.DaemonSet) *v1.Pod {
|
||||
func newPod(podName string, nodeName string, label map[string]string, ds *apps.DaemonSet) *v1.Pod {
|
||||
// Add hash unique label to the pod
|
||||
newLabels := label
|
||||
var podSpec v1.PodSpec
|
||||
// Copy pod spec from DaemonSet template, or use a default one if DaemonSet is nil
|
||||
if ds != nil {
|
||||
hash := fmt.Sprint(controller.ComputeHash(&ds.Spec.Template, ds.Status.CollisionCount))
|
||||
newLabels = labelsutil.CloneAndAddLabel(label, extensions.DefaultDaemonSetUniqueLabelKey, hash)
|
||||
newLabels = labelsutil.CloneAndAddLabel(label, apps.DefaultDaemonSetUniqueLabelKey, hash)
|
||||
podSpec = ds.Spec.Template.Spec
|
||||
} else {
|
||||
podSpec = v1.PodSpec{
|
||||
@ -212,14 +210,14 @@ func newPod(podName string, nodeName string, label map[string]string, ds *extens
|
||||
return pod
|
||||
}
|
||||
|
||||
func addPods(podStore cache.Store, nodeName string, label map[string]string, ds *extensions.DaemonSet, number int) {
|
||||
func addPods(podStore cache.Store, nodeName string, label map[string]string, ds *apps.DaemonSet, number int) {
|
||||
for i := 0; i < number; i++ {
|
||||
pod := newPod(fmt.Sprintf("%s-", nodeName), nodeName, label, ds)
|
||||
podStore.Add(pod)
|
||||
}
|
||||
}
|
||||
|
||||
func addFailedPods(podStore cache.Store, nodeName string, label map[string]string, ds *extensions.DaemonSet, number int) {
|
||||
func addFailedPods(podStore cache.Store, nodeName string, label map[string]string, ds *apps.DaemonSet, number int) {
|
||||
for i := 0; i < number; i++ {
|
||||
pod := newPod(fmt.Sprintf("%s-", nodeName), nodeName, label, ds)
|
||||
pod.Status = v1.PodStatus{Phase: v1.PodFailed}
|
||||
@ -299,8 +297,8 @@ func newTestController(initialObjects ...runtime.Object) (*daemonSetsController,
|
||||
informerFactory := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
|
||||
|
||||
dsc, err := NewDaemonSetsController(
|
||||
informerFactory.Extensions().V1beta1().DaemonSets(),
|
||||
informerFactory.Apps().V1beta1().ControllerRevisions(),
|
||||
informerFactory.Apps().V1().DaemonSets(),
|
||||
informerFactory.Apps().V1().ControllerRevisions(),
|
||||
informerFactory.Core().V1().Pods(),
|
||||
informerFactory.Core().V1().Nodes(),
|
||||
clientset,
|
||||
@ -322,8 +320,8 @@ func newTestController(initialObjects ...runtime.Object) (*daemonSetsController,
|
||||
|
||||
return &daemonSetsController{
|
||||
dsc,
|
||||
informerFactory.Extensions().V1beta1().DaemonSets().Informer().GetStore(),
|
||||
informerFactory.Apps().V1beta1().ControllerRevisions().Informer().GetStore(),
|
||||
informerFactory.Apps().V1().DaemonSets().Informer().GetStore(),
|
||||
informerFactory.Apps().V1().ControllerRevisions().Informer().GetStore(),
|
||||
informerFactory.Core().V1().Pods().Informer().GetStore(),
|
||||
informerFactory.Core().V1().Nodes().Informer().GetStore(),
|
||||
fakeRecorder,
|
||||
@ -346,7 +344,7 @@ func validateSyncDaemonSets(t *testing.T, manager *daemonSetsController, fakePod
|
||||
}
|
||||
// Make sure the ControllerRefs are correct.
|
||||
for _, controllerRef := range fakePodControl.ControllerRefs {
|
||||
if got, want := controllerRef.APIVersion, "extensions/v1beta1"; got != want {
|
||||
if got, want := controllerRef.APIVersion, "apps/v1"; got != want {
|
||||
t.Errorf("controllerRef.APIVersion = %q, want %q", got, want)
|
||||
}
|
||||
if got, want := controllerRef.Kind, "DaemonSet"; got != want {
|
||||
@ -358,7 +356,7 @@ func validateSyncDaemonSets(t *testing.T, manager *daemonSetsController, fakePod
|
||||
}
|
||||
}
|
||||
|
||||
func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *extensions.DaemonSet, podControl *fakePodControl, expectedCreates, expectedDeletes int, expectedEvents int) {
|
||||
func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *apps.DaemonSet, podControl *fakePodControl, expectedCreates, expectedDeletes int, expectedEvents int) {
|
||||
key, err := controller.KeyFunc(ds)
|
||||
if err != nil {
|
||||
t.Errorf("Could not get key for daemon.")
|
||||
@ -368,7 +366,7 @@ func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *
|
||||
}
|
||||
|
||||
// clearExpectations copies the FakePodControl to PodStore and clears the create and delete expectations.
|
||||
func clearExpectations(t *testing.T, manager *daemonSetsController, ds *extensions.DaemonSet, fakePodControl *fakePodControl) {
|
||||
func clearExpectations(t *testing.T, manager *daemonSetsController, ds *apps.DaemonSet, fakePodControl *fakePodControl) {
|
||||
fakePodControl.Clear()
|
||||
|
||||
key, err := controller.KeyFunc(ds)
|
||||
@ -459,13 +457,13 @@ func TestSimpleDaemonSetUpdatesStatusAfterLaunchingPods(t *testing.T) {
|
||||
t.Fatalf("error creating DaemonSets controller: %v", err)
|
||||
}
|
||||
|
||||
var updated *extensions.DaemonSet
|
||||
var updated *apps.DaemonSet
|
||||
clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
if action.GetSubresource() != "status" {
|
||||
return false, nil, nil
|
||||
}
|
||||
if u, ok := action.(core.UpdateAction); ok {
|
||||
updated = u.GetObject().(*extensions.DaemonSet)
|
||||
updated = u.GetObject().(*apps.DaemonSet)
|
||||
}
|
||||
return false, nil, nil
|
||||
})
|
||||
@ -542,6 +540,12 @@ func resourcePodSpec(nodeName, memory, cpu string) v1.PodSpec {
|
||||
}
|
||||
}
|
||||
|
||||
func resourceContainerSpec(memory, cpu string) v1.ResourceRequirements {
|
||||
return v1.ResourceRequirements{
|
||||
Requests: allocatableResources(memory, cpu),
|
||||
}
|
||||
}
|
||||
|
||||
func resourcePodSpecWithoutNodeName(memory, cpu string) v1.PodSpec {
|
||||
return v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
@ -579,9 +583,9 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
|
||||
})
|
||||
manager.dsStore.Add(ds)
|
||||
switch strategy.Type {
|
||||
case extensions.OnDeleteDaemonSetStrategyType:
|
||||
case apps.OnDeleteDaemonSetStrategyType:
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 2)
|
||||
case extensions.RollingUpdateDaemonSetStrategyType:
|
||||
case apps.RollingUpdateDaemonSetStrategyType:
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 3)
|
||||
default:
|
||||
t.Fatalf("unexpected UpdateStrategy %+v", strategy)
|
||||
@ -609,9 +613,9 @@ func TestInsufficientCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T)
|
||||
})
|
||||
manager.dsStore.Add(ds)
|
||||
switch strategy.Type {
|
||||
case extensions.OnDeleteDaemonSetStrategyType:
|
||||
case apps.OnDeleteDaemonSetStrategyType:
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 2)
|
||||
case extensions.RollingUpdateDaemonSetStrategyType:
|
||||
case apps.RollingUpdateDaemonSetStrategyType:
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 3)
|
||||
default:
|
||||
t.Fatalf("unexpected UpdateStrategy %+v", strategy)
|
||||
@ -1117,13 +1121,13 @@ func TestNumberReadyStatus(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("error creating DaemonSets controller: %v", err)
|
||||
}
|
||||
var updated *extensions.DaemonSet
|
||||
var updated *apps.DaemonSet
|
||||
clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
if action.GetSubresource() != "status" {
|
||||
return false, nil, nil
|
||||
}
|
||||
if u, ok := action.(core.UpdateAction); ok {
|
||||
updated = u.GetObject().(*extensions.DaemonSet)
|
||||
updated = u.GetObject().(*apps.DaemonSet)
|
||||
}
|
||||
return false, nil, nil
|
||||
})
|
||||
@ -1160,13 +1164,13 @@ func TestObservedGeneration(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("error creating DaemonSets controller: %v", err)
|
||||
}
|
||||
var updated *extensions.DaemonSet
|
||||
var updated *apps.DaemonSet
|
||||
clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
if action.GetSubresource() != "status" {
|
||||
return false, nil, nil
|
||||
}
|
||||
if u, ok := action.(core.UpdateAction); ok {
|
||||
updated = u.GetObject().(*extensions.DaemonSet)
|
||||
updated = u.GetObject().(*apps.DaemonSet)
|
||||
}
|
||||
return false, nil, nil
|
||||
})
|
||||
@ -1379,7 +1383,7 @@ func setNodeTaint(node *v1.Node, taints []v1.Taint) {
|
||||
node.Spec.Taints = taints
|
||||
}
|
||||
|
||||
func setDaemonSetToleration(ds *extensions.DaemonSet, tolerations []v1.Toleration) {
|
||||
func setDaemonSetToleration(ds *apps.DaemonSet, tolerations []v1.Toleration) {
|
||||
ds.Spec.Template.Spec.Tolerations = tolerations
|
||||
}
|
||||
|
||||
@ -1476,9 +1480,9 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
|
||||
utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=False")
|
||||
manager.dsStore.Add(ds)
|
||||
switch strategy.Type {
|
||||
case extensions.OnDeleteDaemonSetStrategyType:
|
||||
case apps.OnDeleteDaemonSetStrategyType:
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 2)
|
||||
case extensions.RollingUpdateDaemonSetStrategyType:
|
||||
case apps.RollingUpdateDaemonSetStrategyType:
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 3)
|
||||
default:
|
||||
t.Fatalf("unexpected UpdateStrategy %+v", strategy)
|
||||
@ -1487,9 +1491,9 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
|
||||
// Enabling critical pod annotation feature gate should create critical pod
|
||||
utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=True")
|
||||
switch strategy.Type {
|
||||
case extensions.OnDeleteDaemonSetStrategyType:
|
||||
case apps.OnDeleteDaemonSetStrategyType:
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 2)
|
||||
case extensions.RollingUpdateDaemonSetStrategyType:
|
||||
case apps.RollingUpdateDaemonSetStrategyType:
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 3)
|
||||
default:
|
||||
t.Fatalf("unexpected UpdateStrategy %+v", strategy)
|
||||
@ -1528,7 +1532,7 @@ func TestPortConflictNodeDaemonDoesNotLaunchCriticalPod(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func setDaemonSetCritical(ds *extensions.DaemonSet) {
|
||||
func setDaemonSetCritical(ds *apps.DaemonSet) {
|
||||
ds.Namespace = api.NamespaceSystem
|
||||
if ds.Spec.Template.ObjectMeta.Annotations == nil {
|
||||
ds.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
|
||||
@ -1538,15 +1542,17 @@ func setDaemonSetCritical(ds *extensions.DaemonSet) {
|
||||
|
||||
func TestNodeShouldRunDaemonPod(t *testing.T) {
|
||||
cases := []struct {
|
||||
predicateName string
|
||||
podsOnNode []*v1.Pod
|
||||
nodeCondition []v1.NodeCondition
|
||||
ds *extensions.DaemonSet
|
||||
ds *apps.DaemonSet
|
||||
wantToRun, shouldSchedule, shouldContinueRunning bool
|
||||
err error
|
||||
}{
|
||||
{
|
||||
ds: &extensions.DaemonSet{
|
||||
Spec: extensions.DaemonSetSpec{
|
||||
predicateName: "ShouldRunDaemonPod",
|
||||
ds: &apps.DaemonSet{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -1561,8 +1567,9 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
|
||||
shouldContinueRunning: true,
|
||||
},
|
||||
{
|
||||
ds: &extensions.DaemonSet{
|
||||
Spec: extensions.DaemonSetSpec{
|
||||
predicateName: "InsufficientResourceError",
|
||||
ds: &apps.DaemonSet{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -1577,8 +1584,9 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
|
||||
shouldContinueRunning: true,
|
||||
},
|
||||
{
|
||||
ds: &extensions.DaemonSet{
|
||||
Spec: extensions.DaemonSetSpec{
|
||||
predicateName: "ErrPodNotMatchHostName",
|
||||
ds: &apps.DaemonSet{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -1593,6 +1601,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
|
||||
shouldContinueRunning: false,
|
||||
},
|
||||
{
|
||||
predicateName: "ErrPodNotFitsHostPorts",
|
||||
podsOnNode: []*v1.Pod{
|
||||
{
|
||||
Spec: v1.PodSpec{
|
||||
@ -1604,8 +1613,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
ds: &extensions.DaemonSet{
|
||||
Spec: extensions.DaemonSetSpec{
|
||||
ds: &apps.DaemonSet{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -1625,11 +1634,177 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
|
||||
shouldSchedule: false,
|
||||
shouldContinueRunning: false,
|
||||
},
|
||||
{
|
||||
predicateName: "InsufficientResourceError",
|
||||
podsOnNode: []*v1.Pod{
|
||||
{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Ports: []v1.ContainerPort{{
|
||||
HostPort: 666,
|
||||
}},
|
||||
Resources: resourceContainerSpec("50M", "0.5"),
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
ds: &apps.DaemonSet{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: simpleDaemonSetLabel,
|
||||
},
|
||||
Spec: resourcePodSpec("", "100M", "0.5"),
|
||||
},
|
||||
},
|
||||
},
|
||||
wantToRun: true,
|
||||
shouldSchedule: false,
|
||||
shouldContinueRunning: true,
|
||||
},
|
||||
{
|
||||
predicateName: "ShouldRunDaemonPod",
|
||||
podsOnNode: []*v1.Pod{
|
||||
{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Ports: []v1.ContainerPort{{
|
||||
HostPort: 666,
|
||||
}},
|
||||
Resources: resourceContainerSpec("50M", "0.5"),
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
ds: &apps.DaemonSet{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: simpleDaemonSetLabel,
|
||||
},
|
||||
Spec: resourcePodSpec("", "50M", "0.5"),
|
||||
},
|
||||
},
|
||||
},
|
||||
wantToRun: true,
|
||||
shouldSchedule: true,
|
||||
shouldContinueRunning: true,
|
||||
},
|
||||
{
|
||||
predicateName: "ErrNodeSelectorNotMatch",
|
||||
ds: &apps.DaemonSet{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: simpleDaemonSetLabel,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
NodeSelector: simpleDaemonSetLabel2,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantToRun: false,
|
||||
shouldSchedule: false,
|
||||
shouldContinueRunning: false,
|
||||
},
|
||||
{
|
||||
predicateName: "ShouldRunDaemonPod",
|
||||
ds: &apps.DaemonSet{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: simpleDaemonSetLabel,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
NodeSelector: simpleDaemonSetLabel,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantToRun: true,
|
||||
shouldSchedule: true,
|
||||
shouldContinueRunning: true,
|
||||
},
|
||||
{
|
||||
predicateName: "ErrPodAffinityNotMatch",
|
||||
ds: &apps.DaemonSet{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: simpleDaemonSetLabel,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "type",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"test"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantToRun: false,
|
||||
shouldSchedule: false,
|
||||
shouldContinueRunning: false,
|
||||
},
|
||||
{
|
||||
predicateName: "ShouldRunDaemonPod",
|
||||
ds: &apps.DaemonSet{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: simpleDaemonSetLabel,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "type",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"production"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
wantToRun: true,
|
||||
shouldSchedule: true,
|
||||
shouldContinueRunning: true,
|
||||
},
|
||||
}
|
||||
|
||||
for i, c := range cases {
|
||||
for _, strategy := range updateStrategies() {
|
||||
node := newNode("test-node", nil)
|
||||
node := newNode("test-node", simpleDaemonSetLabel)
|
||||
node.Status.Conditions = append(node.Status.Conditions, c.nodeCondition...)
|
||||
node.Status.Allocatable = allocatableResources("100M", "1")
|
||||
manager, _, _, err := newTestController()
|
||||
@ -1645,16 +1820,16 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
|
||||
wantToRun, shouldSchedule, shouldContinueRunning, err := manager.nodeShouldRunDaemonPod(node, c.ds)
|
||||
|
||||
if wantToRun != c.wantToRun {
|
||||
t.Errorf("[%v] expected wantToRun: %v, got: %v", i, c.wantToRun, wantToRun)
|
||||
t.Errorf("[%v] strategy: %v, predicateName: %v expected wantToRun: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.wantToRun, wantToRun)
|
||||
}
|
||||
if shouldSchedule != c.shouldSchedule {
|
||||
t.Errorf("[%v] expected shouldSchedule: %v, got: %v", i, c.shouldSchedule, shouldSchedule)
|
||||
t.Errorf("[%v] strategy: %v, predicateName: %v expected shouldSchedule: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.shouldSchedule, shouldSchedule)
|
||||
}
|
||||
if shouldContinueRunning != c.shouldContinueRunning {
|
||||
t.Errorf("[%v] expected shouldContinueRunning: %v, got: %v", i, c.shouldContinueRunning, shouldContinueRunning)
|
||||
t.Errorf("[%v] strategy: %v, predicateName: %v expected shouldContinueRunning: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.shouldContinueRunning, shouldContinueRunning)
|
||||
}
|
||||
if err != c.err {
|
||||
t.Errorf("[%v] expected err: %v, got: %v", i, c.err, err)
|
||||
t.Errorf("[%v] strategy: %v, predicateName: %v expected err: %v, got: %v", i, c.predicateName, c.ds.Spec.UpdateStrategy.Type, c.err, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1668,14 +1843,14 @@ func TestUpdateNode(t *testing.T) {
|
||||
test string
|
||||
newNode *v1.Node
|
||||
oldNode *v1.Node
|
||||
ds *extensions.DaemonSet
|
||||
ds *apps.DaemonSet
|
||||
shouldEnqueue bool
|
||||
}{
|
||||
{
|
||||
test: "Nothing changed, should not enqueue",
|
||||
oldNode: newNode("node1", nil),
|
||||
newNode: newNode("node1", nil),
|
||||
ds: func() *extensions.DaemonSet {
|
||||
ds: func() *apps.DaemonSet {
|
||||
ds := newDaemonSet("ds")
|
||||
ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
|
||||
return ds
|
||||
@ -1686,7 +1861,7 @@ func TestUpdateNode(t *testing.T) {
|
||||
test: "Node labels changed",
|
||||
oldNode: newNode("node1", nil),
|
||||
newNode: newNode("node1", simpleNodeLabel),
|
||||
ds: func() *extensions.DaemonSet {
|
||||
ds: func() *apps.DaemonSet {
|
||||
ds := newDaemonSet("ds")
|
||||
ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
|
||||
return ds
|
||||
@ -1716,7 +1891,7 @@ func TestUpdateNode(t *testing.T) {
|
||||
manager.dsStore.Add(c.ds)
|
||||
syncAndValidateDaemonSets(t, manager, c.ds, podControl, 0, 0, 0)
|
||||
|
||||
manager.enqueueDaemonSet = func(ds *extensions.DaemonSet) {
|
||||
manager.enqueueDaemonSet = func(ds *apps.DaemonSet) {
|
||||
if ds.Name == "ds" {
|
||||
enqueued = true
|
||||
}
|
||||
@ -1740,7 +1915,7 @@ func TestDeleteNoDaemonPod(t *testing.T) {
|
||||
node *v1.Node
|
||||
existPods []*v1.Pod
|
||||
deletedPod *v1.Pod
|
||||
ds *extensions.DaemonSet
|
||||
ds *apps.DaemonSet
|
||||
shouldEnqueue bool
|
||||
}{
|
||||
{
|
||||
@ -1775,7 +1950,7 @@ func TestDeleteNoDaemonPod(t *testing.T) {
|
||||
Spec: podSpec,
|
||||
}
|
||||
}(),
|
||||
ds: func() *extensions.DaemonSet {
|
||||
ds: func() *apps.DaemonSet {
|
||||
ds := newDaemonSet("ds")
|
||||
ds.Spec.Template.Spec = resourcePodSpec("", "50M", "50m")
|
||||
return ds
|
||||
@ -1820,7 +1995,7 @@ func TestDeleteNoDaemonPod(t *testing.T) {
|
||||
Spec: podSpec,
|
||||
}
|
||||
}(),
|
||||
ds: func() *extensions.DaemonSet {
|
||||
ds: func() *apps.DaemonSet {
|
||||
ds := newDaemonSet("ds")
|
||||
ds.Spec.Template.Spec = resourcePodSpec("", "50M", "50m")
|
||||
return ds
|
||||
@ -1862,7 +2037,7 @@ func TestDeleteNoDaemonPod(t *testing.T) {
|
||||
Spec: podSpec,
|
||||
}
|
||||
}(),
|
||||
ds: func() *extensions.DaemonSet {
|
||||
ds: func() *apps.DaemonSet {
|
||||
ds := newDaemonSet("ds")
|
||||
ds.Spec.Template.Spec = resourcePodSpec("", "50M", "50m")
|
||||
return ds
|
||||
@ -1884,15 +2059,15 @@ func TestDeleteNoDaemonPod(t *testing.T) {
|
||||
manager.podStore.Add(pod)
|
||||
}
|
||||
switch strategy.Type {
|
||||
case extensions.OnDeleteDaemonSetStrategyType:
|
||||
case apps.OnDeleteDaemonSetStrategyType:
|
||||
syncAndValidateDaemonSets(t, manager, c.ds, podControl, 0, 0, 2)
|
||||
case extensions.RollingUpdateDaemonSetStrategyType:
|
||||
case apps.RollingUpdateDaemonSetStrategyType:
|
||||
syncAndValidateDaemonSets(t, manager, c.ds, podControl, 0, 0, 3)
|
||||
default:
|
||||
t.Fatalf("unexpected UpdateStrategy %+v", strategy)
|
||||
}
|
||||
|
||||
manager.enqueueDaemonSetRateLimited = func(ds *extensions.DaemonSet) {
|
||||
manager.enqueueDaemonSetRateLimited = func(ds *apps.DaemonSet) {
|
||||
if ds.Name == "ds" {
|
||||
enqueued = true
|
||||
}
|
||||
|
53
vendor/k8s.io/kubernetes/pkg/controller/daemon/update.go
generated
vendored
@ -23,9 +23,8 @@ import (
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
apps "k8s.io/api/apps/v1beta1"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
@ -41,7 +40,7 @@ import (
|
||||
|
||||
// rollingUpdate deletes old daemon set pods making sure that no more than
|
||||
// ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable pods are unavailable
|
||||
func (dsc *DaemonSetsController) rollingUpdate(ds *extensions.DaemonSet, hash string) error {
|
||||
func (dsc *DaemonSetsController) rollingUpdate(ds *apps.DaemonSet, hash string) error {
|
||||
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
|
||||
@ -82,7 +81,7 @@ func (dsc *DaemonSetsController) rollingUpdate(ds *extensions.DaemonSet, hash st
|
||||
// constructHistory finds all histories controlled by the given DaemonSet, and
|
||||
// update current history revision number, or create current history if need to.
|
||||
// It also deduplicates current history, and adds missing unique labels to existing histories.
|
||||
func (dsc *DaemonSetsController) constructHistory(ds *extensions.DaemonSet) (cur *apps.ControllerRevision, old []*apps.ControllerRevision, err error) {
|
||||
func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps.ControllerRevision, old []*apps.ControllerRevision, err error) {
|
||||
var histories []*apps.ControllerRevision
|
||||
var currentHistories []*apps.ControllerRevision
|
||||
histories, err = dsc.controlledHistories(ds)
|
||||
@ -92,10 +91,10 @@ func (dsc *DaemonSetsController) constructHistory(ds *extensions.DaemonSet) (cur
|
||||
for _, history := range histories {
|
||||
// Add the unique label if it's not already added to the history
|
||||
// We use history name instead of computing hash, so that we don't need to worry about hash collision
|
||||
if _, ok := history.Labels[extensions.DefaultDaemonSetUniqueLabelKey]; !ok {
|
||||
if _, ok := history.Labels[apps.DefaultDaemonSetUniqueLabelKey]; !ok {
|
||||
toUpdate := history.DeepCopy()
|
||||
toUpdate.Labels[extensions.DefaultDaemonSetUniqueLabelKey] = toUpdate.Name
|
||||
history, err = dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Update(toUpdate)
|
||||
toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = toUpdate.Name
|
||||
history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(toUpdate)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@ -130,7 +129,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *extensions.DaemonSet) (cur
|
||||
if cur.Revision < currRevision {
|
||||
toUpdate := cur.DeepCopy()
|
||||
toUpdate.Revision = currRevision
|
||||
_, err = dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Update(toUpdate)
_, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(toUpdate)
if err != nil {
return nil, nil, err
}
@ -139,7 +138,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *extensions.DaemonSet) (cur
return cur, old, err
}

func (dsc *DaemonSetsController) cleanupHistory(ds *extensions.DaemonSet, old []*apps.ControllerRevision) error {
func (dsc *DaemonSetsController) cleanupHistory(ds *apps.DaemonSet, old []*apps.ControllerRevision) error {
nodesToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
if err != nil {
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
@ -155,7 +154,7 @@ func (dsc *DaemonSetsController) cleanupHistory(ds *extensions.DaemonSet, old []
liveHashes := make(map[string]bool)
for _, pods := range nodesToDaemonPods {
for _, pod := range pods {
if hash := pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey]; len(hash) > 0 {
if hash := pod.Labels[apps.DefaultDaemonSetUniqueLabelKey]; len(hash) > 0 {
liveHashes[hash] = true
}
}
@ -164,7 +163,7 @@ func (dsc *DaemonSetsController) cleanupHistory(ds *extensions.DaemonSet, old []
// Find all live history with the above hashes
liveHistory := make(map[string]bool)
for _, history := range old {
if hash := history.Labels[extensions.DefaultDaemonSetUniqueLabelKey]; liveHashes[hash] {
if hash := history.Labels[apps.DefaultDaemonSetUniqueLabelKey]; liveHashes[hash] {
liveHistory[history.Name] = true
}
}
@ -199,7 +198,7 @@ func maxRevision(histories []*apps.ControllerRevision) int64 {
return max
}

func (dsc *DaemonSetsController) dedupCurHistories(ds *extensions.DaemonSet, curHistories []*apps.ControllerRevision) (*apps.ControllerRevision, error) {
func (dsc *DaemonSetsController) dedupCurHistories(ds *apps.DaemonSet, curHistories []*apps.ControllerRevision) (*apps.ControllerRevision, error) {
if len(curHistories) == 1 {
return curHistories[0], nil
}
@ -222,12 +221,12 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *extensions.DaemonSet, cur
return nil, err
}
for _, pod := range pods {
if pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey] != keepCur.Labels[extensions.DefaultDaemonSetUniqueLabelKey] {
if pod.Labels[apps.DefaultDaemonSetUniqueLabelKey] != keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey] {
toUpdate := pod.DeepCopy()
if toUpdate.Labels == nil {
toUpdate.Labels = make(map[string]string)
}
toUpdate.Labels[extensions.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
_, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(toUpdate)
if err != nil {
return nil, err
@ -247,7 +246,7 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *extensions.DaemonSet, cur
// This also reconciles ControllerRef by adopting/orphaning.
// Note that returned histories are pointers to objects in the cache.
// If you want to modify one, you need to deep-copy it first.
func (dsc *DaemonSetsController) controlledHistories(ds *extensions.DaemonSet) ([]*apps.ControllerRevision, error) {
func (dsc *DaemonSetsController) controlledHistories(ds *apps.DaemonSet) ([]*apps.ControllerRevision, error) {
selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
if err != nil {
return nil, err
@ -277,7 +276,7 @@ func (dsc *DaemonSetsController) controlledHistories(ds *extensions.DaemonSet) (
}

// Match check if the given DaemonSet's template matches the template stored in the given history.
func Match(ds *extensions.DaemonSet, history *apps.ControllerRevision) (bool, error) {
func Match(ds *apps.DaemonSet, history *apps.ControllerRevision) (bool, error) {
patch, err := getPatch(ds)
if err != nil {
return false, err
@ -289,7 +288,7 @@ func Match(ds *extensions.DaemonSet, history *apps.ControllerRevision) (bool, er
// previous version. If the returned error is nil the patch is valid. The current state that we save is just the
// PodSpecTemplate. We can modify this later to encompass more state (or less) and remain compatible with previously
// recorded patches.
func getPatch(ds *extensions.DaemonSet) ([]byte, error) {
func getPatch(ds *apps.DaemonSet) ([]byte, error) {
dsBytes, err := json.Marshal(ds)
if err != nil {
return nil, err
@ -312,7 +311,7 @@ func getPatch(ds *extensions.DaemonSet) ([]byte, error) {
return patch, err
}

func (dsc *DaemonSetsController) snapshot(ds *extensions.DaemonSet, revision int64) (*apps.ControllerRevision, error) {
func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*apps.ControllerRevision, error) {
patch, err := getPatch(ds)
if err != nil {
return nil, err
@ -323,7 +322,7 @@ func (dsc *DaemonSetsController) snapshot(ds *extensions.DaemonSet, revision int
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: ds.Namespace,
Labels: labelsutil.CloneAndAddLabel(ds.Spec.Template.Labels, extensions.DefaultDaemonSetUniqueLabelKey, hash),
Labels: labelsutil.CloneAndAddLabel(ds.Spec.Template.Labels, apps.DefaultDaemonSetUniqueLabelKey, hash),
Annotations: ds.Annotations,
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(ds, controllerKind)},
},
@ -331,10 +330,10 @@ func (dsc *DaemonSetsController) snapshot(ds *extensions.DaemonSet, revision int
Revision: revision,
}

history, err = dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Create(history)
history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Create(history)
if errors.IsAlreadyExists(err) {
// TODO: Is it okay to get from historyLister?
existedHistory, getErr := dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Get(name, metav1.GetOptions{})
existedHistory, getErr := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Get(name, metav1.GetOptions{})
if getErr != nil {
return nil, getErr
}
@ -367,13 +366,19 @@ func (dsc *DaemonSetsController) snapshot(ds *extensions.DaemonSet, revision int
return history, err
}

func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *extensions.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod, hash string) ([]*v1.Pod, []*v1.Pod) {
func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *apps.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod, hash string) ([]*v1.Pod, []*v1.Pod) {
var newPods []*v1.Pod
var oldPods []*v1.Pod

for _, pods := range nodeToDaemonPods {
for _, pod := range pods {
if util.IsPodUpdated(ds.Spec.TemplateGeneration, pod, hash) {
// If the returned error is not nil we have a parse error.
// The controller handles this via the hash.
generation, err := util.GetTemplateGeneration(ds)
if err != nil {
generation = nil
}
if util.IsPodUpdated(pod, hash, generation) {
newPods = append(newPods, pod)
} else {
oldPods = append(oldPods, pod)
@ -383,7 +388,7 @@ func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *extensions.DaemonSet, n
return newPods, oldPods
}

func (dsc *DaemonSetsController) getUnavailableNumbers(ds *extensions.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) (int, int, error) {
func (dsc *DaemonSetsController) getUnavailableNumbers(ds *apps.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) (int, int, error) {
glog.V(4).Infof("Getting unavailable numbers")
// TODO: get nodeList once in syncDaemonSet and pass it to other functions
nodeList, err := dsc.nodeLister.List(labels.Everything())

43
vendor/k8s.io/kubernetes/pkg/controller/daemon/update_test.go
generated
vendored
43
vendor/k8s.io/kubernetes/pkg/controller/daemon/update_test.go
generated
vendored
@ -19,8 +19,8 @@ package daemon
|
||||
import (
|
||||
"testing"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
)
|
||||
@ -38,10 +38,9 @@ func TestDaemonSetUpdatesPods(t *testing.T) {
|
||||
markPodsReady(podControl.podStore)
|
||||
|
||||
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
|
||||
ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
|
||||
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
|
||||
intStr := intstr.FromInt(maxUnavailable)
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
ds.Spec.TemplateGeneration++
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
manager.dsStore.Update(ds)
|
||||
|
||||
clearExpectations(t, manager, ds, podControl)
|
||||
@ -80,10 +79,9 @@ func TestDaemonSetUpdatesWhenNewPosIsNotReady(t *testing.T) {
|
||||
markPodsReady(podControl.podStore)
|
||||
|
||||
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
|
||||
ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
|
||||
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
|
||||
intStr := intstr.FromInt(maxUnavailable)
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
ds.Spec.TemplateGeneration++
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
manager.dsStore.Update(ds)
|
||||
|
||||
// new pods are not ready numUnavailable == maxUnavailable
|
||||
@ -109,10 +107,9 @@ func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) {
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
|
||||
|
||||
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
|
||||
ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
|
||||
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
|
||||
intStr := intstr.FromInt(maxUnavailable)
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
ds.Spec.TemplateGeneration++
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
manager.dsStore.Update(ds)
|
||||
|
||||
// all old pods are unavailable so should be removed
|
||||
@ -137,9 +134,9 @@ func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) {
|
||||
manager.dsStore.Add(ds)
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
|
||||
|
||||
ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
|
||||
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
|
||||
intStr := intstr.FromInt(maxUnavailable)
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
manager.dsStore.Update(ds)
|
||||
|
||||
// template is not changed no pod should be removed
|
||||
@ -152,7 +149,7 @@ func TestGetUnavailableNumbers(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
Manager *daemonSetsController
|
||||
ds *extensions.DaemonSet
|
||||
ds *apps.DaemonSet
|
||||
nodeToPods map[string][]*v1.Pod
|
||||
maxUnavailable int
|
||||
numUnavailable int
|
||||
@ -167,10 +164,10 @@ func TestGetUnavailableNumbers(t *testing.T) {
|
||||
}
|
||||
return manager
|
||||
}(),
|
||||
ds: func() *extensions.DaemonSet {
|
||||
ds: func() *apps.DaemonSet {
|
||||
ds := newDaemonSet("x")
|
||||
intStr := intstr.FromInt(0)
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
return ds
|
||||
}(),
|
||||
nodeToPods: make(map[string][]*v1.Pod),
|
||||
@ -187,10 +184,10 @@ func TestGetUnavailableNumbers(t *testing.T) {
|
||||
addNodes(manager.nodeStore, 0, 2, nil)
|
||||
return manager
|
||||
}(),
|
||||
ds: func() *extensions.DaemonSet {
|
||||
ds: func() *apps.DaemonSet {
|
||||
ds := newDaemonSet("x")
|
||||
intStr := intstr.FromInt(1)
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
return ds
|
||||
}(),
|
||||
nodeToPods: func() map[string][]*v1.Pod {
|
||||
@ -216,10 +213,10 @@ func TestGetUnavailableNumbers(t *testing.T) {
|
||||
addNodes(manager.nodeStore, 0, 2, nil)
|
||||
return manager
|
||||
}(),
|
||||
ds: func() *extensions.DaemonSet {
|
||||
ds: func() *apps.DaemonSet {
|
||||
ds := newDaemonSet("x")
|
||||
intStr := intstr.FromInt(0)
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
return ds
|
||||
}(),
|
||||
nodeToPods: func() map[string][]*v1.Pod {
|
||||
@ -242,10 +239,10 @@ func TestGetUnavailableNumbers(t *testing.T) {
|
||||
addNodes(manager.nodeStore, 0, 2, nil)
|
||||
return manager
|
||||
}(),
|
||||
ds: func() *extensions.DaemonSet {
|
||||
ds: func() *apps.DaemonSet {
|
||||
ds := newDaemonSet("x")
|
||||
intStr := intstr.FromString("50%")
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
return ds
|
||||
}(),
|
||||
nodeToPods: func() map[string][]*v1.Pod {
|
||||
@ -271,10 +268,10 @@ func TestGetUnavailableNumbers(t *testing.T) {
|
||||
addNodes(manager.nodeStore, 0, 2, nil)
|
||||
return manager
|
||||
}(),
|
||||
ds: func() *extensions.DaemonSet {
|
||||
ds: func() *apps.DaemonSet {
|
||||
ds := newDaemonSet("x")
|
||||
intStr := intstr.FromString("50%")
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
|
||||
return ds
|
||||
}(),
|
||||
nodeToPods: func() map[string][]*v1.Pod {
|
||||
|
7
vendor/k8s.io/kubernetes/pkg/controller/daemon/util/BUILD
generated
vendored
7
vendor/k8s.io/kubernetes/pkg/controller/daemon/util/BUILD
generated
vendored
@ -15,8 +15,8 @@ go_library(
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//pkg/util/labels:go_default_library",
"//plugin/pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@ -40,8 +40,7 @@ filegroup(
go_test(
name = "go_default_test",
srcs = ["daemonset_util_test.go"],
importpath = "k8s.io/kubernetes/pkg/controller/daemon/util",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/api/testapi:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",

44
vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util.go
generated
vendored
44
vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util.go
generated
vendored
@ -18,7 +18,9 @@ package util

import (
"fmt"
"strconv"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -27,14 +29,29 @@ import (
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
)

// GetTemplateGeneration gets the template generation associated with a v1.DaemonSet by extracting it from the
// deprecated annotation. If no annotation is found nil is returned. If the annotation is found and fails to parse
// nil is returned with an error. If the generation can be parsed from the annotation, a pointer to the parsed int64
// value is returned.
func GetTemplateGeneration(ds *apps.DaemonSet) (*int64, error) {
annotation, found := ds.Annotations[apps.DeprecatedTemplateGeneration]
if !found {
return nil, nil
}
generation, err := strconv.ParseInt(annotation, 10, 64)
if err != nil {
return nil, err
}
return &generation, nil
}

// CreatePodTemplate returns copy of provided template with additional
// label which contains templateGeneration (for backward compatibility),
// hash of provided template and sets default daemon tolerations.
func CreatePodTemplate(template v1.PodTemplateSpec, generation int64, hash string) v1.PodTemplateSpec {
func CreatePodTemplate(template v1.PodTemplateSpec, generation *int64, hash string) v1.PodTemplateSpec {
newTemplate := *template.DeepCopy()
// DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
// Add infinite toleration for taint notReady:NoExecute here
@ -81,12 +98,12 @@ func CreatePodTemplate(template v1.PodTemplateSpec, generation int64, hash strin
})
}

templateGenerationStr := fmt.Sprint(generation)
newTemplate.ObjectMeta.Labels = labelsutil.CloneAndAddLabel(
template.ObjectMeta.Labels,
extensions.DaemonSetTemplateGenerationKey,
templateGenerationStr,
)
if newTemplate.ObjectMeta.Labels == nil {
newTemplate.ObjectMeta.Labels = make(map[string]string)
}
if generation != nil {
newTemplate.ObjectMeta.Labels[extensions.DaemonSetTemplateGenerationKey] = fmt.Sprint(*generation)
}
// TODO: do we need to validate if the DaemonSet is RollingUpdate or not?
if len(hash) > 0 {
newTemplate.ObjectMeta.Labels[extensions.DefaultDaemonSetUniqueLabelKey] = hash
@ -94,15 +111,16 @@ func CreatePodTemplate(template v1.PodTemplateSpec, generation int64, hash strin
return newTemplate
}

// IsPodUpdate checks if pod contains label value that either matches templateGeneration or hash
func IsPodUpdated(dsTemplateGeneration int64, pod *v1.Pod, hash string) bool {
// IsPodUpdated checks if pod contains label value that either matches templateGeneration or hash
func IsPodUpdated(pod *v1.Pod, hash string, dsTemplateGeneration *int64) bool {
// Compare with hash to see if the pod is updated, need to maintain backward compatibility of templateGeneration
templateMatches := pod.Labels[extensions.DaemonSetTemplateGenerationKey] == fmt.Sprint(dsTemplateGeneration)
templateMatches := dsTemplateGeneration != nil &&
pod.Labels[extensions.DaemonSetTemplateGenerationKey] == fmt.Sprint(dsTemplateGeneration)
hashMatches := len(hash) > 0 && pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey] == hash
return hashMatches || templateMatches
}

// SplitByAvailablePods splits provided daemon set pods by availabilty
// SplitByAvailablePods splits provided daemon set pods by availability
func SplitByAvailablePods(minReadySeconds int32, pods []*v1.Pod) ([]*v1.Pod, []*v1.Pod) {
unavailablePods := []*v1.Pod{}
availablePods := []*v1.Pod{}

26
vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util_test.go
generated
vendored
26
vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util_test.go
generated
vendored
@ -47,13 +47,14 @@ func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
|
||||
}
|
||||
|
||||
func TestIsPodUpdated(t *testing.T) {
|
||||
templateGeneration := int64(12345)
|
||||
templateGeneration := int64Ptr(12345)
|
||||
badGeneration := int64Ptr(12345)
|
||||
hash := "55555"
|
||||
labels := map[string]string{extensions.DaemonSetTemplateGenerationKey: fmt.Sprint(templateGeneration), extensions.DefaultDaemonSetUniqueLabelKey: hash}
|
||||
labelsNoHash := map[string]string{extensions.DaemonSetTemplateGenerationKey: fmt.Sprint(templateGeneration)}
|
||||
tests := []struct {
|
||||
test string
|
||||
templateGeneration int64
|
||||
templateGeneration *int64
|
||||
pod *v1.Pod
|
||||
hash string
|
||||
isUpdated bool
|
||||
@ -95,14 +96,14 @@ func TestIsPodUpdated(t *testing.T) {
|
||||
},
|
||||
{
|
||||
"templateGeneration doesn't match, hash does",
|
||||
templateGeneration + 1,
|
||||
badGeneration,
|
||||
newPod("pod1", "node1", labels),
|
||||
hash,
|
||||
true,
|
||||
},
|
||||
{
|
||||
"templateGeneration and hash don't match",
|
||||
templateGeneration + 1,
|
||||
badGeneration,
|
||||
newPod("pod1", "node1", labels),
|
||||
hash + "123",
|
||||
false,
|
||||
@ -130,7 +131,7 @@ func TestIsPodUpdated(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
updated := IsPodUpdated(test.templateGeneration, test.pod, test.hash)
|
||||
updated := IsPodUpdated(test.pod, test.hash, test.templateGeneration)
|
||||
if updated != test.isUpdated {
|
||||
t.Errorf("%s: IsPodUpdated returned wrong value. Expected %t, got %t", test.test, test.isUpdated, updated)
|
||||
}
|
||||
@ -139,19 +140,19 @@ func TestIsPodUpdated(t *testing.T) {
|
||||
|
||||
func TestCreatePodTemplate(t *testing.T) {
|
||||
tests := []struct {
|
||||
templateGeneration int64
|
||||
templateGeneration *int64
|
||||
hash string
|
||||
expectUniqueLabel bool
|
||||
}{
|
||||
{int64(1), "", false},
|
||||
{int64(2), "3242341807", true},
|
||||
{int64Ptr(1), "", false},
|
||||
{int64Ptr(2), "3242341807", true},
|
||||
}
|
||||
for _, test := range tests {
|
||||
podTemplateSpec := v1.PodTemplateSpec{}
|
||||
newPodTemplate := CreatePodTemplate(podTemplateSpec, test.templateGeneration, test.hash)
|
||||
val, exists := newPodTemplate.ObjectMeta.Labels[extensions.DaemonSetTemplateGenerationKey]
|
||||
if !exists || val != fmt.Sprint(test.templateGeneration) {
|
||||
t.Errorf("Expected podTemplateSpec to have generation label value: %d, got: %s", test.templateGeneration, val)
|
||||
if !exists || val != fmt.Sprint(*test.templateGeneration) {
|
||||
t.Errorf("Expected podTemplateSpec to have generation label value: %d, got: %s", *test.templateGeneration, val)
|
||||
}
|
||||
val, exists = newPodTemplate.ObjectMeta.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
|
||||
if test.expectUniqueLabel && (!exists || val != test.hash) {
|
||||
@ -162,3 +163,8 @@ func TestCreatePodTemplate(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func int64Ptr(i int) *int64 {
|
||||
li := int64(i)
|
||||
return &li
|
||||
}
|
||||
|
3
vendor/k8s.io/kubernetes/pkg/controller/deployment/BUILD
generated
vendored
3
vendor/k8s.io/kubernetes/pkg/controller/deployment/BUILD
generated
vendored
@ -56,8 +56,7 @@ go_test(
"rolling_test.go",
"sync_test.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/deployment",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/apps/install:go_default_library",

70
vendor/k8s.io/kubernetes/pkg/controller/deployment/progress_test.go
generated
vendored
70
vendor/k8s.io/kubernetes/pkg/controller/deployment/progress_test.go
generated
vendored
@ -163,13 +163,15 @@ func TestRequeueStuckDeployment(t *testing.T) {
|
||||
dc.enqueueDeployment = dc.enqueue
|
||||
|
||||
for _, test := range tests {
|
||||
if test.nowFn != nil {
|
||||
nowFn = test.nowFn
|
||||
}
|
||||
got := dc.requeueStuckDeployment(test.d, test.status)
|
||||
if got != test.expected {
|
||||
t.Errorf("%s: got duration: %v, expected duration: %v", test.name, got, test.expected)
|
||||
}
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
if test.nowFn != nil {
|
||||
nowFn = test.nowFn
|
||||
}
|
||||
got := dc.requeueStuckDeployment(test.d, test.status)
|
||||
if got != test.expected {
|
||||
t.Errorf("%s: got duration: %v, expected duration: %v", test.name, got, test.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@ -310,32 +312,34 @@ func TestSyncRolloutStatus(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
fake := fake.Clientset{}
|
||||
dc := &DeploymentController{
|
||||
client: &fake,
|
||||
}
|
||||
|
||||
if test.newRS != nil {
|
||||
test.allRSs = append(test.allRSs, test.newRS)
|
||||
}
|
||||
|
||||
err := dc.syncRolloutStatus(test.allRSs, test.newRS, test.d)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
newCond := util.GetDeploymentCondition(test.d.Status, test.conditionType)
|
||||
switch {
|
||||
case newCond == nil:
|
||||
if test.d.Spec.ProgressDeadlineSeconds != nil {
|
||||
t.Errorf("%s: expected deployment condition: %s", test.name, test.conditionType)
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
fake := fake.Clientset{}
|
||||
dc := &DeploymentController{
|
||||
client: &fake,
|
||||
}
|
||||
case newCond.Status != test.conditionStatus || newCond.Reason != test.conditionReason:
|
||||
t.Errorf("%s: DeploymentProgressing has status %s with reason %s. Expected %s with %s.", test.name, newCond.Status, newCond.Reason, test.conditionStatus, test.conditionReason)
|
||||
case !test.lastUpdate.IsZero() && test.lastUpdate != testTime:
|
||||
t.Errorf("%s: LastUpdateTime was changed to %s but expected %s;", test.name, test.lastUpdate, testTime)
|
||||
case !test.lastTransition.IsZero() && test.lastTransition != testTime:
|
||||
t.Errorf("%s: LastTransitionTime was changed to %s but expected %s;", test.name, test.lastTransition, testTime)
|
||||
}
|
||||
|
||||
if test.newRS != nil {
|
||||
test.allRSs = append(test.allRSs, test.newRS)
|
||||
}
|
||||
|
||||
err := dc.syncRolloutStatus(test.allRSs, test.newRS, test.d)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
newCond := util.GetDeploymentCondition(test.d.Status, test.conditionType)
|
||||
switch {
|
||||
case newCond == nil:
|
||||
if test.d.Spec.ProgressDeadlineSeconds != nil {
|
||||
t.Errorf("%s: expected deployment condition: %s", test.name, test.conditionType)
|
||||
}
|
||||
case newCond.Status != test.conditionStatus || newCond.Reason != test.conditionReason:
|
||||
t.Errorf("%s: DeploymentProgressing has status %s with reason %s. Expected %s with %s.", test.name, newCond.Status, newCond.Reason, test.conditionStatus, test.conditionReason)
|
||||
case !test.lastUpdate.IsZero() && test.lastUpdate != testTime:
|
||||
t.Errorf("%s: LastUpdateTime was changed to %s but expected %s;", test.name, test.lastUpdate, testTime)
|
||||
case !test.lastTransition.IsZero() && test.lastTransition != testTime:
|
||||
t.Errorf("%s: LastTransitionTime was changed to %s but expected %s;", test.name, test.lastTransition, testTime)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
15
vendor/k8s.io/kubernetes/pkg/controller/deployment/recreate.go
generated
vendored
15
vendor/k8s.io/kubernetes/pkg/controller/deployment/recreate.go
generated
vendored
@ -104,8 +104,19 @@ func oldPodsRunning(newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSe
if newRS != nil && newRS.UID == rsUID {
continue
}
if len(podList.Items) > 0 {
return true
for _, pod := range podList.Items {
switch pod.Status.Phase {
case v1.PodFailed, v1.PodSucceeded:
// Don't count pods in terminal state.
continue
case v1.PodUnknown:
// This happens in situation like when the node is temporarily disconnected from the cluster.
// If we can't be sure that the pod is not running, we have to count it.
return true
default:
// Pod is not in terminal phase.
return true
}
}
}
return false

136
vendor/k8s.io/kubernetes/pkg/controller/deployment/recreate_test.go
generated
vendored
136
vendor/k8s.io/kubernetes/pkg/controller/deployment/recreate_test.go
generated
vendored
@ -90,34 +90,138 @@ func TestOldPodsRunning(t *testing.T) {
|
||||
oldRSs []*extensions.ReplicaSet
|
||||
podMap map[types.UID]*v1.PodList
|
||||
|
||||
expected bool
|
||||
hasOldPodsRunning bool
|
||||
}{
|
||||
{
|
||||
name: "no old RSs",
|
||||
expected: false,
|
||||
name: "no old RSs",
|
||||
hasOldPodsRunning: false,
|
||||
},
|
||||
{
|
||||
name: "old RSs with running pods",
|
||||
oldRSs: []*extensions.ReplicaSet{rsWithUID("some-uid"), rsWithUID("other-uid")},
|
||||
podMap: podMapWithUIDs([]string{"some-uid", "other-uid"}),
|
||||
expected: true,
|
||||
name: "old RSs with running pods",
|
||||
oldRSs: []*extensions.ReplicaSet{rsWithUID("some-uid"), rsWithUID("other-uid")},
|
||||
podMap: podMapWithUIDs([]string{"some-uid", "other-uid"}),
|
||||
hasOldPodsRunning: true,
|
||||
},
|
||||
{
|
||||
name: "old RSs without pods but with non-zero status replicas",
|
||||
oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-blabla", 0, 1, nil)},
|
||||
expected: true,
|
||||
name: "old RSs without pods but with non-zero status replicas",
|
||||
oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 1, nil)},
|
||||
hasOldPodsRunning: true,
|
||||
},
|
||||
{
|
||||
name: "old RSs without pods or non-zero status replicas",
|
||||
oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-blabla", 0, 0, nil)},
|
||||
expected: false,
|
||||
name: "old RSs without pods or non-zero status replicas",
|
||||
oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
|
||||
hasOldPodsRunning: false,
|
||||
},
|
||||
{
|
||||
name: "old RSs with zero status replicas but pods in terminal state are present",
|
||||
oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
|
||||
podMap: map[types.UID]*v1.PodList{
|
||||
"uid-1": {
|
||||
Items: []v1.Pod{
|
||||
{
|
||||
Status: v1.PodStatus{
|
||||
Phase: v1.PodFailed,
|
||||
},
|
||||
},
|
||||
{
|
||||
Status: v1.PodStatus{
|
||||
Phase: v1.PodSucceeded,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
hasOldPodsRunning: false,
|
||||
},
|
||||
{
|
||||
name: "old RSs with zero status replicas but pod in unknown phase present",
|
||||
oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
|
||||
podMap: map[types.UID]*v1.PodList{
|
||||
"uid-1": {
|
||||
Items: []v1.Pod{
|
||||
{
|
||||
Status: v1.PodStatus{
|
||||
Phase: v1.PodUnknown,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
hasOldPodsRunning: true,
|
||||
},
|
||||
{
|
||||
name: "old RSs with zero status replicas with pending pod present",
|
||||
oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
|
||||
podMap: map[types.UID]*v1.PodList{
|
||||
"uid-1": {
|
||||
Items: []v1.Pod{
|
||||
{
|
||||
Status: v1.PodStatus{
|
||||
Phase: v1.PodPending,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
hasOldPodsRunning: true,
|
||||
},
|
||||
{
|
||||
name: "old RSs with zero status replicas with running pod present",
|
||||
oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
|
||||
podMap: map[types.UID]*v1.PodList{
|
||||
"uid-1": {
|
||||
Items: []v1.Pod{
|
||||
{
|
||||
Status: v1.PodStatus{
|
||||
Phase: v1.PodRunning,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
hasOldPodsRunning: true,
|
||||
},
|
||||
{
|
||||
name: "old RSs with zero status replicas but pods in terminal state and pending are present",
|
||||
oldRSs: []*extensions.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
|
||||
podMap: map[types.UID]*v1.PodList{
|
||||
"uid-1": {
|
||||
Items: []v1.Pod{
|
||||
{
|
||||
Status: v1.PodStatus{
|
||||
Phase: v1.PodFailed,
|
||||
},
|
||||
},
|
||||
{
|
||||
Status: v1.PodStatus{
|
||||
Phase: v1.PodSucceeded,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"uid-2": {
|
||||
Items: []v1.Pod{},
|
||||
},
|
||||
"uid-3": {
|
||||
Items: []v1.Pod{
|
||||
{
|
||||
Status: v1.PodStatus{
|
||||
Phase: v1.PodPending,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
hasOldPodsRunning: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
if expected, got := test.expected, oldPodsRunning(test.newRS, test.oldRSs, test.podMap); expected != got {
|
||||
t.Errorf("%s: expected %t, got %t", test.name, expected, got)
|
||||
}
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
if expected, got := test.hasOldPodsRunning, oldPodsRunning(test.newRS, test.oldRSs, test.podMap); expected != got {
|
||||
t.Errorf("%s: expected %t, got %t", test.name, expected, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
45
vendor/k8s.io/kubernetes/pkg/controller/deployment/sync.go
generated
vendored
45
vendor/k8s.io/kubernetes/pkg/controller/deployment/sync.go
generated
vendored
@ -318,35 +318,40 @@ func (dc *DeploymentController) getNewReplicaSet(d *extensions.Deployment, rsLis
|
||||
createdRS, err := dc.client.ExtensionsV1beta1().ReplicaSets(d.Namespace).Create(&newRS)
|
||||
switch {
|
||||
// We may end up hitting this due to a slow cache or a fast resync of the Deployment.
|
||||
// Fetch a copy of the ReplicaSet. If its PodTemplateSpec is semantically deep equal
|
||||
// with the PodTemplateSpec of the Deployment, then that is our new ReplicaSet. Otherwise,
|
||||
// this is a hash collision and we need to increment the collisionCount field in the
|
||||
// status of the Deployment and try the creation again.
|
||||
case errors.IsAlreadyExists(err):
|
||||
alreadyExists = true
|
||||
|
||||
// Fetch a copy of the ReplicaSet.
|
||||
rs, rsErr := dc.rsLister.ReplicaSets(newRS.Namespace).Get(newRS.Name)
|
||||
if rsErr != nil {
|
||||
return nil, rsErr
|
||||
}
|
||||
|
||||
// If the Deployment owns the ReplicaSet and the ReplicaSet's PodTemplateSpec is semantically
|
||||
// deep equal to the PodTemplateSpec of the Deployment, it's the Deployment's new ReplicaSet.
|
||||
// Otherwise, this is a hash collision and we need to increment the collisionCount field in
|
||||
// the status of the Deployment and requeue to try the creation in the next sync.
|
||||
controllerRef := metav1.GetControllerOf(rs)
|
||||
if controllerRef != nil && controllerRef.UID == d.UID && deploymentutil.EqualIgnoreHash(&d.Spec.Template, &rs.Spec.Template) {
|
||||
createdRS = rs
|
||||
err = nil
|
||||
break
|
||||
}
|
||||
|
||||
// Matching ReplicaSet is not equal - increment the collisionCount in the DeploymentStatus
|
||||
// and requeue the Deployment.
|
||||
if !deploymentutil.EqualIgnoreHash(&d.Spec.Template, &rs.Spec.Template) {
|
||||
if d.Status.CollisionCount == nil {
|
||||
d.Status.CollisionCount = new(int32)
|
||||
}
|
||||
preCollisionCount := *d.Status.CollisionCount
|
||||
*d.Status.CollisionCount++
|
||||
// Update the collisionCount for the Deployment and let it requeue by returning the original
|
||||
// error.
|
||||
_, dErr := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
|
||||
if dErr == nil {
|
||||
glog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
|
||||
}
|
||||
return nil, err
|
||||
if d.Status.CollisionCount == nil {
|
||||
d.Status.CollisionCount = new(int32)
|
||||
}
|
||||
// Pass through the matching ReplicaSet as the new ReplicaSet.
|
||||
createdRS = rs
|
||||
err = nil
|
||||
preCollisionCount := *d.Status.CollisionCount
|
||||
*d.Status.CollisionCount++
|
||||
// Update the collisionCount for the Deployment and let it requeue by returning the original
|
||||
// error.
|
||||
_, dErr := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).UpdateStatus(d)
|
||||
if dErr == nil {
|
||||
glog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
|
||||
}
|
||||
return nil, err
|
||||
case err != nil:
|
||||
msg := fmt.Sprintf("Failed to create new replica set %q: %v", newRS.Name, err)
|
||||
if d.Spec.ProgressDeadlineSeconds != nil {
|
||||
|
122
vendor/k8s.io/kubernetes/pkg/controller/deployment/sync_test.go
generated
vendored
122
vendor/k8s.io/kubernetes/pkg/controller/deployment/sync_test.go
generated
vendored
@ -267,72 +267,74 @@ func TestScale(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
_ = olderTimestamp
|
||||
t.Log(test.name)
|
||||
fake := fake.Clientset{}
|
||||
dc := &DeploymentController{
|
||||
client: &fake,
|
||||
eventRecorder: &record.FakeRecorder{},
|
||||
}
|
||||
|
||||
if test.newRS != nil {
|
||||
desiredReplicas := *(test.oldDeployment.Spec.Replicas)
|
||||
if desired, ok := test.desiredReplicasAnnotations[test.newRS.Name]; ok {
|
||||
desiredReplicas = desired
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
_ = olderTimestamp
|
||||
t.Log(test.name)
|
||||
fake := fake.Clientset{}
|
||||
dc := &DeploymentController{
|
||||
client: &fake,
|
||||
eventRecorder: &record.FakeRecorder{},
|
||||
}
|
||||
deploymentutil.SetReplicasAnnotations(test.newRS, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment))
|
||||
}
|
||||
for i := range test.oldRSs {
|
||||
rs := test.oldRSs[i]
|
||||
if rs == nil {
|
||||
continue
|
||||
}
|
||||
desiredReplicas := *(test.oldDeployment.Spec.Replicas)
|
||||
if desired, ok := test.desiredReplicasAnnotations[rs.Name]; ok {
|
||||
desiredReplicas = desired
|
||||
}
|
||||
deploymentutil.SetReplicasAnnotations(rs, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment))
|
||||
}
|
||||
|
||||
if err := dc.scale(test.deployment, test.newRS, test.oldRSs); err != nil {
|
||||
t.Errorf("%s: unexpected error: %v", test.name, err)
|
||||
continue
|
||||
}
|
||||
if test.newRS != nil {
|
||||
desiredReplicas := *(test.oldDeployment.Spec.Replicas)
|
||||
if desired, ok := test.desiredReplicasAnnotations[test.newRS.Name]; ok {
|
||||
desiredReplicas = desired
|
||||
}
|
||||
deploymentutil.SetReplicasAnnotations(test.newRS, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment))
|
||||
}
|
||||
for i := range test.oldRSs {
|
||||
rs := test.oldRSs[i]
|
||||
if rs == nil {
|
||||
continue
|
||||
}
|
||||
desiredReplicas := *(test.oldDeployment.Spec.Replicas)
|
||||
if desired, ok := test.desiredReplicasAnnotations[rs.Name]; ok {
|
||||
desiredReplicas = desired
|
||||
}
|
||||
deploymentutil.SetReplicasAnnotations(rs, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment))
|
||||
}
|
||||
|
||||
// Construct the nameToSize map that will hold all the sizes we got our of tests
|
||||
// Skip updating the map if the replica set wasn't updated since there will be
|
||||
// no update action for it.
|
||||
nameToSize := make(map[string]int32)
|
||||
if test.newRS != nil {
|
||||
nameToSize[test.newRS.Name] = *(test.newRS.Spec.Replicas)
|
||||
}
|
||||
for i := range test.oldRSs {
|
||||
rs := test.oldRSs[i]
|
||||
nameToSize[rs.Name] = *(rs.Spec.Replicas)
|
||||
}
|
||||
// Get all the UPDATE actions and update nameToSize with all the updated sizes.
|
||||
for _, action := range fake.Actions() {
|
||||
rs := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet)
|
||||
if !test.wasntUpdated[rs.Name] {
|
||||
if err := dc.scale(test.deployment, test.newRS, test.oldRSs); err != nil {
|
||||
t.Errorf("%s: unexpected error: %v", test.name, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Construct the nameToSize map that will hold all the sizes we got our of tests
|
||||
// Skip updating the map if the replica set wasn't updated since there will be
|
||||
// no update action for it.
|
||||
nameToSize := make(map[string]int32)
|
||||
if test.newRS != nil {
|
||||
nameToSize[test.newRS.Name] = *(test.newRS.Spec.Replicas)
|
||||
}
|
||||
for i := range test.oldRSs {
|
||||
rs := test.oldRSs[i]
|
||||
nameToSize[rs.Name] = *(rs.Spec.Replicas)
|
||||
}
|
||||
}
|
||||
|
||||
if test.expectedNew != nil && test.newRS != nil && *(test.expectedNew.Spec.Replicas) != nameToSize[test.newRS.Name] {
|
||||
t.Errorf("%s: expected new replicas: %d, got: %d", test.name, *(test.expectedNew.Spec.Replicas), nameToSize[test.newRS.Name])
|
||||
continue
|
||||
}
|
||||
if len(test.expectedOld) != len(test.oldRSs) {
|
||||
t.Errorf("%s: expected %d old replica sets, got %d", test.name, len(test.expectedOld), len(test.oldRSs))
|
||||
continue
|
||||
}
|
||||
for n := range test.oldRSs {
|
||||
rs := test.oldRSs[n]
|
||||
expected := test.expectedOld[n]
|
||||
if *(expected.Spec.Replicas) != nameToSize[rs.Name] {
|
||||
t.Errorf("%s: expected old (%s) replicas: %d, got: %d", test.name, rs.Name, *(expected.Spec.Replicas), nameToSize[rs.Name])
|
||||
// Get all the UPDATE actions and update nameToSize with all the updated sizes.
|
||||
for _, action := range fake.Actions() {
|
||||
rs := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet)
|
||||
if !test.wasntUpdated[rs.Name] {
|
||||
nameToSize[rs.Name] = *(rs.Spec.Replicas)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if test.expectedNew != nil && test.newRS != nil && *(test.expectedNew.Spec.Replicas) != nameToSize[test.newRS.Name] {
|
||||
t.Errorf("%s: expected new replicas: %d, got: %d", test.name, *(test.expectedNew.Spec.Replicas), nameToSize[test.newRS.Name])
|
||||
return
|
||||
}
|
||||
if len(test.expectedOld) != len(test.oldRSs) {
|
||||
t.Errorf("%s: expected %d old replica sets, got %d", test.name, len(test.expectedOld), len(test.oldRSs))
|
||||
return
|
||||
}
|
||||
for n := range test.oldRSs {
|
||||
rs := test.oldRSs[n]
|
||||
expected := test.expectedOld[n]
|
||||
if *(expected.Spec.Replicas) != nameToSize[rs.Name] {
|
||||
t.Errorf("%s: expected old (%s) replicas: %d, got: %d", test.name, rs.Name, *(expected.Spec.Replicas), nameToSize[rs.Name])
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
3
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD
generated
vendored
3
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD
generated
vendored
@ -45,8 +45,7 @@ go_test(
|
||||
"deployment_util_test.go",
|
||||
"hash_test.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/deployment/util",
|
||||
library = ":go_default_library",
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/util/hash:go_default_library",
|
||||
|
12
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go
generated
vendored
12
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go
generated
vendored
@ -124,18 +124,6 @@ func GetDeploymentCondition(status extensions.DeploymentStatus, condType extensi
return nil
}

// TODO: remove the duplicate
// GetDeploymentConditionInternal returns the condition with the provided type.
func GetDeploymentConditionInternal(status internalextensions.DeploymentStatus, condType internalextensions.DeploymentConditionType) *internalextensions.DeploymentCondition {
for i := range status.Conditions {
c := status.Conditions[i]
if c.Type == condType {
return &c
}
}
return nil
}

// SetDeploymentCondition updates the deployment to include the provided condition. If the condition that
// we are about to add already exists and has the same status and reason then we are not going to update.
func SetDeploymentCondition(status *extensions.DeploymentStatus, condition extensions.DeploymentCondition) {

3
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/pod_util.go
generated
vendored
3
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/pod_util.go
generated
vendored
@ -30,8 +30,7 @@ import (
// see https://github.com/kubernetes/kubernetes/issues/21479
type updatePodFunc func(pod *v1.Pod) error

// UpdatePodWithRetries updates a pod with given applyUpdate function. Note that pod not found error is ignored.
// The returned bool value can be used to tell if the pod is actually updated.
// UpdatePodWithRetries updates a pod with given applyUpdate function.
func UpdatePodWithRetries(podClient v1core.PodInterface, podLister corelisters.PodLister, namespace, name string, applyUpdate updatePodFunc) (*v1.Pod, error) {
var pod *v1.Pod

3
vendor/k8s.io/kubernetes/pkg/controller/disruption/BUILD
generated
vendored
3
vendor/k8s.io/kubernetes/pkg/controller/disruption/BUILD
generated
vendored
@ -45,8 +45,7 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["disruption_test.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/disruption",
|
||||
library = ":go_default_library",
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/api/legacyscheme:go_default_library",
|
||||
"//pkg/apis/core/install:go_default_library",
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/controller/disruption/disruption.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/controller/disruption/disruption.go
generated
vendored
@ -617,7 +617,7 @@ func (dc *DisruptionController) getExpectedScale(pdb *policy.PodDisruptionBudget
func countHealthyPods(pods []*v1.Pod, disruptedPods map[string]metav1.Time, currentTime time.Time) (currentHealthy int32) {
Pod:
for _, pod := range pods {
// Pod is beeing deleted.
// Pod is being deleted.
if pod.DeletionTimestamp != nil {
continue
}

3
vendor/k8s.io/kubernetes/pkg/controller/endpoint/BUILD
generated
vendored
3
vendor/k8s.io/kubernetes/pkg/controller/endpoint/BUILD
generated
vendored
@ -40,8 +40,7 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["endpoints_controller_test.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/endpoint",
|
||||
library = ":go_default_library",
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/api/legacyscheme:go_default_library",
|
||||
"//pkg/api/testapi:go_default_library",
|
||||
|
8
vendor/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller.go
generated
vendored
8
vendor/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller.go
generated
vendored
@ -193,7 +193,7 @@ func (e *EndpointController) addPod(obj interface{}) {
pod := obj.(*v1.Pod)
services, err := e.getPodServiceMemberships(pod)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Unable to get pod %v/%v's service memberships: %v", pod.Namespace, pod.Name, err))
utilruntime.HandleError(fmt.Errorf("Unable to get pod %s/%s's service memberships: %v", pod.Namespace, pod.Name, err))
return
}
for key := range services {
@ -269,7 +269,7 @@ func (e *EndpointController) updatePod(old, cur interface{}) {

podChangedFlag := podChanged(oldPod, newPod)

// Check if the pod labels have changed, indicating a possibe
// Check if the pod labels have changed, indicating a possible
// change in the service membership
labelsChanged := false
if !reflect.DeepEqual(newPod.Labels, oldPod.Labels) ||
@ -328,7 +328,7 @@ func (e *EndpointController) deletePod(obj interface{}) {
utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a Pod: %#v", obj))
return
}
glog.V(4).Infof("Enqueuing services of deleted pod %s having final state unrecorded", pod.Name)
glog.V(4).Infof("Enqueuing services of deleted pod %s/%s having final state unrecorded", pod.Namespace, pod.Name)
e.addPod(pod)
}

@ -571,7 +571,7 @@ func addEndpointSubset(subsets []v1.EndpointSubset, pod *v1.Pod, epa v1.Endpoint
})
readyEps++
} else if shouldPodBeInEndpoints(pod) {
glog.V(5).Infof("Pod is out of service: %v/%v", pod.Namespace, pod.Name)
glog.V(5).Infof("Pod is out of service: %s/%s", pod.Namespace, pod.Name)
subsets = append(subsets, v1.EndpointSubset{
NotReadyAddresses: []v1.EndpointAddress{epa},
Ports: []v1.EndpointPort{epp},

3
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/BUILD
generated
vendored
3
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/BUILD
generated
vendored
@ -50,8 +50,7 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["garbagecollector_test.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/garbagecollector",
|
||||
library = ":go_default_library",
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/api/legacyscheme:go_default_library",
|
||||
"//pkg/apis/core/install:go_default_library",
|
||||
|
10
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/garbagecollector.go
generated
vendored
10
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/garbagecollector.go
generated
vendored
@ -197,7 +197,7 @@ func (gc *GarbageCollector) Sync(discoveryClient discovery.DiscoveryInterface, p
// discovered by restMapper during the call to Reset, since they are
// distinct discovery clients invalidated at different times. For example,
// newResources may contain resources not returned in the restMapper's
// discovery call if the resources appeared inbetween the calls. In that
// discovery call if the resources appeared in-between the calls. In that
// case, the restMapper will fail to map some of newResources until the next
// sync period.
if err := gc.resyncMonitors(newResources); err != nil {
@ -214,7 +214,7 @@ func (gc *GarbageCollector) Sync(discoveryClient discovery.DiscoveryInterface, p

// Finally, keep track of our new state. Do this after all preceding steps
// have succeeded to ensure we'll retry on subsequent syncs if an error
// occured.
// occurred.
oldResources = newResources
glog.V(2).Infof("synced garbage collector")
}, period, stopCh)
@ -587,9 +587,9 @@ func GetDeletableResources(discoveryClient discovery.ServerResourcesInterface) m
preferredResources, err := discoveryClient.ServerPreferredResources()
if err != nil {
if discovery.IsGroupDiscoveryFailedError(err) {
glog.Warning("failed to discover some groups: %v", err.(*discovery.ErrGroupDiscoveryFailed).Groups)
glog.Warningf("failed to discover some groups: %v", err.(*discovery.ErrGroupDiscoveryFailed).Groups)
} else {
glog.Warning("failed to discover preferred resources: %v", err)
glog.Warningf("failed to discover preferred resources: %v", err)
}
}
if preferredResources == nil {
@ -603,7 +603,7 @@ func GetDeletableResources(discoveryClient discovery.ServerResourcesInterface) m
for _, rl := range deletableResources {
gv, err := schema.ParseGroupVersion(rl.GroupVersion)
if err != nil {
glog.Warning("ignoring invalid discovered resource %q: %v", rl.GroupVersion, err)
glog.Warningf("ignoring invalid discovered resource %q: %v", rl.GroupVersion, err)
continue
}
for i := range rl.APIResources {

12
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/graph_builder.go
generated
vendored
12
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/graph_builder.go
generated
vendored
@ -83,13 +83,14 @@ type GraphBuilder struct {
// After that it is safe to start them here, before that it is not.
informersStarted <-chan struct{}

// stopCh drives shutdown. If it is nil, it indicates that Run() has not been
// called yet. If it is non-nil, then when closed it indicates everything
// should shut down.
//
// stopCh drives shutdown. When a receive from it unblocks, monitors will shut down.
// This channel is also protected by monitorLock.
stopCh <-chan struct{}

// running tracks whether Run() has been called.
// it is protected by monitorLock.
running bool

// metaOnlyClientPool uses a special codec, which removes fields except for
// apiVersion, kind, and metadata during decoding.
metaOnlyClientPool dynamic.ClientPool
@ -275,7 +276,7 @@ func (gb *GraphBuilder) startMonitors() {
gb.monitorLock.Lock()
defer gb.monitorLock.Unlock()

if gb.stopCh == nil {
if !gb.running {
return
}

@ -325,6 +326,7 @@ func (gb *GraphBuilder) Run(stopCh <-chan struct{}) {
// Set up the stop channel.
gb.monitorLock.Lock()
gb.stopCh = stopCh
gb.running = true
gb.monitorLock.Unlock()

// Start monitors and begin change processing until the stop channel is

3
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/BUILD
generated
vendored
3
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/BUILD
generated
vendored
@ -26,8 +26,7 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["metaonly_test.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly",
|
||||
library = ":go_default_library",
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/apis/core/install:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
|
@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.

package metaonly

@ -46,9 +46,8 @@ func (in *MetadataOnlyObject) DeepCopy() *MetadataOnlyObject {
func (in *MetadataOnlyObject) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -80,7 +79,6 @@ func (in *MetadataOnlyObjectList) DeepCopy() *MetadataOnlyObjectList {
func (in *MetadataOnlyObjectList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}

4
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/operations.go
generated
vendored
4
vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/operations.go
generated
vendored
@ -34,7 +34,7 @@ import (
// namespace> tuple to a unversioned.APIResource struct.
func (gc *GarbageCollector) apiResource(apiVersion, kind string) (*metav1.APIResource, error) {
fqKind := schema.FromAPIVersionAndKind(apiVersion, kind)
mapping, err := gc.restMapper.RESTMapping(fqKind.GroupKind(), apiVersion)
mapping, err := gc.restMapper.RESTMapping(fqKind.GroupKind(), fqKind.Version)
if err != nil {
return nil, newRESTMappingError(kind, apiVersion)
}
@ -128,7 +128,7 @@ func (gc *GarbageCollector) removeFinalizer(owner *node, targetFinalizer string)
newFinalizers = append(newFinalizers, f)
}
if !found {
glog.V(5).Infof("the orphan finalizer is already removed from object %s", owner.identity)
glog.V(5).Infof("the %s finalizer is already removed from object %s", targetFinalizer, owner.identity)
return nil
}
// remove the owner from dependent's OwnerReferences

11
vendor/k8s.io/kubernetes/pkg/controller/history/BUILD
generated
vendored
11
vendor/k8s.io/kubernetes/pkg/controller/history/BUILD
generated
vendored
@ -9,12 +9,11 @@ load(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["controller_history_test.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/history",
|
||||
library = ":go_default_library",
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/api/testapi:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/api/apps/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
@ -35,7 +34,7 @@ go_library(
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/history",
|
||||
deps = [
|
||||
"//pkg/util/hash:go_default_library",
|
||||
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/api/apps/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
@ -44,9 +43,9 @@ go_library(
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/apps/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/apps/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/apps/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/apps/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/retry:go_default_library",
|
||||
],
|
||||
|
26
vendor/k8s.io/kubernetes/pkg/controller/history/controller_history.go
generated
vendored
26
vendor/k8s.io/kubernetes/pkg/controller/history/controller_history.go
generated
vendored
@ -23,10 +23,10 @@ import (
"sort"
"strconv"

apps "k8s.io/api/apps/v1beta1"
appsinformers "k8s.io/client-go/informers/apps/v1beta1"
apps "k8s.io/api/apps/v1"
appsinformers "k8s.io/client-go/informers/apps/v1"
clientset "k8s.io/client-go/kubernetes"
appslisters "k8s.io/client-go/listers/apps/v1beta1"
appslisters "k8s.io/client-go/listers/apps/v1"
hashutil "k8s.io/kubernetes/pkg/util/hash"

apiequality "k8s.io/apimachinery/pkg/api/equality"
@ -56,19 +56,19 @@ func ControllerRevisionName(prefix string, hash uint32) string {
}

// NewControllerRevision returns a ControllerRevision with a ControllerRef pointing to parent and indicating that
// parent is of parentKind. The ControllerRevision has labels matching selector, contains Data equal to data, and
// parent is of parentKind. The ControllerRevision has labels matching template labels, contains Data equal to data, and
// has a Revision equal to revision. The collisionCount is used when creating the name of the ControllerRevision
// so the name is likely unique. If the returned error is nil, the returned ControllerRevision is valid. If the
// returned error is not nil, the returned ControllerRevision is invalid for use.
func NewControllerRevision(parent metav1.Object,
parentKind schema.GroupVersionKind,
selector labels.Selector,
templateLabels map[string]string,
data runtime.RawExtension,
revision int64,
collisionCount *int32) (*apps.ControllerRevision, error) {
labelMap, err := labels.ConvertSelectorToLabelsMap(selector.String())
if err != nil {
return nil, err
labelMap := make(map[string]string)
for k, v := range templateLabels {
labelMap[k] = v
}
blockOwnerDeletion := true
isController := true
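The hunk above swaps the labels.Selector parameter of NewControllerRevision for a plain map of template labels. A minimal sketch of how a caller would now build a revision, assuming apps/v1 types as imported above; the names set, parentKind, nextRevision and the rawTemplate helper are placeholders standing in for whatever the calling controller (for example the StatefulSet controller or its tests) already has, and are not defined in this diff.

// Illustrative call of the new signature; all inputs are assumed from the caller.
cr, err := history.NewControllerRevision(
	set,                             // parent metav1.Object
	parentKind,                      // schema.GroupVersionKind of the parent
	set.Spec.Template.Labels,        // template labels replace the converted selector
	rawTemplate(&set.Spec.Template), // runtime.RawExtension with the serialized template
	nextRevision,                    // revision number
	set.Status.CollisionCount,       // *int32 used to salt the hashed name
)
if err != nil {
	return err
}
_ = cr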
@ -251,7 +251,7 @@ func (rh *realHistory) CreateControllerRevision(parent metav1.Object, revision *
hash := HashControllerRevision(revision, collisionCount)
// Update the revisions name and labels
clone.Name = ControllerRevisionName(parent.GetName(), hash)
created, err := rh.client.AppsV1beta1().ControllerRevisions(parent.GetNamespace()).Create(clone)
created, err := rh.client.AppsV1().ControllerRevisions(parent.GetNamespace()).Create(clone)
if errors.IsAlreadyExists(err) {
*collisionCount++
continue
@ -267,7 +267,7 @@ func (rh *realHistory) UpdateControllerRevision(revision *apps.ControllerRevisio
return nil
}
clone.Revision = newRevision
updated, updateErr := rh.client.AppsV1beta1().ControllerRevisions(clone.Namespace).Update(clone)
updated, updateErr := rh.client.AppsV1().ControllerRevisions(clone.Namespace).Update(clone)
if updateErr == nil {
return nil
}
@ -284,7 +284,7 @@ func (rh *realHistory) UpdateControllerRevision(revision *apps.ControllerRevisio
}

func (rh *realHistory) DeleteControllerRevision(revision *apps.ControllerRevision) error {
return rh.client.AppsV1beta1().ControllerRevisions(revision.Namespace).Delete(revision.Name, nil)
return rh.client.AppsV1().ControllerRevisions(revision.Namespace).Delete(revision.Name, nil)
}

func (rh *realHistory) AdoptControllerRevision(parent metav1.Object, parentKind schema.GroupVersionKind, revision *apps.ControllerRevision) (*apps.ControllerRevision, error) {
@ -293,7 +293,7 @@ func (rh *realHistory) AdoptControllerRevision(parent metav1.Object, parentKind
return nil, fmt.Errorf("attempt to adopt revision owned by %v", owner)
}
// Use strategic merge patch to add an owner reference indicating a controller ref
return rh.client.AppsV1beta1().ControllerRevisions(parent.GetNamespace()).Patch(revision.GetName(),
return rh.client.AppsV1().ControllerRevisions(parent.GetNamespace()).Patch(revision.GetName(),
types.StrategicMergePatchType, []byte(fmt.Sprintf(
`{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`,
parentKind.GroupVersion().String(), parentKind.Kind,
@ -302,7 +302,7 @@ func (rh *realHistory) AdoptControllerRevision(parent metav1.Object, parentKind

func (rh *realHistory) ReleaseControllerRevision(parent metav1.Object, revision *apps.ControllerRevision) (*apps.ControllerRevision, error) {
// Use strategic merge patch to add an owner reference indicating a controller ref
released, err := rh.client.AppsV1beta1().ControllerRevisions(revision.GetNamespace()).Patch(revision.GetName(),
released, err := rh.client.AppsV1().ControllerRevisions(revision.GetNamespace()).Patch(revision.GetName(),
types.StrategicMergePatchType,
[]byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, parent.GetUID(), revision.UID)))
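The CreateControllerRevision hunk above keeps its collision-avoidance scheme while moving to the apps/v1 client: on an AlreadyExists error it bumps collisionCount, which changes the hash and therefore the generated name on the next attempt. A condensed sketch of that loop, using only the helpers and client call visible in this file; clone, revision, parent and collisionCount are assumed from the surrounding function.

// Sketch of the create-with-collision-avoidance loop shown above.
for {
	hash := HashControllerRevision(revision, collisionCount)
	clone.Name = ControllerRevisionName(parent.GetName(), hash)
	created, err := rh.client.AppsV1().ControllerRevisions(parent.GetNamespace()).Create(clone)
	if errors.IsAlreadyExists(err) {
		*collisionCount++ // a different count yields a different hash, hence a new name
		continue
	}
	return created, err
}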
|
226
vendor/k8s.io/kubernetes/pkg/controller/history/controller_history_test.go
generated
vendored
226
vendor/k8s.io/kubernetes/pkg/controller/history/controller_history_test.go
generated
vendored
@ -23,7 +23,7 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
apps "k8s.io/api/apps/v1beta1"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
@ -56,7 +56,7 @@ func TestRealHistory_ListControllerRevisions(t *testing.T) {
|
||||
stop := make(chan struct{})
|
||||
defer close(stop)
|
||||
informerFactory.Start(stop)
|
||||
informer := informerFactory.Apps().V1beta1().ControllerRevisions()
|
||||
informer := informerFactory.Apps().V1().ControllerRevisions()
|
||||
informerFactory.WaitForCacheSync(stop)
|
||||
for i := range test.revisions {
|
||||
informer.Informer().GetIndexer().Add(test.revisions[i])
|
||||
@ -81,26 +81,22 @@ func TestRealHistory_ListControllerRevisions(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sel2, err := metav1.LabelSelectorAsSelector(ss2.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 1, nil)
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 1, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1.Namespace = ss1.Namespace
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 2, nil)
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 2, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev2.Namespace = ss1.Namespace
|
||||
ss2Rev1, err := NewControllerRevision(ss2, parentKind, sel2, rawTemplate(&ss2.Spec.Template), 1, nil)
|
||||
ss2Rev1, err := NewControllerRevision(ss2, parentKind, ss2.Spec.Template.Labels, rawTemplate(&ss2.Spec.Template), 1, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss2Rev1.Namespace = ss2.Namespace
|
||||
ss1Orphan, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 3, nil)
|
||||
ss1Orphan, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 3, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -157,7 +153,7 @@ func TestFakeHistory_ListControllerRevisions(t *testing.T) {
|
||||
stop := make(chan struct{})
|
||||
defer close(stop)
|
||||
informerFactory.Start(stop)
|
||||
informer := informerFactory.Apps().V1beta1().ControllerRevisions()
|
||||
informer := informerFactory.Apps().V1().ControllerRevisions()
|
||||
informerFactory.WaitForCacheSync(stop)
|
||||
for i := range test.revisions {
|
||||
informer.Informer().GetIndexer().Add(test.revisions[i])
|
||||
@ -182,26 +178,22 @@ func TestFakeHistory_ListControllerRevisions(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sel2, err := metav1.LabelSelectorAsSelector(ss2.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 1, nil)
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 1, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1.Namespace = ss1.Namespace
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 2, nil)
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 2, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev2.Namespace = ss1.Namespace
|
||||
ss2Rev1, err := NewControllerRevision(ss2, parentKind, sel2, rawTemplate(&ss2.Spec.Template), 1, nil)
|
||||
ss2Rev1, err := NewControllerRevision(ss2, parentKind, ss2.Spec.Template.Labels, rawTemplate(&ss2.Spec.Template), 1, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss2Rev1.Namespace = ss2.Namespace
|
||||
ss1Orphan, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 3, nil)
|
||||
ss1Orphan, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 3, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -260,7 +252,7 @@ func TestRealHistory_CreateControllerRevision(t *testing.T) {
|
||||
stop := make(chan struct{})
|
||||
defer close(stop)
|
||||
informerFactory.Start(stop)
|
||||
informer := informerFactory.Apps().V1beta1().ControllerRevisions()
|
||||
informer := informerFactory.Apps().V1().ControllerRevisions()
|
||||
informerFactory.WaitForCacheSync(stop)
|
||||
history := NewHistory(client, informer.Lister())
|
||||
|
||||
@ -304,25 +296,17 @@ func TestRealHistory_CreateControllerRevision(t *testing.T) {
|
||||
ss1.Status.CollisionCount = new(int32)
|
||||
ss2 := newStatefulSet(3, "ss2", types.UID("ss2"), map[string]string{"goo": "car"})
|
||||
ss2.Status.CollisionCount = new(int32)
|
||||
sel1, err := metav1.LabelSelectorAsSelector(ss1.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sel2, err := metav1.LabelSelectorAsSelector(ss2.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 1, ss1.Status.CollisionCount)
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 1, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1.Namespace = ss1.Namespace
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev2.Namespace = ss1.Namespace
|
||||
ss2Rev1, err := NewControllerRevision(ss2, parentKind, sel2, rawTemplate(&ss2.Spec.Template), 1, ss2.Status.CollisionCount)
|
||||
ss2Rev1, err := NewControllerRevision(ss2, parentKind, ss2.Spec.Template.Labels, rawTemplate(&ss2.Spec.Template), 1, ss2.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -391,7 +375,7 @@ func TestFakeHistory_CreateControllerRevision(t *testing.T) {
|
||||
stop := make(chan struct{})
|
||||
defer close(stop)
|
||||
informerFactory.Start(stop)
|
||||
informer := informerFactory.Apps().V1beta1().ControllerRevisions()
|
||||
informer := informerFactory.Apps().V1().ControllerRevisions()
|
||||
informerFactory.WaitForCacheSync(stop)
|
||||
history := NewFakeHistory(informer)
|
||||
|
||||
@ -435,25 +419,17 @@ func TestFakeHistory_CreateControllerRevision(t *testing.T) {
|
||||
ss1.Status.CollisionCount = new(int32)
|
||||
ss2 := newStatefulSet(3, "ss2", types.UID("ss2"), map[string]string{"goo": "car"})
|
||||
ss2.Status.CollisionCount = new(int32)
|
||||
sel1, err := metav1.LabelSelectorAsSelector(ss1.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sel2, err := metav1.LabelSelectorAsSelector(ss2.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 1, ss1.Status.CollisionCount)
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 1, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1.Namespace = ss1.Namespace
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev2.Namespace = ss1.Namespace
|
||||
ss2Rev1, err := NewControllerRevision(ss2, parentKind, sel2, rawTemplate(&ss2.Spec.Template), 1, ss2.Status.CollisionCount)
|
||||
ss2Rev1, err := NewControllerRevision(ss2, parentKind, ss2.Spec.Template.Labels, rawTemplate(&ss2.Spec.Template), 1, ss2.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -549,7 +525,7 @@ func TestRealHistory_UpdateControllerRevision(t *testing.T) {
|
||||
stop := make(chan struct{})
|
||||
defer close(stop)
|
||||
informerFactory.Start(stop)
|
||||
informer := informerFactory.Apps().V1beta1().ControllerRevisions()
|
||||
informer := informerFactory.Apps().V1().ControllerRevisions()
|
||||
informerFactory.WaitForCacheSync(stop)
|
||||
history := NewHistory(client, informer.Lister())
|
||||
var collisionCount int32
|
||||
@ -575,17 +551,12 @@ func TestRealHistory_UpdateControllerRevision(t *testing.T) {
|
||||
}
|
||||
ss1 := newStatefulSet(3, "ss1", types.UID("ss1"), map[string]string{"foo": "bar"})
|
||||
ss1.Status.CollisionCount = new(int32)
|
||||
sel1, err := metav1.LabelSelectorAsSelector(ss1.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 1, ss1.Status.CollisionCount)
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 1, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1.Namespace = ss1.Namespace
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -681,7 +652,7 @@ func TestFakeHistory_UpdateControllerRevision(t *testing.T) {
|
||||
stop := make(chan struct{})
|
||||
defer close(stop)
|
||||
informerFactory.Start(stop)
|
||||
informer := informerFactory.Apps().V1beta1().ControllerRevisions()
|
||||
informer := informerFactory.Apps().V1().ControllerRevisions()
|
||||
informerFactory.WaitForCacheSync(stop)
|
||||
history := NewFakeHistory(informer)
|
||||
var collisionCount int32
|
||||
@ -704,17 +675,13 @@ func TestFakeHistory_UpdateControllerRevision(t *testing.T) {
|
||||
}
|
||||
ss1 := newStatefulSet(3, "ss1", types.UID("ss1"), map[string]string{"foo": "bar"})
|
||||
ss1.Status.CollisionCount = new(int32)
|
||||
sel1, err := metav1.LabelSelectorAsSelector(ss1.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 1, ss1.Status.CollisionCount)
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 1, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1.Namespace = ss1.Namespace
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -773,7 +740,7 @@ func TestRealHistory_DeleteControllerRevision(t *testing.T) {
|
||||
stop := make(chan struct{})
|
||||
defer close(stop)
|
||||
informerFactory.Start(stop)
|
||||
informer := informerFactory.Apps().V1beta1().ControllerRevisions()
|
||||
informer := informerFactory.Apps().V1().ControllerRevisions()
|
||||
informerFactory.WaitForCacheSync(stop)
|
||||
history := NewHistory(client, informer.Lister())
|
||||
var collisionCount int32
|
||||
@ -795,30 +762,22 @@ func TestRealHistory_DeleteControllerRevision(t *testing.T) {
|
||||
ss1.Status.CollisionCount = new(int32)
|
||||
ss2 := newStatefulSet(3, "ss2", types.UID("ss2"), map[string]string{"goo": "car"})
|
||||
ss2.Status.CollisionCount = new(int32)
|
||||
sel1, err := metav1.LabelSelectorAsSelector(ss1.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sel2, err := metav1.LabelSelectorAsSelector(ss2.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 1, ss1.Status.CollisionCount)
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 1, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1.Namespace = ss1.Namespace
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev2.Namespace = ss1.Namespace
|
||||
ss2Rev1, err := NewControllerRevision(ss2, parentKind, sel2, rawTemplate(&ss2.Spec.Template), 1, ss2.Status.CollisionCount)
|
||||
ss2Rev1, err := NewControllerRevision(ss2, parentKind, ss2.Spec.Template.Labels, rawTemplate(&ss2.Spec.Template), 1, ss2.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss2Rev1.Namespace = ss2.Namespace
|
||||
ss2Rev2, err := NewControllerRevision(ss2, parentKind, sel2, rawTemplate(&ss2.Spec.Template), 2, ss2.Status.CollisionCount)
|
||||
ss2Rev2, err := NewControllerRevision(ss2, parentKind, ss2.Spec.Template.Labels, rawTemplate(&ss2.Spec.Template), 2, ss2.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -884,7 +843,7 @@ func TestFakeHistory_DeleteControllerRevision(t *testing.T) {
|
||||
stop := make(chan struct{})
|
||||
defer close(stop)
|
||||
informerFactory.Start(stop)
|
||||
informer := informerFactory.Apps().V1beta1().ControllerRevisions()
|
||||
informer := informerFactory.Apps().V1().ControllerRevisions()
|
||||
informerFactory.WaitForCacheSync(stop)
|
||||
history := NewFakeHistory(informer)
|
||||
var collisionCount int32
|
||||
@ -906,30 +865,22 @@ func TestFakeHistory_DeleteControllerRevision(t *testing.T) {
|
||||
ss1.Status.CollisionCount = new(int32)
|
||||
ss2 := newStatefulSet(3, "ss2", types.UID("ss2"), map[string]string{"goo": "car"})
|
||||
ss2.Status.CollisionCount = new(int32)
|
||||
sel1, err := metav1.LabelSelectorAsSelector(ss1.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sel2, err := metav1.LabelSelectorAsSelector(ss2.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 1, ss1.Status.CollisionCount)
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 1, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1.Namespace = ss1.Namespace
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev2.Namespace = ss1.Namespace
|
||||
ss2Rev1, err := NewControllerRevision(ss2, parentKind, sel2, rawTemplate(&ss2.Spec.Template), 1, ss2.Status.CollisionCount)
|
||||
ss2Rev1, err := NewControllerRevision(ss2, parentKind, ss2.Spec.Template.Labels, rawTemplate(&ss2.Spec.Template), 1, ss2.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss2Rev1.Namespace = ss2.Namespace
|
||||
ss2Rev2, err := NewControllerRevision(ss2, parentKind, sel2, rawTemplate(&ss2.Spec.Template), 2, ss2.Status.CollisionCount)
|
||||
ss2Rev2, err := NewControllerRevision(ss2, parentKind, ss2.Spec.Template.Labels, rawTemplate(&ss2.Spec.Template), 2, ss2.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -1029,7 +980,7 @@ func TestRealHistory_AdoptControllerRevision(t *testing.T) {
|
||||
stop := make(chan struct{})
|
||||
defer close(stop)
|
||||
informerFactory.Start(stop)
|
||||
informer := informerFactory.Apps().V1beta1().ControllerRevisions()
|
||||
informer := informerFactory.Apps().V1().ControllerRevisions()
|
||||
informerFactory.WaitForCacheSync(stop)
|
||||
|
||||
history := NewHistory(client, informer.Lister())
|
||||
@ -1056,26 +1007,18 @@ func TestRealHistory_AdoptControllerRevision(t *testing.T) {
|
||||
ss1.Status.CollisionCount = new(int32)
|
||||
ss2 := newStatefulSet(3, "ss2", types.UID("ss2"), map[string]string{"goo": "car"})
|
||||
ss2.Status.CollisionCount = new(int32)
|
||||
sel1, err := metav1.LabelSelectorAsSelector(ss1.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sel2, err := metav1.LabelSelectorAsSelector(ss2.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 1, ss1.Status.CollisionCount)
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 1, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1.Namespace = ss1.Namespace
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev2.Namespace = ss1.Namespace
|
||||
ss1Rev2.OwnerReferences = []metav1.OwnerReference{}
|
||||
ss2Rev1, err := NewControllerRevision(ss2, parentKind, sel2, rawTemplate(&ss2.Spec.Template), 1, ss2.Status.CollisionCount)
|
||||
ss2Rev1, err := NewControllerRevision(ss2, parentKind, ss2.Spec.Template.Labels, rawTemplate(&ss2.Spec.Template), 1, ss2.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -1143,7 +1086,7 @@ func TestFakeHistory_AdoptControllerRevision(t *testing.T) {
|
||||
stop := make(chan struct{})
|
||||
defer close(stop)
|
||||
informerFactory.Start(stop)
|
||||
informer := informerFactory.Apps().V1beta1().ControllerRevisions()
|
||||
informer := informerFactory.Apps().V1().ControllerRevisions()
|
||||
informerFactory.WaitForCacheSync(stop)
|
||||
|
||||
history := NewFakeHistory(informer)
|
||||
@ -1170,26 +1113,18 @@ func TestFakeHistory_AdoptControllerRevision(t *testing.T) {
|
||||
ss1.Status.CollisionCount = new(int32)
|
||||
ss2 := newStatefulSet(3, "ss2", types.UID("ss2"), map[string]string{"goo": "car"})
|
||||
ss2.Status.CollisionCount = new(int32)
|
||||
sel1, err := metav1.LabelSelectorAsSelector(ss1.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sel2, err := metav1.LabelSelectorAsSelector(ss2.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 1, ss1.Status.CollisionCount)
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 1, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1.Namespace = ss1.Namespace
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev2.Namespace = ss1.Namespace
|
||||
ss1Rev2.OwnerReferences = []metav1.OwnerReference{}
|
||||
ss2Rev1, err := NewControllerRevision(ss2, parentKind, sel2, rawTemplate(&ss2.Spec.Template), 1, ss2.Status.CollisionCount)
|
||||
ss2Rev1, err := NewControllerRevision(ss2, parentKind, ss2.Spec.Template.Labels, rawTemplate(&ss2.Spec.Template), 1, ss2.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -1296,7 +1231,7 @@ func TestRealHistory_ReleaseControllerRevision(t *testing.T) {
|
||||
stop := make(chan struct{})
|
||||
defer close(stop)
|
||||
informerFactory.Start(stop)
|
||||
informer := informerFactory.Apps().V1beta1().ControllerRevisions()
|
||||
informer := informerFactory.Apps().V1().ControllerRevisions()
|
||||
informerFactory.WaitForCacheSync(stop)
|
||||
|
||||
history := NewHistory(client, informer.Lister())
|
||||
@ -1326,26 +1261,18 @@ func TestRealHistory_ReleaseControllerRevision(t *testing.T) {
|
||||
|
||||
ss1 := newStatefulSet(3, "ss1", types.UID("ss1"), map[string]string{"foo": "bar"})
|
||||
ss2 := newStatefulSet(3, "ss2", types.UID("ss2"), map[string]string{"goo": "car"})
|
||||
sel1, err := metav1.LabelSelectorAsSelector(ss1.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sel2, err := metav1.LabelSelectorAsSelector(ss2.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 1, nil)
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 1, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1.Namespace = ss1.Namespace
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 2, nil)
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 2, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev2.Namespace = ss1.Namespace
|
||||
ss1Rev2.OwnerReferences = []metav1.OwnerReference{}
|
||||
ss2Rev1, err := NewControllerRevision(ss2, parentKind, sel2, rawTemplate(&ss2.Spec.Template), 1, nil)
|
||||
ss2Rev1, err := NewControllerRevision(ss2, parentKind, ss2.Spec.Template.Labels, rawTemplate(&ss2.Spec.Template), 1, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -1426,7 +1353,7 @@ func TestFakeHistory_ReleaseControllerRevision(t *testing.T) {
|
||||
stop := make(chan struct{})
|
||||
defer close(stop)
|
||||
informerFactory.Start(stop)
|
||||
informer := informerFactory.Apps().V1beta1().ControllerRevisions()
|
||||
informer := informerFactory.Apps().V1().ControllerRevisions()
|
||||
informerFactory.WaitForCacheSync(stop)
|
||||
history := NewFakeHistory(informer)
|
||||
var collisionCount int32
|
||||
@ -1457,26 +1384,18 @@ func TestFakeHistory_ReleaseControllerRevision(t *testing.T) {
|
||||
ss1.Status.CollisionCount = new(int32)
|
||||
ss2 := newStatefulSet(3, "ss2", types.UID("ss2"), map[string]string{"goo": "car"})
|
||||
ss2.Status.CollisionCount = new(int32)
|
||||
sel1, err := metav1.LabelSelectorAsSelector(ss1.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sel2, err := metav1.LabelSelectorAsSelector(ss2.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 1, ss1.Status.CollisionCount)
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 1, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1.Namespace = ss1.Namespace
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev2.Namespace = ss1.Namespace
|
||||
ss1Rev2.OwnerReferences = []metav1.OwnerReference{}
|
||||
ss2Rev1, err := NewControllerRevision(ss2, parentKind, sel2, rawTemplate(&ss2.Spec.Template), 1, ss2.Status.CollisionCount)
|
||||
ss2Rev1, err := NewControllerRevision(ss2, parentKind, ss2.Spec.Template.Labels, rawTemplate(&ss2.Spec.Template), 1, ss2.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -1563,31 +1482,23 @@ func TestFindEqualRevisions(t *testing.T) {
|
||||
ss1.Status.CollisionCount = new(int32)
|
||||
ss2 := newStatefulSet(3, "ss2", types.UID("ss2"), map[string]string{"goo": "car"})
|
||||
ss2.Status.CollisionCount = new(int32)
|
||||
sel1, err := metav1.LabelSelectorAsSelector(ss1.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sel2, err := metav1.LabelSelectorAsSelector(ss2.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 1, ss1.Status.CollisionCount)
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 1, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1.Namespace = ss1.Namespace
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev2.Namespace = ss1.Namespace
|
||||
ss1Rev2.OwnerReferences = []metav1.OwnerReference{}
|
||||
ss2Rev1, err := NewControllerRevision(ss2, parentKind, sel2, rawTemplate(&ss2.Spec.Template), 1, ss2.Status.CollisionCount)
|
||||
ss2Rev1, err := NewControllerRevision(ss2, parentKind, ss2.Spec.Template.Labels, rawTemplate(&ss2.Spec.Template), 1, ss2.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss2Rev1.Namespace = ss2.Namespace
|
||||
ss2Rev2, err := NewControllerRevision(ss2, parentKind, sel2, rawTemplate(&ss2.Spec.Template), 2, ss2.Status.CollisionCount)
|
||||
ss2Rev2, err := NewControllerRevision(ss2, parentKind, ss2.Spec.Template.Labels, rawTemplate(&ss2.Spec.Template), 2, ss2.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -1633,22 +1544,18 @@ func TestSortControllerRevisions(t *testing.T) {
|
||||
}
|
||||
ss1 := newStatefulSet(3, "ss1", types.UID("ss1"), map[string]string{"foo": "bar"})
|
||||
ss1.Status.CollisionCount = new(int32)
|
||||
sel1, err := metav1.LabelSelectorAsSelector(ss1.Spec.Selector)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 1, ss1.Status.CollisionCount)
|
||||
ss1Rev1, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 1, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev1.Namespace = ss1.Namespace
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
ss1Rev2, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ss1Rev2.Namespace = ss1.Namespace
|
||||
ss1Rev3, err := NewControllerRevision(ss1, parentKind, sel1, rawTemplate(&ss1.Spec.Template), 2, ss1.Status.CollisionCount)
|
||||
ss1Rev3, err := NewControllerRevision(ss1, parentKind, ss1.Spec.Template.Labels, rawTemplate(&ss1.Spec.Template), 3, ss1.Status.CollisionCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -1682,10 +1589,20 @@ func TestSortControllerRevisions(t *testing.T) {
|
||||
}
|
||||
|
||||
func newStatefulSet(replicas int, name string, uid types.UID, labels map[string]string) *apps.StatefulSet {
|
||||
// Converting all the map-only selectors to set-based selectors.
|
||||
var testMatchExpressions []metav1.LabelSelectorRequirement
|
||||
for key, value := range labels {
|
||||
sel := metav1.LabelSelectorRequirement{
|
||||
Key: key,
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: []string{value},
|
||||
}
|
||||
testMatchExpressions = append(testMatchExpressions, sel)
|
||||
}
|
||||
return &apps.StatefulSet{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "StatefulSet",
|
||||
APIVersion: "apps/v1beta1",
|
||||
APIVersion: "apps/v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
@ -1694,7 +1611,10 @@ func newStatefulSet(replicas int, name string, uid types.UID, labels map[string]
|
||||
},
|
||||
Spec: apps.StatefulSetSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: labels,
|
||||
// Purposely leaving MatchLabels nil, so to ensure it will break if any link
|
||||
// in the chain ignores the set-based MatchExpressions.
|
||||
MatchLabels: nil,
|
||||
MatchExpressions: testMatchExpressions,
|
||||
},
|
||||
Replicas: func() *int32 { i := int32(replicas); return &i }(),
|
||||
Template: v1.PodTemplateSpec{
|
||||
|
3
vendor/k8s.io/kubernetes/pkg/controller/job/BUILD
generated
vendored
3
vendor/k8s.io/kubernetes/pkg/controller/job/BUILD
generated
vendored
@ -45,8 +45,7 @@ go_test(
|
||||
"job_controller_test.go",
|
||||
"utils_test.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/job",
|
||||
library = ":go_default_library",
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/api/legacyscheme:go_default_library",
|
||||
"//pkg/apis/core/install:go_default_library",
|
||||
|
12
vendor/k8s.io/kubernetes/pkg/controller/job/job_controller.go
generated
vendored
12
vendor/k8s.io/kubernetes/pkg/controller/job/job_controller.go
generated
vendored
@ -553,6 +553,14 @@ func (jm *JobController) syncJob(key string) (bool, error) {
}

forget := false
// Check if the number of jobs succeeded increased since the last check. If yes "forget" should be true
// This logic is linked to the issue: https://github.com/kubernetes/kubernetes/issues/56853 that aims to
// improve the Job backoff policy when parallelism > 1 and few Jobs failed but others succeed.
// In this case, we should clear the backoff delay.
if job.Status.Succeeded < succeeded {
forget = true
}

// no need to update the job if the status hasn't changed since last time
if job.Status.Active != active || job.Status.Succeeded != succeeded || job.Status.Failed != failed || len(job.Status.Conditions) != conditions {
job.Status.Active = active
@ -560,12 +568,12 @@ func (jm *JobController) syncJob(key string) (bool, error) {
job.Status.Failed = failed

if err := jm.updateHandler(&job); err != nil {
return false, err
return forget, err
}

if jobHaveNewFailure && !IsJobFinished(&job) {
// returning an error will re-enqueue Job after the backoff period
return false, fmt.Errorf("failed pod(s) detected for job key %q", key)
return forget, fmt.Errorf("failed pod(s) detected for job key %q", key)
}

forget = true
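The hunk above makes syncJob propagate a "forget" flag to the rate-limited workqueue so that a Job which made progress has its backoff cleared even if some pods failed. The rule itself is a single comparison; a self-contained sketch under the assumption that the two counts are the previously recorded and freshly observed succeeded-pod totals (the same quantities compared above):

// shouldForgetBackoff reports whether the Job's backoff should be cleared:
// true when the observed number of succeeded pods grew since the last
// recorded status, i.e. the Job made progress despite earlier failures.
func shouldForgetBackoff(previouslySucceeded, nowSucceeded int32) bool {
	return nowSucceeded > previouslySucceeded
}

When this returns true, syncJob returns forget=true and the queue resets its per-key retry count instead of lengthening the delay.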
|
157
vendor/k8s.io/kubernetes/pkg/controller/job/job_controller_test.go
generated
vendored
157
vendor/k8s.io/kubernetes/pkg/controller/job/job_controller_test.go
generated
vendored
@ -102,24 +102,43 @@ func newJobControllerFromClient(kubeClient clientset.Interface, resyncPeriod con
return jm, sharedInformers
}

func newPod(name string, job *batch.Job) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: job.Spec.Selector.MatchLabels,
Namespace: job.Namespace,
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(job, controllerKind)},
},
}
}

// create count pods with the given phase for the given job
func newPodList(count int32, status v1.PodPhase, job *batch.Job) []v1.Pod {
pods := []v1.Pod{}
for i := int32(0); i < count; i++ {
newPod := v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("pod-%v", rand.String(10)),
Labels: job.Spec.Selector.MatchLabels,
Namespace: job.Namespace,
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(job, controllerKind)},
},
Status: v1.PodStatus{Phase: status},
}
pods = append(pods, newPod)
newPod := newPod(fmt.Sprintf("pod-%v", rand.String(10)), job)
newPod.Status = v1.PodStatus{Phase: status}
pods = append(pods, *newPod)
}
return pods
}

func setPodsStatuses(podIndexer cache.Indexer, job *batch.Job, pendingPods, activePods, succeededPods, failedPods int32) {
for _, pod := range newPodList(pendingPods, v1.PodPending, job) {
podIndexer.Add(&pod)
}
for _, pod := range newPodList(activePods, v1.PodRunning, job) {
podIndexer.Add(&pod)
}
for _, pod := range newPodList(succeededPods, v1.PodSucceeded, job) {
podIndexer.Add(&pod)
}
for _, pod := range newPodList(failedPods, v1.PodFailed, job) {
podIndexer.Add(&pod)
}
}
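With the helpers added above, seeding the pod indexer for a test case collapses into one call, as the later TestControllerSyncJob and TestSyncJobPastDeadline hunks in this diff show. A short usage sketch; sharedInformerFactory, job and tc are the variables already present in those tests.

// Seed the shared informer's pod indexer for one test case in a single call.
podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer()
setPodsStatuses(podIndexer, job, tc.pendingPods, tc.activePods, tc.succeededPods, tc.failedPods)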
func TestControllerSyncJob(t *testing.T) {
|
||||
jobConditionComplete := batch.JobComplete
|
||||
jobConditionFailed := batch.JobFailed
|
||||
@ -199,11 +218,16 @@ func TestControllerSyncJob(t *testing.T) {
|
||||
fmt.Errorf("Fake error"), true, 0, 3, 0, 0,
|
||||
0, 1, 3, 0, 0, nil, "",
|
||||
},
|
||||
"failed pod": {
|
||||
"failed + succeed pods: reset backoff delay": {
|
||||
2, 5, 6, false, 0,
|
||||
fmt.Errorf("Fake error"), false, 0, 1, 1, 1,
|
||||
fmt.Errorf("Fake error"), true, 0, 1, 1, 1,
|
||||
1, 0, 1, 1, 1, nil, "",
|
||||
},
|
||||
"only new failed pod": {
|
||||
2, 5, 6, false, 0,
|
||||
fmt.Errorf("Fake error"), false, 0, 1, 0, 1,
|
||||
1, 0, 1, 0, 1, nil, "",
|
||||
},
|
||||
"job finish": {
|
||||
2, 5, 6, false, 0,
|
||||
nil, true, 0, 0, 5, 0,
|
||||
@ -273,18 +297,7 @@ func TestControllerSyncJob(t *testing.T) {
|
||||
}
|
||||
sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
|
||||
podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer()
|
||||
for _, pod := range newPodList(tc.pendingPods, v1.PodPending, job) {
|
||||
podIndexer.Add(&pod)
|
||||
}
|
||||
for _, pod := range newPodList(tc.activePods, v1.PodRunning, job) {
|
||||
podIndexer.Add(&pod)
|
||||
}
|
||||
for _, pod := range newPodList(tc.succeededPods, v1.PodSucceeded, job) {
|
||||
podIndexer.Add(&pod)
|
||||
}
|
||||
for _, pod := range newPodList(tc.failedPods, v1.PodFailed, job) {
|
||||
podIndexer.Add(&pod)
|
||||
}
|
||||
setPodsStatuses(podIndexer, job, tc.pendingPods, tc.activePods, tc.succeededPods, tc.failedPods)
|
||||
|
||||
// run
|
||||
forget, err := manager.syncJob(getKey(job, t))
|
||||
@ -424,15 +437,7 @@ func TestSyncJobPastDeadline(t *testing.T) {
|
||||
job.Status.StartTime = &start
|
||||
sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
|
||||
podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer()
|
||||
for _, pod := range newPodList(tc.activePods, v1.PodRunning, job) {
|
||||
podIndexer.Add(&pod)
|
||||
}
|
||||
for _, pod := range newPodList(tc.succeededPods, v1.PodSucceeded, job) {
|
||||
podIndexer.Add(&pod)
|
||||
}
|
||||
for _, pod := range newPodList(tc.failedPods, v1.PodFailed, job) {
|
||||
podIndexer.Add(&pod)
|
||||
}
|
||||
setPodsStatuses(podIndexer, job, 0, tc.activePods, tc.succeededPods, tc.failedPods)
|
||||
|
||||
// run
|
||||
forget, err := manager.syncJob(getKey(job, t))
|
||||
@ -680,17 +685,6 @@ func TestJobPodLookup(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func newPod(name string, job *batch.Job) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: job.Spec.Selector.MatchLabels,
|
||||
Namespace: job.Namespace,
|
||||
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(job, controllerKind)},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetPodsForJob(t *testing.T) {
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
|
||||
@ -1269,3 +1263,78 @@ func bumpResourceVersion(obj metav1.Object) {
|
||||
ver, _ := strconv.ParseInt(obj.GetResourceVersion(), 10, 32)
|
||||
obj.SetResourceVersion(strconv.FormatInt(ver+1, 10))
|
||||
}
|
||||
|
||||
type pods struct {
|
||||
pending int32
|
||||
active int32
|
||||
succeed int32
|
||||
failed int32
|
||||
}
|
||||
|
||||
func TestJobBackoffReset(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
// job setup
|
||||
parallelism int32
|
||||
completions int32
|
||||
backoffLimit int32
|
||||
|
||||
// pod setup - each row is additive!
|
||||
pods []pods
|
||||
}{
|
||||
"parallelism=1": {
|
||||
1, 2, 1,
|
||||
[]pods{
|
||||
{0, 1, 0, 1},
|
||||
{0, 0, 1, 0},
|
||||
},
|
||||
},
|
||||
"parallelism=2 (just failure)": {
|
||||
2, 2, 1,
|
||||
[]pods{
|
||||
{0, 2, 0, 1},
|
||||
{0, 0, 1, 0},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
DefaultJobBackOff = time.Duration(0) // overwrite the default value for testing
|
||||
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
manager.podControl = &fakePodControl
|
||||
manager.podStoreSynced = alwaysReady
|
||||
manager.jobStoreSynced = alwaysReady
|
||||
var actual *batch.Job
|
||||
manager.updateHandler = func(job *batch.Job) error {
|
||||
actual = job
|
||||
return nil
|
||||
}
|
||||
|
||||
// job & pods setup
|
||||
job := newJob(tc.parallelism, tc.completions, tc.backoffLimit)
|
||||
key := getKey(job, t)
|
||||
sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job)
|
||||
podIndexer := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer()
|
||||
|
||||
setPodsStatuses(podIndexer, job, tc.pods[0].pending, tc.pods[0].active, tc.pods[0].succeed, tc.pods[0].failed)
|
||||
manager.queue.Add(key)
|
||||
manager.processNextWorkItem()
|
||||
retries := manager.queue.NumRequeues(key)
|
||||
if retries != 1 {
|
||||
t.Errorf("%s: expected exactly 1 retry, got %d", name, retries)
|
||||
}
|
||||
|
||||
job = actual
|
||||
sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Replace([]interface{}{actual}, actual.ResourceVersion)
|
||||
setPodsStatuses(podIndexer, job, tc.pods[1].pending, tc.pods[1].active, tc.pods[1].succeed, tc.pods[1].failed)
|
||||
manager.processNextWorkItem()
|
||||
retries = manager.queue.NumRequeues(key)
|
||||
if retries != 0 {
|
||||
t.Errorf("%s: expected exactly 0 retries, got %d", name, retries)
|
||||
}
|
||||
if getCondition(actual, batch.JobFailed, "BackoffLimitExceeded") {
|
||||
t.Errorf("%s: unexpected job failure", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
3
vendor/k8s.io/kubernetes/pkg/controller/namespace/deletion/BUILD
generated
vendored
3
vendor/k8s.io/kubernetes/pkg/controller/namespace/deletion/BUILD
generated
vendored
@ -29,8 +29,7 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["namespaced_resources_deleter_test.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/namespace/deletion",
|
||||
library = ":go_default_library",
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/api/legacyscheme:go_default_library",
|
||||
"//pkg/apis/core:go_default_library",
|
||||
|
48
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/BUILD
generated
vendored
Normal file
48
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/BUILD
generated
vendored
Normal file
@ -0,0 +1,48 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"metrics.go",
|
||||
"node_ipam_controller.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/nodeipam",
|
||||
deps = [
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/controller/nodeipam/ipam:go_default_library",
|
||||
"//pkg/controller/nodeipam/ipam/sync:go_default_library",
|
||||
"//pkg/util/metrics:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//pkg/controller/nodeipam/ipam:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
)
|
@ -14,6 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Package node contains code for syncing cloud instances with
// Package nodeipam contains code for syncing cloud instances with
// node registry
package node // import "k8s.io/kubernetes/pkg/controller/node"
package nodeipam // import "k8s.io/kubernetes/pkg/controller/nodeipam"
@ -13,12 +13,11 @@ go_test(
|
||||
"range_allocator_test.go",
|
||||
"timeout_test.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/node/ipam",
|
||||
library = ":go_default_library",
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/controller/node/ipam/cidrset:go_default_library",
|
||||
"//pkg/controller/node/ipam/test:go_default_library",
|
||||
"//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
|
||||
"//pkg/controller/nodeipam/ipam/test:go_default_library",
|
||||
"//pkg/controller/testutil:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
@ -40,15 +39,15 @@ go_library(
|
||||
"range_allocator.go",
|
||||
"timeout.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/node/ipam",
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/nodeipam/ipam",
|
||||
deps = [
|
||||
"//pkg/api/v1/node:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/cloudprovider/providers/gce:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/controller/node/ipam/cidrset:go_default_library",
|
||||
"//pkg/controller/node/ipam/sync:go_default_library",
|
||||
"//pkg/controller/node/util:go_default_library",
|
||||
"//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
|
||||
"//pkg/controller/nodeipam/ipam/sync:go_default_library",
|
||||
"//pkg/controller/util/node:go_default_library",
|
||||
"//pkg/util/node:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
@ -82,9 +81,9 @@ filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//pkg/controller/node/ipam/cidrset:all-srcs",
|
||||
"//pkg/controller/node/ipam/sync:all-srcs",
|
||||
"//pkg/controller/node/ipam/test:all-srcs",
|
||||
"//pkg/controller/nodeipam/ipam/cidrset:all-srcs",
|
||||
"//pkg/controller/nodeipam/ipam/sync:all-srcs",
|
||||
"//pkg/controller/nodeipam/ipam/test:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
)
|
@ -68,7 +68,7 @@ const (
cidrUpdateQueueSize = 5000

// cidrUpdateRetries is the no. of times a NodeSpec update will be retried before dropping it.
cidrUpdateRetries = 10
cidrUpdateRetries = 3
)

// CIDRAllocator is an interface implemented by things that know how
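cidrUpdateRetries now caps only the Node.Spec.PodCIDR patch attempts; the node lookup is no longer retried (see the cloud_cidr_allocator and range_allocator hunks later in this diff). A minimal sketch of the bounded retry as those hunks use it; ca, node, podCIDR and the imported utilnode, types, glog and nodeutil identifiers come from the surrounding allocator code rather than from this constant block.

// Bounded retry of the PodCIDR patch, as used by the allocators below.
var err error
for i := 0; i < cidrUpdateRetries; i++ {
	if err = utilnode.PatchNodeCIDR(ca.client, types.NodeName(node.Name), podCIDR); err == nil {
		glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
		break
	}
}
if err != nil {
	// After the retries are exhausted the allocator records a CIDRAssignmentFailed event.
	nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRAssignmentFailed")
}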
@ -9,15 +9,14 @@ load(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["cidr_set_test.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/node/ipam/cidrset",
|
||||
library = ":go_default_library",
|
||||
embed = [":go_default_library"],
|
||||
deps = ["//vendor/github.com/golang/glog:go_default_library"],
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["cidr_set.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/node/ipam/cidrset",
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset",
|
||||
)
|
||||
|
||||
filegroup(
|
@ -40,7 +40,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/node/util"
|
||||
nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
|
||||
utilnode "k8s.io/kubernetes/pkg/util/node"
|
||||
)
|
||||
|
||||
@ -101,8 +101,8 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter
|
||||
}
|
||||
|
||||
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: util.CreateAddNodeHandler(ca.AllocateOrOccupyCIDR),
|
||||
UpdateFunc: util.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
|
||||
AddFunc: nodeutil.CreateAddNodeHandler(ca.AllocateOrOccupyCIDR),
|
||||
UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
|
||||
if newNode.Spec.PodCIDR == "" {
|
||||
return ca.AllocateOrOccupyCIDR(newNode)
|
||||
}
|
||||
@ -114,7 +114,7 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter
|
||||
}
|
||||
return nil
|
||||
}),
|
||||
DeleteFunc: util.CreateDeleteNodeHandler(ca.ReleaseCIDR),
|
||||
DeleteFunc: nodeutil.CreateDeleteNodeHandler(ca.ReleaseCIDR),
|
||||
})
|
||||
|
||||
glog.V(0).Infof("Using cloud CIDR allocator (provider: %v)", cloud.ProviderName())
|
||||
@ -191,17 +191,21 @@ func (ca *cloudCIDRAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
|
||||
|
||||
// updateCIDRAllocation assigns CIDR to Node and sends an update to the API server.
|
||||
func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
|
||||
var err error
|
||||
var node *v1.Node
|
||||
defer ca.removeNodeFromProcessing(nodeName)
|
||||
|
||||
node, err := ca.nodeLister.Get(nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", nodeName, err)
|
||||
return err
|
||||
}
|
||||
|
||||
cidrs, err := ca.cloud.AliasRanges(types.NodeName(nodeName))
|
||||
if err != nil {
|
||||
util.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
|
||||
nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
|
||||
return fmt.Errorf("failed to allocate cidr: %v", err)
|
||||
}
|
||||
if len(cidrs) == 0 {
|
||||
util.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
|
||||
nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRNotAvailable")
|
||||
return fmt.Errorf("failed to allocate cidr: Node %v has no CIDRs", node.Name)
|
||||
}
|
||||
_, cidr, err := net.ParseCIDR(cidrs[0])
|
||||
@ -210,35 +214,28 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
|
||||
}
|
||||
podCIDR := cidr.String()
|
||||
|
||||
for rep := 0; rep < cidrUpdateRetries; rep++ {
|
||||
node, err = ca.nodeLister.Get(nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", nodeName, err)
|
||||
continue
|
||||
}
|
||||
if node.Spec.PodCIDR == podCIDR {
|
||||
glog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR)
|
||||
// We don't return here, in order to set the NetworkUnavailable condition later below.
|
||||
} else {
|
||||
if node.Spec.PodCIDR != "" {
|
||||
if node.Spec.PodCIDR == podCIDR {
|
||||
glog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR)
|
||||
// We don't return to set the NetworkUnavailable condition if needed.
|
||||
break
|
||||
}
|
||||
glog.Errorf("PodCIDR being reassigned! Node %v spec has %v, but cloud provider has assigned %v",
|
||||
node.Name, node.Spec.PodCIDR, podCIDR)
|
||||
glog.Errorf("PodCIDR being reassigned! Node %v spec has %v, but cloud provider has assigned %v", node.Name, node.Spec.PodCIDR, podCIDR)
|
||||
// We fall through and set the CIDR despite this error. This
|
||||
// implements the same logic as implemented in the
|
||||
// rangeAllocator.
|
||||
//
|
||||
// See https://github.com/kubernetes/kubernetes/pull/42147#discussion_r103357248
|
||||
}
|
||||
if err = utilnode.PatchNodeCIDR(ca.client, types.NodeName(node.Name), podCIDR); err == nil {
|
||||
glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
|
||||
break
|
||||
for i := 0; i < cidrUpdateRetries; i++ {
|
||||
if err = utilnode.PatchNodeCIDR(ca.client, types.NodeName(node.Name), podCIDR); err == nil {
|
||||
glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
|
||||
break
|
||||
}
|
||||
}
|
||||
glog.Errorf("Failed to update node %v PodCIDR to %v (%d retries left): %v", node.Name, podCIDR, cidrUpdateRetries-rep-1, err)
|
||||
}
|
||||
if err != nil {
|
||||
util.RecordNodeStatusChange(ca.recorder, node, "CIDRAssignmentFailed")
|
||||
glog.Errorf("CIDR assignment for node %v failed: %v.", nodeName, err)
|
||||
nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRAssignmentFailed")
|
||||
glog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err)
|
||||
return err
|
||||
}
|
||||
|
@ -30,9 +30,9 @@ import (
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
"k8s.io/kubernetes/pkg/controller/node/ipam/cidrset"
|
||||
nodesync "k8s.io/kubernetes/pkg/controller/node/ipam/sync"
|
||||
"k8s.io/kubernetes/pkg/controller/node/util"
|
||||
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
|
||||
nodesync "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync"
|
||||
nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
|
||||
)
|
||||
|
||||
// Config for the IPAM controller.
|
||||
@ -96,7 +96,7 @@ func NewController(
|
||||
}
|
||||
|
||||
// Start initializes the Controller with the existing list of nodes and
|
||||
// registers the informers for node chnages. This will start synchronization
|
||||
// registers the informers for node changes. This will start synchronization
|
||||
// of the node and cloud CIDR range allocations.
|
||||
func (c *Controller) Start(nodeInformer informers.NodeInformer) error {
|
||||
glog.V(0).Infof("Starting IPAM controller (config=%+v)", c.config)
|
||||
@ -128,9 +128,9 @@ func (c *Controller) Start(nodeInformer informers.NodeInformer) error {
|
||||
}
|
||||
|
||||
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: util.CreateAddNodeHandler(c.onAdd),
|
||||
UpdateFunc: util.CreateUpdateNodeHandler(c.onUpdate),
|
||||
DeleteFunc: util.CreateDeleteNodeHandler(c.onDelete),
|
||||
AddFunc: nodeutil.CreateAddNodeHandler(c.onAdd),
|
||||
UpdateFunc: nodeutil.CreateUpdateNodeHandler(c.onUpdate),
|
||||
DeleteFunc: nodeutil.CreateDeleteNodeHandler(c.onDelete),
|
||||
})
|
||||
|
||||
return nil
|
||||
@ -193,7 +193,7 @@ func (c *Controller) onUpdate(_, node *v1.Node) error {
|
||||
if sync, ok := c.syncers[node.Name]; ok {
|
||||
sync.Update(node)
|
||||
} else {
|
||||
glog.Errorf("Received update for non-existant node %q", node.Name)
|
||||
glog.Errorf("Received update for non-existent node %q", node.Name)
|
||||
return fmt.Errorf("unknown node %q", node.Name)
|
||||
}
|
||||
|
@ -20,8 +20,8 @@ import (
"net"
"testing"

"k8s.io/kubernetes/pkg/controller/node/ipam/cidrset"
"k8s.io/kubernetes/pkg/controller/node/ipam/test"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test"
)

func TestOccupyServiceCIDR(t *testing.T) {
@ -36,9 +36,9 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/node/ipam/cidrset"
"k8s.io/kubernetes/pkg/controller/node/util"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
utilnode "k8s.io/kubernetes/pkg/util/node"
)

type rangeAllocator struct {
@ -119,8 +119,8 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No
}

nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: util.CreateAddNodeHandler(ra.AllocateOrOccupyCIDR),
UpdateFunc: util.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
AddFunc: nodeutil.CreateAddNodeHandler(ra.AllocateOrOccupyCIDR),
UpdateFunc: nodeutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error {
// If the PodCIDR is not empty we either:
// - already processed a Node that already had a CIDR after NC restarted
// (cidr is marked as used),
@ -145,7 +145,7 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No
}
return nil
}),
DeleteFunc: util.CreateDeleteNodeHandler(ra.ReleaseCIDR),
DeleteFunc: nodeutil.CreateDeleteNodeHandler(ra.ReleaseCIDR),
})

return ra, nil
@ -234,7 +234,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
podCIDR, err := r.cidrs.AllocateNext()
if err != nil {
r.removeNodeFromProcessing(node.Name)
util.RecordNodeStatusChange(r.recorder, node, "CIDRNotAvailable")
nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRNotAvailable")
return fmt.Errorf("failed to allocate cidr: %v", err)
}

@ -286,39 +286,40 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
|
||||
defer r.removeNodeFromProcessing(data.nodeName)
|
||||
|
||||
podCIDR := data.cidr.String()
|
||||
for rep := 0; rep < cidrUpdateRetries; rep++ {
|
||||
node, err = r.nodeLister.Get(data.nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed while getting node %v to retry updating Node.Spec.PodCIDR: %v", data.nodeName, err)
|
||||
continue
|
||||
|
||||
node, err = r.nodeLister.Get(data.nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", data.nodeName, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if node.Spec.PodCIDR == podCIDR {
|
||||
glog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR)
|
||||
return nil
|
||||
}
|
||||
if node.Spec.PodCIDR != "" {
|
||||
glog.Errorf("Node %v already has a CIDR allocated %v. Releasing the new one %v.", node.Name, node.Spec.PodCIDR, podCIDR)
|
||||
if err := r.cidrs.Release(data.cidr); err != nil {
|
||||
glog.Errorf("Error when releasing CIDR %v", podCIDR)
|
||||
}
|
||||
if node.Spec.PodCIDR != "" {
|
||||
glog.V(4).Infof("Node %v already has allocated CIDR %v. Releasing assigned one if different.", node.Name, node.Spec.PodCIDR)
|
||||
if node.Spec.PodCIDR != podCIDR {
|
||||
glog.Errorf("Node %q PodCIDR seems to have changed (original=%v, current=%v), releasing original and occupying new CIDR",
|
||||
node.Name, node.Spec.PodCIDR, podCIDR)
|
||||
if err := r.cidrs.Release(data.cidr); err != nil {
|
||||
glog.Errorf("Error when releasing CIDR %v", podCIDR)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// If we reached here, it means that the node has no CIDR currently assigned. So we set it.
|
||||
for i := 0; i < cidrUpdateRetries; i++ {
|
||||
if err = utilnode.PatchNodeCIDR(r.client, types.NodeName(node.Name), podCIDR); err == nil {
|
||||
glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
|
||||
return nil
|
||||
}
|
||||
if err = nodeutil.PatchNodeCIDR(r.client, types.NodeName(node.Name), podCIDR); err == nil {
|
||||
glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR)
|
||||
break
|
||||
}
|
||||
glog.Errorf("Failed to update node %v PodCIDR to %v (%d retries left): %v", node.Name, podCIDR, cidrUpdateRetries-rep-1, err)
|
||||
}
|
||||
if err != nil {
|
||||
util.RecordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed")
|
||||
// We accept the fact that we may leek CIDRs here. This is safer than releasing
|
||||
// them in case when we don't know if request went through.
|
||||
// NodeController restart will return all falsely allocated CIDRs to the pool.
|
||||
if !apierrors.IsServerTimeout(err) {
|
||||
glog.Errorf("CIDR assignment for node %v failed: %v. Releasing allocated CIDR", data.nodeName, err)
|
||||
if releaseErr := r.cidrs.Release(data.cidr); releaseErr != nil {
|
||||
glog.Errorf("Error releasing allocated CIDR for node %v: %v", data.nodeName, releaseErr)
|
||||
}
|
||||
glog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err)
|
||||
nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed")
|
||||
// We accept the fact that we may leak CIDRs here. This is safer than releasing
|
||||
// them in case when we don't know if request went through.
|
||||
// NodeController restart will return all falsely allocated CIDRs to the pool.
|
||||
if !apierrors.IsServerTimeout(err) {
|
||||
glog.Errorf("CIDR assignment for node %v failed: %v. Releasing allocated CIDR", node.Name, err)
|
||||
if releaseErr := r.cidrs.Release(data.cidr); releaseErr != nil {
|
||||
glog.Errorf("Error releasing allocated CIDR for node %v: %v", node.Name, releaseErr)
|
||||
}
|
||||
}
|
||||
return err
|
@ -3,10 +3,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["sync.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/node/ipam/sync",
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/controller/node/ipam/cidrset:go_default_library",
|
||||
"//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
],
|
||||
@ -15,11 +15,10 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["sync_test.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/node/ipam/sync",
|
||||
library = ":go_default_library",
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/controller/node/ipam/cidrset:go_default_library",
|
||||
"//pkg/controller/node/ipam/test:go_default_library",
|
||||
"//pkg/controller/nodeipam/ipam/cidrset:go_default_library",
|
||||
"//pkg/controller/nodeipam/ipam/test:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
@ -25,7 +25,7 @@ import (
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/pkg/controller/node/ipam/cidrset"
|
||||
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
|
||||
)
|
||||
|
||||
const (
|
@ -26,8 +26,8 @@ import (
|
||||
|
||||
"github.com/golang/glog"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/controller/node/ipam/cidrset"
|
||||
"k8s.io/kubernetes/pkg/controller/node/ipam/test"
|
||||
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset"
|
||||
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
)
|
@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["utils.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/node/ipam/test",
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
21
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/metrics.go
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nodeipam
|
||||
|
||||
// Register the metrics that are to be monitored.
|
||||
func Register() {
|
||||
}
|
187
vendor/k8s.io/kubernetes/pkg/controller/nodeipam/node_ipam_controller.go
generated
vendored
Normal file
@ -0,0 +1,187 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nodeipam
|
||||
|
||||
import (
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
|
||||
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
|
||||
nodesync "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync"
|
||||
"k8s.io/kubernetes/pkg/util/metrics"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Register prometheus metrics
|
||||
Register()
|
||||
}
|
||||
|
||||
const (
|
||||
// ipamResyncInterval is the amount of time between when the cloud and node
|
||||
// CIDR range assignments are synchronized.
|
||||
ipamResyncInterval = 30 * time.Second
|
||||
// ipamMaxBackoff is the maximum backoff for retrying synchronization of a
|
||||
// given in the error state.
|
||||
ipamMaxBackoff = 10 * time.Second
|
||||
// ipamInitialRetry is the initial retry interval for retrying synchronization of a
|
||||
// given in the error state.
|
||||
ipamInitialBackoff = 250 * time.Millisecond
|
||||
)
|
||||
|
||||
// Controller is the controller that manages node ipam state.
|
||||
type Controller struct {
|
||||
allocateNodeCIDRs bool
|
||||
allocatorType ipam.CIDRAllocatorType
|
||||
|
||||
cloud cloudprovider.Interface
|
||||
clusterCIDR *net.IPNet
|
||||
serviceCIDR *net.IPNet
|
||||
kubeClient clientset.Interface
|
||||
// Method for easy mocking in unittest.
|
||||
lookupIP func(host string) ([]net.IP, error)
|
||||
|
||||
nodeLister corelisters.NodeLister
|
||||
nodeInformerSynced cache.InformerSynced
|
||||
|
||||
cidrAllocator ipam.CIDRAllocator
|
||||
|
||||
forcefullyDeletePod func(*v1.Pod) error
|
||||
}
|
||||
|
||||
// NewNodeIpamController returns a new node IP Address Management controller to
|
||||
// sync instances from cloudprovider.
|
||||
// This method returns an error if it is unable to initialize the CIDR bitmap with
|
||||
// podCIDRs it has already allocated to nodes. Since we don't allow podCIDR changes
|
||||
// currently, this should be handled as a fatal error.
|
||||
func NewNodeIpamController(
|
||||
nodeInformer coreinformers.NodeInformer,
|
||||
cloud cloudprovider.Interface,
|
||||
kubeClient clientset.Interface,
|
||||
clusterCIDR *net.IPNet,
|
||||
serviceCIDR *net.IPNet,
|
||||
nodeCIDRMaskSize int,
|
||||
allocateNodeCIDRs bool,
|
||||
allocatorType ipam.CIDRAllocatorType) (*Controller, error) {
|
||||
|
||||
if kubeClient == nil {
|
||||
glog.Fatalf("kubeClient is nil when starting Controller")
|
||||
}
|
||||
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
|
||||
glog.V(0).Infof("Sending events to api server.")
|
||||
eventBroadcaster.StartRecordingToSink(
|
||||
&v1core.EventSinkImpl{
|
||||
Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events(""),
|
||||
})
|
||||
|
||||
if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
|
||||
metrics.RegisterMetricAndTrackRateLimiterUsage("node_ipam_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
|
||||
}
|
||||
|
||||
if allocateNodeCIDRs {
|
||||
if clusterCIDR == nil {
|
||||
glog.Fatal("Controller: Must specify clusterCIDR if allocateNodeCIDRs == true.")
|
||||
}
|
||||
mask := clusterCIDR.Mask
|
||||
if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize {
|
||||
glog.Fatal("Controller: Invalid clusterCIDR, mask size of clusterCIDR must be less than nodeCIDRMaskSize.")
|
||||
}
|
||||
}
|
||||
|
||||
ic := &Controller{
|
||||
cloud: cloud,
|
||||
kubeClient: kubeClient,
|
||||
lookupIP: net.LookupIP,
|
||||
clusterCIDR: clusterCIDR,
|
||||
serviceCIDR: serviceCIDR,
|
||||
allocateNodeCIDRs: allocateNodeCIDRs,
|
||||
allocatorType: allocatorType,
|
||||
}
|
||||
|
||||
// TODO: Abstract this check into a generic controller manager should run method.
|
||||
if ic.allocateNodeCIDRs {
|
||||
if ic.allocatorType == ipam.IPAMFromClusterAllocatorType || ic.allocatorType == ipam.IPAMFromCloudAllocatorType {
|
||||
cfg := &ipam.Config{
|
||||
Resync: ipamResyncInterval,
|
||||
MaxBackoff: ipamMaxBackoff,
|
||||
InitialRetry: ipamInitialBackoff,
|
||||
}
|
||||
switch ic.allocatorType {
|
||||
case ipam.IPAMFromClusterAllocatorType:
|
||||
cfg.Mode = nodesync.SyncFromCluster
|
||||
case ipam.IPAMFromCloudAllocatorType:
|
||||
cfg.Mode = nodesync.SyncFromCloud
|
||||
}
|
||||
ipamc, err := ipam.NewController(cfg, kubeClient, cloud, clusterCIDR, serviceCIDR, nodeCIDRMaskSize)
|
||||
if err != nil {
|
||||
glog.Fatalf("Error creating ipam controller: %v", err)
|
||||
}
|
||||
if err := ipamc.Start(nodeInformer); err != nil {
|
||||
glog.Fatalf("Error trying to Init(): %v", err)
|
||||
}
|
||||
} else {
|
||||
var err error
|
||||
ic.cidrAllocator, err = ipam.New(
|
||||
kubeClient, cloud, nodeInformer, ic.allocatorType, ic.clusterCIDR, ic.serviceCIDR, nodeCIDRMaskSize)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ic.nodeLister = nodeInformer.Lister()
|
||||
ic.nodeInformerSynced = nodeInformer.Informer().HasSynced
|
||||
|
||||
return ic, nil
|
||||
}
|
||||
|
||||
// Run starts an asynchronous loop that monitors the status of cluster nodes.
|
||||
func (nc *Controller) Run(stopCh <-chan struct{}) {
|
||||
defer utilruntime.HandleCrash()
|
||||
|
||||
glog.Infof("Starting ipam controller")
|
||||
defer glog.Infof("Shutting down ipam controller")
|
||||
|
||||
if !controller.WaitForCacheSync("node", stopCh, nc.nodeInformerSynced) {
|
||||
return
|
||||
}
|
||||
|
||||
// TODO: Abstract this check into a generic controller manager should run method.
|
||||
if nc.allocateNodeCIDRs {
|
||||
if nc.allocatorType != ipam.IPAMFromClusterAllocatorType && nc.allocatorType != ipam.IPAMFromCloudAllocatorType {
|
||||
go nc.cidrAllocator.Run(stopCh)
|
||||
}
|
||||
}
|
||||
|
||||
<-stopCh
|
||||
}
|
@ -1,66 +1,25 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["nodecontroller_test.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/node",
|
||||
library = ":go_default_library",
|
||||
deps = [
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/cloudprovider/providers/fake:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/controller/node/ipam:go_default_library",
|
||||
"//pkg/controller/node/scheduler:go_default_library",
|
||||
"//pkg/controller/node/util:go_default_library",
|
||||
"//pkg/controller/testutil:go_default_library",
|
||||
"//pkg/kubelet/apis:go_default_library",
|
||||
"//pkg/util/node:go_default_library",
|
||||
"//pkg/util/taints:go_default_library",
|
||||
"//plugin/pkg/scheduler/algorithm:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
|
||||
"//vendor/k8s.io/client-go/testing:go_default_library",
|
||||
],
|
||||
)
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"metrics.go",
|
||||
"node_controller.go",
|
||||
"node_lifecycle_controller.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/node",
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/nodelifecycle",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/api/v1/node:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/controller/node/ipam:go_default_library",
|
||||
"//pkg/controller/node/ipam/sync:go_default_library",
|
||||
"//pkg/controller/node/scheduler:go_default_library",
|
||||
"//pkg/controller/node/util:go_default_library",
|
||||
"//pkg/controller/nodelifecycle/scheduler:go_default_library",
|
||||
"//pkg/controller/util/node:go_default_library",
|
||||
"//pkg/scheduler/algorithm:go_default_library",
|
||||
"//pkg/util/metrics:go_default_library",
|
||||
"//pkg/util/node:go_default_library",
|
||||
"//pkg/util/system:go_default_library",
|
||||
"//pkg/util/taints:go_default_library",
|
||||
"//plugin/pkg/scheduler/algorithm:go_default_library",
|
||||
"//pkg/util/version:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
@ -75,7 +34,6 @@ go_library(
|
||||
"//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
@ -95,9 +53,40 @@ filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//pkg/controller/node/ipam:all-srcs",
|
||||
"//pkg/controller/node/scheduler:all-srcs",
|
||||
"//pkg/controller/node/util:all-srcs",
|
||||
"//pkg/controller/nodelifecycle/scheduler:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["node_lifecycle_controller_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/cloudprovider/providers/fake:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/controller/nodelifecycle/scheduler:go_default_library",
|
||||
"//pkg/controller/testutil:go_default_library",
|
||||
"//pkg/controller/util/node:go_default_library",
|
||||
"//pkg/kubelet/apis:go_default_library",
|
||||
"//pkg/scheduler/algorithm:go_default_library",
|
||||
"//pkg/util/node:go_default_library",
|
||||
"//pkg/util/taints:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
|
||||
"//vendor/k8s.io/client-go/testing:go_default_library",
|
||||
],
|
||||
)
|
9
vendor/k8s.io/kubernetes/pkg/controller/nodelifecycle/OWNERS
generated
vendored
Executable file
@ -0,0 +1,9 @@
|
||||
approvers:
|
||||
- gmarek
|
||||
- bowei
|
||||
reviewers:
|
||||
- gmarek
|
||||
- smarterclayton
|
||||
- ingvagabund
|
||||
- aveshagarwal
|
||||
- k82cn
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package node
|
||||
package nodelifecycle
|
||||
|
||||
import (
|
||||
"sync"
|
File diff suppressed because it is too large
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@ -14,10 +14,9 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package node
|
||||
package nodelifecycle
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
@ -39,14 +38,13 @@ import (
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/node/ipam"
|
||||
"k8s.io/kubernetes/pkg/controller/node/scheduler"
|
||||
"k8s.io/kubernetes/pkg/controller/node/util"
|
||||
"k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler"
|
||||
"k8s.io/kubernetes/pkg/controller/testutil"
|
||||
nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
"k8s.io/kubernetes/pkg/util/node"
|
||||
taintutils "k8s.io/kubernetes/pkg/util/taints"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -60,13 +58,46 @@ const (
|
||||
|
||||
func alwaysReady() bool { return true }
|
||||
|
||||
type nodeController struct {
|
||||
type nodeLifecycleController struct {
|
||||
*Controller
|
||||
nodeInformer coreinformers.NodeInformer
|
||||
daemonSetInformer extensionsinformers.DaemonSetInformer
|
||||
}
|
||||
|
||||
func newNodeControllerFromClient(
|
||||
// doEviction does the fake eviction and returns the status of eviction operation.
|
||||
func (nc *nodeLifecycleController) doEviction(fakeNodeHandler *testutil.FakeNodeHandler) bool {
|
||||
var podEvicted bool
|
||||
zones := testutil.GetZones(fakeNodeHandler)
|
||||
for _, zone := range zones {
|
||||
nc.zonePodEvictor[zone].Try(func(value scheduler.TimedValue) (bool, time.Duration) {
|
||||
uid, _ := value.UID.(string)
|
||||
nodeutil.DeletePods(fakeNodeHandler, nc.recorder, value.Value, uid, nc.daemonSetStore)
|
||||
return true, 0
|
||||
})
|
||||
}
|
||||
|
||||
for _, action := range fakeNodeHandler.Actions() {
|
||||
if action.GetVerb() == "delete" && action.GetResource().Resource == "pods" {
|
||||
podEvicted = true
|
||||
return podEvicted
|
||||
}
|
||||
}
|
||||
return podEvicted
|
||||
}
|
||||
|
||||
func (nc *nodeLifecycleController) syncNodeStore(fakeNodeHandler *testutil.FakeNodeHandler) error {
|
||||
nodes, err := fakeNodeHandler.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newElems := make([]interface{}, 0, len(nodes.Items))
|
||||
for i := range nodes.Items {
|
||||
newElems = append(newElems, &nodes.Items[i])
|
||||
}
|
||||
return nc.nodeInformer.Informer().GetStore().Replace(newElems, "newRV")
|
||||
}
|
||||
|
||||
func newNodeLifecycleControllerFromClient(
|
||||
cloud cloudprovider.Interface,
|
||||
kubeClient clientset.Interface,
|
||||
podEvictionTimeout time.Duration,
|
||||
@ -77,37 +108,28 @@ func newNodeControllerFromClient(
|
||||
nodeMonitorGracePeriod time.Duration,
|
||||
nodeStartupGracePeriod time.Duration,
|
||||
nodeMonitorPeriod time.Duration,
|
||||
clusterCIDR *net.IPNet,
|
||||
serviceCIDR *net.IPNet,
|
||||
nodeCIDRMaskSize int,
|
||||
allocateNodeCIDRs bool,
|
||||
useTaints bool,
|
||||
) (*nodeController, error) {
|
||||
) (*nodeLifecycleController, error) {
|
||||
|
||||
factory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc())
|
||||
|
||||
nodeInformer := factory.Core().V1().Nodes()
|
||||
daemonSetInformer := factory.Extensions().V1beta1().DaemonSets()
|
||||
|
||||
nc, err := NewNodeController(
|
||||
nc, err := NewNodeLifecycleController(
|
||||
factory.Core().V1().Pods(),
|
||||
nodeInformer,
|
||||
daemonSetInformer,
|
||||
cloud,
|
||||
kubeClient,
|
||||
nodeMonitorPeriod,
|
||||
nodeStartupGracePeriod,
|
||||
nodeMonitorGracePeriod,
|
||||
podEvictionTimeout,
|
||||
evictionLimiterQPS,
|
||||
secondaryEvictionLimiterQPS,
|
||||
largeClusterThreshold,
|
||||
unhealthyZoneThreshold,
|
||||
nodeMonitorGracePeriod,
|
||||
nodeStartupGracePeriod,
|
||||
nodeMonitorPeriod,
|
||||
clusterCIDR,
|
||||
serviceCIDR,
|
||||
nodeCIDRMaskSize,
|
||||
allocateNodeCIDRs,
|
||||
ipam.RangeAllocatorType,
|
||||
useTaints,
|
||||
useTaints,
|
||||
useTaints,
|
||||
@ -120,19 +142,7 @@ func newNodeControllerFromClient(
|
||||
nc.nodeInformerSynced = alwaysReady
|
||||
nc.daemonSetInformerSynced = alwaysReady
|
||||
|
||||
return &nodeController{nc, nodeInformer, daemonSetInformer}, nil
|
||||
}
|
||||
|
||||
func syncNodeStore(nc *nodeController, fakeNodeHandler *testutil.FakeNodeHandler) error {
|
||||
nodes, err := fakeNodeHandler.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newElems := make([]interface{}, 0, len(nodes.Items))
|
||||
for i := range nodes.Items {
|
||||
newElems = append(newElems, &nodes.Items[i])
|
||||
}
|
||||
return nc.nodeInformer.Informer().GetStore().Replace(newElems, "newRV")
|
||||
return &nodeLifecycleController{nc, nodeInformer, daemonSetInformer}, nil
|
||||
}
|
||||
|
||||
func TestMonitorNodeStatusEvictPods(t *testing.T) {
|
||||
@ -597,7 +607,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, item := range table {
|
||||
nodeController, _ := newNodeControllerFromClient(
|
||||
nodeController, _ := newNodeLifecycleControllerFromClient(
|
||||
nil,
|
||||
item.fakeNodeHandler,
|
||||
evictionTimeout,
|
||||
@ -608,17 +618,13 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
|
||||
testNodeMonitorGracePeriod,
|
||||
testNodeStartupGracePeriod,
|
||||
testNodeMonitorPeriod,
|
||||
nil,
|
||||
nil,
|
||||
0,
|
||||
false,
|
||||
false)
|
||||
nodeController.now = func() metav1.Time { return fakeNow }
|
||||
nodeController.recorder = testutil.NewFakeRecorder()
|
||||
for _, ds := range item.daemonSets {
|
||||
nodeController.daemonSetInformer.Informer().GetStore().Add(&ds)
|
||||
}
|
||||
if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil {
|
||||
if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if err := nodeController.monitorNodeStatus(); err != nil {
|
||||
@ -633,7 +639,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
|
||||
item.fakeNodeHandler.Existing[0].Labels = labels
|
||||
item.fakeNodeHandler.Existing[1].Labels = labels
|
||||
}
|
||||
if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil {
|
||||
if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if err := nodeController.monitorNodeStatus(); err != nil {
|
||||
@ -644,7 +650,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
|
||||
if _, ok := nodeController.zonePodEvictor[zone]; ok {
|
||||
nodeController.zonePodEvictor[zone].Try(func(value scheduler.TimedValue) (bool, time.Duration) {
|
||||
nodeUID, _ := value.UID.(string)
|
||||
util.DeletePods(item.fakeNodeHandler, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetInformer.Lister())
|
||||
nodeutil.DeletePods(item.fakeNodeHandler, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetInformer.Lister())
|
||||
return true, 0
|
||||
})
|
||||
} else {
|
||||
@ -763,12 +769,21 @@ func TestPodStatusChange(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, item := range table {
|
||||
nodeController, _ := newNodeControllerFromClient(nil, item.fakeNodeHandler,
|
||||
evictionTimeout, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealthyThreshold, testNodeMonitorGracePeriod,
|
||||
testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false)
|
||||
nodeController, _ := newNodeLifecycleControllerFromClient(
|
||||
nil,
|
||||
item.fakeNodeHandler,
|
||||
evictionTimeout,
|
||||
testRateLimiterQPS,
|
||||
testRateLimiterQPS,
|
||||
testLargeClusterThreshold,
|
||||
testUnhealthyThreshold,
|
||||
testNodeMonitorGracePeriod,
|
||||
testNodeStartupGracePeriod,
|
||||
testNodeMonitorPeriod,
|
||||
false)
|
||||
nodeController.now = func() metav1.Time { return fakeNow }
|
||||
nodeController.recorder = testutil.NewFakeRecorder()
|
||||
if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil {
|
||||
if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if err := nodeController.monitorNodeStatus(); err != nil {
|
||||
@ -779,7 +794,7 @@ func TestPodStatusChange(t *testing.T) {
|
||||
item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus
|
||||
item.fakeNodeHandler.Existing[1].Status = item.secondNodeNewStatus
|
||||
}
|
||||
if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil {
|
||||
if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if err := nodeController.monitorNodeStatus(); err != nil {
|
||||
@ -789,7 +804,7 @@ func TestPodStatusChange(t *testing.T) {
|
||||
for _, zone := range zones {
|
||||
nodeController.zonePodEvictor[zone].Try(func(value scheduler.TimedValue) (bool, time.Duration) {
|
||||
nodeUID, _ := value.UID.(string)
|
||||
util.DeletePods(item.fakeNodeHandler, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetStore)
|
||||
nodeutil.DeletePods(item.fakeNodeHandler, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetStore)
|
||||
return true, 0
|
||||
})
|
||||
}
|
||||
@ -809,7 +824,6 @@ func TestPodStatusChange(t *testing.T) {
|
||||
t.Errorf("expected pod update: %+v, got %+v for %+v", podReasonUpdate, item.expectedPodUpdate, item.description)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
|
||||
@ -1280,9 +1294,18 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
|
||||
Existing: item.nodeList,
|
||||
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: item.podList}),
|
||||
}
|
||||
nodeController, _ := newNodeControllerFromClient(nil, fakeNodeHandler,
|
||||
evictionTimeout, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealthyThreshold, testNodeMonitorGracePeriod,
|
||||
testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false)
|
||||
nodeController, _ := newNodeLifecycleControllerFromClient(
|
||||
nil,
|
||||
fakeNodeHandler,
|
||||
evictionTimeout,
|
||||
testRateLimiterQPS,
|
||||
testRateLimiterQPS,
|
||||
testLargeClusterThreshold,
|
||||
testUnhealthyThreshold,
|
||||
testNodeMonitorGracePeriod,
|
||||
testNodeStartupGracePeriod,
|
||||
testNodeMonitorPeriod,
|
||||
false)
|
||||
nodeController.now = func() metav1.Time { return fakeNow }
|
||||
nodeController.enterPartialDisruptionFunc = func(nodeNum int) float32 {
|
||||
return testRateLimiterQPS
|
||||
@ -1291,7 +1314,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
|
||||
nodeController.enterFullDisruptionFunc = func(nodeNum int) float32 {
|
||||
return testRateLimiterQPS
|
||||
}
|
||||
if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil {
|
||||
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if err := nodeController.monitorNodeStatus(); err != nil {
|
||||
@ -1309,7 +1332,7 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
|
||||
fakeNodeHandler.Existing[i].Status = item.updatedNodeStatuses[i]
|
||||
}
|
||||
|
||||
if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil {
|
||||
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if err := nodeController.monitorNodeStatus(); err != nil {
|
||||
@ -1337,27 +1360,6 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// doEviction does the fake eviction and returns the status of eviction operation.
|
||||
func (nc *nodeController) doEviction(fakeNodeHandler *testutil.FakeNodeHandler) bool {
|
||||
var podEvicted bool
|
||||
zones := testutil.GetZones(fakeNodeHandler)
|
||||
for _, zone := range zones {
|
||||
nc.zonePodEvictor[zone].Try(func(value scheduler.TimedValue) (bool, time.Duration) {
|
||||
uid, _ := value.UID.(string)
|
||||
util.DeletePods(fakeNodeHandler, nc.recorder, value.Value, uid, nc.daemonSetStore)
|
||||
return true, 0
|
||||
})
|
||||
}
|
||||
|
||||
for _, action := range fakeNodeHandler.Actions() {
|
||||
if action.GetVerb() == "delete" && action.GetResource().Resource == "pods" {
|
||||
podEvicted = true
|
||||
return podEvicted
|
||||
}
|
||||
}
|
||||
return podEvicted
|
||||
}
|
||||
|
||||
// TestCloudProviderNoRateLimit tests that monitorNodes() immediately deletes
|
||||
// pods and the node when kubelet has not reported, and the cloudprovider says
|
||||
// the node is gone.
|
||||
@ -1384,10 +1386,18 @@ func TestCloudProviderNoRateLimit(t *testing.T) {
|
||||
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0"), *testutil.NewPod("pod1", "node0")}}),
|
||||
DeleteWaitChan: make(chan struct{}),
|
||||
}
|
||||
nodeController, _ := newNodeControllerFromClient(nil, fnh, 10*time.Minute,
|
||||
testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealthyThreshold,
|
||||
testNodeMonitorGracePeriod, testNodeStartupGracePeriod,
|
||||
testNodeMonitorPeriod, nil, nil, 0, false, false)
|
||||
nodeController, _ := newNodeLifecycleControllerFromClient(
|
||||
nil,
|
||||
fnh,
|
||||
10*time.Minute,
|
||||
testRateLimiterQPS,
|
||||
testRateLimiterQPS,
|
||||
testLargeClusterThreshold,
|
||||
testUnhealthyThreshold,
|
||||
testNodeMonitorGracePeriod,
|
||||
testNodeStartupGracePeriod,
|
||||
testNodeMonitorPeriod,
|
||||
false)
|
||||
nodeController.cloud = &fakecloud.FakeCloud{}
|
||||
nodeController.now = func() metav1.Time { return metav1.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC) }
|
||||
nodeController.recorder = testutil.NewFakeRecorder()
|
||||
@ -1395,7 +1405,7 @@ func TestCloudProviderNoRateLimit(t *testing.T) {
|
||||
return false, nil
|
||||
}
|
||||
// monitorNodeStatus should allow this node to be immediately deleted
|
||||
if err := syncNodeStore(nodeController, fnh); err != nil {
|
||||
if err := nodeController.syncNodeStore(fnh); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if err := nodeController.monitorNodeStatus(); err != nil {
|
||||
@ -1455,6 +1465,14 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
|
||||
LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
LastTransitionTime: fakeNow,
|
||||
},
|
||||
{
|
||||
Type: v1.NodeOutOfDisk,
|
||||
Status: v1.ConditionUnknown,
|
||||
Reason: "NodeStatusNeverUpdated",
|
||||
Message: "Kubelet never posted node status.",
|
||||
LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
LastTransitionTime: fakeNow,
|
||||
},
|
||||
{
|
||||
Type: v1.NodeMemoryPressure,
|
||||
Status: v1.ConditionUnknown,
|
||||
@ -1512,6 +1530,13 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
|
||||
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
|
||||
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
|
||||
},
|
||||
{
|
||||
Type: v1.NodeOutOfDisk,
|
||||
Status: v1.ConditionFalse,
|
||||
// Node status hasn't been updated for 1hr.
|
||||
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
|
||||
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
|
||||
},
|
||||
},
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceCPU): resource.MustParse("10"),
|
||||
@ -1536,6 +1561,13 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
|
||||
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
|
||||
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
|
||||
},
|
||||
{
|
||||
Type: v1.NodeOutOfDisk,
|
||||
Status: v1.ConditionFalse,
|
||||
// Node status hasn't been updated for 1hr.
|
||||
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
|
||||
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
|
||||
},
|
||||
},
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceCPU): resource.MustParse("10"),
|
||||
@ -1558,6 +1590,14 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
|
||||
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
|
||||
LastTransitionTime: metav1.Time{Time: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)},
|
||||
},
|
||||
{
|
||||
Type: v1.NodeOutOfDisk,
|
||||
Status: v1.ConditionUnknown,
|
||||
Reason: "NodeStatusUnknown",
|
||||
Message: "Kubelet stopped posting node status.",
|
||||
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
|
||||
LastTransitionTime: metav1.Time{Time: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)},
|
||||
},
|
||||
{
|
||||
Type: v1.NodeMemoryPressure,
|
||||
Status: v1.ConditionUnknown,
|
||||
@ -1624,12 +1664,21 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
|
||||
}
|
||||
|
||||
for i, item := range table {
|
||||
nodeController, _ := newNodeControllerFromClient(nil, item.fakeNodeHandler, 5*time.Minute,
|
||||
testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealthyThreshold,
|
||||
testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false)
|
||||
nodeController, _ := newNodeLifecycleControllerFromClient(
|
||||
nil,
|
||||
item.fakeNodeHandler,
|
||||
5*time.Minute,
|
||||
testRateLimiterQPS,
|
||||
testRateLimiterQPS,
|
||||
testLargeClusterThreshold,
|
||||
testUnhealthyThreshold,
|
||||
testNodeMonitorGracePeriod,
|
||||
testNodeStartupGracePeriod,
|
||||
testNodeMonitorPeriod,
|
||||
false)
|
||||
nodeController.now = func() metav1.Time { return fakeNow }
|
||||
nodeController.recorder = testutil.NewFakeRecorder()
|
||||
if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil {
|
||||
if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if err := nodeController.monitorNodeStatus(); err != nil {
|
||||
@ -1638,7 +1687,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
|
||||
if item.timeToPass > 0 {
|
||||
nodeController.now = func() metav1.Time { return metav1.Time{Time: fakeNow.Add(item.timeToPass)} }
|
||||
item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus
|
||||
if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil {
|
||||
if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if err := nodeController.monitorNodeStatus(); err != nil {
|
||||
@ -1768,12 +1817,21 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
|
||||
}
|
||||
|
||||
for i, item := range table {
|
||||
nodeController, _ := newNodeControllerFromClient(nil, item.fakeNodeHandler, 5*time.Minute,
|
||||
testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealthyThreshold,
|
||||
testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, false)
|
||||
nodeController, _ := newNodeLifecycleControllerFromClient(
|
||||
nil,
|
||||
item.fakeNodeHandler,
|
||||
5*time.Minute,
|
||||
testRateLimiterQPS,
|
||||
testRateLimiterQPS,
|
||||
testLargeClusterThreshold,
|
||||
testUnhealthyThreshold,
|
||||
testNodeMonitorGracePeriod,
|
||||
testNodeStartupGracePeriod,
|
||||
testNodeMonitorPeriod,
|
||||
false)
|
||||
nodeController.now = func() metav1.Time { return fakeNow }
|
||||
nodeController.recorder = testutil.NewFakeRecorder()
|
||||
if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil {
|
||||
if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if err := nodeController.monitorNodeStatus(); err != nil {
|
||||
@ -1782,7 +1840,7 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
|
||||
if item.timeToPass > 0 {
|
||||
nodeController.now = func() metav1.Time { return metav1.Time{Time: fakeNow.Add(item.timeToPass)} }
|
||||
item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus
|
||||
if err := syncNodeStore(nodeController, item.fakeNodeHandler); err != nil {
|
||||
if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if err := nodeController.monitorNodeStatus(); err != nil {
|
||||
@ -1879,12 +1937,21 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) {
|
||||
originalTaint := UnreachableTaintTemplate
|
||||
updatedTaint := NotReadyTaintTemplate
|
||||
|
||||
nodeController, _ := newNodeControllerFromClient(nil, fakeNodeHandler,
|
||||
evictionTimeout, testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealthyThreshold, testNodeMonitorGracePeriod,
|
||||
testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, true)
|
||||
nodeController, _ := newNodeLifecycleControllerFromClient(
|
||||
nil,
|
||||
fakeNodeHandler,
|
||||
evictionTimeout,
|
||||
testRateLimiterQPS,
|
||||
testRateLimiterQPS,
|
||||
testLargeClusterThreshold,
|
||||
testUnhealthyThreshold,
|
||||
testNodeMonitorGracePeriod,
|
||||
testNodeStartupGracePeriod,
|
||||
testNodeMonitorPeriod,
|
||||
true)
|
||||
nodeController.now = func() metav1.Time { return fakeNow }
|
||||
nodeController.recorder = testutil.NewFakeRecorder()
|
||||
if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil {
|
||||
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if err := nodeController.monitorNodeStatus(); err != nil {
|
||||
@ -1922,7 +1989,7 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) {
|
||||
return
|
||||
}
|
||||
|
||||
if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil {
|
||||
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if err := nodeController.monitorNodeStatus(); err != nil {
|
||||
@ -1972,9 +2039,18 @@ func TestTaintsNodeByCondition(t *testing.T) {
|
||||
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
|
||||
}
|
||||
|
||||
nodeController, _ := newNodeControllerFromClient(nil, fakeNodeHandler, evictionTimeout,
|
||||
testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealthyThreshold, testNodeMonitorGracePeriod,
|
||||
testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, true)
|
||||
nodeController, _ := newNodeLifecycleControllerFromClient(
|
||||
nil,
|
||||
fakeNodeHandler,
|
||||
evictionTimeout,
|
||||
testRateLimiterQPS,
|
||||
testRateLimiterQPS,
|
||||
testLargeClusterThreshold,
|
||||
testUnhealthyThreshold,
|
||||
testNodeMonitorGracePeriod,
|
||||
testNodeStartupGracePeriod,
|
||||
testNodeMonitorPeriod,
|
||||
true)
|
||||
nodeController.now = func() metav1.Time { return fakeNow }
|
||||
nodeController.recorder = testutil.NewFakeRecorder()
|
||||
|
||||
@ -2098,11 +2174,11 @@ func TestTaintsNodeByCondition(t *testing.T) {
|
||||
|
||||
for _, test := range tests {
|
||||
fakeNodeHandler.Update(test.Node)
|
||||
if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil {
|
||||
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
nodeController.doNoScheduleTaintingPass(test.Node)
|
||||
if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil {
|
||||
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
node0, err := nodeController.nodeLister.Get("node0")
|
||||
@ -2150,10 +2226,18 @@ func TestNodeEventGeneration(t *testing.T) {
|
||||
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
|
||||
}
|
||||
|
||||
nodeController, _ := newNodeControllerFromClient(nil, fakeNodeHandler, 5*time.Minute,
|
||||
testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealthyThreshold,
|
||||
testNodeMonitorGracePeriod, testNodeStartupGracePeriod,
|
||||
testNodeMonitorPeriod, nil, nil, 0, false, false)
|
||||
nodeController, _ := newNodeLifecycleControllerFromClient(
|
||||
nil,
|
||||
fakeNodeHandler,
|
||||
5*time.Minute,
|
||||
testRateLimiterQPS,
|
||||
testRateLimiterQPS,
|
||||
testLargeClusterThreshold,
|
||||
testUnhealthyThreshold,
|
||||
testNodeMonitorGracePeriod,
|
||||
testNodeStartupGracePeriod,
|
||||
testNodeMonitorPeriod,
|
||||
false)
|
||||
nodeController.cloud = &fakecloud.FakeCloud{}
|
||||
nodeController.nodeExistsInCloudProvider = func(nodeName types.NodeName) (bool, error) {
|
||||
return false, nil
|
||||
@ -2161,7 +2245,7 @@ func TestNodeEventGeneration(t *testing.T) {
|
||||
nodeController.now = func() metav1.Time { return fakeNow }
|
||||
fakeRecorder := testutil.NewFakeRecorder()
|
||||
nodeController.recorder = fakeRecorder
|
||||
if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil {
|
||||
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if err := nodeController.monitorNodeStatus(); err != nil {
|
||||
@ -2208,9 +2292,18 @@ func TestFixDeprecatedTaintKey(t *testing.T) {
|
||||
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
|
||||
}
|
||||
|
||||
nodeController, _ := newNodeControllerFromClient(nil, fakeNodeHandler, evictionTimeout,
|
||||
testRateLimiterQPS, testRateLimiterQPS, testLargeClusterThreshold, testUnhealthyThreshold, testNodeMonitorGracePeriod,
|
||||
testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false, true)
|
||||
nodeController, _ := newNodeLifecycleControllerFromClient(
|
||||
nil,
|
||||
fakeNodeHandler,
|
||||
evictionTimeout,
|
||||
testRateLimiterQPS,
|
||||
testRateLimiterQPS,
|
||||
testLargeClusterThreshold,
|
||||
testUnhealthyThreshold,
|
||||
testNodeMonitorGracePeriod,
|
||||
testNodeStartupGracePeriod,
|
||||
testNodeMonitorPeriod,
|
||||
true)
|
||||
nodeController.now = func() metav1.Time { return fakeNow }
|
||||
nodeController.recorder = testutil.NewFakeRecorder()
|
||||
|
||||
@ -2319,11 +2412,11 @@ func TestFixDeprecatedTaintKey(t *testing.T) {
|
||||
|
||||
for _, test := range tests {
|
||||
fakeNodeHandler.Update(test.Node)
|
||||
if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil {
|
||||
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
nodeController.doFixDeprecatedTaintKeyPass(test.Node)
|
||||
if err := syncNodeStore(nodeController, fakeNodeHandler); err != nil {
|
||||
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
node, err := nodeController.nodeLister.Get(test.Node.GetName())
|
@ -1,39 +1,14 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"rate_limited_queue_test.go",
|
||||
"taint_controller_test.go",
|
||||
"timed_workers_test.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/node/scheduler",
|
||||
library = ":go_default_library",
|
||||
deps = [
|
||||
"//pkg/controller/testutil:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
|
||||
"//vendor/k8s.io/client-go/testing:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
|
||||
],
|
||||
)
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"rate_limited_queue.go",
|
||||
"taint_controller.go",
|
||||
"taint_manager.go",
|
||||
"timed_workers.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/node/scheduler",
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/apis/core/helper:go_default_library",
|
||||
"//pkg/apis/core/v1/helper:go_default_library",
|
||||
@ -53,6 +28,25 @@ go_library(
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"rate_limited_queue_test.go",
|
||||
"taint_manager_test.go",
|
||||
"timed_workers_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/controller/testutil:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
|
||||
"//vendor/k8s.io/client-go/testing:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
@ -64,4 +58,5 @@ filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
@ -293,20 +293,15 @@ func (q *RateLimitedTimedQueue) SwapLimiter(newQPS float32) {
newLimiter = flowcontrol.NewFakeNeverRateLimiter()
} else {
newLimiter = flowcontrol.NewTokenBucketRateLimiter(newQPS, EvictionRateLimiterBurst)
}
// If we're currently waiting on limiter, we drain the new one - this is a good approach when Burst value is 1
// TODO: figure out if we need to support higher Burst values and decide on the drain logic, should we keep:
// - saturation (percentage of used tokens)
// - number of used tokens
// - number of available tokens
// - something else
for q.limiter.Saturation() > newLimiter.Saturation() {
// Check if we're not using fake limiter
previousSaturation := newLimiter.Saturation()
newLimiter.TryAccept()
// It's a fake limiter
if newLimiter.Saturation() == previousSaturation {
break

// If we're currently waiting on limiter, we drain the new one - this is a good approach when Burst value is 1
// TODO: figure out if we need to support higher Burst values and decide on the drain logic, should we keep:
// - saturation (percentage of used tokens)
// - number of used tokens
// - number of available tokens
// - something else
if q.limiter.TryAccept() == false {
newLimiter.TryAccept()
}
}
q.limiter.Stop()
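
The hunk above replaces the Saturation-based drain loop with a single TryAccept check on the old limiter: if the old limiter is currently throttling, one token is consumed from the new limiter so callers stay throttled across the swap. A minimal standalone sketch of that behavior, assuming client-go's k8s.io/client-go/util/flowcontrol package and a newQPS <= 0 condition for the fake limiter (the helper name is illustrative, not part of this commit):

// swapLimiter builds a limiter for newQPS and carries one token of "debt" over
// from the old limiter if it would reject right now (reasonable when burst is 1).
func swapLimiter(old flowcontrol.RateLimiter, newQPS float32, burst int) flowcontrol.RateLimiter {
	var newLimiter flowcontrol.RateLimiter
	if newQPS <= 0 {
		newLimiter = flowcontrol.NewFakeNeverRateLimiter()
	} else {
		newLimiter = flowcontrol.NewTokenBucketRateLimiter(newQPS, burst)
	}
	if !old.TryAccept() {
		newLimiter.TryAccept()
	}
	old.Stop()
	return newLimiter
}
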
@ -18,9 +18,6 @@ package scheduler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/apis/core/helper"
|
||||
@ -30,6 +27,8 @@ import (
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
@ -273,7 +273,7 @@ func TestCreateNode(t *testing.T) {
expectDelete bool
}{
{
description: "Creating Node maching already assigned Pod",
description: "Creating Node matching already assigned Pod",
pods: []v1.Pod{
*testutil.NewPod("pod1", "node1"),
},
@ -281,7 +281,7 @@ func TestCreateNode(t *testing.T) {
expectDelete: false,
},
{
description: "Creating tainted Node maching already assigned Pod",
description: "Creating tainted Node matching already assigned Pod",
pods: []v1.Pod{
*testutil.NewPod("pod1", "node1"),
},
@ -289,7 +289,7 @@ func TestCreateNode(t *testing.T) {
expectDelete: true,
},
{
description: "Creating tainted Node maching already assigned tolerating Pod",
description: "Creating tainted Node matching already assigned tolerating Pod",
pods: []v1.Pod{
*addToleration(testutil.NewPod("pod1", "node1"), 1, -1),
},
@ -468,7 +468,7 @@ func TestUpdateNodeWithMultiplePods(t *testing.T) {
},
},
{
description: "Evict all pods not maching all taints instantly",
description: "Evict all pods not matching all taints instantly",
pods: []v1.Pod{
*testutil.NewPod("pod1", "node1"),
*addToleration(testutil.NewPod("pod2", "node1"), 1, 1),
5
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/BUILD
generated
vendored
5
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/BUILD
generated
vendored
@ -55,8 +55,7 @@ go_test(
"legacy_replica_calculator_test.go",
"replica_calculator_test.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/podautoscaler",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/autoscaling:go_default_library",
@ -83,10 +82,12 @@ go_test(
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/heapster/metrics/api/v1/types:go_default_library",
"//vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1:go_default_library",
"//vendor/k8s.io/metrics/pkg/apis/external_metrics/v1beta1:go_default_library",
"//vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1:go_default_library",
"//vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1:go_default_library",
"//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/fake:go_default_library",
"//vendor/k8s.io/metrics/pkg/client/custom_metrics/fake:go_default_library",
"//vendor/k8s.io/metrics/pkg/client/external_metrics/fake:go_default_library",
],
)
41
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go
generated
vendored
41
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go
generated
vendored
@ -299,6 +299,45 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
},
}
}
case autoscalingv2.ExternalMetricSourceType:
if metricSpec.External.TargetAverageValue != nil {
replicaCountProposal, utilizationProposal, timestampProposal, err = a.replicaCalc.GetExternalPerPodMetricReplicas(currentReplicas, metricSpec.External.TargetAverageValue.MilliValue(), metricSpec.External.MetricName, hpa.Namespace, metricSpec.External.MetricSelector)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetExternalMetric", err.Error())
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetExternalMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, "", nil, time.Time{}, fmt.Errorf("failed to get %s external metric: %v", metricSpec.External.MetricName, err)
}
metricNameProposal = fmt.Sprintf("external metric %s(%+v)", metricSpec.External.MetricName, metricSpec.External.MetricSelector)
statuses[i] = autoscalingv2.MetricStatus{
Type: autoscalingv2.ExternalMetricSourceType,
External: &autoscalingv2.ExternalMetricStatus{
MetricSelector: metricSpec.External.MetricSelector,
MetricName: metricSpec.External.MetricName,
CurrentAverageValue: resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
},
}
} else if metricSpec.External.TargetValue != nil {
replicaCountProposal, utilizationProposal, timestampProposal, err = a.replicaCalc.GetExternalMetricReplicas(currentReplicas, metricSpec.External.TargetValue.MilliValue(), metricSpec.External.MetricName, hpa.Namespace, metricSpec.External.MetricSelector)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetExternalMetric", err.Error())
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetExternalMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, "", nil, time.Time{}, fmt.Errorf("failed to get external metric %s: %v", metricSpec.External.MetricName, err)
}
metricNameProposal = fmt.Sprintf("external metric %s(%+v)", metricSpec.External.MetricName, metricSpec.External.MetricSelector)
statuses[i] = autoscalingv2.MetricStatus{
Type: autoscalingv2.ExternalMetricSourceType,
External: &autoscalingv2.ExternalMetricStatus{
MetricSelector: metricSpec.External.MetricSelector,
MetricName: metricSpec.External.MetricName,
CurrentValue: *resource.NewMilliQuantity(utilizationProposal, resource.DecimalSI),
},
}
} else {
errMsg := "invalid external metric source: neither a value target nor an average value target was set"
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetExternalMetric", errMsg)
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetExternalMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, "", nil, time.Time{}, fmt.Errorf(errMsg)
}
default:
errMsg := fmt.Sprintf("unknown metric source type %q", string(metricSpec.Type))
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidMetricSourceType", errMsg)
@ -313,7 +352,7 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
}
}

setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionTrue, "ValidMetricFound", "the HPA was able to succesfully calculate a replica count from %s", metric)
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionTrue, "ValidMetricFound", "the HPA was able to successfully calculate a replica count from %s", metric)
return replicas, metric, statuses, timestamp, nil
}
187
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal_test.go
generated
vendored
187
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal_test.go
generated
vendored
@ -35,7 +35,6 @@ import (
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
clientfake "k8s.io/client-go/kubernetes/fake"
scalefake "k8s.io/client-go/scale/fake"
core "k8s.io/client-go/testing"
"k8s.io/kubernetes/pkg/api/legacyscheme"
@ -43,9+42,11 @@ import (
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
cmapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta1"
emapi "k8s.io/metrics/pkg/apis/external_metrics/v1beta1"
metricsapi "k8s.io/metrics/pkg/apis/metrics/v1beta1"
metricsfake "k8s.io/metrics/pkg/client/clientset_generated/clientset/fake"
cmfake "k8s.io/metrics/pkg/client/custom_metrics/fake"
emfake "k8s.io/metrics/pkg/client/external_metrics/fake"

"github.com/stretchr/testify/assert"

@ -146,7 +147,7 @@ func init() {
scaleUpLimitFactor = 8
}

func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient, *scalefake.FakeScaleClient) {
func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient, *emfake.FakeExternalMetricsClient, *scalefake.FakeScaleClient) {
namespace := "test-namespace"
hpaName := "test-hpa"
podNamePrefix := "test-pod"
@ -524,7 +525,34 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
return true, metrics, nil
})

return fakeClient, fakeMetricsClient, fakeCMClient, fakeScaleClient
fakeEMClient := &emfake.FakeExternalMetricsClient{}

fakeEMClient.AddReactor("list", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
tc.Lock()
defer tc.Unlock()

listAction, wasList := action.(core.ListAction)
if !wasList {
return true, nil, fmt.Errorf("expected a list action, got %v instead", action)
}

metrics := &emapi.ExternalMetricValueList{}

assert.Equal(t, "qps", listAction.GetResource().Resource, "the metric name requested should have been qps, as specified in the metric spec")

for _, level := range tc.reportedLevels {
metric := emapi.ExternalMetricValue{
Timestamp: metav1.Time{Time: time.Now()},
MetricName: "qps",
Value: *resource.NewMilliQuantity(int64(level), resource.DecimalSI),
}
metrics.Items = append(metrics.Items, metric)
}

return true, metrics, nil
})

return fakeClient, fakeMetricsClient, fakeCMClient, fakeEMClient, fakeScaleClient
}

func (tc *testCase) verifyResults(t *testing.T) {
@ -539,7 +567,7 @@ func (tc *testCase) verifyResults(t *testing.T) {
}

func (tc *testCase) setupController(t *testing.T) (*HorizontalController, informers.SharedInformerFactory) {
testClient, testMetricsClient, testCMClient, testScaleClient := tc.prepareTestClient(t)
testClient, testMetricsClient, testCMClient, testEMClient, testScaleClient := tc.prepareTestClient(t)
if tc.testClient != nil {
testClient = tc.testClient
}
@ -555,9 +583,10 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, inform
metricsClient := metrics.NewRESTMetricsClient(
testMetricsClient.MetricsV1beta1(),
testCMClient,
testEMClient,
)

eventClient := &clientfake.Clientset{}
eventClient := &fake.Clientset{}
eventClient.AddReactor("create", "events", func(action core.Action) (handled bool, ret runtime.Object, err error) {
tc.Lock()
defer tc.Unlock()
@ -823,6 +852,48 @@ func TestScaleUpCMObject(t *testing.T) {
tc.runTest(t)
}

func TestScaleUpCMExternal(t *testing.T) {
tc := testCase{
minReplicas: 2,
maxReplicas: 6,
initialReplicas: 3,
desiredReplicas: 4,
metricsTarget: []autoscalingv2.MetricSpec{
{
Type: autoscalingv2.ExternalMetricSourceType,
External: &autoscalingv2.ExternalMetricSource{
MetricSelector: &metav1.LabelSelector{},
MetricName: "qps",
TargetValue: resource.NewMilliQuantity(6666, resource.DecimalSI),
},
},
},
reportedLevels: []uint64{8600},
}
tc.runTest(t)
}

func TestScaleUpPerPodCMExternal(t *testing.T) {
tc := testCase{
minReplicas: 2,
maxReplicas: 6,
initialReplicas: 3,
desiredReplicas: 4,
metricsTarget: []autoscalingv2.MetricSpec{
{
Type: autoscalingv2.ExternalMetricSourceType,
External: &autoscalingv2.ExternalMetricSource{
MetricSelector: &metav1.LabelSelector{},
MetricName: "qps",
TargetAverageValue: resource.NewMilliQuantity(2222, resource.DecimalSI),
},
},
},
reportedLevels: []uint64{8600},
}
tc.runTest(t)
}

func TestScaleDown(t *testing.T) {
tc := testCase{
minReplicas: 2,
@ -887,6 +958,48 @@ func TestScaleDownCMObject(t *testing.T) {
tc.runTest(t)
}

func TestScaleDownCMExternal(t *testing.T) {
tc := testCase{
minReplicas: 2,
maxReplicas: 6,
initialReplicas: 5,
desiredReplicas: 3,
metricsTarget: []autoscalingv2.MetricSpec{
{
Type: autoscalingv2.ExternalMetricSourceType,
External: &autoscalingv2.ExternalMetricSource{
MetricSelector: &metav1.LabelSelector{},
MetricName: "qps",
TargetValue: resource.NewMilliQuantity(14400, resource.DecimalSI),
},
},
},
reportedLevels: []uint64{8600},
}
tc.runTest(t)
}

func TestScaleDownPerPodCMExternal(t *testing.T) {
tc := testCase{
minReplicas: 2,
maxReplicas: 6,
initialReplicas: 5,
desiredReplicas: 3,
metricsTarget: []autoscalingv2.MetricSpec{
{
Type: autoscalingv2.ExternalMetricSourceType,
External: &autoscalingv2.ExternalMetricSource{
MetricSelector: &metav1.LabelSelector{},
MetricName: "qps",
TargetAverageValue: resource.NewMilliQuantity(3000, resource.DecimalSI),
},
},
},
reportedLevels: []uint64{8600},
}
tc.runTest(t)
}

func TestScaleDownIgnoresUnreadyPods(t *testing.T) {
tc := testCase{
minReplicas: 2,
@ -980,6 +1093,58 @@ func TestToleranceCMObject(t *testing.T) {
tc.runTest(t)
}

func TestToleranceCMExternal(t *testing.T) {
tc := testCase{
minReplicas: 2,
maxReplicas: 6,
initialReplicas: 4,
desiredReplicas: 4,
metricsTarget: []autoscalingv2.MetricSpec{
{
Type: autoscalingv2.ExternalMetricSourceType,
External: &autoscalingv2.ExternalMetricSource{
MetricSelector: &metav1.LabelSelector{},
MetricName: "qps",
TargetValue: resource.NewMilliQuantity(8666, resource.DecimalSI),
},
},
},
reportedLevels: []uint64{8600},
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
Status: v1.ConditionTrue,
Reason: "ReadyForNewScale",
}),
}
tc.runTest(t)
}

func TestTolerancePerPodCMExternal(t *testing.T) {
tc := testCase{
minReplicas: 2,
maxReplicas: 6,
initialReplicas: 4,
desiredReplicas: 4,
metricsTarget: []autoscalingv2.MetricSpec{
{
Type: autoscalingv2.ExternalMetricSourceType,
External: &autoscalingv2.ExternalMetricSource{
MetricSelector: &metav1.LabelSelector{},
MetricName: "qps",
TargetAverageValue: resource.NewMilliQuantity(2200, resource.DecimalSI),
},
},
},
reportedLevels: []uint64{8600},
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
Status: v1.ConditionTrue,
Reason: "ReadyForNewScale",
}),
}
tc.runTest(t)
}

func TestMinReplicas(t *testing.T) {
tc := testCase{
minReplicas: 2,
@ -1269,7 +1434,7 @@ func TestConditionInvalidSelectorMissing(t *testing.T) {
},
}

_, _, _, testScaleClient := tc.prepareTestClient(t)
_, _, _, _, testScaleClient := tc.prepareTestClient(t)
tc.testScaleClient = testScaleClient

testScaleClient.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
@ -1314,7 +1479,7 @@ func TestConditionInvalidSelectorUnparsable(t *testing.T) {
},
}

_, _, _, testScaleClient := tc.prepareTestClient(t)
_, _, _, _, testScaleClient := tc.prepareTestClient(t)
tc.testScaleClient = testScaleClient

testScaleClient.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
@ -1375,7 +1540,7 @@ func TestConditionFailedGetMetrics(t *testing.T) {
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
useMetricsAPI: true,
}
_, testMetricsClient, testCMClient, _ := tc.prepareTestClient(t)
_, testMetricsClient, testCMClient, _, _ := tc.prepareTestClient(t)
tc.testMetricsClient = testMetricsClient
tc.testCMClient = testCMClient

@ -1448,7 +1613,7 @@ func TestConditionFailedGetScale(t *testing.T) {
},
}

_, _, _, testScaleClient := tc.prepareTestClient(t)
_, _, _, _, testScaleClient := tc.prepareTestClient(t)
tc.testScaleClient = testScaleClient

testScaleClient.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
@ -1475,7 +1640,7 @@ func TestConditionFailedUpdateScale(t *testing.T) {
}),
}

_, _, _, testScaleClient := tc.prepareTestClient(t)
_, _, _, _, testScaleClient := tc.prepareTestClient(t)
tc.testScaleClient = testScaleClient

testScaleClient.PrependReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
@ -1661,7 +1826,7 @@ func TestAvoidUncessaryUpdates(t *testing.T) {
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
useMetricsAPI: true,
}
testClient, _, _, _ := tc.prepareTestClient(t)
testClient, _, _, _, _ := tc.prepareTestClient(t)
tc.testClient = testClient
var savedHPA *autoscalingv1.HorizontalPodAutoscaler
testClient.PrependReactor("list", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
Some files were not shown because too many files have changed in this diff