vendor cleanup: remove unused, non-Go, and test files

Madhu Rajanna
2019-01-16 00:05:52 +05:30
parent 52cf4aa902
commit b10ba188e7
15421 changed files with 17 additions and 4208853 deletions

View File

@@ -1,18 +0,0 @@
{
"Rules": [
{
"SelectorRegexp": "k8s[.]io/kubernetes/pkg/client/unversioned$",
"ForbiddenPrefixes": [
"k8s.io/kubernetes/pkg/client/unversioned"
]
},
{
"SelectorRegexp": "k8s[.]io/kubernetes/pkg/client/unversioned/testclient$",
"ForbiddenPrefixes": [
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
]
}
]
}
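
For context, this removed file looks like an import-boss rules file: an import matching SelectorRegexp is checked against ForbiddenPrefixes and rejected on a match. A minimal sketch (not in the commit) of a file the first rule would flag, assuming standard import-boss semantics:

// Hypothetical file (not in the commit): with the rule above in force,
// import-boss would reject this package, because the import matches both
// the SelectorRegexp and the forbidden prefix.
package example

import (
	_ "k8s.io/kubernetes/pkg/client/unversioned" // forbidden by the rule
)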

View File

@@ -1,140 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = [
"controller_ref_manager_test.go",
"controller_utils_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/api/testapi:go_default_library",
"//pkg/apis/core/install:go_default_library",
"//pkg/controller/testutil:go_default_library",
"//pkg/securitycontext:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"client_builder.go",
"controller_ref_manager.go",
"controller_utils.go",
"doc.go",
"lookup_cache.go",
],
importpath = "k8s.io/kubernetes/pkg/controller",
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/install:go_default_library",
"//pkg/apis/core/validation:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/serviceaccount:go_default_library",
"//pkg/util/hash:go_default_library",
"//pkg/util/taints:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/golang/groupcache/lru:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/authentication/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/integer:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/controller/bootstrap:all-srcs",
"//pkg/controller/certificates:all-srcs",
"//pkg/controller/cloud:all-srcs",
"//pkg/controller/clusterroleaggregation:all-srcs",
"//pkg/controller/cronjob:all-srcs",
"//pkg/controller/daemon:all-srcs",
"//pkg/controller/deployment:all-srcs",
"//pkg/controller/disruption:all-srcs",
"//pkg/controller/endpoint:all-srcs",
"//pkg/controller/garbagecollector:all-srcs",
"//pkg/controller/history:all-srcs",
"//pkg/controller/job:all-srcs",
"//pkg/controller/namespace:all-srcs",
"//pkg/controller/nodeipam:all-srcs",
"//pkg/controller/nodelifecycle:all-srcs",
"//pkg/controller/podautoscaler:all-srcs",
"//pkg/controller/podgc:all-srcs",
"//pkg/controller/replicaset:all-srcs",
"//pkg/controller/replication:all-srcs",
"//pkg/controller/resourcequota:all-srcs",
"//pkg/controller/route:all-srcs",
"//pkg/controller/service:all-srcs",
"//pkg/controller/serviceaccount:all-srcs",
"//pkg/controller/statefulset:all-srcs",
"//pkg/controller/testutil:all-srcs",
"//pkg/controller/ttl:all-srcs",
"//pkg/controller/util/node:all-srcs",
"//pkg/controller/volume/attachdetach:all-srcs",
"//pkg/controller/volume/events:all-srcs",
"//pkg/controller/volume/expand:all-srcs",
"//pkg/controller/volume/persistentvolume:all-srcs",
"//pkg/controller/volume/pvcprotection:all-srcs",
"//pkg/controller/volume/pvprotection:all-srcs",
],
tags = ["automanaged"],
)

View File

@@ -1,7 +0,0 @@
approvers:
- deads2k
- derekwaynecarr
- mikedanese
- janetkuo
reviewers:
- deads2k

View File

@@ -1,78 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = [
"bootstrapsigner_test.go",
"common_test.go",
"jws_test.go",
"tokencleaner_test.go",
"util_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/helper:go_default_library",
"//pkg/controller:go_default_library",
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/tools/bootstrap/token/api:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"bootstrapsigner.go",
"doc.go",
"jws.go",
"tokencleaner.go",
"util.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/bootstrap",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/util/metrics:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/gopkg.in/square/go-jose.v2:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/bootstrap/token/api:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -1,307 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bootstrap
import (
"strings"
"time"
"github.com/golang/glog"
"fmt"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
informers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/util/metrics"
)
// BootstrapSignerOptions contains options for the BootstrapSigner
type BootstrapSignerOptions struct {
// ConfigMapNamespace is the namespace of the ConfigMap
ConfigMapNamespace string
// ConfigMapName is the name for the ConfigMap
ConfigMapName string
// TokenSecretNamespace is the namespace for token Secrets.
TokenSecretNamespace string
// ConfigMapResync is the time.Duration at which to fully re-list configmaps.
// If zero, re-list will be delayed as long as possible.
ConfigMapResync time.Duration
// SecretResync is the time.Duration at which to fully re-list secrets.
// If zero, re-list will be delayed as long as possible.
SecretResync time.Duration
}
// DefaultBootstrapSignerOptions returns a set of default options for creating a
// BootstrapSigner
func DefaultBootstrapSignerOptions() BootstrapSignerOptions {
return BootstrapSignerOptions{
ConfigMapNamespace: api.NamespacePublic,
ConfigMapName: bootstrapapi.ConfigMapClusterInfo,
TokenSecretNamespace: api.NamespaceSystem,
}
}
// BootstrapSigner is a controller that signs a ConfigMap with a set of tokens.
type BootstrapSigner struct {
client clientset.Interface
configMapKey string
configMapName string
configMapNamespace string
secretNamespace string
// syncQueue handles synchronizing updates to the ConfigMap. We'll only ever
// have one item (named <ConfigMapName>) in this queue. We use it to serialize
// and collapse updates as they can come from both the ConfigMap and Secrets
// controllers.
syncQueue workqueue.RateLimitingInterface
secretLister corelisters.SecretLister
secretSynced cache.InformerSynced
configMapLister corelisters.ConfigMapLister
configMapSynced cache.InformerSynced
}
// NewBootstrapSigner returns a new *BootstrapSigner.
func NewBootstrapSigner(cl clientset.Interface, secrets informers.SecretInformer, configMaps informers.ConfigMapInformer, options BootstrapSignerOptions) (*BootstrapSigner, error) {
e := &BootstrapSigner{
client: cl,
configMapKey: options.ConfigMapNamespace + "/" + options.ConfigMapName,
configMapName: options.ConfigMapName,
configMapNamespace: options.ConfigMapNamespace,
secretNamespace: options.TokenSecretNamespace,
secretLister: secrets.Lister(),
secretSynced: secrets.Informer().HasSynced,
configMapLister: configMaps.Lister(),
configMapSynced: configMaps.Informer().HasSynced,
syncQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "bootstrap_signer_queue"),
}
if cl.CoreV1().RESTClient().GetRateLimiter() != nil {
if err := metrics.RegisterMetricAndTrackRateLimiterUsage("bootstrap_signer", cl.CoreV1().RESTClient().GetRateLimiter()); err != nil {
return nil, err
}
}
configMaps.Informer().AddEventHandlerWithResyncPeriod(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch t := obj.(type) {
case *v1.ConfigMap:
return t.Name == options.ConfigMapName && t.Namespace == options.ConfigMapNamespace
default:
utilruntime.HandleError(fmt.Errorf("object passed to %T that is not expected: %T", e, obj))
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: func(_ interface{}) { e.pokeConfigMapSync() },
UpdateFunc: func(_, _ interface{}) { e.pokeConfigMapSync() },
},
},
options.ConfigMapResync,
)
secrets.Informer().AddEventHandlerWithResyncPeriod(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch t := obj.(type) {
case *v1.Secret:
return t.Type == bootstrapapi.SecretTypeBootstrapToken && t.Namespace == e.secretNamespace
default:
utilruntime.HandleError(fmt.Errorf("object passed to %T that is not expected: %T", e, obj))
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: func(_ interface{}) { e.pokeConfigMapSync() },
UpdateFunc: func(_, _ interface{}) { e.pokeConfigMapSync() },
DeleteFunc: func(_ interface{}) { e.pokeConfigMapSync() },
},
},
options.SecretResync,
)
return e, nil
}
// Run runs controller loops and returns when they are done
func (e *BootstrapSigner) Run(stopCh <-chan struct{}) {
// Shut down queues
defer utilruntime.HandleCrash()
defer e.syncQueue.ShutDown()
if !controller.WaitForCacheSync("bootstrap_signer", stopCh, e.configMapSynced, e.secretSynced) {
return
}
glog.V(5).Infof("Starting workers")
go wait.Until(e.serviceConfigMapQueue, 0, stopCh)
<-stopCh
glog.V(1).Infof("Shutting down")
}
func (e *BootstrapSigner) pokeConfigMapSync() {
e.syncQueue.Add(e.configMapKey)
}
func (e *BootstrapSigner) serviceConfigMapQueue() {
key, quit := e.syncQueue.Get()
if quit {
return
}
defer e.syncQueue.Done(key)
e.signConfigMap()
}
// signConfigMap computes the signatures on our latest cached objects and writes
// back if necessary.
func (e *BootstrapSigner) signConfigMap() {
origCM := e.getConfigMap()
if origCM == nil {
return
}
var needUpdate = false
newCM := origCM.DeepCopy()
// First capture the config we are signing
content, ok := newCM.Data[bootstrapapi.KubeConfigKey]
if !ok {
glog.V(3).Infof("No %s key in %s/%s ConfigMap", bootstrapapi.KubeConfigKey, origCM.Namespace, origCM.Name)
return
}
// Next remove and save all existing signatures
sigs := map[string]string{}
for key, value := range newCM.Data {
if strings.HasPrefix(key, bootstrapapi.JWSSignatureKeyPrefix) {
tokenID := strings.TrimPrefix(key, bootstrapapi.JWSSignatureKeyPrefix)
sigs[tokenID] = value
delete(newCM.Data, key)
}
}
// Now recompute signatures and store them on the new map
tokens := e.getTokens()
for tokenID, tokenValue := range tokens {
sig, err := computeDetachedSig(content, tokenID, tokenValue)
if err != nil {
utilruntime.HandleError(err)
}
// Check to see if this signature is changed or new.
oldSig := sigs[tokenID]
if sig != oldSig {
needUpdate = true
}
delete(sigs, tokenID)
newCM.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID] = sig
}
// If we have signatures left over we know that some signatures were
// removed. We now need to update the ConfigMap
if len(sigs) != 0 {
needUpdate = true
}
if needUpdate {
e.updateConfigMap(newCM)
}
}
func (e *BootstrapSigner) updateConfigMap(cm *v1.ConfigMap) {
_, err := e.client.CoreV1().ConfigMaps(cm.Namespace).Update(cm)
if err != nil && !apierrors.IsConflict(err) && !apierrors.IsNotFound(err) {
glog.V(3).Infof("Error updating ConfigMap: %v", err)
}
}
// getConfigMap gets the ConfigMap we are interested in
func (e *BootstrapSigner) getConfigMap() *v1.ConfigMap {
configMap, err := e.configMapLister.ConfigMaps(e.configMapNamespace).Get(e.configMapName)
// If we can't get the configmap just return nil. The resync will eventually
// sync things up.
if err != nil {
if !apierrors.IsNotFound(err) {
utilruntime.HandleError(err)
}
return nil
}
return configMap
}
func (e *BootstrapSigner) listSecrets() []*v1.Secret {
secrets, err := e.secretLister.Secrets(e.secretNamespace).List(labels.Everything())
if err != nil {
utilruntime.HandleError(err)
return nil
}
items := []*v1.Secret{}
for _, secret := range secrets {
if secret.Type == bootstrapapi.SecretTypeBootstrapToken {
items = append(items, secret)
}
}
return items
}
// getTokens returns a map of tokenID->tokenSecret. It ensures the token is
// valid for signing.
func (e *BootstrapSigner) getTokens() map[string]string {
ret := map[string]string{}
secretObjs := e.listSecrets()
for _, secret := range secretObjs {
tokenID, tokenSecret, ok := validateSecretForSigning(secret)
if !ok {
continue
}
// Check and warn for duplicate secrets. Behavior here will be undefined.
if _, ok := ret[tokenID]; ok {
// This should never happen as we ensure a consistent secret name.
// But leave this in here just in case.
glog.V(1).Infof("Duplicate bootstrap tokens found for id %s, ignoring on in %s/%s", tokenID, secret.Namespace, secret.Name)
continue
}
// This secret looks good, add it to the list.
ret[tokenID] = tokenSecret
}
return ret
}
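
For reference, a minimal sketch (not part of the commit) of how this controller is typically wired up, assuming a client-go clientset and shared informer factory; the in-cluster config, panics, and stopCh handling are illustrative only:

// Hypothetical wiring for the BootstrapSigner.
package main

import (
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/controller/bootstrap"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes in-cluster credentials
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	factory := informers.NewSharedInformerFactory(client, 0)

	signer, err := bootstrap.NewBootstrapSigner(
		client,
		factory.Core().V1().Secrets(),
		factory.Core().V1().ConfigMaps(),
		bootstrap.DefaultBootstrapSignerOptions(),
	)
	if err != nil {
		panic(err)
	}

	stopCh := make(chan struct{})
	factory.Start(stopCh) // start the shared informers feeding the listers
	signer.Run(stopCh)    // blocks until stopCh is closed
}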

View File

@@ -1,165 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bootstrap
import (
"testing"
"github.com/davecgh/go-spew/spew"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/informers"
coreinformers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/controller"
)
func init() {
spew.Config.DisableMethods = true
}
const testTokenID = "abc123"
func newBootstrapSigner() (*BootstrapSigner, *fake.Clientset, coreinformers.SecretInformer, coreinformers.ConfigMapInformer, error) {
options := DefaultBootstrapSignerOptions()
cl := fake.NewSimpleClientset()
informerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), controller.NoResyncPeriodFunc())
secrets := informerFactory.Core().V1().Secrets()
configMaps := informerFactory.Core().V1().ConfigMaps()
bsc, err := NewBootstrapSigner(cl, secrets, configMaps, options)
if err != nil {
return nil, nil, nil, nil, err
}
return bsc, cl, secrets, configMaps, nil
}
func newConfigMap(tokenID, signature string) *v1.ConfigMap {
ret := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespacePublic,
Name: bootstrapapi.ConfigMapClusterInfo,
ResourceVersion: "1",
},
Data: map[string]string{
bootstrapapi.KubeConfigKey: "payload",
},
}
if len(tokenID) > 0 {
ret.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID] = signature
}
return ret
}
func TestNoConfigMap(t *testing.T) {
signer, cl, _, _, err := newBootstrapSigner()
if err != nil {
t.Fatalf("error creating BootstrapSigner: %v", err)
}
signer.signConfigMap()
verifyActions(t, []core.Action{}, cl.Actions())
}
func TestSimpleSign(t *testing.T) {
signer, cl, secrets, configMaps, err := newBootstrapSigner()
if err != nil {
t.Fatalf("error creating BootstrapSigner: %v", err)
}
cm := newConfigMap("", "")
configMaps.Informer().GetIndexer().Add(cm)
secret := newTokenSecret(testTokenID, "tokenSecret")
addSecretSigningUsage(secret, "true")
secrets.Informer().GetIndexer().Add(secret)
signer.signConfigMap()
expected := []core.Action{
core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "configmaps"},
api.NamespacePublic,
newConfigMap(testTokenID, "eyJhbGciOiJIUzI1NiIsImtpZCI6ImFiYzEyMyJ9..QSxpUG7Q542CirTI2ECPSZjvBOJURUW5a7XqFpNI958")),
}
verifyActions(t, expected, cl.Actions())
}
func TestNoSignNeeded(t *testing.T) {
signer, cl, secrets, configMaps, err := newBootstrapSigner()
if err != nil {
t.Fatalf("error creating BootstrapSigner: %v", err)
}
cm := newConfigMap(testTokenID, "eyJhbGciOiJIUzI1NiIsImtpZCI6ImFiYzEyMyJ9..QSxpUG7Q542CirTI2ECPSZjvBOJURUW5a7XqFpNI958")
configMaps.Informer().GetIndexer().Add(cm)
secret := newTokenSecret(testTokenID, "tokenSecret")
addSecretSigningUsage(secret, "true")
secrets.Informer().GetIndexer().Add(secret)
signer.signConfigMap()
verifyActions(t, []core.Action{}, cl.Actions())
}
func TestUpdateSignature(t *testing.T) {
signer, cl, secrets, configMaps, err := newBootstrapSigner()
if err != nil {
t.Fatalf("error creating BootstrapSigner: %v", err)
}
cm := newConfigMap(testTokenID, "old signature")
configMaps.Informer().GetIndexer().Add(cm)
secret := newTokenSecret(testTokenID, "tokenSecret")
addSecretSigningUsage(secret, "true")
secrets.Informer().GetIndexer().Add(secret)
signer.signConfigMap()
expected := []core.Action{
core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "configmaps"},
api.NamespacePublic,
newConfigMap(testTokenID, "eyJhbGciOiJIUzI1NiIsImtpZCI6ImFiYzEyMyJ9..QSxpUG7Q542CirTI2ECPSZjvBOJURUW5a7XqFpNI958")),
}
verifyActions(t, expected, cl.Actions())
}
func TestRemoveSignature(t *testing.T) {
signer, cl, _, configMaps, err := newBootstrapSigner()
if err != nil {
t.Fatalf("error creating BootstrapSigner: %v", err)
}
cm := newConfigMap(testTokenID, "old signature")
configMaps.Informer().GetIndexer().Add(cm)
signer.signConfigMap()
expected := []core.Action{
core.NewUpdateAction(schema.GroupVersionResource{Version: "v1", Resource: "configmaps"},
api.NamespacePublic,
newConfigMap("", "")),
}
verifyActions(t, expected, cl.Actions())
}

View File

@@ -1,74 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bootstrap
import (
"testing"
"github.com/davecgh/go-spew/spew"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
core "k8s.io/client-go/testing"
bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
"k8s.io/kubernetes/pkg/apis/core/helper"
)
func newTokenSecret(tokenID, tokenSecret string) *v1.Secret {
return &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceSystem,
Name: bootstrapapi.BootstrapTokenSecretPrefix + tokenID,
ResourceVersion: "1",
},
Type: bootstrapapi.SecretTypeBootstrapToken,
Data: map[string][]byte{
bootstrapapi.BootstrapTokenIDKey: []byte(tokenID),
bootstrapapi.BootstrapTokenSecretKey: []byte(tokenSecret),
},
}
}
func addSecretExpiration(s *v1.Secret, expiration string) {
s.Data[bootstrapapi.BootstrapTokenExpirationKey] = []byte(expiration)
}
func addSecretSigningUsage(s *v1.Secret, value string) {
s.Data[bootstrapapi.BootstrapTokenUsageSigningKey] = []byte(value)
}
func verifyActions(t *testing.T, expected, actual []core.Action) {
for i, a := range actual {
if len(expected) < i+1 {
t.Errorf("%d unexpected actions: %s", len(actual)-len(expected), spew.Sdump(actual[i:]))
break
}
e := expected[i]
if !helper.Semantic.DeepEqual(e, a) {
t.Errorf("Expected\n\t%s\ngot\n\t%s", spew.Sdump(e), spew.Sdump(a))
continue
}
}
if len(expected) > len(actual) {
t.Errorf("%d additional expected actions", len(expected)-len(actual))
for _, a := range expected[len(actual):] {
t.Logf(" %s", spew.Sdump(a))
}
}
}

View File

@@ -1,20 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package bootstrap provides automatic processes necessary for bootstrapping.
// This includes managing and expiring tokens along with signing well-known
// ConfigMaps with those tokens.
package bootstrap // import "k8s.io/kubernetes/pkg/controller/bootstrap"

View File

@@ -1,82 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bootstrap
import (
"fmt"
"strings"
jose "gopkg.in/square/go-jose.v2"
)
// computeDetachedSig takes content and token details and computes a detached
// JWS signature. This is described in Appendix F of RFC 7515. Basically, this
// is a regular JWS with the content part of the signature elided.
func computeDetachedSig(content, tokenID, tokenSecret string) (string, error) {
jwk := &jose.JSONWebKey{
Key: []byte(tokenSecret),
KeyID: tokenID,
}
opts := &jose.SignerOptions{
// Since this is a symmetric key, go-jose doesn't automatically include
// the KeyID as part of the protected header. We have to pass it here
// explicitly.
ExtraHeaders: map[jose.HeaderKey]interface{}{
"kid": tokenID,
},
}
signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.HS256, Key: jwk}, opts)
if err != nil {
return "", fmt.Errorf("can't make a HS256 signer from the given token: %v", err)
}
jws, err := signer.Sign([]byte(content))
if err != nil {
return "", fmt.Errorf("can't HS256-sign the given token: %v", err)
}
fullSig, err := jws.CompactSerialize()
if err != nil {
return "", fmt.Errorf("can't serialize the given token: %v", err)
}
return stripContent(fullSig)
}
// stripContent will remove the content part of a compact JWS
//
// The `go-jose` library doesn't support generating signatures with "detached"
// content. To make up for this we take the full compact signature, break it
// apart and put it back together without the content section.
func stripContent(fullSig string) (string, error) {
parts := strings.Split(fullSig, ".")
if len(parts) != 3 {
return "", fmt.Errorf("compact JWS format must have three parts")
}
return parts[0] + ".." + parts[2], nil
}
// DetachedTokenIsValid checks whether a given detached JWS-encoded token matches JWS output of the given content and token
func DetachedTokenIsValid(detachedToken, content, tokenID, tokenSecret string) bool {
newToken, err := computeDetachedSig(content, tokenID, tokenSecret)
if err != nil {
return false
}
return detachedToken == newToken
}
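
As a worked illustration (not from the commit): a compact JWS is header.payload.signature, and the detached form produced above simply elides the middle part. A small sketch, reusing the test vector that appears in jws_test.go below:

// Hypothetical same-package snippet (computeDetachedSig is unexported, so
// this would have to live in package bootstrap); the content, token ID, and
// secret mirror the constants used in jws_test.go.
func exampleDetachedSig() {
	content := "Hello from the other side. I must have called a thousand times."
	sig, err := computeDetachedSig(content, "joshua", "my voice is my passcode")
	if err != nil {
		panic(err)
	}
	// sig has the shape "<base64url header>..<base64url signature>": the
	// payload between the two dots is elided per RFC 7515, Appendix F.
	if !DetachedTokenIsValid(sig, content, "joshua", "my voice is my passcode") {
		panic("round-trip verification failed")
	}
}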

View File

@@ -1,69 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bootstrap
import (
"testing"
"github.com/stretchr/testify/assert"
)
const (
content = "Hello from the other side. I must have called a thousand times."
secret = "my voice is my passcode"
id = "joshua"
)
func TestComputeDetachedSig(t *testing.T) {
sig, err := computeDetachedSig(content, id, secret)
assert.NoError(t, err, "Error when computing signature: %v", err)
assert.Equal(
t,
"eyJhbGciOiJIUzI1NiIsImtpZCI6Impvc2h1YSJ9..VShe2taLd-YTrmWuRkcL_8QTNDHYxQIEBsAYYiIj1_8",
sig,
"Wrong signature. Got: %v", sig)
// Try with null content
sig, err = computeDetachedSig("", id, secret)
assert.NoError(t, err, "Error when computing signature: %v", err)
assert.Equal(
t,
"eyJhbGciOiJIUzI1NiIsImtpZCI6Impvc2h1YSJ9..7Ui1ALizW4jXphVUB7xUqC9vLYLL9RZeOFfVLoB7Tgk",
sig,
"Wrong signature. Got: %v", sig)
// Try with no secret
sig, err = computeDetachedSig(content, id, "")
assert.NoError(t, err, "Error when computing signature: %v", err)
assert.Equal(
t,
"eyJhbGciOiJIUzI1NiIsImtpZCI6Impvc2h1YSJ9..UfkqvDGiIFxrMnFseDj9LYJOLNrvjW8aHhF71mvvAs8",
sig,
"Wrong signature. Got: %v", sig)
}
func TestDetachedTokenIsValid(t *testing.T) {
// Valid detached JWS token and valid inputs should succeed
sig := "eyJhbGciOiJIUzI1NiIsImtpZCI6Impvc2h1YSJ9..VShe2taLd-YTrmWuRkcL_8QTNDHYxQIEBsAYYiIj1_8"
assert.True(t, DetachedTokenIsValid(sig, content, id, secret),
"Content %q and token \"%s:%s\" should equal signature: %q", content, id, secret, sig)
// Invalid detached JWS token and valid inputs should fail
sig2 := sig + "foo"
assert.False(t, DetachedTokenIsValid(sig2, content, id, secret),
"Content %q and token \"%s:%s\" should not equal signature: %q", content, id, secret, sig2)
}

View File

@@ -1,203 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bootstrap
import (
"fmt"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/util/metrics"
)
// TokenCleanerOptions contains options for the TokenCleaner
type TokenCleanerOptions struct {
// TokenSecretNamespace is the namespace for token Secrets.
TokenSecretNamespace string
// SecretResync is the time.Duration at which to fully re-list secrets.
// If zero, re-list will be delayed as long as possible.
SecretResync time.Duration
}
// DefaultTokenCleanerOptions returns a set of default options for creating a
// TokenCleaner
func DefaultTokenCleanerOptions() TokenCleanerOptions {
return TokenCleanerOptions{
TokenSecretNamespace: api.NamespaceSystem,
}
}
// TokenCleaner is a controller that deletes expired tokens
type TokenCleaner struct {
tokenSecretNamespace string
client clientset.Interface
// secretLister is able to list/get secrets and is populated by the shared informer passed to NewTokenCleaner.
secretLister corelisters.SecretLister
// secretSynced returns true if the secret shared informer has been synced at least once.
secretSynced cache.InformerSynced
queue workqueue.RateLimitingInterface
}
// NewTokenCleaner returns a new *TokenCleaner.
func NewTokenCleaner(cl clientset.Interface, secrets coreinformers.SecretInformer, options TokenCleanerOptions) (*TokenCleaner, error) {
e := &TokenCleaner{
client: cl,
secretLister: secrets.Lister(),
secretSynced: secrets.Informer().HasSynced,
tokenSecretNamespace: options.TokenSecretNamespace,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "token_cleaner"),
}
if cl.CoreV1().RESTClient().GetRateLimiter() != nil {
if err := metrics.RegisterMetricAndTrackRateLimiterUsage("token_cleaner", cl.CoreV1().RESTClient().GetRateLimiter()); err != nil {
return nil, err
}
}
secrets.Informer().AddEventHandlerWithResyncPeriod(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch t := obj.(type) {
case *v1.Secret:
return t.Type == bootstrapapi.SecretTypeBootstrapToken && t.Namespace == e.tokenSecretNamespace
default:
utilruntime.HandleError(fmt.Errorf("object passed to %T that is not expected: %T", e, obj))
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: e.enqueueSecrets,
UpdateFunc: func(oldSecret, newSecret interface{}) { e.enqueueSecrets(newSecret) },
},
},
options.SecretResync,
)
return e, nil
}
// Run runs controller loops and returns when they are done
func (tc *TokenCleaner) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer tc.queue.ShutDown()
glog.Infof("Starting token cleaner controller")
defer glog.Infof("Shutting down token cleaner controller")
if !controller.WaitForCacheSync("token_cleaner", stopCh, tc.secretSynced) {
return
}
go wait.Until(tc.worker, 10*time.Second, stopCh)
<-stopCh
}
func (tc *TokenCleaner) enqueueSecrets(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
utilruntime.HandleError(err)
return
}
tc.queue.Add(key)
}
// worker runs a thread that dequeues secrets, handles them, and marks them done.
func (tc *TokenCleaner) worker() {
for tc.processNextWorkItem() {
}
}
// processNextWorkItem deals with one key off the queue. It returns false when it's time to quit.
func (tc *TokenCleaner) processNextWorkItem() bool {
key, quit := tc.queue.Get()
if quit {
return false
}
defer tc.queue.Done(key)
if err := tc.syncFunc(key.(string)); err != nil {
tc.queue.AddRateLimited(key)
utilruntime.HandleError(fmt.Errorf("Sync %v failed with : %v", key, err))
return true
}
tc.queue.Forget(key)
return true
}
func (tc *TokenCleaner) syncFunc(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing secret %q (%v)", key, time.Since(startTime))
}()
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return err
}
ret, err := tc.secretLister.Secrets(namespace).Get(name)
if apierrors.IsNotFound(err) {
glog.V(3).Infof("secret has been deleted: %v", key)
return nil
}
if err != nil {
return err
}
if ret.Type == bootstrapapi.SecretTypeBootstrapToken {
tc.evalSecret(ret)
}
return nil
}
func (tc *TokenCleaner) evalSecret(o interface{}) {
secret := o.(*v1.Secret)
if isSecretExpired(secret) {
glog.V(3).Infof("Deleting expired secret %s/%s", secret.Namespace, secret.Name)
var options *metav1.DeleteOptions
if len(secret.UID) > 0 {
options = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &secret.UID}}
}
err := tc.client.CoreV1().Secrets(secret.Namespace).Delete(secret.Name, options)
// NotFound isn't a real error (it's already been deleted)
// Conflict isn't a real error (the UID precondition failed)
if err != nil && !apierrors.IsConflict(err) && !apierrors.IsNotFound(err) {
glog.V(3).Infof("Error deleting Secret: %v", err)
}
}
}
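
The TokenCleaner is wired the same way; a minimal sketch (not part of the commit), reusing the client, factory, and stopCh from the BootstrapSigner sketch above:

// Hypothetical continuation of the earlier main().
cleaner, err := bootstrap.NewTokenCleaner(
	client,
	factory.Core().V1().Secrets(),
	bootstrap.DefaultTokenCleanerOptions(),
)
if err != nil {
	panic(err)
}
go cleaner.Run(stopCh) // deletes bootstrap token Secrets once they expire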

View File

@@ -1,102 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bootstrap
import (
"testing"
"time"
"github.com/davecgh/go-spew/spew"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/informers"
coreinformers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
api "k8s.io/kubernetes/pkg/apis/core"
)
func init() {
spew.Config.DisableMethods = true
}
func newTokenCleaner() (*TokenCleaner, *fake.Clientset, coreinformers.SecretInformer, error) {
options := DefaultTokenCleanerOptions()
cl := fake.NewSimpleClientset()
informerFactory := informers.NewSharedInformerFactory(cl, options.SecretResync)
secrets := informerFactory.Core().V1().Secrets()
tcc, err := NewTokenCleaner(cl, secrets, options)
if err != nil {
return nil, nil, nil, err
}
return tcc, cl, secrets, nil
}
func TestCleanerNoExpiration(t *testing.T) {
cleaner, cl, secrets, err := newTokenCleaner()
if err != nil {
t.Fatalf("error creating TokenCleaner: %v", err)
}
secret := newTokenSecret("tokenID", "tokenSecret")
secrets.Informer().GetIndexer().Add(secret)
cleaner.evalSecret(secret)
expected := []core.Action{}
verifyActions(t, expected, cl.Actions())
}
func TestCleanerExpired(t *testing.T) {
cleaner, cl, secrets, err := newTokenCleaner()
if err != nil {
t.Fatalf("error creating TokenCleaner: %v", err)
}
secret := newTokenSecret("tokenID", "tokenSecret")
addSecretExpiration(secret, timeString(-time.Hour))
secrets.Informer().GetIndexer().Add(secret)
cleaner.evalSecret(secret)
expected := []core.Action{
core.NewDeleteAction(
schema.GroupVersionResource{Version: "v1", Resource: "secrets"},
api.NamespaceSystem,
secret.ObjectMeta.Name),
}
verifyActions(t, expected, cl.Actions())
}
func TestCleanerNotExpired(t *testing.T) {
cleaner, cl, secrets, err := newTokenCleaner()
if err != nil {
t.Fatalf("error creating TokenCleaner: %v", err)
}
secret := newTokenSecret("tokenID", "tokenSecret")
addSecretExpiration(secret, timeString(time.Hour))
secrets.Informer().GetIndexer().Add(secret)
cleaner.evalSecret(secret)
expected := []core.Action{}
verifyActions(t, expected, cl.Actions())
}

View File

@@ -1,109 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bootstrap
import (
"regexp"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
)
var namePattern = `^` + regexp.QuoteMeta(bootstrapapi.BootstrapTokenSecretPrefix) + `([a-z0-9]{6})$`
var nameRegExp = regexp.MustCompile(namePattern)
// getSecretString gets a string value from a secret. If there is an error or
// if the key doesn't exist, an empty string is returned.
func getSecretString(secret *v1.Secret, key string) string {
data, ok := secret.Data[key]
if !ok {
return ""
}
return string(data)
}
// parseSecretName parses the name of the secret to extract the secret ID.
func parseSecretName(name string) (secretID string, ok bool) {
r := nameRegExp.FindStringSubmatch(name)
if r == nil {
return "", false
}
return r[1], true
}
func validateSecretForSigning(secret *v1.Secret) (tokenID, tokenSecret string, ok bool) {
nameTokenID, ok := parseSecretName(secret.Name)
if !ok {
glog.V(3).Infof("Invalid secret name: %s. Must be of form %s<secret-id>.", secret.Name, bootstrapapi.BootstrapTokenSecretPrefix)
return "", "", false
}
tokenID = getSecretString(secret, bootstrapapi.BootstrapTokenIDKey)
if len(tokenID) == 0 {
glog.V(3).Infof("No %s key in %s/%s Secret", bootstrapapi.BootstrapTokenIDKey, secret.Namespace, secret.Name)
return "", "", false
}
if nameTokenID != tokenID {
glog.V(3).Infof("Token ID (%s) doesn't match secret name: %s", tokenID, nameTokenID)
return "", "", false
}
tokenSecret = getSecretString(secret, bootstrapapi.BootstrapTokenSecretKey)
if len(tokenSecret) == 0 {
glog.V(3).Infof("No %s key in %s/%s Secret", bootstrapapi.BootstrapTokenSecretKey, secret.Namespace, secret.Name)
return "", "", false
}
// Ensure this secret hasn't expired. The TokenCleaner should remove this
// but if that isn't working or it hasn't gotten there yet we should check
// here.
if isSecretExpired(secret) {
return "", "", false
}
// Make sure this secret can be used for signing
okToSign := getSecretString(secret, bootstrapapi.BootstrapTokenUsageSigningKey)
if okToSign != "true" {
return "", "", false
}
return tokenID, tokenSecret, true
}
// isSecretExpired returns true if the Secret is expired.
func isSecretExpired(secret *v1.Secret) bool {
expiration := getSecretString(secret, bootstrapapi.BootstrapTokenExpirationKey)
if len(expiration) > 0 {
expTime, err := time.Parse(time.RFC3339, expiration)
if err != nil {
glog.V(3).Infof("Unparseable expiration time (%s) in %s/%s Secret: %v. Treating as expired.",
expiration, secret.Namespace, secret.Name, err)
return true
}
if time.Now().After(expTime) {
glog.V(3).Infof("Expired bootstrap token in %s/%s Secret: %v",
secret.Namespace, secret.Name, expiration)
return true
}
}
return false
}

View File

@@ -1,206 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bootstrap
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
)
const (
givenTokenID = "abc123"
givenTokenID2 = "def456"
givenTokenSecret = "tokenSecret"
)
func timeString(delta time.Duration) string {
return time.Now().Add(delta).Format(time.RFC3339)
}
func TestValidateSecretForSigning(t *testing.T) {
cases := []struct {
description string
tokenID string
tokenSecret string
okToSign string
expiration string
valid bool
}{
{
"Signing token with no exp",
givenTokenID, givenTokenSecret, "true", "", true,
},
{
"Signing token with valid exp",
givenTokenID, givenTokenSecret, "true", timeString(time.Hour), true,
},
{
"Expired signing token",
givenTokenID, givenTokenSecret, "true", timeString(-time.Hour), false,
},
{
"Signing token with bad exp",
givenTokenID, givenTokenSecret, "true", "garbage", false,
},
{
"Signing token without signing bit",
givenTokenID, givenTokenSecret, "", "garbage", false,
},
{
"Signing token with bad signing bit",
givenTokenID, givenTokenSecret, "", "", false,
},
{
"Signing token with no ID",
"", givenTokenSecret, "true", "", false,
},
{
"Signing token with no secret",
givenTokenID, "", "true", "", false,
},
}
for _, tc := range cases {
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceSystem,
Name: bootstrapapi.BootstrapTokenSecretPrefix + givenTokenID,
ResourceVersion: "1",
},
Type: bootstrapapi.SecretTypeBootstrapToken,
Data: map[string][]byte{
bootstrapapi.BootstrapTokenIDKey: []byte(tc.tokenID),
bootstrapapi.BootstrapTokenSecretKey: []byte(tc.tokenSecret),
bootstrapapi.BootstrapTokenUsageSigningKey: []byte(tc.okToSign),
bootstrapapi.BootstrapTokenExpirationKey: []byte(tc.expiration),
},
}
tokenID, tokenSecret, ok := validateSecretForSigning(secret)
if ok != tc.valid {
t.Errorf("%s: Unexpected validation failure. Expected %v, got %v", tc.description, tc.valid, ok)
}
if ok {
if tokenID != tc.tokenID {
t.Errorf("%s: Unexpected Token ID. Expected %q, got %q", tc.description, tc.tokenID, tokenID)
}
if tokenSecret != tc.tokenSecret {
t.Errorf("%s: Unexpected Token Secret. Expected %q, got %q", tc.description, tc.tokenSecret, tokenSecret)
}
}
}
}
func TestValidateSecret(t *testing.T) {
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceSystem,
Name: bootstrapapi.BootstrapTokenSecretPrefix + givenTokenID,
ResourceVersion: "1",
},
Type: bootstrapapi.SecretTypeBootstrapToken,
Data: map[string][]byte{
bootstrapapi.BootstrapTokenIDKey: []byte(givenTokenID),
bootstrapapi.BootstrapTokenSecretKey: []byte(givenTokenSecret),
bootstrapapi.BootstrapTokenUsageSigningKey: []byte("true"),
},
}
tokenID, tokenSecret, ok := validateSecretForSigning(secret)
if !ok {
t.Errorf("Unexpected validation failure.")
}
if tokenID != givenTokenID {
t.Errorf("Unexpected Token ID. Expected %q, got %q", givenTokenID, tokenID)
}
if tokenSecret != givenTokenSecret {
t.Errorf("Unexpected Token Secret. Expected %q, got %q", givenTokenSecret, tokenSecret)
}
}
func TestBadSecretName(t *testing.T) {
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceSystem,
Name: givenTokenID,
ResourceVersion: "1",
},
Type: bootstrapapi.SecretTypeBootstrapToken,
Data: map[string][]byte{
bootstrapapi.BootstrapTokenIDKey: []byte(givenTokenID),
bootstrapapi.BootstrapTokenSecretKey: []byte(givenTokenSecret),
bootstrapapi.BootstrapTokenUsageSigningKey: []byte("true"),
},
}
_, _, ok := validateSecretForSigning(secret)
if ok {
t.Errorf("Token validation should fail with bad name")
}
}
func TestMismatchSecretName(t *testing.T) {
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceSystem,
Name: bootstrapapi.BootstrapTokenSecretPrefix + givenTokenID2,
ResourceVersion: "1",
},
Type: bootstrapapi.SecretTypeBootstrapToken,
Data: map[string][]byte{
bootstrapapi.BootstrapTokenIDKey: []byte(givenTokenID),
bootstrapapi.BootstrapTokenSecretKey: []byte(givenTokenSecret),
bootstrapapi.BootstrapTokenUsageSigningKey: []byte("true"),
},
}
_, _, ok := validateSecretForSigning(secret)
if ok {
t.Errorf("Token validation should fail with mismatched name")
}
}
func TestParseSecretName(t *testing.T) {
tokenID, ok := parseSecretName("bootstrap-token-abc123")
assert.True(t, ok, "parseSecretName should accept valid name")
assert.Equal(t, "abc123", tokenID, "parseSecretName should return token ID")
_, ok = parseSecretName("")
assert.False(t, ok, "parseSecretName should reject blank name")
_, ok = parseSecretName("abc123")
assert.False(t, ok, "parseSecretName should reject with no prefix")
_, ok = parseSecretName("bootstrap-token-")
assert.False(t, ok, "parseSecretName should reject no token ID")
_, ok = parseSecretName("bootstrap-token-abc")
assert.False(t, ok, "parseSecretName should reject short token ID")
_, ok = parseSecretName("bootstrap-token-abc123ghi")
assert.False(t, ok, "parseSecretName should reject long token ID")
_, ok = parseSecretName("bootstrap-token-ABC123")
assert.False(t, ok, "parseSecretName should reject invalid token ID")
}

View File

@@ -1,66 +0,0 @@
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"certificate_controller.go",
"certificate_controller_utils.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/certificates",
visibility = ["//visibility:public"],
deps = [
"//pkg/controller:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/time/rate:go_default_library",
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/controller/certificates/approver:all-srcs",
"//pkg/controller/certificates/cleaner:all-srcs",
"//pkg/controller/certificates/signer:all-srcs",
],
tags = ["automanaged"],
visibility = [
"//pkg/controller:__pkg__",
],
)
go_test(
name = "go_default_test",
srcs = ["certificate_controller_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/controller:go_default_library",
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
],
)

View File

@@ -1,4 +0,0 @@
reviewers:
- deads2k
- mikedanese
- awly

View File

@@ -1,49 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = ["sarapprove_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/apis/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/api/authorization/v1beta1:go_default_library",
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = ["sarapprove.go"],
importpath = "k8s.io/kubernetes/pkg/controller/certificates/approver",
deps = [
"//pkg/apis/certificates/v1beta1:go_default_library",
"//pkg/controller/certificates:go_default_library",
"//vendor/k8s.io/api/authorization/v1beta1:go_default_library",
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/informers/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -1,194 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package approver implements an automated approver for kubelet certificates.
package approver
import (
"crypto/x509"
"fmt"
"reflect"
"strings"
authorization "k8s.io/api/authorization/v1beta1"
capi "k8s.io/api/certificates/v1beta1"
certificatesinformers "k8s.io/client-go/informers/certificates/v1beta1"
clientset "k8s.io/client-go/kubernetes"
k8s_certificates_v1beta1 "k8s.io/kubernetes/pkg/apis/certificates/v1beta1"
"k8s.io/kubernetes/pkg/controller/certificates"
)
type csrRecognizer struct {
recognize func(csr *capi.CertificateSigningRequest, x509cr *x509.CertificateRequest) bool
permission authorization.ResourceAttributes
successMessage string
}
type sarApprover struct {
client clientset.Interface
recognizers []csrRecognizer
}
// NewCSRApprovingController returns a CertificateController that auto-approves
// recognized kubelet client CSRs after a successful SubjectAccessReview.
func NewCSRApprovingController(client clientset.Interface, csrInformer certificatesinformers.CertificateSigningRequestInformer) *certificates.CertificateController {
approver := &sarApprover{
client: client,
recognizers: recognizers(),
}
return certificates.NewCertificateController(
client,
csrInformer,
approver.handle,
)
}
func recognizers() []csrRecognizer {
recognizers := []csrRecognizer{
{
recognize: isSelfNodeClientCert,
permission: authorization.ResourceAttributes{Group: "certificates.k8s.io", Resource: "certificatesigningrequests", Verb: "create", Subresource: "selfnodeclient"},
successMessage: "Auto approving self kubelet client certificate after SubjectAccessReview.",
},
{
recognize: isNodeClientCert,
permission: authorization.ResourceAttributes{Group: "certificates.k8s.io", Resource: "certificatesigningrequests", Verb: "create", Subresource: "nodeclient"},
successMessage: "Auto approving kubelet client certificate after SubjectAccessReview.",
},
}
return recognizers
}
func (a *sarApprover) handle(csr *capi.CertificateSigningRequest) error {
if len(csr.Status.Certificate) != 0 {
return nil
}
if approved, denied := certificates.GetCertApprovalCondition(&csr.Status); approved || denied {
return nil
}
x509cr, err := k8s_certificates_v1beta1.ParseCSR(csr)
if err != nil {
return fmt.Errorf("unable to parse csr %q: %v", csr.Name, err)
}
tried := []string{}
for _, r := range a.recognizers {
if !r.recognize(csr, x509cr) {
continue
}
tried = append(tried, r.permission.Subresource)
approved, err := a.authorize(csr, r.permission)
if err != nil {
return err
}
if approved {
appendApprovalCondition(csr, r.successMessage)
_, err = a.client.CertificatesV1beta1().CertificateSigningRequests().UpdateApproval(csr)
if err != nil {
return fmt.Errorf("error updating approval for csr: %v", err)
}
return nil
}
}
if len(tried) != 0 {
return certificates.IgnorableError("recognized csr %q as %v but subject access review was not approved", csr.Name, tried)
}
return nil
}
func (a *sarApprover) authorize(csr *capi.CertificateSigningRequest, rattrs authorization.ResourceAttributes) (bool, error) {
extra := make(map[string]authorization.ExtraValue)
for k, v := range csr.Spec.Extra {
extra[k] = authorization.ExtraValue(v)
}
sar := &authorization.SubjectAccessReview{
Spec: authorization.SubjectAccessReviewSpec{
User: csr.Spec.Username,
UID: csr.Spec.UID,
Groups: csr.Spec.Groups,
Extra: extra,
ResourceAttributes: &rattrs,
},
}
sar, err := a.client.AuthorizationV1beta1().SubjectAccessReviews().Create(sar)
if err != nil {
return false, err
}
return sar.Status.Allowed, nil
}
func appendApprovalCondition(csr *capi.CertificateSigningRequest, message string) {
csr.Status.Conditions = append(csr.Status.Conditions, capi.CertificateSigningRequestCondition{
Type: capi.CertificateApproved,
Reason: "AutoApproved",
Message: message,
})
}
func hasExactUsages(csr *capi.CertificateSigningRequest, usages []capi.KeyUsage) bool {
if len(usages) != len(csr.Spec.Usages) {
return false
}
usageMap := map[capi.KeyUsage]struct{}{}
for _, u := range usages {
usageMap[u] = struct{}{}
}
for _, u := range csr.Spec.Usages {
if _, ok := usageMap[u]; !ok {
return false
}
}
return true
}
var kubeletClientUsages = []capi.KeyUsage{
capi.UsageKeyEncipherment,
capi.UsageDigitalSignature,
capi.UsageClientAuth,
}
func isNodeClientCert(csr *capi.CertificateSigningRequest, x509cr *x509.CertificateRequest) bool {
if !reflect.DeepEqual([]string{"system:nodes"}, x509cr.Subject.Organization) {
return false
}
if (len(x509cr.DNSNames) > 0) || (len(x509cr.EmailAddresses) > 0) || (len(x509cr.IPAddresses) > 0) {
return false
}
if !hasExactUsages(csr, kubeletClientUsages) {
return false
}
if !strings.HasPrefix(x509cr.Subject.CommonName, "system:node:") {
return false
}
return true
}
func isSelfNodeClientCert(csr *capi.CertificateSigningRequest, x509cr *x509.CertificateRequest) bool {
if !isNodeClientCert(csr, x509cr) {
return false
}
if csr.Spec.Username != x509cr.Subject.CommonName {
return false
}
return true
}
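
Again for reference, a sketch (not in the commit) of plugging the approver into a controller manager, reusing client, factory, and stopCh from the earlier sketch; the informer accessor and the Run(workers, stopCh) signature are assumptions based on the CertificateController API of this era:

// Hypothetical wiring for the CSR approver.
csrInformer := factory.Certificates().V1beta1().CertificateSigningRequests()
cc := approver.NewCSRApprovingController(client, csrInformer)
go cc.Run(1, stopCh) // one worker; count is illustrative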

View File

@@ -1,299 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package approver
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"math/rand"
"net"
"testing"
authorization "k8s.io/api/authorization/v1beta1"
capi "k8s.io/api/certificates/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes/fake"
testclient "k8s.io/client-go/testing"
k8s_certificates_v1beta1 "k8s.io/kubernetes/pkg/apis/certificates/v1beta1"
)
func TestHasKubeletUsages(t *testing.T) {
cases := []struct {
usages []capi.KeyUsage
expected bool
}{
{
usages: nil,
expected: false,
},
{
usages: []capi.KeyUsage{},
expected: false,
},
{
usages: []capi.KeyUsage{
capi.UsageKeyEncipherment,
capi.UsageDigitalSignature,
},
expected: false,
},
{
usages: []capi.KeyUsage{
capi.UsageKeyEncipherment,
capi.UsageDigitalSignature,
capi.UsageServerAuth,
},
expected: false,
},
{
usages: []capi.KeyUsage{
capi.UsageKeyEncipherment,
capi.UsageDigitalSignature,
capi.UsageClientAuth,
},
expected: true,
},
}
for _, c := range cases {
if hasExactUsages(&capi.CertificateSigningRequest{
Spec: capi.CertificateSigningRequestSpec{
Usages: c.usages,
},
}, kubeletClientUsages) != c.expected {
t.Errorf("unexpected result of hasKubeletUsages(%v), expecting: %v", c.usages, c.expected)
}
}
}
func TestHandle(t *testing.T) {
cases := []struct {
message string
allowed bool
recognized bool
err bool
verify func(*testing.T, []testclient.Action)
}{
{
recognized: false,
allowed: false,
verify: func(t *testing.T, as []testclient.Action) {
if len(as) != 0 {
t.Errorf("expected no client calls but got: %#v", as)
}
},
},
{
recognized: false,
allowed: true,
verify: func(t *testing.T, as []testclient.Action) {
if len(as) != 0 {
t.Errorf("expected no client calls but got: %#v", as)
}
},
},
{
recognized: true,
allowed: false,
verify: func(t *testing.T, as []testclient.Action) {
if len(as) != 1 {
t.Errorf("expected 1 call but got: %#v", as)
return
}
_ = as[0].(testclient.CreateActionImpl)
},
err: true,
},
{
recognized: true,
allowed: true,
verify: func(t *testing.T, as []testclient.Action) {
if len(as) != 2 {
t.Errorf("expected two calls but got: %#v", as)
return
}
_ = as[0].(testclient.CreateActionImpl)
a := as[1].(testclient.UpdateActionImpl)
if got, expected := a.Verb, "update"; got != expected {
t.Errorf("got: %v, expected: %v", got, expected)
}
if got, expected := a.Resource, (schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"}); got != expected {
t.Errorf("got: %v, expected: %v", got, expected)
}
if got, expected := a.Subresource, "approval"; got != expected {
t.Errorf("got: %v, expected: %v", got, expected)
}
csr := a.Object.(*capi.CertificateSigningRequest)
if len(csr.Status.Conditions) != 1 {
t.Errorf("expected CSR to have approved condition: %#v", csr)
}
c := csr.Status.Conditions[0]
if got, expected := c.Type, capi.CertificateApproved; got != expected {
t.Errorf("got: %v, expected: %v", got, expected)
}
if got, expected := c.Reason, "AutoApproved"; got != expected {
t.Errorf("got: %v, expected: %v", got, expected)
}
},
},
}
for _, c := range cases {
t.Run(fmt.Sprintf("recognized:%v,allowed: %v,err: %v", c.recognized, c.allowed, c.err), func(t *testing.T) {
client := &fake.Clientset{}
client.AddReactor("create", "subjectaccessreviews", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
return true, &authorization.SubjectAccessReview{
Status: authorization.SubjectAccessReviewStatus{
Allowed: c.allowed,
},
}, nil
})
approver := sarApprover{
client: client,
recognizers: []csrRecognizer{
{
successMessage: "tester",
permission: authorization.ResourceAttributes{Group: "foo", Resource: "bar", Subresource: "baz"},
recognize: func(csr *capi.CertificateSigningRequest, x509cr *x509.CertificateRequest) bool {
return c.recognized
},
},
},
}
csr := makeTestCsr()
if err := approver.handle(csr); err != nil && !c.err {
t.Errorf("unexpected err: %v", err)
}
c.verify(t, client.Actions())
})
}
}
func TestRecognizers(t *testing.T) {
goodCases := []func(b *csrBuilder){
func(b *csrBuilder) {
},
}
testRecognizer(t, goodCases, isNodeClientCert, true)
testRecognizer(t, goodCases, isSelfNodeClientCert, true)
badCases := []func(b *csrBuilder){
func(b *csrBuilder) {
b.cn = "mike"
},
func(b *csrBuilder) {
b.orgs = nil
},
func(b *csrBuilder) {
b.orgs = []string{"system:master"}
},
func(b *csrBuilder) {
b.usages = append(b.usages, capi.UsageServerAuth)
},
}
testRecognizer(t, badCases, isNodeClientCert, false)
testRecognizer(t, badCases, isSelfNodeClientCert, false)
// cn different from the requestor
differentCN := []func(b *csrBuilder){
func(b *csrBuilder) {
b.requestor = "joe"
},
func(b *csrBuilder) {
b.cn = "system:node:bar"
},
}
testRecognizer(t, differentCN, isNodeClientCert, true)
testRecognizer(t, differentCN, isSelfNodeClientCert, false)
}
func testRecognizer(t *testing.T, cases []func(b *csrBuilder), recognizeFunc func(csr *capi.CertificateSigningRequest, x509cr *x509.CertificateRequest) bool, shouldRecognize bool) {
for _, c := range cases {
b := csrBuilder{
cn: "system:node:foo",
orgs: []string{"system:nodes"},
requestor: "system:node:foo",
usages: []capi.KeyUsage{
capi.UsageKeyEncipherment,
capi.UsageDigitalSignature,
capi.UsageClientAuth,
},
}
c(&b)
t.Run(fmt.Sprintf("csr:%#v", b), func(t *testing.T) {
csr := makeFancyTestCsr(b)
x509cr, err := k8s_certificates_v1beta1.ParseCSR(csr)
if err != nil {
t.Errorf("unexpected err: %v", err)
}
if recognizeFunc(csr, x509cr) != shouldRecognize {
t.Errorf("expected recognized to be %v", shouldRecognize)
}
})
}
}
// noncryptographic for faster testing
// DO NOT COPY THIS CODE
var insecureRand = rand.New(rand.NewSource(0))
func makeTestCsr() *capi.CertificateSigningRequest {
return makeFancyTestCsr(csrBuilder{cn: "test-cert"})
}
type csrBuilder struct {
cn string
orgs []string
requestor string
usages []capi.KeyUsage
dns []string
emails []string
ips []net.IP
}
func makeFancyTestCsr(b csrBuilder) *capi.CertificateSigningRequest {
pk, err := ecdsa.GenerateKey(elliptic.P224(), insecureRand)
if err != nil {
panic(err)
}
csrb, err := x509.CreateCertificateRequest(insecureRand, &x509.CertificateRequest{
Subject: pkix.Name{
CommonName: b.cn,
Organization: b.orgs,
},
DNSNames: b.dns,
EmailAddresses: b.emails,
IPAddresses: b.ips,
}, pk)
if err != nil {
panic(err)
}
return &capi.CertificateSigningRequest{
Spec: capi.CertificateSigningRequestSpec{
Username: b.requestor,
Usages: b.usages,
Request: pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: csrb}),
},
}
}


@@ -1,206 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package certificates implements an abstract controller that is useful for
// building controllers that manage CSRs
package certificates
import (
"fmt"
"time"
"github.com/golang/glog"
"golang.org/x/time/rate"
certificates "k8s.io/api/certificates/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
certificatesinformers "k8s.io/client-go/informers/certificates/v1beta1"
clientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
certificateslisters "k8s.io/client-go/listers/certificates/v1beta1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/controller"
)
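// CertificateController watches CertificateSigningRequest objects and
// dispatches each one to a pluggable handler.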
type CertificateController struct {
kubeClient clientset.Interface
csrLister certificateslisters.CertificateSigningRequestLister
csrsSynced cache.InformerSynced
handler func(*certificates.CertificateSigningRequest) error
queue workqueue.RateLimitingInterface
}
func NewCertificateController(
kubeClient clientset.Interface,
csrInformer certificatesinformers.CertificateSigningRequestInformer,
handler func(*certificates.CertificateSigningRequest) error,
) *CertificateController {
// Send events to the apiserver
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
cc := &CertificateController{
kubeClient: kubeClient,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewMaxOfRateLimiter(
workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 1000*time.Second),
// 10 qps, 100 bucket size. This is only for retry speed and it's only the overall factor (not per item)
&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
), "certificate"),
handler: handler,
}
// Manage the addition/update of certificate requests
csrInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
csr := obj.(*certificates.CertificateSigningRequest)
glog.V(4).Infof("Adding certificate request %s", csr.Name)
cc.enqueueCertificateRequest(obj)
},
UpdateFunc: func(old, new interface{}) {
oldCSR := old.(*certificates.CertificateSigningRequest)
glog.V(4).Infof("Updating certificate request %s", oldCSR.Name)
cc.enqueueCertificateRequest(new)
},
DeleteFunc: func(obj interface{}) {
csr, ok := obj.(*certificates.CertificateSigningRequest)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.V(2).Infof("Couldn't get object from tombstone %#v", obj)
return
}
csr, ok = tombstone.Obj.(*certificates.CertificateSigningRequest)
if !ok {
glog.V(2).Infof("Tombstone contained object that is not a CSR: %#v", obj)
return
}
}
glog.V(4).Infof("Deleting certificate request %s", csr.Name)
cc.enqueueCertificateRequest(obj)
},
})
cc.csrLister = csrInformer.Lister()
cc.csrsSynced = csrInformer.Informer().HasSynced
return cc
}
// Run the main goroutine responsible for watching and syncing certificate requests.
func (cc *CertificateController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer cc.queue.ShutDown()
glog.Infof("Starting certificate controller")
defer glog.Infof("Shutting down certificate controller")
if !controller.WaitForCacheSync("certificate", stopCh, cc.csrsSynced) {
return
}
for i := 0; i < workers; i++ {
go wait.Until(cc.worker, time.Second, stopCh)
}
<-stopCh
}
// worker runs a thread that dequeues CSRs, handles them, and marks them done.
func (cc *CertificateController) worker() {
for cc.processNextWorkItem() {
}
}
// processNextWorkItem deals with one key off the queue. It returns false when it's time to quit.
func (cc *CertificateController) processNextWorkItem() bool {
cKey, quit := cc.queue.Get()
if quit {
return false
}
defer cc.queue.Done(cKey)
if err := cc.syncFunc(cKey.(string)); err != nil {
cc.queue.AddRateLimited(cKey)
if _, ignorable := err.(ignorableError); !ignorable {
utilruntime.HandleError(fmt.Errorf("Sync %v failed with: %v", cKey, err))
} else {
glog.V(4).Infof("Sync %v failed with: %v", cKey, err)
}
return true
}
cc.queue.Forget(cKey)
return true
}
func (cc *CertificateController) enqueueCertificateRequest(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
return
}
cc.queue.Add(key)
}
// syncFunc looks up the CSR for the given key, skips requests that already
// carry a certificate, and hands a deep copy of the CSR to the configured
// handler (for example, an approver or signer).
func (cc *CertificateController) syncFunc(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing certificate request %q (%v)", key, time.Since(startTime))
}()
csr, err := cc.csrLister.Get(key)
if errors.IsNotFound(err) {
glog.V(3).Infof("csr has been deleted: %v", key)
return nil
}
if err != nil {
return err
}
if csr.Status.Certificate != nil {
// no need to do anything because it already has a cert
return nil
}
// need to operate on a copy so we don't mutate the csr in the shared cache
csr = csr.DeepCopy()
return cc.handler(csr)
}
// IgnorableError returns an error that we shouldn't handle (i.e. log) because
// it's spammy and usually user error. Instead we will log these errors at a
// higher log level. We still need to throw these errors to signal that the
// sync should be retried.
func IgnorableError(s string, args ...interface{}) ignorableError {
return ignorableError(fmt.Sprintf(s, args...))
}
type ignorableError string
func (e ignorableError) Error() string {
return string(e)
}


@@ -1,83 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certificates
import (
"testing"
"time"
certificates "k8s.io/api/certificates/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/controller"
)
// TODO flesh this out to cover things like not being able to find the csr in the cache, not
// auto-approving, etc.
func TestCertificateController(t *testing.T) {
csr := &certificates.CertificateSigningRequest{
ObjectMeta: metav1.ObjectMeta{
Name: "test-csr",
},
}
client := fake.NewSimpleClientset(csr)
informerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(csr), controller.NoResyncPeriodFunc())
handler := func(csr *certificates.CertificateSigningRequest) error {
csr.Status.Conditions = append(csr.Status.Conditions, certificates.CertificateSigningRequestCondition{
Type: certificates.CertificateApproved,
Reason: "test reason",
Message: "test message",
})
_, err := client.Certificates().CertificateSigningRequests().UpdateApproval(csr)
if err != nil {
return err
}
return nil
}
controller := NewCertificateController(
client,
informerFactory.Certificates().V1beta1().CertificateSigningRequests(),
handler,
)
controller.csrsSynced = func() bool { return true }
stopCh := make(chan struct{})
defer close(stopCh)
informerFactory.Start(stopCh)
informerFactory.WaitForCacheSync(stopCh)
wait.PollUntil(10*time.Millisecond, func() (bool, error) {
return controller.queue.Len() >= 1, nil
}, stopCh)
controller.processNextWorkItem()
actions := client.Actions()
if len(actions) != 1 {
t.Errorf("expected 1 action, got %d", len(actions))
}
if a := actions[0]; !a.Matches("update", "certificatesigningrequests") ||
a.GetSubresource() != "approval" {
t.Errorf("unexpected action: %#v", a)
}
}


@@ -1,38 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package certificates
import certificates "k8s.io/api/certificates/v1beta1"
// IsCertificateRequestApproved returns true if a certificate request has the
// "Approved" condition and no "Denied" conditions; false otherwise.
func IsCertificateRequestApproved(csr *certificates.CertificateSigningRequest) bool {
approved, denied := GetCertApprovalCondition(&csr.Status)
return approved && !denied
}
func GetCertApprovalCondition(status *certificates.CertificateSigningRequestStatus) (approved bool, denied bool) {
for _, c := range status.Conditions {
if c.Type == certificates.CertificateApproved {
approved = true
}
if c.Type == certificates.CertificateDenied {
denied = true
}
}
return
}
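// Editor's sketch, not part of the original source: a CSR carrying both an
// Approved and a Denied condition is not considered approved.
// exampleApprovalCheck is a hypothetical helper.
func exampleApprovalCheck() bool {
	status := certificates.CertificateSigningRequestStatus{
		Conditions: []certificates.CertificateSigningRequestCondition{
			{Type: certificates.CertificateApproved},
			{Type: certificates.CertificateDenied},
		},
	}
	approved, denied := GetCertApprovalCondition(&status)
	return approved && !denied // false: the Denied condition wins
}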


@@ -1,44 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["cleaner.go"],
importpath = "k8s.io/kubernetes/pkg/controller/certificates/cleaner",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/listers/certificates/v1beta1:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["cleaner_test.go"],
embed = [":go_default_library"],
deps = [
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
],
)


@@ -1,200 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cleaner implements an automated cleaner that does garbage collection
// on CSRs that meet specific criteria. With automated CSR requests and
// automated approvals, the volume of CSRs only increases over time, at a rapid
// rate if the certificate duration is short.
package cleaner
import (
"crypto/x509"
"encoding/pem"
"fmt"
"time"
"github.com/golang/glog"
capi "k8s.io/api/certificates/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
certificatesinformers "k8s.io/client-go/informers/certificates/v1beta1"
csrclient "k8s.io/client-go/kubernetes/typed/certificates/v1beta1"
certificateslisters "k8s.io/client-go/listers/certificates/v1beta1"
)
const (
// The interval to list all CSRs and check each one against the criteria to
// automatically clean it up.
pollingInterval = 1 * time.Hour
// The time periods after which these different CSR statuses should be
// cleaned up.
approvedExpiration = 1 * time.Hour
deniedExpiration = 1 * time.Hour
pendingExpiration = 24 * time.Hour
)
// CSRCleanerController is a controller that garbage collects old certificate
// signing requests (CSRs). Since there are mechanisms that automatically
// create CSRs, and mechanisms that automatically approve CSRs, in order to
// prevent a build up of CSRs over time, it is necessary to GC them. CSRs will
// be removed if they meet one of the following criteria: the CSR is Approved
// with a certificate and is old enough to be past the GC issued deadline, the
// CSR is denied and is old enough to be past the GC denied deadline, the CSR
// is Pending and is old enough to be past the GC pending deadline, the CSR is
// approved with a certificate and the certificate is expired.
type CSRCleanerController struct {
csrClient csrclient.CertificateSigningRequestInterface
csrLister certificateslisters.CertificateSigningRequestLister
}
// NewCSRCleanerController creates a new CSRCleanerController.
func NewCSRCleanerController(
csrClient csrclient.CertificateSigningRequestInterface,
csrInformer certificatesinformers.CertificateSigningRequestInformer,
) *CSRCleanerController {
return &CSRCleanerController{
csrClient: csrClient,
csrLister: csrInformer.Lister(),
}
}
// Run starts the workers that periodically list all CSRs and clean up those past their deadlines.
func (ccc *CSRCleanerController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
glog.Infof("Starting CSR cleaner controller")
defer glog.Infof("Shutting down CSR cleaner controller")
for i := 0; i < workers; i++ {
go wait.Until(ccc.worker, pollingInterval, stopCh)
}
<-stopCh
}
// worker runs a single pass: it lists all CSRs and attempts to clean up each one that meets the criteria.
func (ccc *CSRCleanerController) worker() {
csrs, err := ccc.csrLister.List(labels.Everything())
if err != nil {
glog.Errorf("Unable to list CSRs: %v", err)
return
}
for _, csr := range csrs {
if err := ccc.handle(csr); err != nil {
glog.Errorf("Error while attempting to clean CSR %q: %v", csr.Name, err)
}
}
}
func (ccc *CSRCleanerController) handle(csr *capi.CertificateSigningRequest) error {
isIssuedExpired, err := isIssuedExpired(csr)
if err != nil {
return err
}
if isIssuedPastDeadline(csr) || isDeniedPastDeadline(csr) || isPendingPastDeadline(csr) || isIssuedExpired {
if err := ccc.csrClient.Delete(csr.Name, nil); err != nil {
return fmt.Errorf("unable to delete CSR %q: %v", csr.Name, err)
}
}
return nil
}
// isIssuedExpired checks if the CSR has been issued a certificate and if the
// expiration of the certificate (the NotAfter value) has passed.
func isIssuedExpired(csr *capi.CertificateSigningRequest) (bool, error) {
for _, c := range csr.Status.Conditions {
isExpired, err := isExpired(csr)
if err != nil {
return false, err
}
if c.Type == capi.CertificateApproved && isIssued(csr) && isExpired {
glog.Infof("Cleaning CSR %q as the associated certificate is expired.", csr.Name)
return true, nil
}
}
return false, nil
}
// isPendingPastDeadline checks if the certificate has a Pending status and the
// creation time of the CSR is past the deadline that pending requests are
// maintained for.
func isPendingPastDeadline(csr *capi.CertificateSigningRequest) bool {
// If there are no Conditions on the status, the CSR will appear via
// `kubectl` as `Pending`.
if len(csr.Status.Conditions) == 0 && isOlderThan(csr.CreationTimestamp, pendingExpiration) {
glog.Infof("Cleaning CSR %q as it is more than %v old and unhandled.", csr.Name, pendingExpiration)
return true
}
return false
}
// isDeniedPastDeadline checks if the certificate has a Denied status and the
// creation time of the CSR is past the deadline that denied requests are
// maintained for.
func isDeniedPastDeadline(csr *capi.CertificateSigningRequest) bool {
for _, c := range csr.Status.Conditions {
if c.Type == capi.CertificateDenied && isOlderThan(c.LastUpdateTime, deniedExpiration) {
glog.Infof("Cleaning CSR %q as it is more than %v old and denied.", csr.Name, deniedExpiration)
return true
}
}
return false
}
// isIssuedPastDeadline checks if the certificate has an Issued status and the
// creation time of the CSR is past the deadline that issued requests are
// maintained for.
func isIssuedPastDeadline(csr *capi.CertificateSigningRequest) bool {
for _, c := range csr.Status.Conditions {
if c.Type == capi.CertificateApproved && isIssued(csr) && isOlderThan(c.LastUpdateTime, approvedExpiration) {
glog.Infof("Cleaning CSR %q as it is more than %v old and approved.", csr.Name, approvedExpiration)
return true
}
}
return false
}
// isOlderThan checks that t is non-zero and more than d in the past.
func isOlderThan(t metav1.Time, d time.Duration) bool {
return !t.IsZero() && t.Sub(time.Now()) < -1*d
}
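// Editor's sketch, not part of the original source: with approvedExpiration of
// one hour, an approval condition stamped two hours ago is past the deadline,
// while one stamped fifty minutes ago is not. exampleDeadlines is a
// hypothetical helper.
func exampleDeadlines() (bool, bool) {
	old := metav1.NewTime(time.Now().Add(-2 * time.Hour))
	recent := metav1.NewTime(time.Now().Add(-50 * time.Minute))
	pastDeadline := isOlderThan(old, approvedExpiration)      // true: two hours exceeds the one-hour deadline
	withinDeadline := isOlderThan(recent, approvedExpiration) // false: fifty minutes does not
	return pastDeadline, withinDeadline
}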
// isIssued checks if the CSR has `Issued` status. There is no explicit
// 'Issued' status. Implicitly, if there is a certificate associated with the
// CSR, the CSR statuses that are visible via `kubectl` will include 'Issued'.
func isIssued(csr *capi.CertificateSigningRequest) bool {
return csr.Status.Certificate != nil
}
// isExpired checks if the CSR has a certificate and the date in the `NotAfter`
// field has gone by.
func isExpired(csr *capi.CertificateSigningRequest) (bool, error) {
if csr.Status.Certificate == nil {
return false, nil
}
block, _ := pem.Decode(csr.Status.Certificate)
if block == nil {
return false, fmt.Errorf("expected the certificate associated with the CSR to be PEM encoded")
}
certs, err := x509.ParseCertificates(block.Bytes)
if err != nil {
return false, fmt.Errorf("unable to parse certificate data: %v", err)
}
return time.Now().After(certs[0].NotAfter), nil
}


@@ -1,201 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cleaner
import (
"testing"
"time"
capi "k8s.io/api/certificates/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
)
const (
expiredCert = `-----BEGIN CERTIFICATE-----
MIICIzCCAc2gAwIBAgIJAOApTlMFDOUnMA0GCSqGSIb3DQEBCwUAMG0xCzAJBgNV
BAYTAkdCMQ8wDQYDVQQIDAZMb25kb24xDzANBgNVBAcMBkxvbmRvbjEYMBYGA1UE
CgwPR2xvYmFsIFNlY3VyaXR5MRYwFAYDVQQLDA1JVCBEZXBhcnRtZW50MQowCAYD
VQQDDAEqMB4XDTE3MTAwNDIwNDgzOFoXDTE3MTAwMzIwNDgzOFowbTELMAkGA1UE
BhMCR0IxDzANBgNVBAgMBkxvbmRvbjEPMA0GA1UEBwwGTG9uZG9uMRgwFgYDVQQK
DA9HbG9iYWwgU2VjdXJpdHkxFjAUBgNVBAsMDUlUIERlcGFydG1lbnQxCjAIBgNV
BAMMASowXDANBgkqhkiG9w0BAQEFAANLADBIAkEA3Gt0KmuRXDxvqZUiX/xqAn1t
nZZX98guZvPPyxnQtV3YpA274W0sX3jL+U71Ya+3kaUstXQa4YrWBUHiXoqJnwID
AQABo1AwTjAdBgNVHQ4EFgQUtDsIpzHoUiLsO88f9fm+G0tYSPowHwYDVR0jBBgw
FoAUtDsIpzHoUiLsO88f9fm+G0tYSPowDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0B
AQsFAANBADfrlKof5CUkxGlX9Rifxv/mWOk8ZuTLWfMYQH2nycBHnmOxy6sR+87W
/Mb/uRz0TXVnGVcbu5E8Bz7e/Far1ZI=
-----END CERTIFICATE-----`
unexpiredCert = `-----BEGIN CERTIFICATE-----
MIICJTCCAc+gAwIBAgIJAIRjMToP+pPEMA0GCSqGSIb3DQEBCwUAMG0xCzAJBgNV
BAYTAkdCMQ8wDQYDVQQIDAZMb25kb24xDzANBgNVBAcMBkxvbmRvbjEYMBYGA1UE
CgwPR2xvYmFsIFNlY3VyaXR5MRYwFAYDVQQLDA1JVCBEZXBhcnRtZW50MQowCAYD
VQQDDAEqMCAXDTE3MTAwNDIwNDUyNFoYDzIxMTcwOTEwMjA0NTI0WjBtMQswCQYD
VQQGEwJHQjEPMA0GA1UECAwGTG9uZG9uMQ8wDQYDVQQHDAZMb25kb24xGDAWBgNV
BAoMD0dsb2JhbCBTZWN1cml0eTEWMBQGA1UECwwNSVQgRGVwYXJ0bWVudDEKMAgG
A1UEAwwBKjBcMA0GCSqGSIb3DQEBAQUAA0sAMEgCQQC7j9BAV5HqIJGi6r4G4YeI
ioHxH2loVu8IOKSK7xVs3v/EjR/eXbQzM+jZU7duyZqn6YjySZNLl0K0MfHCHBgX
AgMBAAGjUDBOMB0GA1UdDgQWBBTwxV40NFSNW7lpQ3eUWX7Mxs03yzAfBgNVHSME
GDAWgBTwxV40NFSNW7lpQ3eUWX7Mxs03yzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3
DQEBCwUAA0EALDi9OidANHflx8q+w3p0rJo9gpA6cJcFpEtP2Lv4kvOtB1f6L0jY
MLd7MVm4cS/MNcx4L7l23UC3Hx4+nAxvIg==
-----END CERTIFICATE-----`
)
func TestCleanerWithApprovedExpiredCSR(t *testing.T) {
testCases := []struct {
name string
created metav1.Time
certificate []byte
conditions []capi.CertificateSigningRequestCondition
expectedActions []string
}{
{
"no delete approved not passed deadline",
metav1.NewTime(time.Now().Add(-1 * time.Minute)),
[]byte(unexpiredCert),
[]capi.CertificateSigningRequestCondition{
{
Type: capi.CertificateApproved,
LastUpdateTime: metav1.NewTime(time.Now().Add(-50 * time.Minute)),
},
},
[]string{},
},
{
"no delete approved passed deadline not issued",
metav1.NewTime(time.Now().Add(-1 * time.Minute)),
nil,
[]capi.CertificateSigningRequestCondition{
{
Type: capi.CertificateApproved,
LastUpdateTime: metav1.NewTime(time.Now().Add(-50 * time.Minute)),
},
},
[]string{},
},
{
"delete approved passed deadline",
metav1.NewTime(time.Now().Add(-1 * time.Minute)),
[]byte(unexpiredCert),
[]capi.CertificateSigningRequestCondition{
{
Type: capi.CertificateApproved,
LastUpdateTime: metav1.NewTime(time.Now().Add(-2 * time.Hour)),
},
},
[]string{"delete"},
},
{
"no delete denied not passed deadline",
metav1.NewTime(time.Now().Add(-1 * time.Minute)),
nil,
[]capi.CertificateSigningRequestCondition{
{
Type: capi.CertificateDenied,
LastUpdateTime: metav1.NewTime(time.Now().Add(-50 * time.Minute)),
},
},
[]string{},
},
{
"delete denied passed deadline",
metav1.NewTime(time.Now().Add(-1 * time.Minute)),
nil,
[]capi.CertificateSigningRequestCondition{
{
Type: capi.CertificateDenied,
LastUpdateTime: metav1.NewTime(time.Now().Add(-2 * time.Hour)),
},
},
[]string{"delete"},
},
{
"no delete pending not passed deadline",
metav1.NewTime(time.Now().Add(-5 * time.Hour)),
nil,
[]capi.CertificateSigningRequestCondition{},
[]string{},
},
{
"delete pending passed deadline",
metav1.NewTime(time.Now().Add(-25 * time.Hour)),
nil,
[]capi.CertificateSigningRequestCondition{},
[]string{"delete"},
},
{
"no delete approved not passed deadline unexpired",
metav1.NewTime(time.Now().Add(-1 * time.Minute)),
[]byte(unexpiredCert),
[]capi.CertificateSigningRequestCondition{
{
Type: capi.CertificateApproved,
LastUpdateTime: metav1.NewTime(time.Now().Add(-50 * time.Minute)),
},
},
[]string{},
},
{
"delete approved not passed deadline expired",
metav1.NewTime(time.Now().Add(-1 * time.Minute)),
[]byte(expiredCert),
[]capi.CertificateSigningRequestCondition{
{
Type: capi.CertificateApproved,
LastUpdateTime: metav1.NewTime(time.Now().Add(-50 * time.Minute)),
},
},
[]string{"delete"},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
csr := &capi.CertificateSigningRequest{
ObjectMeta: metav1.ObjectMeta{
Name: "fake-csr",
CreationTimestamp: tc.created,
},
Status: capi.CertificateSigningRequestStatus{
Certificate: tc.certificate,
Conditions: tc.conditions,
},
}
client := fake.NewSimpleClientset(csr)
s := &CSRCleanerController{
csrClient: client.CertificatesV1beta1().CertificateSigningRequests(),
}
err := s.handle(csr)
if err != nil {
t.Fatalf("failed to clean CSR: %v", err)
}
actions := client.Actions()
if len(actions) != len(tc.expectedActions) {
t.Fatalf("got %d actions, wanted %d actions", len(actions), len(tc.expectedActions))
}
for i := 0; i < len(actions); i++ {
if a := actions[i]; !a.Matches(tc.expectedActions[i], "certificatesigningrequests") {
t.Errorf("got action %#v, wanted %v", a, tc.expectedActions[i])
}
}
})
}
}


@@ -1,51 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = ["cfssl_signer_test.go"],
data = [
"testdata/ca.crt",
"testdata/ca.key",
"testdata/kubelet.csr",
],
embed = [":go_default_library"],
deps = [
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/util/cert:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = ["cfssl_signer.go"],
importpath = "k8s.io/kubernetes/pkg/controller/certificates/signer",
deps = [
"//pkg/controller/certificates:go_default_library",
"//vendor/github.com/cloudflare/cfssl/config:go_default_library",
"//vendor/github.com/cloudflare/cfssl/helpers:go_default_library",
"//vendor/github.com/cloudflare/cfssl/signer:go_default_library",
"//vendor/github.com/cloudflare/cfssl/signer/local:go_default_library",
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/informers/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@@ -1,138 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package signer implements a CA signer that uses keys stored on local disk.
package signer
import (
"crypto"
"crypto/x509"
"fmt"
"io/ioutil"
"os"
"time"
capi "k8s.io/api/certificates/v1beta1"
certificatesinformers "k8s.io/client-go/informers/certificates/v1beta1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/controller/certificates"
"github.com/cloudflare/cfssl/config"
"github.com/cloudflare/cfssl/helpers"
"github.com/cloudflare/cfssl/signer"
"github.com/cloudflare/cfssl/signer/local"
)
func NewCSRSigningController(
client clientset.Interface,
csrInformer certificatesinformers.CertificateSigningRequestInformer,
caFile, caKeyFile string,
certificateDuration time.Duration,
) (*certificates.CertificateController, error) {
signer, err := newCFSSLSigner(caFile, caKeyFile, client, certificateDuration)
if err != nil {
return nil, err
}
return certificates.NewCertificateController(
client,
csrInformer,
signer.handle,
), nil
}
type cfsslSigner struct {
ca *x509.Certificate
priv crypto.Signer
sigAlgo x509.SignatureAlgorithm
client clientset.Interface
certificateDuration time.Duration
}
func newCFSSLSigner(caFile, caKeyFile string, client clientset.Interface, certificateDuration time.Duration) (*cfsslSigner, error) {
ca, err := ioutil.ReadFile(caFile)
if err != nil {
return nil, fmt.Errorf("error reading CA cert file %q: %v", caFile, err)
}
cakey, err := ioutil.ReadFile(caKeyFile)
if err != nil {
return nil, fmt.Errorf("error reading CA key file %q: %v", caKeyFile, err)
}
parsedCa, err := helpers.ParseCertificatePEM(ca)
if err != nil {
return nil, fmt.Errorf("error parsing CA cert file %q: %v", caFile, err)
}
strPassword := os.Getenv("CFSSL_CA_PK_PASSWORD")
password := []byte(strPassword)
if strPassword == "" {
password = nil
}
priv, err := helpers.ParsePrivateKeyPEMWithPassword(cakey, password)
if err != nil {
return nil, fmt.Errorf("Malformed private key %v", err)
}
return &cfsslSigner{
priv: priv,
ca: parsedCa,
sigAlgo: signer.DefaultSigAlgo(priv),
client: client,
certificateDuration: certificateDuration,
}, nil
}
func (s *cfsslSigner) handle(csr *capi.CertificateSigningRequest) error {
if !certificates.IsCertificateRequestApproved(csr) {
return nil
}
csr, err := s.sign(csr)
if err != nil {
return fmt.Errorf("error auto signing csr: %v", err)
}
_, err = s.client.CertificatesV1beta1().CertificateSigningRequests().UpdateStatus(csr)
if err != nil {
return fmt.Errorf("error updating signature for csr: %v", err)
}
return nil
}
func (s *cfsslSigner) sign(csr *capi.CertificateSigningRequest) (*capi.CertificateSigningRequest, error) {
var usages []string
for _, usage := range csr.Spec.Usages {
usages = append(usages, string(usage))
}
policy := &config.Signing{
Default: &config.SigningProfile{
Usage: usages,
Expiry: s.certificateDuration,
ExpiryString: s.certificateDuration.String(),
},
}
cfs, err := local.NewSigner(s.priv, s.ca, s.sigAlgo, policy)
if err != nil {
return nil, err
}
csr.Status.Certificate, err = cfs.Sign(signer.SignRequest{
Request: string(csr.Spec.Request),
})
if err != nil {
return nil, err
}
return csr, nil
}
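// Editor's sketch, not part of the original source: typical wiring of the
// signing controller. client, informerFactory and stopCh are assumed to come
// from the caller, and the CA paths are hypothetical.
//
//	sc, err := NewCSRSigningController(
//		client,
//		informerFactory.Certificates().V1beta1().CertificateSigningRequests(),
//		"/etc/kubernetes/ca/ca.pem",
//		"/etc/kubernetes/ca/ca.key",
//		365*24*time.Hour, // certificateDuration
//	)
//	if err != nil {
//		glog.Fatalf("failed to create CSR signing controller: %v", err)
//	}
//	go sc.Run(1, stopCh)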


@@ -1,84 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package signer
import (
"crypto/x509"
"io/ioutil"
"reflect"
"testing"
"time"
capi "k8s.io/api/certificates/v1beta1"
"k8s.io/client-go/util/cert"
)
func TestSigner(t *testing.T) {
s, err := newCFSSLSigner("./testdata/ca.crt", "./testdata/ca.key", nil, 1*time.Hour)
if err != nil {
t.Fatalf("failed to create signer: %v", err)
}
csrb, err := ioutil.ReadFile("./testdata/kubelet.csr")
if err != nil {
t.Fatalf("failed to read CSR: %v", err)
}
csr := &capi.CertificateSigningRequest{
Spec: capi.CertificateSigningRequestSpec{
Request: []byte(csrb),
Usages: []capi.KeyUsage{
capi.UsageSigning,
capi.UsageKeyEncipherment,
capi.UsageServerAuth,
capi.UsageClientAuth,
},
},
}
csr, err = s.sign(csr)
if err != nil {
t.Fatalf("failed to sign CSR: %v", err)
}
certData := csr.Status.Certificate
if len(certData) == 0 {
t.Fatalf("expected a certificate after signing")
}
certs, err := cert.ParseCertsPEM(certData)
if err != nil {
t.Fatalf("failed to parse certificate: %v", err)
}
if len(certs) != 1 {
t.Fatalf("expected one certificate")
}
crt := certs[0]
if crt.Subject.CommonName != "system:node:k-a-node-s36b" {
t.Errorf("expected common name of 'system:node:k-a-node-s36b', but got: %v", certs[0].Subject.CommonName)
}
if !reflect.DeepEqual(crt.Subject.Organization, []string{"system:nodes"}) {
t.Errorf("expected organization to be [system:nodes] but got: %v", crt.Subject.Organization)
}
if crt.KeyUsage != x509.KeyUsageDigitalSignature|x509.KeyUsageKeyEncipherment {
t.Errorf("bad key usage")
}
if !reflect.DeepEqual(crt.ExtKeyUsage, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}) {
t.Errorf("bad extended key usage")
}
}


@@ -1,18 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIC9zCCAd+gAwIBAgIJAOWJ8tWNUIsZMA0GCSqGSIb3DQEBCwUAMBIxEDAOBgNV
BAMMB2t1YmUtY2EwHhcNMTYxMjIyMDAyNTI5WhcNNDQwNTA5MDAyNTI5WjASMRAw
DgYDVQQDDAdrdWJlLWNhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
1HK1d2p7N7UC6px8lVtABw8jPpVyNYjrJmI+TKTTdCgWGsUTFMCw4t4Q/KQDDlvB
P19uPhbfp8aLwOWXBCxOPZzlM2mAEjSUgKjbyGCW/8vaXa2VgQm3tKZdydKiFvIo
fEsNA+58w8A0WWEB8wYFcdCt8uPyQ0ws/TxE+WW3u7EPlC0/inIX9JqeZZMpDk3N
lHEv/pGEjQmoet/hBwGHq9PKepkN5/V6rrSADJ5I4Uklp2f7G9MCP/zV8xKfs0lK
CMoJsIPK3nL9N3C0rqBQPfcyKE2fnEkxC3UVZA8brvLTkBfOgmM2eVg/nauU1ejv
zOJL7tDwUioLriw2hiGrFwIDAQABo1AwTjAdBgNVHQ4EFgQUbGJxJeW7BgZ4xSmW
d3Aw3gq8YZUwHwYDVR0jBBgwFoAUbGJxJeW7BgZ4xSmWd3Aw3gq8YZUwDAYDVR0T
BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAunzpYAxpzguzxG83pK5n3ObsGDwO
78d38qX1VRvMLPvioZxYgquqqFPdLI3xe8b8KdZNzb65549tgjAI17tTKGTRgJu5
yzLU1tO4vNaAFecMCtPvElYfkrAv2vbGCVJ1bYKTnjdu3083jG3sY9TDj0364A57
lNwKEd5uxHGWg4H+NbyHkDqfKmllzLvJ9XjSWBPmNVLSW50hV+h9fUXgz9LN+qVY
VEDfAEWqb6PVy9ANw8A8QLnuSRxbd7hAigtlC4MwzYJ6tyFIIH6bCIgfoZuA+brm
WGcpIxl4fKEGafSgjsK/6Yhb61mkhHmG16mzEUZNkNsjiYJuF2QxpOlQrw==
-----END CERTIFICATE-----


@@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEA1HK1d2p7N7UC6px8lVtABw8jPpVyNYjrJmI+TKTTdCgWGsUT
FMCw4t4Q/KQDDlvBP19uPhbfp8aLwOWXBCxOPZzlM2mAEjSUgKjbyGCW/8vaXa2V
gQm3tKZdydKiFvIofEsNA+58w8A0WWEB8wYFcdCt8uPyQ0ws/TxE+WW3u7EPlC0/
inIX9JqeZZMpDk3NlHEv/pGEjQmoet/hBwGHq9PKepkN5/V6rrSADJ5I4Uklp2f7
G9MCP/zV8xKfs0lKCMoJsIPK3nL9N3C0rqBQPfcyKE2fnEkxC3UVZA8brvLTkBfO
gmM2eVg/nauU1ejvzOJL7tDwUioLriw2hiGrFwIDAQABAoIBAFJCmEFE2bEYRajS
LusmCgSxt9PjyfUwrtyN7dF/gODZJLX42QqQEe3GTo2EdCp7HLiNGwKvmKo+Fp76
Rx82iJUSyyy9DPn/ogCvYWqU++LP7B2ZuOnd+WPZhzc+d8Sqv0JhTQjYrzaclaiG
B1syWalYRAJogMXOGR102MA4wovJrlHFuTVSWiDe0uguLxyjoTMIRqbib9ZAMSLX
bfcM2abGpXgq10abda3KKAJbZyr2fnBvqKTs4a4zYeHJpQT+NBPMiryb2WnPFg+b
93nrjDxUtPsx8NJz6HGkSQLagXkZX2J1JpT8loaNIdyQHab1LNXptc84LR8xxusy
bs5NowECgYEA+j+SwVgeC+NCUIfxr3F9zPAD9A0Tk3gD4z+j0opfLIMghX4jtK0e
9fQyglecAbojlkEUk/js5IVZ0IIhBNPWXxKtdShZO7EmJ6Z5IEmFrZK1xUomYBa2
BfysqSAkxVLsTDIfI0Q4DHQNDOV+iY3j8WoaR51cXr+IY+mYBGSNI80CgYEA2VS5
X5QHDxoh3r5ORiyab3ciubEofJ29D3NR1tCe9ZgSYRV5Y7T/4KPpZdpsEX/ydYD6
X4DyURuYNK7PUR8DSlX7/VuMzHThqGJMaT0LE+alU4bruiad33X1WXgtcPTGCic0
8il50TZTgba0CwxuCO1eVb3IijwgJBX/byM67nMCgYEA7As1KSwtwzbMoVtpa/xY
Fgu7HuOKuIn22M55fylH1puk/GXb1huJ3aNGVU2/+J0T3jFq8JxXDsJ90kA8Vupe
BXV/qceyS6yv+ax8Cilvbya4T+y+P9qMPR912V1Zccri2ohYeJJrb8uzV5vM/ICb
JmbXfP+AVlrBksSOwG37920CgYEAsSi2X6o8QtxLhdZd2ihbz8cu4G4AkezHhAO+
T70KBytquAcYR+Xwu38CMEvn0jAZRh3YeueTH/i9jxx81STRutPysSni0Xvpwyg2
H4dqM1PNqxQNrlXyVYlDciZb7HsrwHULXOfgbGG7mr6Db4o3XEGap4woID84+BGS
glcWn+8CgYEA36uulmZcodfet04qQvlDtr1d7mwLdTR/JAO0ZBIgFH7eGZdEVh8O
DoTJTdSSJGiv8J35PwEXfhKHjhgOjDocLYu+yCOwVj7jRdHqlDS1BaE36Hzdw0rb
mWkBRMGJtGhzhoRJEFHAnoLXc9danRfnHwVR58drlf7bjR5I9eU9u1I=
-----END RSA PRIVATE KEY-----


@@ -1,8 +0,0 @@
-----BEGIN CERTIFICATE REQUEST-----
MIH1MIGdAgEAMDsxFTATBgNVBAoTDHN5c3RlbTpub2RlczEiMCAGA1UEAxMZc3lz
dGVtOm5vZGU6ay1hLW5vZGUtczM2YjBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IA
BJbxa5Y8SrUJVHpOoWD5ceqH+5R9mjIhwVP2sqfTcLkjvbitzOiLlxSq/LwJ+qq7
kVpf9f3GopZVhRWbYSCg0YGgADAKBggqhkjOPQQDAgNHADBEAiAabb6XFtPOJUCQ
+84NhxLEvPANhrtwFq3Q0qFZ9TzH5QIgc/697RTTcbri2lVj+10dLFIC3VYJ7br4
QjA7haCYXrA=
-----END CERTIFICATE REQUEST-----


@@ -1,292 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"fmt"
"time"
v1authenticationapi "k8s.io/api/authentication/v1"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount"
clientset "k8s.io/client-go/kubernetes"
v1authentication "k8s.io/client-go/kubernetes/typed/authentication/v1"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/api/legacyscheme"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/serviceaccount"
"github.com/golang/glog"
)
// ControllerClientBuilder allows you to get clients and configs for controllers
type ControllerClientBuilder interface {
Config(name string) (*restclient.Config, error)
ConfigOrDie(name string) *restclient.Config
Client(name string) (clientset.Interface, error)
ClientOrDie(name string) clientset.Interface
ClientGoClient(name string) (clientset.Interface, error)
ClientGoClientOrDie(name string) clientset.Interface
}
// SimpleControllerClientBuilder provides clients built from a fixed config, each tagged with a different user agent
type SimpleControllerClientBuilder struct {
// ClientConfig is a skeleton config to clone and use as the basis for each controller client
ClientConfig *restclient.Config
}
func (b SimpleControllerClientBuilder) Config(name string) (*restclient.Config, error) {
clientConfig := *b.ClientConfig
return restclient.AddUserAgent(&clientConfig, name), nil
}
func (b SimpleControllerClientBuilder) ConfigOrDie(name string) *restclient.Config {
clientConfig, err := b.Config(name)
if err != nil {
glog.Fatal(err)
}
return clientConfig
}
func (b SimpleControllerClientBuilder) Client(name string) (clientset.Interface, error) {
clientConfig, err := b.Config(name)
if err != nil {
return nil, err
}
return clientset.NewForConfig(clientConfig)
}
func (b SimpleControllerClientBuilder) ClientOrDie(name string) clientset.Interface {
client, err := b.Client(name)
if err != nil {
glog.Fatal(err)
}
return client
}
func (b SimpleControllerClientBuilder) ClientGoClient(name string) (clientset.Interface, error) {
clientConfig, err := b.Config(name)
if err != nil {
return nil, err
}
return clientset.NewForConfig(clientConfig)
}
func (b SimpleControllerClientBuilder) ClientGoClientOrDie(name string) clientset.Interface {
client, err := b.ClientGoClient(name)
if err != nil {
glog.Fatal(err)
}
return client
}
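// Editor's sketch, not part of the original source: controllers share one
// skeleton config, but each gets a client tagged with its own user agent,
// which makes per-controller calls distinguishable in apiserver logs.
// rootClientConfig is assumed to come from the caller's kubeconfig handling.
//
//	builder := SimpleControllerClientBuilder{ClientConfig: rootClientConfig}
//	saClient := builder.ClientOrDie("service-account-controller")
//	nodeClient := builder.ClientOrDie("node-controller")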
// SAControllerClientBuilder is a ControllerClientBuilder that returns clients identifying as
// service accounts
type SAControllerClientBuilder struct {
// ClientConfig is a skeleton config to clone and use as the basis for each controller client
ClientConfig *restclient.Config
// CoreClient is used to provision service accounts if needed and watch for their associated tokens
// to construct a controller client
CoreClient v1core.CoreV1Interface
// AuthenticationClient is used to check API tokens to make sure they are valid before
// building a controller client from them
AuthenticationClient v1authentication.AuthenticationV1Interface
// Namespace is the namespace used to host the service accounts that will back the
// controllers. It must be a highly privileged namespace that normal users cannot inspect.
Namespace string
}
// Config returns a complete clientConfig for constructing clients. It is kept
// separate in anticipation of composition, since not all clientsets are known here.
func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, error) {
sa, err := b.getOrCreateServiceAccount(name)
if err != nil {
return nil, err
}
var clientConfig *restclient.Config
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(v1.SecretTypeServiceAccountToken)}).String()
return b.CoreClient.Secrets(b.Namespace).List(options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(v1.SecretTypeServiceAccountToken)}).String()
return b.CoreClient.Secrets(b.Namespace).Watch(options)
},
}
_, err = cache.ListWatchUntil(30*time.Second, lw,
func(event watch.Event) (bool, error) {
switch event.Type {
case watch.Deleted:
return false, nil
case watch.Error:
return false, fmt.Errorf("error watching")
case watch.Added, watch.Modified:
secret, ok := event.Object.(*v1.Secret)
if !ok {
return false, fmt.Errorf("unexpected object type: %T", event.Object)
}
if !serviceaccount.IsServiceAccountToken(secret, sa) {
return false, nil
}
if len(secret.Data[v1.ServiceAccountTokenKey]) == 0 {
return false, nil
}
validConfig, valid, err := b.getAuthenticatedConfig(sa, string(secret.Data[v1.ServiceAccountTokenKey]))
if err != nil {
glog.Warningf("error validating API token for %s/%s in secret %s: %v", sa.Name, sa.Namespace, secret.Name, err)
// continue watching for good tokens
return false, nil
}
if !valid {
glog.Warningf("secret %s contained an invalid API token for %s/%s", secret.Name, sa.Name, sa.Namespace)
// try to delete the secret containing the invalid token
if err := b.CoreClient.Secrets(secret.Namespace).Delete(secret.Name, &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
glog.Warningf("error deleting secret %s containing invalid API token for %s/%s: %v", secret.Name, sa.Name, sa.Namespace, err)
}
// continue watching for good tokens
return false, nil
}
clientConfig = validConfig
return true, nil
default:
return false, fmt.Errorf("unexpected event type: %v", event.Type)
}
})
if err != nil {
return nil, fmt.Errorf("unable to get token for service account: %v", err)
}
return clientConfig, nil
}
func (b SAControllerClientBuilder) getOrCreateServiceAccount(name string) (*v1.ServiceAccount, error) {
sa, err := b.CoreClient.ServiceAccounts(b.Namespace).Get(name, metav1.GetOptions{})
if err == nil {
return sa, nil
}
if !apierrors.IsNotFound(err) {
return nil, err
}
// Create the namespace if we can't verify it exists.
// Tolerate errors, since we don't know whether this component has namespace creation permissions.
if _, err := b.CoreClient.Namespaces().Get(b.Namespace, metav1.GetOptions{}); err != nil {
b.CoreClient.Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: b.Namespace}})
}
// Create the service account
sa, err = b.CoreClient.ServiceAccounts(b.Namespace).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: b.Namespace, Name: name}})
if apierrors.IsAlreadyExists(err) {
// If we're racing to init and someone else already created it, re-fetch
return b.CoreClient.ServiceAccounts(b.Namespace).Get(name, metav1.GetOptions{})
}
return sa, err
}
func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount, token string) (*restclient.Config, bool, error) {
username := apiserverserviceaccount.MakeUsername(sa.Namespace, sa.Name)
clientConfig := restclient.AnonymousClientConfig(b.ClientConfig)
clientConfig.BearerToken = token
restclient.AddUserAgent(clientConfig, username)
// Try token review first
tokenReview := &v1authenticationapi.TokenReview{Spec: v1authenticationapi.TokenReviewSpec{Token: token}}
if tokenResult, err := b.AuthenticationClient.TokenReviews().Create(tokenReview); err == nil {
if !tokenResult.Status.Authenticated {
glog.Warningf("Token for %s/%s did not authenticate correctly", sa.Name, sa.Namespace)
return nil, false, nil
}
if tokenResult.Status.User.Username != username {
glog.Warningf("Token for %s/%s authenticated as unexpected username: %s", sa.Name, sa.Namespace, tokenResult.Status.User.Username)
return nil, false, nil
}
glog.V(4).Infof("Verified credential for %s/%s", sa.Name, sa.Namespace)
return clientConfig, true, nil
}
// If we couldn't run the token review, the API might be disabled or we might not have permission.
// Try to make a request to /apis with the token. If we get a 401 we should consider the token invalid.
clientConfigCopy := *clientConfig
clientConfigCopy.NegotiatedSerializer = legacyscheme.Codecs
client, err := restclient.UnversionedRESTClientFor(&clientConfigCopy)
if err != nil {
return nil, false, err
}
err = client.Get().AbsPath("/apis").Do().Error()
if apierrors.IsUnauthorized(err) {
glog.Warningf("Token for %s/%s did not authenticate correctly: %v", sa.Name, sa.Namespace, err)
return nil, false, nil
}
return clientConfig, true, nil
}
func (b SAControllerClientBuilder) ConfigOrDie(name string) *restclient.Config {
clientConfig, err := b.Config(name)
if err != nil {
glog.Fatal(err)
}
return clientConfig
}
func (b SAControllerClientBuilder) Client(name string) (clientset.Interface, error) {
clientConfig, err := b.Config(name)
if err != nil {
return nil, err
}
return clientset.NewForConfig(clientConfig)
}
func (b SAControllerClientBuilder) ClientOrDie(name string) clientset.Interface {
client, err := b.Client(name)
if err != nil {
glog.Fatal(err)
}
return client
}
func (b SAControllerClientBuilder) ClientGoClient(name string) (clientset.Interface, error) {
clientConfig, err := b.Config(name)
if err != nil {
return nil, err
}
return clientset.NewForConfig(clientConfig)
}
func (b SAControllerClientBuilder) ClientGoClientOrDie(name string) clientset.Interface {
client, err := b.ClientGoClient(name)
if err != nil {
glog.Fatal(err)
}
return client
}
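// Editor's sketch, not part of the original source: the service-account-backed
// builder needs core and authentication clients plus a privileged namespace.
// rootClientConfig and rootClient are assumed to exist in the caller.
//
//	builder := SAControllerClientBuilder{
//		ClientConfig:         rootClientConfig,
//		CoreClient:           rootClient.CoreV1(),
//		AuthenticationClient: rootClient.AuthenticationV1(),
//		Namespace:            "kube-system",
//	}
//	// Config blocks until a valid token secret for the named service account appears.
//	cfg, err := builder.Config("deployment-controller")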


@@ -1,86 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"node_controller.go",
"pvlcontroller.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/cloud",
deps = [
"//pkg/api/v1/node:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/util/node:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/util/node:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"node_controller_test.go",
"pvlcontroller_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/fake:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/testutil:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@@ -1,10 +0,0 @@
approvers:
- thockin
- luxas
- wlan0
- andrewsykim
reviewers:
- thockin
- luxas
- wlan0
- andrewsykim


@@ -1,539 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloud
import (
"context"
"errors"
"fmt"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
clientretry "k8s.io/client-go/util/retry"
nodeutilv1 "k8s.io/kubernetes/pkg/api/v1/node"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
nodectrlutil "k8s.io/kubernetes/pkg/controller/util/node"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
nodeutil "k8s.io/kubernetes/pkg/util/node"
)
var UpdateNodeSpecBackoff = wait.Backoff{
Steps: 20,
Duration: 50 * time.Millisecond,
Jitter: 1.0,
}
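// CloudNodeController reconciles node objects against information reported
// by the cloud provider.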
type CloudNodeController struct {
nodeInformer coreinformers.NodeInformer
kubeClient clientset.Interface
recorder record.EventRecorder
cloud cloudprovider.Interface
// Value controlling NodeController monitoring period, i.e. how often NodeController
// checks node status posted from kubelet. This value should be lower than nodeMonitorGracePeriod
// set in controller-manager
nodeMonitorPeriod time.Duration
nodeStatusUpdateFrequency time.Duration
}
const (
// nodeStatusUpdateRetry controls the number of retries of writing NodeStatus update.
nodeStatusUpdateRetry = 5
// The amount of time the nodecontroller should sleep between retrying NodeStatus updates
retrySleepTime = 20 * time.Millisecond
)
// NewCloudNodeController creates a CloudNodeController object
func NewCloudNodeController(
nodeInformer coreinformers.NodeInformer,
kubeClient clientset.Interface,
cloud cloudprovider.Interface,
nodeMonitorPeriod time.Duration,
nodeStatusUpdateFrequency time.Duration) *CloudNodeController {
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"})
eventBroadcaster.StartLogging(glog.Infof)
if kubeClient != nil {
glog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
} else {
glog.V(0).Infof("No api server defined - no events will be sent to API server.")
}
cnc := &CloudNodeController{
nodeInformer: nodeInformer,
kubeClient: kubeClient,
recorder: recorder,
cloud: cloud,
nodeMonitorPeriod: nodeMonitorPeriod,
nodeStatusUpdateFrequency: nodeStatusUpdateFrequency,
}
// Use shared informer to listen to add/update of nodes. Note that any nodes
// that exist before node controller starts will show up in the update method
cnc.nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: cnc.AddCloudNode,
UpdateFunc: cnc.UpdateCloudNode,
})
return cnc
}
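// A minimal usage sketch (hypothetical wiring, not part of this file): the
// controller is typically constructed by the cloud-controller-manager with a
// shared informer factory and started once; the factory feeds the add/update
// handlers registered above.
//
//	factory := informers.NewSharedInformerFactory(kubeClient, 0)
//	cnc := NewCloudNodeController(factory.Core().V1().Nodes(), kubeClient, cloud,
//		5*time.Second /* nodeMonitorPeriod */, 10*time.Second /* nodeStatusUpdateFrequency */)
//	factory.Start(stopCh)
//	go cnc.Run(stopCh)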
// This controller deletes a node if kubelet is not reporting
// and the node is gone from the cloud provider.
func (cnc *CloudNodeController) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
// The following loops communicate with the APIServer with a worst-case complexity
// of O(num_nodes) per cycle. These functions are justified here because these events fire
// very infrequently. DO NOT MODIFY this to perform frequent operations.
// Start a loop to periodically update the node addresses obtained from the cloud
go wait.Until(cnc.UpdateNodeStatus, cnc.nodeStatusUpdateFrequency, stopCh)
// Start a loop to periodically check if any nodes have been deleted from cloudprovider
go wait.Until(cnc.MonitorNode, cnc.nodeMonitorPeriod, stopCh)
}
// UpdateNodeStatus updates the node status, such as node addresses
func (cnc *CloudNodeController) UpdateNodeStatus() {
instances, ok := cnc.cloud.Instances()
if !ok {
utilruntime.HandleError(fmt.Errorf("failed to get instances from cloud provider"))
return
}
nodes, err := cnc.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
glog.Errorf("Error monitoring node status: %v", err)
return
}
for i := range nodes.Items {
cnc.updateNodeAddress(&nodes.Items[i], instances)
}
}
// updateNodeAddress updates the nodeAddress of a single node
func (cnc *CloudNodeController) updateNodeAddress(node *v1.Node, instances cloudprovider.Instances) {
// Do not process nodes that are still tainted
cloudTaint := getCloudTaint(node.Spec.Taints)
if cloudTaint != nil {
glog.V(5).Infof("This node %s is still tainted. Will not process.", node.Name)
return
}
// A node that isn't present according to the cloud provider shouldn't have its addresses updated
exists, err := ensureNodeExistsByProviderID(instances, node)
if err != nil {
// Continue to update the node addresses when we cannot be sure whether the node still exists
glog.Errorf("%v", err)
} else if !exists {
glog.V(4).Infof("The node %s is no longer present according to the cloud provider, do not process.", node.Name)
return
}
nodeAddresses, err := getNodeAddressesByProviderIDOrName(instances, node)
if err != nil {
glog.Errorf("%v", err)
return
}
if len(nodeAddresses) == 0 {
glog.V(5).Infof("Skipping node address update for node %q since cloud provider did not return any", node.Name)
return
}
// Check if a hostname address exists in the cloud provided addresses
hostnameExists := false
for i := range nodeAddresses {
if nodeAddresses[i].Type == v1.NodeHostName {
hostnameExists = true
}
}
// If hostname was not present in cloud provided addresses, use the hostname
// from the existing node (populated by kubelet)
if !hostnameExists {
for _, addr := range node.Status.Addresses {
if addr.Type == v1.NodeHostName {
nodeAddresses = append(nodeAddresses, addr)
}
}
}
// If nodeIP was suggested by user, ensure that
// it can be found in the cloud as well (consistent with the behaviour in kubelet)
if nodeIP, ok := ensureNodeProvidedIPExists(node, nodeAddresses); ok {
if nodeIP == nil {
glog.Errorf("Specified Node IP not found in cloudprovider")
return
}
}
newNode := node.DeepCopy()
newNode.Status.Addresses = nodeAddresses
if !nodeAddressesChangeDetected(node.Status.Addresses, newNode.Status.Addresses) {
return
}
_, _, err = nodeutil.PatchNodeStatus(cnc.kubeClient.CoreV1(), types.NodeName(node.Name), node, newNode)
if err != nil {
glog.Errorf("Error patching node with cloud ip addresses = [%v]", err)
}
}
// MonitorNode queries the cloud provider for non-ready nodes and deletes them
// if they cannot be found in the cloud provider
func (cnc *CloudNodeController) MonitorNode() {
instances, ok := cnc.cloud.Instances()
if !ok {
utilruntime.HandleError(fmt.Errorf("failed to get instances from cloud provider"))
return
}
nodes, err := cnc.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
glog.Errorf("Error monitoring node status: %v", err)
return
}
for i := range nodes.Items {
var currentReadyCondition *v1.NodeCondition
node := &nodes.Items[i]
// Try to get the current node status
// If node status is empty, then kubelet has not posted ready status yet. In this case, process next node
for rep := 0; rep < nodeStatusUpdateRetry; rep++ {
_, currentReadyCondition = nodeutilv1.GetNodeCondition(&node.Status, v1.NodeReady)
if currentReadyCondition != nil {
break
}
name := node.Name
node, err = cnc.kubeClient.CoreV1().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
glog.Errorf("Failed while getting a Node to retry updating NodeStatus. Probably Node %s was deleted.", name)
break
}
time.Sleep(retrySleepTime)
}
if currentReadyCondition == nil {
glog.Errorf("Update status of Node %v from CloudNodeController exceeds retry count or the Node was deleted.", node.Name)
continue
}
// If the known node status says that Node is NotReady, then check if the node has been removed
// from the cloud provider. If node cannot be found in cloudprovider, then delete the node immediately
if currentReadyCondition != nil {
if currentReadyCondition.Status != v1.ConditionTrue {
// We need to check this first so that the shutdown taint behaves consistently across all cloud providers.
// The current problem is that shutdown handling is not uniform: not all cloud providers
// delete a node from the Kubernetes cluster when its instance is shut down; see issue #46442
shutdown, err := nodectrlutil.ShutdownInCloudProvider(context.TODO(), cnc.cloud, node)
if err != nil {
glog.Errorf("Error getting data for node %s from cloud: %v", node.Name, err)
}
if shutdown && err == nil {
// if the node is shut down, add the shutdown taint
err = controller.AddOrUpdateTaintOnNode(cnc.kubeClient, node.Name, controller.ShutdownTaint)
if err != nil {
glog.Errorf("Error patching node taints: %v", err)
}
// Continue checking the remaining nodes since the current one is shut down.
continue
}
// Check with the cloud provider to see if the node still exists. If it
// doesn't, delete the node immediately.
exists, err := ensureNodeExistsByProviderID(instances, node)
if err != nil {
glog.Errorf("Error getting data for node %s from cloud: %v", node.Name, err)
continue
}
if exists {
// Continue checking the remaining nodes since the current one is fine.
continue
}
glog.V(2).Infof("Deleting node since it is no longer present in cloud provider: %s", node.Name)
ref := &v1.ObjectReference{
Kind: "Node",
Name: node.Name,
UID: types.UID(node.UID),
Namespace: "",
}
glog.V(2).Infof("Recording %s event message for node %s", "DeletingNode", node.Name)
cnc.recorder.Eventf(ref, v1.EventTypeNormal, fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name), "Node %s event: %s", node.Name, "DeletingNode")
go func(nodeName string) {
defer utilruntime.HandleCrash()
if err := cnc.kubeClient.CoreV1().Nodes().Delete(nodeName, nil); err != nil {
glog.Errorf("unable to delete node %q: %v", nodeName, err)
}
}(node.Name)
} else {
// if the shutdown taint exists, remove it
err = controller.RemoveTaintOffNode(cnc.kubeClient, node.Name, node, controller.ShutdownTaint)
if err != nil {
glog.Errorf("Error patching node taints: %v", err)
}
}
}
}
}
func (cnc *CloudNodeController) UpdateCloudNode(_, newObj interface{}) {
if _, ok := newObj.(*v1.Node); !ok {
utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", newObj))
return
}
cnc.AddCloudNode(newObj)
}
// AddCloudNode processes nodes that were added to the cluster, and cloud-initializes them if appropriate
func (cnc *CloudNodeController) AddCloudNode(obj interface{}) {
node := obj.(*v1.Node)
cloudTaint := getCloudTaint(node.Spec.Taints)
if cloudTaint == nil {
glog.V(2).Infof("This node %s is registered without the cloud taint. Will not process.", node.Name)
return
}
instances, ok := cnc.cloud.Instances()
if !ok {
utilruntime.HandleError(fmt.Errorf("failed to get instances from cloud provider"))
return
}
err := clientretry.RetryOnConflict(UpdateNodeSpecBackoff, func() error {
curNode, err := cnc.kubeClient.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
if err != nil {
return err
}
if curNode.Spec.ProviderID == "" {
providerID, err := cloudprovider.GetInstanceProviderID(context.TODO(), cnc.cloud, types.NodeName(curNode.Name))
if err == nil {
curNode.Spec.ProviderID = providerID
} else {
// we should attempt to set providerID on curNode, but
// we can continue if we fail since we will attempt to set
// node addresses given the node name in getNodeAddressesByProviderIDOrName
glog.Errorf("failed to set node provider id: %v", err)
}
}
nodeAddresses, err := getNodeAddressesByProviderIDOrName(instances, curNode)
if err != nil {
return err
}
// If user provided an IP address, ensure that IP address is found
// in the cloud provider before removing the taint on the node
if nodeIP, ok := ensureNodeProvidedIPExists(curNode, nodeAddresses); ok {
if nodeIP == nil {
return errors.New("failed to find kubelet node IP from cloud provider")
}
}
if instanceType, err := getInstanceTypeByProviderIDOrName(instances, curNode); err != nil {
return err
} else if instanceType != "" {
glog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelInstanceType, instanceType)
curNode.ObjectMeta.Labels[kubeletapis.LabelInstanceType] = instanceType
}
// TODO(wlan0): Move this logic to the route controller using the node taint instead of condition
// Since there are node taints, do we still need this?
// This condition marks the node as unusable until routes are initialized in the cloud provider
if cnc.cloud.ProviderName() == "gce" {
curNode.Status.Conditions = append(curNode.Status.Conditions, v1.NodeCondition{
Type: v1.NodeNetworkUnavailable,
Status: v1.ConditionTrue,
Reason: "NoRouteCreated",
Message: "Node created without a route",
LastTransitionTime: metav1.Now(),
})
}
if zones, ok := cnc.cloud.Zones(); ok {
zone, err := getZoneByProviderIDOrName(zones, curNode)
if err != nil {
return fmt.Errorf("failed to get zone from cloud provider: %v", err)
}
if zone.FailureDomain != "" {
glog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneFailureDomain, zone.FailureDomain)
curNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] = zone.FailureDomain
}
if zone.Region != "" {
glog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneRegion, zone.Region)
curNode.ObjectMeta.Labels[kubeletapis.LabelZoneRegion] = zone.Region
}
}
curNode.Spec.Taints = excludeTaintFromList(curNode.Spec.Taints, *cloudTaint)
_, err = cnc.kubeClient.CoreV1().Nodes().Update(curNode)
if err != nil {
return err
}
// After adding, call updateNodeAddress to set the cloud-provider-provided IPAddresses
// so that users do not see any significant delay in IP addresses being filled into the node
cnc.updateNodeAddress(curNode, instances)
return nil
})
if err != nil {
utilruntime.HandleError(err)
return
}
glog.Infof("Successfully initialized node %s with cloud provider", node.Name)
}
func getCloudTaint(taints []v1.Taint) *v1.Taint {
for _, taint := range taints {
if taint.Key == algorithm.TaintExternalCloudProvider {
return &taint
}
}
return nil
}
func excludeTaintFromList(taints []v1.Taint, toExclude v1.Taint) []v1.Taint {
newTaints := []v1.Taint{}
for _, taint := range taints {
if toExclude.MatchTaint(&taint) {
continue
}
newTaints = append(newTaints, taint)
}
return newTaints
}
// ensureNodeExistsByProviderID checks whether the instance exists by the provider ID.
// If the provider ID in the spec is empty, it calls InstanceID with the node name to fetch the provider ID.
func ensureNodeExistsByProviderID(instances cloudprovider.Instances, node *v1.Node) (bool, error) {
providerID := node.Spec.ProviderID
if providerID == "" {
var err error
providerID, err = instances.InstanceID(context.TODO(), types.NodeName(node.Name))
if err != nil {
if err == cloudprovider.InstanceNotFound {
return false, nil
}
return false, err
}
if providerID == "" {
glog.Warningf("Cannot find valid providerID for node name %q, assuming non existence", node.Name)
return false, nil
}
}
return instances.InstanceExistsByProviderID(context.TODO(), providerID)
}
func getNodeAddressesByProviderIDOrName(instances cloudprovider.Instances, node *v1.Node) ([]v1.NodeAddress, error) {
nodeAddresses, err := instances.NodeAddressesByProviderID(context.TODO(), node.Spec.ProviderID)
if err != nil {
providerIDErr := err
nodeAddresses, err = instances.NodeAddresses(context.TODO(), types.NodeName(node.Name))
if err != nil {
return nil, fmt.Errorf("NodeAddress: Error fetching by providerID: %v Error fetching by NodeName: %v", providerIDErr, err)
}
}
return nodeAddresses, nil
}
func nodeAddressesChangeDetected(addressSet1, addressSet2 []v1.NodeAddress) bool {
if len(addressSet1) != len(addressSet2) {
return true
}
addressMap1 := map[v1.NodeAddressType]string{}
addressMap2 := map[v1.NodeAddressType]string{}
for i := range addressSet1 {
addressMap1[addressSet1[i].Type] = addressSet1[i].Address
addressMap2[addressSet2[i].Type] = addressSet2[i].Address
}
for k, v := range addressMap1 {
if addressMap2[k] != v {
return true
}
}
return false
}
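// Illustrative example: comparing [{Hostname "n1"}, {InternalIP "10.0.0.1"}]
// against [{InternalIP "10.0.0.2"}, {Hostname "n1"}] reports a change (the
// InternalIP differs), while the same addresses in a different order do not,
// since the comparison is keyed by address type.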
func ensureNodeProvidedIPExists(node *v1.Node, nodeAddresses []v1.NodeAddress) (*v1.NodeAddress, bool) {
var nodeIP *v1.NodeAddress
nodeIPExists := false
if providedIP, ok := node.ObjectMeta.Annotations[kubeletapis.AnnotationProvidedIPAddr]; ok {
nodeIPExists = true
for i := range nodeAddresses {
if nodeAddresses[i].Address == providedIP {
nodeIP = &nodeAddresses[i]
break
}
}
}
return nodeIP, nodeIPExists
}
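// Illustrative example: for a node annotated via kubeletapis.AnnotationProvidedIPAddr
// with "10.0.0.5", this returns (&addr, true) when one of the cloud-reported
// nodeAddresses is 10.0.0.5, and (nil, true) when none matches; (nil, false)
// means the user never provided a node IP at all.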
func getInstanceTypeByProviderIDOrName(instances cloudprovider.Instances, node *v1.Node) (string, error) {
instanceType, err := instances.InstanceTypeByProviderID(context.TODO(), node.Spec.ProviderID)
if err != nil {
providerIDErr := err
instanceType, err = instances.InstanceType(context.TODO(), types.NodeName(node.Name))
if err != nil {
return "", fmt.Errorf("InstanceType: Error fetching by providerID: %v Error fetching by NodeName: %v", providerIDErr, err)
}
}
return instanceType, err
}
// getZoneByProviderIDOrName will attempt to get the zone of a node using its providerID,
// then its name. If both attempts fail, an error is returned.
func getZoneByProviderIDOrName(zones cloudprovider.Zones, node *v1.Node) (cloudprovider.Zone, error) {
zone, err := zones.GetZoneByProviderID(context.TODO(), node.Spec.ProviderID)
if err != nil {
providerIDErr := err
zone, err = zones.GetZoneByNodeName(context.TODO(), types.NodeName(node.Name))
if err != nil {
return cloudprovider.Zone{}, fmt.Errorf("Zone: Error fetching by providerID: %v Error fetching by NodeName: %v", providerIDErr, err)
}
}
return zone, nil
}

File diff suppressed because it is too large

View File

@ -1,282 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloud
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
)
const initializerName = "pvlabel.kubernetes.io"
// PersistentVolumeLabelController handles adding labels to persistent volumes when they are created
type PersistentVolumeLabelController struct {
cloud cloudprovider.Interface
kubeClient kubernetes.Interface
pvlController cache.Controller
pvlIndexer cache.Indexer
volumeLister corelisters.PersistentVolumeLister
syncHandler func(key string) error
// queue is where incoming work is placed to de-dup and to allow "easy" rate limited requeues on errors
queue workqueue.RateLimitingInterface
}
// NewPersistentVolumeLabelController creates a PersistentVolumeLabelController object
func NewPersistentVolumeLabelController(
kubeClient kubernetes.Interface,
cloud cloudprovider.Interface) *PersistentVolumeLabelController {
pvlc := &PersistentVolumeLabelController{
cloud: cloud,
kubeClient: kubeClient,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvLabels"),
}
pvlc.syncHandler = pvlc.addLabels
pvlc.pvlIndexer, pvlc.pvlController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.IncludeUninitialized = true
return kubeClient.CoreV1().PersistentVolumes().List(options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.IncludeUninitialized = true
return kubeClient.CoreV1().PersistentVolumes().Watch(options)
},
},
&v1.PersistentVolume{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
key, err := cache.MetaNamespaceKeyFunc(obj)
if err == nil {
pvlc.queue.Add(key)
}
},
},
cache.Indexers{},
)
pvlc.volumeLister = corelisters.NewPersistentVolumeLister(pvlc.pvlIndexer)
return pvlc
}
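// A minimal usage sketch (hypothetical wiring, not part of this file):
//
//	pvlc := NewPersistentVolumeLabelController(kubeClient, cloud)
//	go pvlc.Run(5 /* threadiness */, stopCh)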
// Run starts a controller that adds labels to persistent volumes
func (pvlc *PersistentVolumeLabelController) Run(threadiness int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer pvlc.queue.ShutDown()
glog.Infof("Starting PersistentVolumeLabelController")
defer glog.Infof("Shutting down PersistentVolumeLabelController")
go pvlc.pvlController.Run(stopCh)
if !controller.WaitForCacheSync("persistent volume label", stopCh, pvlc.pvlController.HasSynced) {
return
}
// start up your worker threads based on threadiness. Some controllers have multiple kinds of workers
for i := 0; i < threadiness; i++ {
// runWorker will loop until "something bad" happens. The .Until will then rekick the worker
// after one second
go wait.Until(pvlc.runWorker, time.Second, stopCh)
}
// wait until we're told to stop
<-stopCh
}
func (pvlc *PersistentVolumeLabelController) runWorker() {
// hot loop until we're told to stop. processNextWorkItem will automatically wait until there's work
// available, so we don't worry about secondary waits
for pvlc.processNextWorkItem() {
}
}
// processNextWorkItem deals with one key off the queue. It returns false when it's time to quit.
func (pvlc *PersistentVolumeLabelController) processNextWorkItem() bool {
// pull the next work item from queue. It should be a key we use to lookup something in a cache
keyObj, quit := pvlc.queue.Get()
if quit {
return false
}
// you always have to indicate to the queue that you've completed a piece of work
defer pvlc.queue.Done(keyObj)
key := keyObj.(string)
// do your work on the key. This method contains your "do stuff" logic
err := pvlc.syncHandler(key)
if err == nil {
// if you had no error, tell the queue to stop tracking history for your key. This will
// reset things like failure counts for per-item rate limiting
pvlc.queue.Forget(key)
return true
}
// there was a failure so be sure to report it. This method allows for pluggable error handling
// which can be used for things like cluster-monitoring
utilruntime.HandleError(fmt.Errorf("%v failed with : %v", key, err))
// since we failed, we should requeue the item to work on later. This method adds a backoff
// to avoid hotlooping on particular items (they're probably still not going to work right away)
// and provides overall controller protection (everything I've done is broken, this controller
// needs to calm down or it can starve other useful work).
pvlc.queue.AddRateLimited(key)
return true
}
// addLabels adds appropriate labels to persistent volumes and sets the
// volume as available if successful.
func (pvlc *PersistentVolumeLabelController) addLabels(key string) error {
_, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return fmt.Errorf("error getting name of volume %q to get volume from informer: %v", key, err)
}
volume, err := pvlc.volumeLister.Get(name)
if errors.IsNotFound(err) {
return nil
} else if err != nil {
return fmt.Errorf("error getting volume %s from informer: %v", name, err)
}
return pvlc.addLabelsToVolume(volume)
}
func (pvlc *PersistentVolumeLabelController) addLabelsToVolume(vol *v1.PersistentVolume) error {
var volumeLabels map[string]string
// Only add labels if this controller is the next pending initializer.
if needsInitialization(vol.Initializers, initializerName) {
if labeler, ok := (pvlc.cloud).(cloudprovider.PVLabeler); ok {
labels, err := labeler.GetLabelsForVolume(context.TODO(), vol)
if err != nil {
return fmt.Errorf("error querying volume %v: %v", vol.Spec, err)
}
volumeLabels = labels
} else {
glog.V(4).Info("cloud provider does not support PVLabeler")
}
return pvlc.updateVolume(vol, volumeLabels)
}
return nil
}
func (pvlc *PersistentVolumeLabelController) createPatch(vol *v1.PersistentVolume, volLabels map[string]string) ([]byte, error) {
volName := vol.Name
newVolume := vol.DeepCopyObject().(*v1.PersistentVolume)
if newVolume.Labels == nil {
newVolume.Labels = make(map[string]string)
}
for k, v := range volLabels {
newVolume.Labels[k] = v
}
newVolume.Initializers = removeInitializer(newVolume.Initializers, initializerName)
glog.V(4).Infof("removed initializer on PersistentVolume %s", newVolume.Name)
oldData, err := json.Marshal(vol)
if err != nil {
return nil, fmt.Errorf("failed to marshal old persistentvolume %#v for persistentvolume %q: %v", vol, volName, err)
}
newData, err := json.Marshal(newVolume)
if err != nil {
return nil, fmt.Errorf("failed to marshal new persistentvolume %#v for persistentvolume %q: %v", newVolume, volName, err)
}
patch, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.PersistentVolume{})
if err != nil {
return nil, fmt.Errorf("failed to create patch for persistentvolume %q: %v", volName, err)
}
return patch, nil
}
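// Illustrative sketch of the resulting two-way merge patch for volLabels
// {"a": "1"} on a volume whose only pending initializer is pvlabel.kubernetes.io
// (the exact bytes depend on the strategic-merge-patch library):
//
//	{"metadata":{"initializers":null,"labels":{"a":"1"}}}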
func (pvlc *PersistentVolumeLabelController) updateVolume(vol *v1.PersistentVolume, volLabels map[string]string) error {
volName := vol.Name
glog.V(4).Infof("updating PersistentVolume %s", volName)
patchBytes, err := pvlc.createPatch(vol, volLabels)
if err != nil {
return err
}
_, err = pvlc.kubeClient.CoreV1().PersistentVolumes().Patch(string(volName), types.StrategicMergePatchType, patchBytes)
if err != nil {
return fmt.Errorf("failed to update PersistentVolume %s: %v", volName, err)
}
glog.V(4).Infof("updated PersistentVolume %s", volName)
return err
}
func removeInitializer(initializers *metav1.Initializers, name string) *metav1.Initializers {
if initializers == nil {
return nil
}
var updated []metav1.Initializer
for _, pending := range initializers.Pending {
if pending.Name != name {
updated = append(updated, pending)
}
}
if len(updated) == len(initializers.Pending) {
return initializers
}
if len(updated) == 0 {
return nil
}
return &metav1.Initializers{Pending: updated}
}
// needsInitialization checks whether or not the PVL is the next pending initializer.
func needsInitialization(initializers *metav1.Initializers, name string) bool {
if initializers == nil {
return false
}
if len(initializers.Pending) == 0 {
return false
}
// There is at least one initializer still pending so check to
// see if the PVL is the next in line.
return initializers.Pending[0].Name == name
}
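// Illustrative example: with Pending = [pvlabel.kubernetes.io, other] this
// controller proceeds; with Pending = [other, pvlabel.kubernetes.io] it waits
// for "other" to run first, matching the initializer ordering contract.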

View File

@ -1,193 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloud
import (
"encoding/json"
"testing"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
)
func TestCreatePatch(t *testing.T) {
ignoredPV := v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "noncloud",
Initializers: &metav1.Initializers{
Pending: []metav1.Initializer{
{
Name: initializerName,
},
},
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/",
},
},
},
}
awsPV := v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "awsPV",
Initializers: &metav1.Initializers{
Pending: []metav1.Initializer{
{
Name: initializerName,
},
},
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: "123",
},
},
},
}
testCases := map[string]struct {
vol v1.PersistentVolume
labels map[string]string
}{
"non-cloud PV": {
vol: ignoredPV,
labels: nil,
},
"no labels": {
vol: awsPV,
labels: nil,
},
"cloudprovider returns nil, nil": {
vol: awsPV,
labels: nil,
},
"cloudprovider labels": {
vol: awsPV,
labels: map[string]string{"a": "1", "b": "2"},
},
}
for d, tc := range testCases {
cloud := &fakecloud.FakeCloud{}
client := fake.NewSimpleClientset()
pvlController := NewPersistentVolumeLabelController(client, cloud)
patch, err := pvlController.createPatch(&tc.vol, tc.labels)
if err != nil {
t.Errorf("%s: createPatch returned err: %v", d, err)
}
obj := &v1.PersistentVolume{}
json.Unmarshal(patch, obj)
if tc.labels != nil {
for k, v := range tc.labels {
if obj.ObjectMeta.Labels[k] != v {
t.Errorf("%s: label %s expected %s got %s", d, k, v, obj.ObjectMeta.Labels[k])
}
}
}
if obj.ObjectMeta.Initializers != nil {
t.Errorf("%s: initializer wasn't removed: %v", d, obj.ObjectMeta.Initializers)
}
}
}
func TestAddLabelsToVolume(t *testing.T) {
pv := v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "awsPV",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: "123",
},
},
},
}
testCases := map[string]struct {
vol v1.PersistentVolume
initializers *metav1.Initializers
shouldLabel bool
}{
"PV without initializer": {
vol: pv,
initializers: nil,
shouldLabel: false,
},
"PV with initializer to remove": {
vol: pv,
initializers: &metav1.Initializers{Pending: []metav1.Initializer{{Name: initializerName}}},
shouldLabel: true,
},
"PV with other initializers only": {
vol: pv,
initializers: &metav1.Initializers{Pending: []metav1.Initializer{{Name: "OtherInit"}}},
shouldLabel: false,
},
"PV with other initializers first": {
vol: pv,
initializers: &metav1.Initializers{Pending: []metav1.Initializer{{Name: "OtherInit"}, {Name: initializerName}}},
shouldLabel: false,
},
}
for d, tc := range testCases {
labeledCh := make(chan bool, 1)
client := fake.NewSimpleClientset()
client.PrependReactor("patch", "persistentvolumes", func(action core.Action) (handled bool, ret runtime.Object, err error) {
patch := action.(core.PatchActionImpl).GetPatch()
obj := &v1.PersistentVolume{}
json.Unmarshal(patch, obj)
if obj.ObjectMeta.Labels["a"] != "1" {
return false, nil, nil
}
labeledCh <- true
return true, nil, nil
})
fakeCloud := &fakecloud.FakeCloud{
VolumeLabelMap: map[string]map[string]string{"awsPV": {"a": "1"}},
}
pvlController := &PersistentVolumeLabelController{kubeClient: client, cloud: fakeCloud}
tc.vol.ObjectMeta.Initializers = tc.initializers
pvlController.addLabelsToVolume(&tc.vol)
select {
case l := <-labeledCh:
if l != tc.shouldLabel {
t.Errorf("%s: label of pv failed. expected %t got %t", d, tc.shouldLabel, l)
}
case <-time.After(500 * time.Millisecond):
if tc.shouldLabel {
t.Errorf("%s: timed out waiting for label notification", d)
}
}
}
}

View File

@ -1,56 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["clusterroleaggregation_controller.go"],
importpath = "k8s.io/kubernetes/pkg/controller/clusterroleaggregation",
visibility = ["//visibility:public"],
deps = [
"//pkg/controller:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers/rbac/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/rbac/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["clusterroleaggregation_controller_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/controller:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/listers/rbac/v1:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
],
)

View File

@ -1,213 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clusterroleaggregation
import (
"fmt"
"sort"
"time"
"github.com/golang/glog"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
rbacinformers "k8s.io/client-go/informers/rbac/v1"
rbacclient "k8s.io/client-go/kubernetes/typed/rbac/v1"
rbaclisters "k8s.io/client-go/listers/rbac/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/controller"
)
// ClusterRoleAggregationController is a controller to combine cluster roles
type ClusterRoleAggregationController struct {
clusterRoleClient rbacclient.ClusterRolesGetter
clusterRoleLister rbaclisters.ClusterRoleLister
clusterRolesSynced cache.InformerSynced
syncHandler func(key string) error
queue workqueue.RateLimitingInterface
}
// NewClusterRoleAggregation creates a new controller
func NewClusterRoleAggregation(clusterRoleInformer rbacinformers.ClusterRoleInformer, clusterRoleClient rbacclient.ClusterRolesGetter) *ClusterRoleAggregationController {
c := &ClusterRoleAggregationController{
clusterRoleClient: clusterRoleClient,
clusterRoleLister: clusterRoleInformer.Lister(),
clusterRolesSynced: clusterRoleInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ClusterRoleAggregator"),
}
c.syncHandler = c.syncClusterRole
clusterRoleInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
c.enqueue()
},
UpdateFunc: func(old, cur interface{}) {
c.enqueue()
},
DeleteFunc: func(uncast interface{}) {
c.enqueue()
},
})
return c
}
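// A minimal usage sketch (hypothetical wiring, not part of this file):
//
//	factory := informers.NewSharedInformerFactory(kubeClient, 0)
//	c := NewClusterRoleAggregation(factory.Rbac().V1().ClusterRoles(), kubeClient.RbacV1())
//	factory.Start(stopCh)
//	go c.Run(5, stopCh)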
func (c *ClusterRoleAggregationController) syncClusterRole(key string) error {
_, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return err
}
sharedClusterRole, err := c.clusterRoleLister.Get(name)
if errors.IsNotFound(err) {
return nil
}
if err != nil {
return err
}
if sharedClusterRole.AggregationRule == nil {
return nil
}
newPolicyRules := []rbacv1.PolicyRule{}
for i := range sharedClusterRole.AggregationRule.ClusterRoleSelectors {
selector := sharedClusterRole.AggregationRule.ClusterRoleSelectors[i]
runtimeLabelSelector, err := metav1.LabelSelectorAsSelector(&selector)
if err != nil {
return err
}
clusterRoles, err := c.clusterRoleLister.List(runtimeLabelSelector)
if err != nil {
return err
}
sort.Sort(byName(clusterRoles))
for i := range clusterRoles {
if clusterRoles[i].Name == sharedClusterRole.Name {
continue
}
for j := range clusterRoles[i].Rules {
currRule := clusterRoles[i].Rules[j]
if !ruleExists(newPolicyRules, currRule) {
newPolicyRules = append(newPolicyRules, currRule)
}
}
}
}
if equality.Semantic.DeepEqual(newPolicyRules, sharedClusterRole.Rules) {
return nil
}
// we need to update
clusterRole := sharedClusterRole.DeepCopy()
clusterRole.Rules = nil
for _, rule := range newPolicyRules {
clusterRole.Rules = append(clusterRole.Rules, *rule.DeepCopy())
}
_, err = c.clusterRoleClient.ClusterRoles().Update(clusterRole)
return err
}
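// Illustrative example: an aggregated role whose selector matches the
// (hypothetical) label rbac.example.com/aggregate=true ends up with the union
// of the rules of every matching ClusterRole, visited in name order, with
// duplicate rules and the aggregated role itself skipped.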
func ruleExists(haystack []rbacv1.PolicyRule, needle rbacv1.PolicyRule) bool {
for _, curr := range haystack {
if equality.Semantic.DeepEqual(curr, needle) {
return true
}
}
return false
}
// Run starts the controller and blocks until stopCh is closed.
func (c *ClusterRoleAggregationController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()
glog.Infof("Starting ClusterRoleAggregator")
defer glog.Infof("Shutting down ClusterRoleAggregator")
if !controller.WaitForCacheSync("ClusterRoleAggregator", stopCh, c.clusterRolesSynced) {
return
}
for i := 0; i < workers; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
<-stopCh
}
func (c *ClusterRoleAggregationController) runWorker() {
for c.processNextWorkItem() {
}
}
func (c *ClusterRoleAggregationController) processNextWorkItem() bool {
dsKey, quit := c.queue.Get()
if quit {
return false
}
defer c.queue.Done(dsKey)
err := c.syncHandler(dsKey.(string))
if err == nil {
c.queue.Forget(dsKey)
return true
}
utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err))
c.queue.AddRateLimited(dsKey)
return true
}
func (c *ClusterRoleAggregationController) enqueue() {
// this is unusual, but since the set of all clusterroles is small and we don't know the dependency
// graph, just queue up everything each time. This allows errors to be selectively retried if there
// is a problem updating a single role
allClusterRoles, err := c.clusterRoleLister.List(labels.Everything())
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't list all objects %v", err))
return
}
for _, clusterRole := range allClusterRoles {
// only queue ones that we may need to aggregate
if clusterRole.AggregationRule == nil {
continue
}
key, err := controller.KeyFunc(clusterRole)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", clusterRole, err))
return
}
c.queue.Add(key)
}
}
type byName []*rbacv1.ClusterRole
func (a byName) Len() int { return len(a) }
func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byName) Less(i, j int) bool { return a[i].Name < a[j].Name }
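// Sorting by name makes the aggregation output deterministic: for roles
// "chisel" and "hammer" matching the same selector, chisel's rules are always
// appended before hammer's regardless of list order (see the expectations in
// the test file below).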

View File

@ -1,182 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clusterroleaggregation
import (
"testing"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/diff"
fakeclient "k8s.io/client-go/kubernetes/fake"
rbaclisters "k8s.io/client-go/listers/rbac/v1"
clienttesting "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/controller"
)
func TestSyncClusterRole(t *testing.T) {
hammerRules := func() []rbacv1.PolicyRule {
return []rbacv1.PolicyRule{
{Verbs: []string{"hammer"}, Resources: []string{"nails"}},
{Verbs: []string{"hammer"}, Resources: []string{"wedges"}},
}
}
chiselRules := func() []rbacv1.PolicyRule {
return []rbacv1.PolicyRule{
{Verbs: []string{"chisel"}, Resources: []string{"mortises"}},
}
}
sawRules := func() []rbacv1.PolicyRule {
return []rbacv1.PolicyRule{
{Verbs: []string{"saw"}, Resources: []string{"boards"}},
}
}
role := func(name string, labels map[string]string, rules []rbacv1.PolicyRule) *rbacv1.ClusterRole {
return &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: name, Labels: labels},
Rules: rules,
}
}
combinedRole := func(selectors []map[string]string, rules ...[]rbacv1.PolicyRule) *rbacv1.ClusterRole {
ret := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: "combined"},
AggregationRule: &rbacv1.AggregationRule{},
}
for _, selector := range selectors {
ret.AggregationRule.ClusterRoleSelectors = append(ret.AggregationRule.ClusterRoleSelectors,
metav1.LabelSelector{MatchLabels: selector})
}
for _, currRules := range rules {
ret.Rules = append(ret.Rules, currRules...)
}
return ret
}
tests := []struct {
name string
startingClusterRoles []*rbacv1.ClusterRole
clusterRoleToSync string
expectedClusterRole *rbacv1.ClusterRole
}{
{
name: "remove dead rules",
startingClusterRoles: []*rbacv1.ClusterRole{
role("hammer", map[string]string{"foo": "bar"}, hammerRules()),
combinedRole([]map[string]string{{"foo": "bar"}}, sawRules()),
},
clusterRoleToSync: "combined",
expectedClusterRole: combinedRole([]map[string]string{{"foo": "bar"}}, hammerRules()),
},
{
name: "strip rules",
startingClusterRoles: []*rbacv1.ClusterRole{
role("hammer", map[string]string{"foo": "not-bar"}, hammerRules()),
combinedRole([]map[string]string{{"foo": "bar"}}, hammerRules()),
},
clusterRoleToSync: "combined",
expectedClusterRole: combinedRole([]map[string]string{{"foo": "bar"}}),
},
{
name: "select properly and put in order",
startingClusterRoles: []*rbacv1.ClusterRole{
role("hammer", map[string]string{"foo": "bar"}, hammerRules()),
role("chisel", map[string]string{"foo": "bar"}, chiselRules()),
role("saw", map[string]string{"foo": "not-bar"}, sawRules()),
combinedRole([]map[string]string{{"foo": "bar"}}),
},
clusterRoleToSync: "combined",
expectedClusterRole: combinedRole([]map[string]string{{"foo": "bar"}}, chiselRules(), hammerRules()),
},
{
name: "select properly with multiple selectors",
startingClusterRoles: []*rbacv1.ClusterRole{
role("hammer", map[string]string{"foo": "bar"}, hammerRules()),
role("chisel", map[string]string{"foo": "bar"}, chiselRules()),
role("saw", map[string]string{"foo": "not-bar"}, sawRules()),
combinedRole([]map[string]string{{"foo": "bar"}, {"foo": "not-bar"}}),
},
clusterRoleToSync: "combined",
expectedClusterRole: combinedRole([]map[string]string{{"foo": "bar"}, {"foo": "not-bar"}}, chiselRules(), hammerRules(), sawRules()),
},
{
name: "select properly remove duplicates",
startingClusterRoles: []*rbacv1.ClusterRole{
role("hammer", map[string]string{"foo": "bar"}, hammerRules()),
role("chisel", map[string]string{"foo": "bar"}, chiselRules()),
role("saw", map[string]string{"foo": "bar"}, sawRules()),
role("other-saw", map[string]string{"foo": "not-bar"}, sawRules()),
combinedRole([]map[string]string{{"foo": "bar"}, {"foo": "not-bar"}}),
},
clusterRoleToSync: "combined",
expectedClusterRole: combinedRole([]map[string]string{{"foo": "bar"}, {"foo": "not-bar"}}, chiselRules(), hammerRules(), sawRules()),
},
{
name: "no diff skip",
startingClusterRoles: []*rbacv1.ClusterRole{
role("hammer", map[string]string{"foo": "bar"}, hammerRules()),
combinedRole([]map[string]string{{"foo": "bar"}}, hammerRules()),
},
clusterRoleToSync: "combined",
expectedClusterRole: nil,
}}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
indexer := cache.NewIndexer(controller.KeyFunc, cache.Indexers{})
objs := []runtime.Object{}
for _, obj := range test.startingClusterRoles {
objs = append(objs, obj)
indexer.Add(obj)
}
fakeClient := fakeclient.NewSimpleClientset(objs...)
c := ClusterRoleAggregationController{
clusterRoleClient: fakeClient.RbacV1(),
clusterRoleLister: rbaclisters.NewClusterRoleLister(indexer),
}
err := c.syncClusterRole(test.clusterRoleToSync)
if err != nil {
t.Fatal(err)
}
if test.expectedClusterRole == nil {
if len(fakeClient.Actions()) != 0 {
t.Fatalf("unexpected actions %#v", fakeClient.Actions())
}
return
}
if len(fakeClient.Actions()) != 1 {
t.Fatalf("unexpected actions %#v", fakeClient.Actions())
}
action := fakeClient.Actions()[0]
if !action.Matches("update", "clusterroles") {
t.Fatalf("unexpected action %#v", action)
}
updateAction, ok := action.(clienttesting.UpdateAction)
if !ok {
t.Fatalf("unexpected action %#v", action)
}
if !equality.Semantic.DeepEqual(updateAction.GetObject().(*rbacv1.ClusterRole), test.expectedClusterRole) {
t.Fatalf("%v", diff.ObjectDiff(test.expectedClusterRole, updateAction.GetObject().(*rbacv1.ClusterRole)))
}
})
}
}

View File

@ -1,501 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"fmt"
"sync"
"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
)
type BaseControllerRefManager struct {
Controller metav1.Object
Selector labels.Selector
canAdoptErr error
canAdoptOnce sync.Once
CanAdoptFunc func() error
}
func (m *BaseControllerRefManager) CanAdopt() error {
m.canAdoptOnce.Do(func() {
if m.CanAdoptFunc != nil {
m.canAdoptErr = m.CanAdoptFunc()
}
})
return m.canAdoptErr
}
// ClaimObject tries to take ownership of an object for this controller.
//
// It will reconcile the following:
// * Adopt orphans if the match function returns true.
// * Release owned objects if the match function returns false.
//
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The returned boolean indicates whether you now
// own the object.
//
// No reconciliation will be attempted if the controller is being deleted.
func (m *BaseControllerRefManager) ClaimObject(obj metav1.Object, match func(metav1.Object) bool, adopt, release func(metav1.Object) error) (bool, error) {
controllerRef := metav1.GetControllerOf(obj)
if controllerRef != nil {
if controllerRef.UID != m.Controller.GetUID() {
// Owned by someone else. Ignore.
return false, nil
}
if match(obj) {
// We already own it and the selector matches.
// Return true (successfully claimed) before checking deletion timestamp.
// We're still allowed to claim things we already own while being deleted
// because doing so requires taking no actions.
return true, nil
}
// Owned by us but selector doesn't match.
// Try to release, unless we're being deleted.
if m.Controller.GetDeletionTimestamp() != nil {
return false, nil
}
if err := release(obj); err != nil {
// If the pod no longer exists, ignore the error.
if errors.IsNotFound(err) {
return false, nil
}
// Either someone else released it, or there was a transient error.
// The controller should requeue and try again if it's still stale.
return false, err
}
// Successfully released.
return false, nil
}
// It's an orphan.
if m.Controller.GetDeletionTimestamp() != nil || !match(obj) {
// Ignore if we're being deleted or selector doesn't match.
return false, nil
}
if obj.GetDeletionTimestamp() != nil {
// Ignore if the object is being deleted
return false, nil
}
// Selector matches. Try to adopt.
if err := adopt(obj); err != nil {
// If the pod no longer exists, ignore the error.
if errors.IsNotFound(err) {
return false, nil
}
// Either someone else claimed it first, or there was a transient error.
// The controller should requeue and try again if it's still orphaned.
return false, err
}
// Successfully adopted.
return true, nil
}
type PodControllerRefManager struct {
BaseControllerRefManager
controllerKind schema.GroupVersionKind
podControl PodControlInterface
}
// NewPodControllerRefManager returns a PodControllerRefManager that exposes
// methods to manage the controllerRef of pods.
//
// The CanAdopt() function can be used to perform a potentially expensive check
// (such as a live GET from the API server) prior to the first adoption.
// It will only be called (at most once) if an adoption is actually attempted.
// If CanAdopt() returns a non-nil error, all adoptions will fail.
//
// NOTE: Once CanAdopt() is called, it will not be called again by the same
// PodControllerRefManager instance. Create a new instance if it makes
// sense to check CanAdopt() again (e.g. in a different sync pass).
func NewPodControllerRefManager(
podControl PodControlInterface,
controller metav1.Object,
selector labels.Selector,
controllerKind schema.GroupVersionKind,
canAdopt func() error,
) *PodControllerRefManager {
return &PodControllerRefManager{
BaseControllerRefManager: BaseControllerRefManager{
Controller: controller,
Selector: selector,
CanAdoptFunc: canAdopt,
},
controllerKind: controllerKind,
podControl: podControl,
}
}
// ClaimPods tries to take ownership of a list of Pods.
//
// It will reconcile the following:
// * Adopt orphans if the selector matches.
// * Release owned objects if the selector no longer matches.
//
// Optional: If one or more filters are specified, a Pod will only be claimed if
// all filters return true.
//
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The list of Pods that you now own is returned.
func (m *PodControllerRefManager) ClaimPods(pods []*v1.Pod, filters ...func(*v1.Pod) bool) ([]*v1.Pod, error) {
var claimed []*v1.Pod
var errlist []error
match := func(obj metav1.Object) bool {
pod := obj.(*v1.Pod)
// Check selector first so filters only run on potentially matching Pods.
if !m.Selector.Matches(labels.Set(pod.Labels)) {
return false
}
for _, filter := range filters {
if !filter(pod) {
return false
}
}
return true
}
adopt := func(obj metav1.Object) error {
return m.AdoptPod(obj.(*v1.Pod))
}
release := func(obj metav1.Object) error {
return m.ReleasePod(obj.(*v1.Pod))
}
for _, pod := range pods {
ok, err := m.ClaimObject(pod, match, adopt, release)
if err != nil {
errlist = append(errlist, err)
continue
}
if ok {
claimed = append(claimed, pod)
}
}
return claimed, utilerrors.NewAggregate(errlist)
}
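// A minimal usage sketch (hypothetical caller): a ReplicaSet controller claims
// only pods that pass both the selector and an extra filter.
//
//	claimed, err := m.ClaimPods(allPods, func(p *v1.Pod) bool {
//		return p.DeletionTimestamp == nil // illustrative extra filter
//	})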
// AdoptPod sends a patch to take control of the pod. It returns the error if
// the patching fails.
func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error {
if err := m.CanAdopt(); err != nil {
return fmt.Errorf("can't adopt Pod %v/%v (%v): %v", pod.Namespace, pod.Name, pod.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
// OwnerReference exists with controller=true.
addControllerPatch := fmt.Sprintf(
`{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`,
m.controllerKind.GroupVersion(), m.controllerKind.Kind,
m.Controller.GetName(), m.Controller.GetUID(), pod.UID)
return m.podControl.PatchPod(pod.Namespace, pod.Name, []byte(addControllerPatch))
}
// ReleasePod sends a patch to free the pod from the control of the controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *PodControllerRefManager) ReleasePod(pod *v1.Pod) error {
glog.V(2).Infof("patching pod %s_%s to remove its controllerRef to %s/%s:%s",
pod.Namespace, pod.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), pod.UID)
err := m.podControl.PatchPod(pod.Namespace, pod.Name, []byte(deleteOwnerRefPatch))
if err != nil {
if errors.IsNotFound(err) {
// If the pod no longer exists, ignore it.
return nil
}
if errors.IsInvalid(err) {
// Invalid error will be returned in two cases: 1. the pod
// has no owner reference, 2. the uid of the pod doesn't
// match, which means the pod is deleted and then recreated.
// In both cases, the error can be ignored.
// TODO: If the pod has owner references, but none of them
// has the owner.UID, server will silently ignore the patch.
// Investigate why.
return nil
}
}
return err
}
// ReplicaSetControllerRefManager is used to manage controllerRef of ReplicaSets.
// Three methods are defined on this object: 1) Classify, 2) AdoptReplicaSet, and
// 3) ReleaseReplicaSet, which are used to classify ReplicaSets into appropriate
// categories and adopt or release them accordingly. See the comments on these
// functions for more details.
type ReplicaSetControllerRefManager struct {
BaseControllerRefManager
controllerKind schema.GroupVersionKind
rsControl RSControlInterface
}
// NewReplicaSetControllerRefManager returns a ReplicaSetControllerRefManager that exposes
// methods to manage the controllerRef of ReplicaSets.
//
// The CanAdopt() function can be used to perform a potentially expensive check
// (such as a live GET from the API server) prior to the first adoption.
// It will only be called (at most once) if an adoption is actually attempted.
// If CanAdopt() returns a non-nil error, all adoptions will fail.
//
// NOTE: Once CanAdopt() is called, it will not be called again by the same
// ReplicaSetControllerRefManager instance. Create a new instance if it
// makes sense to check CanAdopt() again (e.g. in a different sync pass).
func NewReplicaSetControllerRefManager(
rsControl RSControlInterface,
controller metav1.Object,
selector labels.Selector,
controllerKind schema.GroupVersionKind,
canAdopt func() error,
) *ReplicaSetControllerRefManager {
return &ReplicaSetControllerRefManager{
BaseControllerRefManager: BaseControllerRefManager{
Controller: controller,
Selector: selector,
CanAdoptFunc: canAdopt,
},
controllerKind: controllerKind,
rsControl: rsControl,
}
}
// ClaimReplicaSets tries to take ownership of a list of ReplicaSets.
//
// It will reconcile the following:
// * Adopt orphans if the selector matches.
// * Release owned objects if the selector no longer matches.
//
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The list of ReplicaSets that you now own is
// returned.
func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*apps.ReplicaSet) ([]*apps.ReplicaSet, error) {
var claimed []*apps.ReplicaSet
var errlist []error
match := func(obj metav1.Object) bool {
return m.Selector.Matches(labels.Set(obj.GetLabels()))
}
adopt := func(obj metav1.Object) error {
return m.AdoptReplicaSet(obj.(*apps.ReplicaSet))
}
release := func(obj metav1.Object) error {
return m.ReleaseReplicaSet(obj.(*apps.ReplicaSet))
}
for _, rs := range sets {
ok, err := m.ClaimObject(rs, match, adopt, release)
if err != nil {
errlist = append(errlist, err)
continue
}
if ok {
claimed = append(claimed, rs)
}
}
return claimed, utilerrors.NewAggregate(errlist)
}
// AdoptReplicaSet sends a patch to take control of the ReplicaSet. It returns
// the error if the patching fails.
func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *apps.ReplicaSet) error {
if err := m.CanAdopt(); err != nil {
return fmt.Errorf("can't adopt ReplicaSet %v/%v (%v): %v", rs.Namespace, rs.Name, rs.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
// OwnerReference exists with controller=true.
addControllerPatch := fmt.Sprintf(
`{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`,
m.controllerKind.GroupVersion(), m.controllerKind.Kind,
m.Controller.GetName(), m.Controller.GetUID(), rs.UID)
return m.rsControl.PatchReplicaSet(rs.Namespace, rs.Name, []byte(addControllerPatch))
}
// ReleaseReplicaSet sends a patch to free the ReplicaSet from the control of the Deployment controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *apps.ReplicaSet) error {
glog.V(2).Infof("patching ReplicaSet %s_%s to remove its controllerRef to %s/%s:%s",
replicaSet.Namespace, replicaSet.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), replicaSet.UID)
err := m.rsControl.PatchReplicaSet(replicaSet.Namespace, replicaSet.Name, []byte(deleteOwnerRefPatch))
if err != nil {
if errors.IsNotFound(err) {
// If the ReplicaSet no longer exists, ignore it.
return nil
}
if errors.IsInvalid(err) {
// An Invalid error will be returned in two cases: 1. the ReplicaSet
// has no owner reference, 2. the uid of the ReplicaSet doesn't
// match, which means the ReplicaSet was deleted and then recreated.
// In both cases, the error can be ignored.
return nil
}
}
return err
}
// RecheckDeletionTimestamp returns a CanAdopt() function to recheck deletion.
//
// The CanAdopt() function calls getObject() to fetch the latest value,
// and denies adoption attempts if that object has a non-nil DeletionTimestamp.
func RecheckDeletionTimestamp(getObject func() (metav1.Object, error)) func() error {
return func() error {
obj, err := getObject()
if err != nil {
return fmt.Errorf("can't recheck DeletionTimestamp: %v", err)
}
if obj.GetDeletionTimestamp() != nil {
return fmt.Errorf("%v/%v has just been deleted at %v", obj.GetNamespace(), obj.GetName(), obj.GetDeletionTimestamp())
}
return nil
}
}
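// recheckWiringSketch is an illustrative sketch, not part of the original
// file: it wires RecheckDeletionTimestamp into a ref manager so the first
// adoption of a sync pass re-reads the controller object and refuses to adopt
// while it is being deleted. fresh is a hypothetical closure doing a live GET.
func recheckWiringSketch(
	rsControl RSControlInterface,
	d metav1.Object,
	selector labels.Selector,
	kind schema.GroupVersionKind,
	fresh func() (metav1.Object, error),
) *ReplicaSetControllerRefManager {
	return NewReplicaSetControllerRefManager(rsControl, d, selector, kind,
		RecheckDeletionTimestamp(fresh))
}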
// ControllerRevisionControllerRefManager is used to manage controllerRef of ControllerRevisions.
// Three methods are defined on this object: 1. ClaimControllerRevisions, 2. AdoptControllerRevision,
// and 3. ReleaseControllerRevision, which are used to classify ControllerRevisions into the
// appropriate categories and accordingly adopt or release them. See the comments on these
// methods for more details.
type ControllerRevisionControllerRefManager struct {
BaseControllerRefManager
controllerKind schema.GroupVersionKind
crControl ControllerRevisionControlInterface
}
// NewControllerRevisionControllerRefManager returns a ControllerRevisionControllerRefManager that exposes
// methods to manage the controllerRef of ControllerRevisions.
//
// The canAdopt() function can be used to perform a potentially expensive check
// (such as a live GET from the API server) prior to the first adoption.
// It will only be called (at most once) if an adoption is actually attempted.
// If canAdopt() returns a non-nil error, all adoptions will fail.
//
// NOTE: Once canAdopt() is called, it will not be called again by the same
// ControllerRevisionControllerRefManager instance. Create a new instance if it
// makes sense to check canAdopt() again (e.g. in a different sync pass).
func NewControllerRevisionControllerRefManager(
crControl ControllerRevisionControlInterface,
controller metav1.Object,
selector labels.Selector,
controllerKind schema.GroupVersionKind,
canAdopt func() error,
) *ControllerRevisionControllerRefManager {
return &ControllerRevisionControllerRefManager{
BaseControllerRefManager: BaseControllerRefManager{
Controller: controller,
Selector: selector,
CanAdoptFunc: canAdopt,
},
controllerKind: controllerKind,
crControl: crControl,
}
}
// ClaimControllerRevisions tries to take ownership of a list of ControllerRevisions.
//
// It will reconcile the following:
// * Adopt orphans if the selector matches.
// * Release owned objects if the selector no longer matches.
//
// A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation
// is still needed.
//
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The list of ControllerRevisions that you now own is
// returned.
func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histories []*apps.ControllerRevision) ([]*apps.ControllerRevision, error) {
var claimed []*apps.ControllerRevision
var errlist []error
match := func(obj metav1.Object) bool {
return m.Selector.Matches(labels.Set(obj.GetLabels()))
}
adopt := func(obj metav1.Object) error {
return m.AdoptControllerRevision(obj.(*apps.ControllerRevision))
}
release := func(obj metav1.Object) error {
return m.ReleaseControllerRevision(obj.(*apps.ControllerRevision))
}
for _, h := range histories {
ok, err := m.ClaimObject(h, match, adopt, release)
if err != nil {
errlist = append(errlist, err)
continue
}
if ok {
claimed = append(claimed, h)
}
}
return claimed, utilerrors.NewAggregate(errlist)
}
// AdoptControllerRevision sends a patch to take control of the ControllerRevision. It returns the error if
// the patching fails.
func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history *apps.ControllerRevision) error {
if err := m.CanAdopt(); err != nil {
return fmt.Errorf("can't adopt ControllerRevision %v/%v (%v): %v", history.Namespace, history.Name, history.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
// OwnerReference exists with controller=true.
addControllerPatch := fmt.Sprintf(
`{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`,
m.controllerKind.GroupVersion(), m.controllerKind.Kind,
m.Controller.GetName(), m.Controller.GetUID(), history.UID)
return m.crControl.PatchControllerRevision(history.Namespace, history.Name, []byte(addControllerPatch))
}
// ReleaseControllerRevision sends a patch to free the ControllerRevision from the control of its controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *apps.ControllerRevision) error {
glog.V(2).Infof("patching ControllerRevision %s_%s to remove its controllerRef to %s/%s:%s",
history.Namespace, history.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), history.UID)
err := m.crControl.PatchControllerRevision(history.Namespace, history.Name, []byte(deleteOwnerRefPatch))
if err != nil {
if errors.IsNotFound(err) {
// If the ControllerRevision no longer exists, ignore it.
return nil
}
if errors.IsInvalid(err) {
// An Invalid error will be returned in two cases: 1. the ControllerRevision
// has no owner reference, 2. the uid of the ControllerRevision doesn't
// match, which means the ControllerRevision was deleted and then recreated.
// In both cases, the error can be ignored.
return nil
}
}
return err
}
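// Worked example (illustrative, not part of the original file): with
// controllerKind apps/v1 Kind=StatefulSet and a controller named "web" with
// UID "aaa-bbb", AdoptControllerRevision sends a merge patch shaped like
//
//	{"metadata":{"ownerReferences":[{"apiVersion":"apps/v1","kind":"StatefulSet",
//	"name":"web","uid":"aaa-bbb","controller":true,"blockOwnerDeletion":true}],
//	"uid":"<revision uid>"}}
//
// The trailing "uid" pins the patch to the exact ControllerRevision that was
// observed, so a delete-and-recreate race fails with 422 (Invalid) instead of
// adopting the wrong object; ReleaseControllerRevision deliberately ignores
// that same error.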

View File

@ -1,181 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"reflect"
"testing"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
)
var (
productionLabel = map[string]string{"type": "production"}
testLabel = map[string]string{"type": "testing"}
productionLabelSelector = labels.Set{"type": "production"}.AsSelector()
testLabelSelector = labels.Set{"type": "testing"}.AsSelector()
controllerUID = "123"
)
func newPod(podName string, label map[string]string, owner metav1.Object) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: label,
Namespace: metav1.NamespaceDefault,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "foo/bar",
},
},
},
}
if owner != nil {
pod.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(owner, apps.SchemeGroupVersion.WithKind("Fake"))}
}
return pod
}
func TestClaimPods(t *testing.T) {
controllerKind := schema.GroupVersionKind{}
type test struct {
name string
manager *PodControllerRefManager
pods []*v1.Pod
filters []func(*v1.Pod) bool
claimed []*v1.Pod
released []*v1.Pod
}
var tests = []test{
{
name: "Claim pods with correct label",
manager: NewPodControllerRefManager(&FakePodControl{},
&v1.ReplicationController{},
productionLabelSelector,
controllerKind,
func() error { return nil }),
pods: []*v1.Pod{newPod("pod1", productionLabel, nil), newPod("pod2", testLabel, nil)},
claimed: []*v1.Pod{newPod("pod1", productionLabel, nil)},
},
func() test {
controller := v1.ReplicationController{}
controller.UID = types.UID(controllerUID)
now := metav1.Now()
controller.DeletionTimestamp = &now
return test{
name: "Controller marked for deletion can not claim pods",
manager: NewPodControllerRefManager(&FakePodControl{},
&controller,
productionLabelSelector,
controllerKind,
func() error { return nil }),
pods: []*v1.Pod{newPod("pod1", productionLabel, nil), newPod("pod2", productionLabel, nil)},
claimed: nil,
}
}(),
func() test {
controller := v1.ReplicationController{}
controller.UID = types.UID(controllerUID)
now := metav1.Now()
controller.DeletionTimestamp = &now
return test{
name: "Controller marked for deletion can not claim new pods",
manager: NewPodControllerRefManager(&FakePodControl{},
&controller,
productionLabelSelector,
controllerKind,
func() error { return nil }),
pods: []*v1.Pod{newPod("pod1", productionLabel, &controller), newPod("pod2", productionLabel, nil)},
claimed: []*v1.Pod{newPod("pod1", productionLabel, &controller)},
}
}(),
func() test {
controller := v1.ReplicationController{}
controller2 := v1.ReplicationController{}
controller.UID = types.UID(controllerUID)
controller2.UID = types.UID("AAAAA")
return test{
name: "Controller can not claim pods owned by another controller",
manager: NewPodControllerRefManager(&FakePodControl{},
&controller,
productionLabelSelector,
controllerKind,
func() error { return nil }),
pods: []*v1.Pod{newPod("pod1", productionLabel, &controller), newPod("pod2", productionLabel, &controller2)},
claimed: []*v1.Pod{newPod("pod1", productionLabel, &controller)},
}
}(),
func() test {
controller := v1.ReplicationController{}
controller.UID = types.UID(controllerUID)
return test{
name: "Controller releases claimed pods when selector doesn't match",
manager: NewPodControllerRefManager(&FakePodControl{},
&controller,
productionLabelSelector,
controllerKind,
func() error { return nil }),
pods: []*v1.Pod{newPod("pod1", productionLabel, &controller), newPod("pod2", testLabel, &controller)},
claimed: []*v1.Pod{newPod("pod1", productionLabel, &controller)},
}
}(),
func() test {
controller := v1.ReplicationController{}
controller.UID = types.UID(controllerUID)
podToDelete1 := newPod("pod1", productionLabel, &controller)
podToDelete2 := newPod("pod2", productionLabel, nil)
now := metav1.Now()
podToDelete1.DeletionTimestamp = &now
podToDelete2.DeletionTimestamp = &now
return test{
name: "Controller does not claim orphaned pods marked for deletion",
manager: NewPodControllerRefManager(&FakePodControl{},
&controller,
productionLabelSelector,
controllerKind,
func() error { return nil }),
pods: []*v1.Pod{podToDelete1, podToDelete2},
claimed: []*v1.Pod{podToDelete1},
}
}(),
}
for _, test := range tests {
claimed, err := test.manager.ClaimPods(test.pods)
if err != nil {
t.Errorf("Test case `%s`, unexpected error: %v", test.name, err)
} else if !reflect.DeepEqual(test.claimed, claimed) {
t.Errorf("Test case `%s`, claimed wrong pods. Expected %v, got %v", test.name, podToStringSlice(test.claimed), podToStringSlice(claimed))
}
}
}
func podToStringSlice(pods []*v1.Pod) []string {
var names []string
for _, pod := range pods {
names = append(names, pod.Name)
}
return names
}

File diff suppressed because it is too large

View File

@ -1,821 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"encoding/json"
"fmt"
"math"
"math/rand"
"net/http/httptest"
"sort"
"sync"
"testing"
"time"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/api/testapi"
_ "k8s.io/kubernetes/pkg/apis/core/install"
"k8s.io/kubernetes/pkg/controller/testutil"
"k8s.io/kubernetes/pkg/securitycontext"
"github.com/stretchr/testify/assert"
)
// NewFakeControllerExpectationsLookup creates a fake store for PodExpectations.
func NewFakeControllerExpectationsLookup(ttl time.Duration) (*ControllerExpectations, *clock.FakeClock) {
fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
fakeClock := clock.NewFakeClock(fakeTime)
ttlPolicy := &cache.TTLPolicy{Ttl: ttl, Clock: fakeClock}
ttlStore := cache.NewFakeExpirationStore(
ExpKeyFunc, nil, ttlPolicy, fakeClock)
return &ControllerExpectations{ttlStore}, fakeClock
}
func newReplicationController(replicas int) *v1.ReplicationController {
rc := &v1.ReplicationController{
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "18",
},
Spec: v1.ReplicationControllerSpec{
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: map[string]string{"foo": "bar"},
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"name": "foo",
"type": "production",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "foo/bar",
TerminationMessagePath: v1.TerminationMessagePathDefault,
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
},
},
RestartPolicy: v1.RestartPolicyAlways,
DNSPolicy: v1.DNSDefault,
NodeSelector: map[string]string{
"baz": "blah",
},
},
},
},
}
return rc
}
// create count pods with the given phase for the given rc (same selectors and namespace), and add them to the store.
func newPodList(store cache.Store, count int, status v1.PodPhase, rc *v1.ReplicationController) *v1.PodList {
pods := []v1.Pod{}
for i := 0; i < count; i++ {
newPod := v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("pod%d", i),
Labels: rc.Spec.Selector,
Namespace: rc.Namespace,
},
Status: v1.PodStatus{Phase: status},
}
if store != nil {
store.Add(&newPod)
}
pods = append(pods, newPod)
}
return &v1.PodList{
Items: pods,
}
}
func newReplicaSet(name string, replicas int) *apps.ReplicaSet {
return &apps.ReplicaSet{
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: name,
Namespace: metav1.NamespaceDefault,
ResourceVersion: "18",
},
Spec: apps.ReplicaSetSpec{
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"name": "foo",
"type": "production",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "foo/bar",
TerminationMessagePath: v1.TerminationMessagePathDefault,
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
},
},
RestartPolicy: v1.RestartPolicyAlways,
DNSPolicy: v1.DNSDefault,
NodeSelector: map[string]string{
"baz": "blah",
},
},
},
},
}
}
func TestControllerExpectations(t *testing.T) {
ttl := 30 * time.Second
e, fakeClock := NewFakeControllerExpectationsLookup(ttl)
// In practice we can't really have add and delete expectations since we only either create or
// delete replicas in one rc pass, and the rc goes to sleep soon after until the expectations are
// either fulfilled or they time out.
adds, dels := 10, 30
rc := newReplicationController(1)
// RC fires off adds and deletes at apiserver, then sets expectations
rcKey, err := KeyFunc(rc)
assert.NoError(t, err, "Couldn't get key for object %#v: %v", rc, err)
e.SetExpectations(rcKey, adds, dels)
var wg sync.WaitGroup
for i := 0; i < adds+1; i++ {
wg.Add(1)
go func() {
// In prod this can happen either because of a failed create by the rc
// or after having observed a create via informer
e.CreationObserved(rcKey)
wg.Done()
}()
}
wg.Wait()
// There are still delete expectations
assert.False(t, e.SatisfiedExpectations(rcKey), "Rc will sync before expectations are met")
for i := 0; i < dels+1; i++ {
wg.Add(1)
go func() {
e.DeletionObserved(rcKey)
wg.Done()
}()
}
wg.Wait()
// Expectations have been surpassed
podExp, exists, err := e.GetExpectations(rcKey)
assert.NoError(t, err, "Could not get expectations for rc, exists %v and err %v", exists, err)
assert.True(t, exists, "Could not get expectations for rc, exists %v and err %v", exists, err)
add, del := podExp.GetExpectations()
assert.Equal(t, int64(-1), add, "Unexpected pod expectations %#v", podExp)
assert.Equal(t, int64(-1), del, "Unexpected pod expectations %#v", podExp)
assert.True(t, e.SatisfiedExpectations(rcKey), "Expectations are met but the rc will not sync")
// Next round of rc sync, old expectations are cleared
e.SetExpectations(rcKey, 1, 2)
podExp, exists, err = e.GetExpectations(rcKey)
assert.NoError(t, err, "Could not get expectations for rc, exists %v and err %v", exists, err)
assert.True(t, exists, "Could not get expectations for rc, exists %v and err %v", exists, err)
add, del = podExp.GetExpectations()
assert.Equal(t, int64(1), add, "Unexpected pod expectations %#v", podExp)
assert.Equal(t, int64(2), del, "Unexpected pod expectations %#v", podExp)
// Expectations have expired because of ttl
fakeClock.Step(ttl + 1)
assert.True(t, e.SatisfiedExpectations(rcKey),
"Expectations should have expired but didn't")
}
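// syncSketch is an illustrative fragment, not part of the original file: the
// production pattern exercised by the test above. A controller skips work
// while expectations are unmet, records new expectations before mutating the
// API, and counts a failed create down by hand because no informer event will
// ever arrive for it.
func syncSketch(e *ControllerExpectations, rcKey string, needed int, createPod func() error) error {
	if !e.SatisfiedExpectations(rcKey) {
		return nil // a previous batch is still in flight; sync again later
	}
	if err := e.ExpectCreations(rcKey, needed); err != nil {
		return err
	}
	for i := 0; i < needed; i++ {
		if err := createPod(); err != nil {
			e.CreationObserved(rcKey) // failed create: observe it manually
		}
	}
	return nil
}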
func TestUIDExpectations(t *testing.T) {
uidExp := NewUIDTrackingControllerExpectations(NewControllerExpectations())
rcList := []*v1.ReplicationController{
newReplicationController(2),
newReplicationController(1),
newReplicationController(0),
newReplicationController(5),
}
rcToPods := map[string][]string{}
rcKeys := []string{}
for i := range rcList {
rc := rcList[i]
rcName := fmt.Sprintf("rc-%v", i)
rc.Name = rcName
rc.Spec.Selector[rcName] = rcName
podList := newPodList(nil, 5, v1.PodRunning, rc)
rcKey, err := KeyFunc(rc)
if err != nil {
t.Fatalf("Couldn't get key for object %#v: %v", rc, err)
}
rcKeys = append(rcKeys, rcKey)
rcPodNames := []string{}
for i := range podList.Items {
p := &podList.Items[i]
p.Name = fmt.Sprintf("%v-%v", p.Name, rc.Name)
rcPodNames = append(rcPodNames, PodKey(p))
}
rcToPods[rcKey] = rcPodNames
uidExp.ExpectDeletions(rcKey, rcPodNames)
}
for i := range rcKeys {
j := rand.Intn(i + 1)
rcKeys[i], rcKeys[j] = rcKeys[j], rcKeys[i]
}
for _, rcKey := range rcKeys {
assert.False(t, uidExp.SatisfiedExpectations(rcKey),
"Controller %v satisfied expectations before deletion", rcKey)
for _, p := range rcToPods[rcKey] {
uidExp.DeletionObserved(rcKey, p)
}
assert.True(t, uidExp.SatisfiedExpectations(rcKey),
"Controller %v didn't satisfy expectations after deletion", rcKey)
uidExp.DeleteExpectations(rcKey)
assert.Nil(t, uidExp.GetUIDs(rcKey),
"Failed to delete uid expectations for %v", rcKey)
}
}
func TestCreatePods(t *testing.T) {
ns := metav1.NamespaceDefault
body := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "empty_pod"}})
fakeHandler := utiltesting.FakeHandler{
StatusCode: 200,
ResponseBody: string(body),
}
testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close()
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
podControl := RealPodControl{
KubeClient: clientset,
Recorder: &record.FakeRecorder{},
}
controllerSpec := newReplicationController(1)
// Make sure CreatePods sends a POST to the apiserver with a pod from the controller's pod template
err := podControl.CreatePods(ns, controllerSpec.Spec.Template, controllerSpec)
assert.NoError(t, err, "unexpected error: %v", err)
expectedPod := v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Labels: controllerSpec.Spec.Template.Labels,
GenerateName: fmt.Sprintf("%s-", controllerSpec.Name),
},
Spec: controllerSpec.Spec.Template.Spec,
}
fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath("pods", metav1.NamespaceDefault, ""), "POST", nil)
var actualPod = &v1.Pod{}
err = json.Unmarshal([]byte(fakeHandler.RequestBody), actualPod)
assert.NoError(t, err, "unexpected error: %v", err)
assert.True(t, apiequality.Semantic.DeepDerivative(&expectedPod, actualPod),
"Body: %s", fakeHandler.RequestBody)
}
func TestActivePodFiltering(t *testing.T) {
// This rc is not needed by the test; it is only used by newPodList to give the pods labels and a namespace.
rc := newReplicationController(0)
podList := newPodList(nil, 5, v1.PodRunning, rc)
podList.Items[0].Status.Phase = v1.PodSucceeded
podList.Items[1].Status.Phase = v1.PodFailed
expectedNames := sets.NewString()
for _, pod := range podList.Items[2:] {
expectedNames.Insert(pod.Name)
}
var podPointers []*v1.Pod
for i := range podList.Items {
podPointers = append(podPointers, &podList.Items[i])
}
got := FilterActivePods(podPointers)
gotNames := sets.NewString()
for _, pod := range got {
gotNames.Insert(pod.Name)
}
assert.Equal(t, 0, expectedNames.Difference(gotNames).Len(),
"expected %v, got %v", expectedNames.List(), gotNames.List())
assert.Equal(t, 0, gotNames.Difference(expectedNames).Len(),
"expected %v, got %v", expectedNames.List(), gotNames.List())
}
func TestSortingActivePods(t *testing.T) {
numPods := 9
// This rc is not needed by the test; it is only used by newPodList to give the pods labels and a namespace.
rc := newReplicationController(0)
podList := newPodList(nil, numPods, v1.PodRunning, rc)
pods := make([]*v1.Pod, len(podList.Items))
for i := range podList.Items {
pods[i] = &podList.Items[i]
}
// pods[0] is not scheduled yet.
pods[0].Spec.NodeName = ""
pods[0].Status.Phase = v1.PodPending
// pods[1] is scheduled but pending.
pods[1].Spec.NodeName = "bar"
pods[1].Status.Phase = v1.PodPending
// pods[2] is unknown.
pods[2].Spec.NodeName = "foo"
pods[2].Status.Phase = v1.PodUnknown
// pods[3] is running but not ready.
pods[3].Spec.NodeName = "foo"
pods[3].Status.Phase = v1.PodRunning
// pods[4] is running and ready but without LastTransitionTime.
now := metav1.Now()
pods[4].Spec.NodeName = "foo"
pods[4].Status.Phase = v1.PodRunning
pods[4].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
pods[4].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}}
// pods[5] is running and ready and with LastTransitionTime.
pods[5].Spec.NodeName = "foo"
pods[5].Status.Phase = v1.PodRunning
pods[5].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: now}}
pods[5].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}}
// pods[6] is running ready for a longer time than pods[5].
then := metav1.Time{Time: now.AddDate(0, -1, 0)}
pods[6].Spec.NodeName = "foo"
pods[6].Status.Phase = v1.PodRunning
pods[6].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: then}}
pods[6].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}}
// pods[7] has lower container restart count than pods[6].
pods[7].Spec.NodeName = "foo"
pods[7].Status.Phase = v1.PodRunning
pods[7].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: then}}
pods[7].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 2}, {RestartCount: 1}}
pods[7].CreationTimestamp = now
// pods[8] is older than pods[7].
pods[8].Spec.NodeName = "foo"
pods[8].Status.Phase = v1.PodRunning
pods[8].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: then}}
pods[8].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 2}, {RestartCount: 1}}
pods[8].CreationTimestamp = then
getOrder := func(pods []*v1.Pod) []string {
names := make([]string, len(pods))
for i := range pods {
names[i] = pods[i].Name
}
return names
}
expected := getOrder(pods)
for i := 0; i < 20; i++ {
idx := rand.Perm(numPods)
randomizedPods := make([]*v1.Pod, numPods)
for j := 0; j < numPods; j++ {
randomizedPods[j] = pods[idx[j]]
}
sort.Sort(ActivePods(randomizedPods))
actual := getOrder(randomizedPods)
assert.EqualValues(t, expected, actual, "expected %v, got %v", expected, actual)
}
}
func TestActiveReplicaSetsFiltering(t *testing.T) {
var replicaSets []*apps.ReplicaSet
replicaSets = append(replicaSets, newReplicaSet("zero", 0))
replicaSets = append(replicaSets, nil)
replicaSets = append(replicaSets, newReplicaSet("foo", 1))
replicaSets = append(replicaSets, newReplicaSet("bar", 2))
expectedNames := sets.NewString()
for _, rs := range replicaSets[2:] {
expectedNames.Insert(rs.Name)
}
got := FilterActiveReplicaSets(replicaSets)
gotNames := sets.NewString()
for _, rs := range got {
gotNames.Insert(rs.Name)
}
assert.Equal(t, 0, expectedNames.Difference(gotNames).Len(),
"expected %v, got %v", expectedNames.List(), gotNames.List())
assert.Equal(t, 0, gotNames.Difference(expectedNames).Len(),
"expected %v, got %v", expectedNames.List(), gotNames.List())
}
func TestComputeHash(t *testing.T) {
collisionCount := int32(1)
otherCollisionCount := int32(2)
maxCollisionCount := int32(math.MaxInt32)
tests := []struct {
name string
template *v1.PodTemplateSpec
collisionCount *int32
otherCollisionCount *int32
}{
{
name: "simple",
template: &v1.PodTemplateSpec{},
collisionCount: &collisionCount,
otherCollisionCount: &otherCollisionCount,
},
{
name: "using math.MaxInt64",
template: &v1.PodTemplateSpec{},
collisionCount: nil,
otherCollisionCount: &maxCollisionCount,
},
}
for _, test := range tests {
hash := ComputeHash(test.template, test.collisionCount)
otherHash := ComputeHash(test.template, test.otherCollisionCount)
assert.NotEqual(t, hash, otherHash, "expected different hashes but got the same: %d", hash)
}
}
func TestRemoveTaintOffNode(t *testing.T) {
tests := []struct {
name string
nodeHandler *testutil.FakeNodeHandler
nodeName string
taintsToRemove []*v1.Taint
expectedTaints []v1.Taint
requestCount int
}{
{
name: "remove one taint from node",
nodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
{Key: "key2", Value: "value2", Effect: "NoExecute"},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
nodeName: "node1",
taintsToRemove: []*v1.Taint{
{Key: "key2", Value: "value2", Effect: "NoExecute"},
},
expectedTaints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
},
requestCount: 4,
},
{
name: "remove multiple taints from node",
nodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
{Key: "key2", Value: "value2", Effect: "NoExecute"},
{Key: "key3", Value: "value3", Effect: "NoSchedule"},
{Key: "key4", Value: "value4", Effect: "NoExecute"},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
nodeName: "node1",
taintsToRemove: []*v1.Taint{
{Key: "key2", Value: "value2", Effect: "NoExecute"},
{Key: "key3", Value: "value3", Effect: "NoSchedule"},
},
expectedTaints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
{Key: "key4", Value: "value4", Effect: "NoExecute"},
},
requestCount: 4,
},
{
name: "remove no-exist taints from node",
nodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
{Key: "key2", Value: "value2", Effect: "NoExecute"},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
nodeName: "node1",
taintsToRemove: []*v1.Taint{
{Key: "key3", Value: "value3", Effect: "NoSchedule"},
},
expectedTaints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
{Key: "key2", Value: "value2", Effect: "NoExecute"},
},
requestCount: 2,
},
{
name: "remove taint from node without taints",
nodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
nodeName: "node1",
taintsToRemove: []*v1.Taint{
{Key: "key3", Value: "value3", Effect: "NoSchedule"},
},
expectedTaints: nil,
requestCount: 2,
},
{
name: "remove empty taint list from node without taints",
nodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
nodeName: "node1",
taintsToRemove: []*v1.Taint{},
expectedTaints: nil,
requestCount: 2,
},
{
name: "remove empty taint list from node",
nodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
{Key: "key2", Value: "value2", Effect: "NoExecute"},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
nodeName: "node1",
taintsToRemove: []*v1.Taint{},
expectedTaints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
{Key: "key2", Value: "value2", Effect: "NoExecute"},
},
requestCount: 2,
},
}
for _, test := range tests {
node, _ := test.nodeHandler.Get(test.nodeName, metav1.GetOptions{})
err := RemoveTaintOffNode(test.nodeHandler, test.nodeName, node, test.taintsToRemove...)
assert.NoError(t, err, "%s: RemoveTaintOffNode() error = %v", test.name, err)
node, _ = test.nodeHandler.Get(test.nodeName, metav1.GetOptions{})
assert.EqualValues(t, test.expectedTaints, node.Spec.Taints,
"%s: failed to remove taint off node: expected %+v, got %+v",
test.name, test.expectedTaints, node.Spec.Taints)
assert.Equal(t, test.requestCount, test.nodeHandler.RequestCount,
"%s: unexpected request count: expected %+v, got %+v",
test.name, test.requestCount, test.nodeHandler.RequestCount)
}
}
func TestAddOrUpdateTaintOnNode(t *testing.T) {
tests := []struct {
name string
nodeHandler *testutil.FakeNodeHandler
nodeName string
taintsToAdd []*v1.Taint
expectedTaints []v1.Taint
requestCount int
}{
{
name: "add one taint on node",
nodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
nodeName: "node1",
taintsToAdd: []*v1.Taint{
{Key: "key2", Value: "value2", Effect: "NoExecute"},
},
expectedTaints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
{Key: "key2", Value: "value2", Effect: "NoExecute"},
},
requestCount: 3,
},
{
name: "add multiple taints to node",
nodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
{Key: "key2", Value: "value2", Effect: "NoExecute"},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
nodeName: "node1",
taintsToAdd: []*v1.Taint{
{Key: "key3", Value: "value3", Effect: "NoSchedule"},
{Key: "key4", Value: "value4", Effect: "NoExecute"},
},
expectedTaints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
{Key: "key2", Value: "value2", Effect: "NoExecute"},
{Key: "key3", Value: "value3", Effect: "NoSchedule"},
{Key: "key4", Value: "value4", Effect: "NoExecute"},
},
requestCount: 3,
},
{
name: "add exist taints to node",
nodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
{Key: "key2", Value: "value2", Effect: "NoExecute"},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
nodeName: "node1",
taintsToAdd: []*v1.Taint{
{Key: "key2", Value: "value2", Effect: "NoExecute"},
},
expectedTaints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
{Key: "key2", Value: "value2", Effect: "NoExecute"},
},
requestCount: 2,
},
{
name: "add taint to node without taints",
nodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
nodeName: "node1",
taintsToAdd: []*v1.Taint{
{Key: "key3", Value: "value3", Effect: "NoSchedule"},
},
expectedTaints: []v1.Taint{
{Key: "key3", Value: "value3", Effect: "NoSchedule"},
},
requestCount: 3,
},
{
name: "add empty taint list to node without taints",
nodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
nodeName: "node1",
taintsToAdd: []*v1.Taint{},
expectedTaints: nil,
requestCount: 1,
},
{
name: "add empty taint list to node",
nodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
{Key: "key2", Value: "value2", Effect: "NoExecute"},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
nodeName: "node1",
taintsToAdd: []*v1.Taint{},
expectedTaints: []v1.Taint{
{Key: "key1", Value: "value1", Effect: "NoSchedule"},
{Key: "key2", Value: "value2", Effect: "NoExecute"},
},
requestCount: 1,
},
}
for _, test := range tests {
err := AddOrUpdateTaintOnNode(test.nodeHandler, test.nodeName, test.taintsToAdd...)
assert.NoError(t, err, "%s: AddOrUpdateTaintOnNode() error = %v", test.name, err)
node, _ := test.nodeHandler.Get(test.nodeName, metav1.GetOptions{})
assert.EqualValues(t, test.expectedTaints, node.Spec.Taints,
"%s: failed to add taint to node: expected %+v, got %+v",
test.name, test.expectedTaints, node.Spec.Taints)
assert.Equal(t, test.requestCount, test.nodeHandler.RequestCount,
"%s: unexpected request count: expected %+v, got %+v",
test.name, test.requestCount, test.nodeHandler.RequestCount)
}
}

View File

@ -1,74 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"cronjob_controller.go",
"doc.go",
"injection.go",
"utils.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/cronjob",
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/util/metrics:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/robfig/cron:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/tools/reference:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"cronjob_controller_test.go",
"utils_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/batch/install:go_default_library",
"//pkg/apis/core/install:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,8 +0,0 @@
approvers:
- erictune
- janetkuo
- soltysh
reviewers:
- erictune
- janetkuo
- soltysh

View File

@ -1,408 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cronjob
/*
I did not use watch or expectations. Those add a lot of corner cases, and we aren't
expecting a large volume of jobs or scheduledJobs. (We are favoring correctness
over scalability. If we find a single controller thread is too slow because
there are a lot of Jobs or CronJobs, we can parallelize by Namespace.
If we find the load on the API server is too high, we can use a watch and
UndeltaStore.)
Just periodically list jobs and SJs, and then reconcile them.
*/
import (
"fmt"
"sort"
"time"
"github.com/golang/glog"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
ref "k8s.io/client-go/tools/reference"
"k8s.io/kubernetes/pkg/util/metrics"
)
// Utilities for dealing with Jobs and CronJobs and time.
// controllerKind contains the schema.GroupVersionKind for this controller type.
var controllerKind = batchv1beta1.SchemeGroupVersion.WithKind("CronJob")
type CronJobController struct {
kubeClient clientset.Interface
jobControl jobControlInterface
sjControl sjControlInterface
podControl podControlInterface
recorder record.EventRecorder
}
func NewCronJobController(kubeClient clientset.Interface) (*CronJobController, error) {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
if err := metrics.RegisterMetricAndTrackRateLimiterUsage("cronjob_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()); err != nil {
return nil, err
}
}
jm := &CronJobController{
kubeClient: kubeClient,
jobControl: realJobControl{KubeClient: kubeClient},
sjControl: &realSJControl{KubeClient: kubeClient},
podControl: &realPodControl{KubeClient: kubeClient},
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cronjob-controller"}),
}
return jm, nil
}
// Run the main goroutine responsible for watching and syncing jobs.
func (jm *CronJobController) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
glog.Infof("Starting CronJob Manager")
// Check things every 10 seconds.
go wait.Until(jm.syncAll, 10*time.Second, stopCh)
<-stopCh
glog.Infof("Shutting down CronJob Manager")
}
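// Illustrative wiring, not part of the original file; the kubeClient
// construction is a hypothetical placeholder:
//
//	jm, err := NewCronJobController(kubeClient)
//	if err != nil {
//		glog.Fatalf("error creating cronjob controller: %v", err)
//	}
//	stopCh := make(chan struct{})
//	defer close(stopCh)
//	jm.Run(stopCh) // blocks until stopCh is closed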
// syncAll lists all the CronJobs and Jobs and reconciles them.
func (jm *CronJobController) syncAll() {
// List children (Jobs) before parents (CronJob).
// This guarantees that if we see any Job that got orphaned by the GC orphan finalizer,
// we must also see that the parent CronJob has non-nil DeletionTimestamp (see #42639).
// Note that this only works because we are NOT using any caches here.
jl, err := jm.kubeClient.BatchV1().Jobs(metav1.NamespaceAll).List(metav1.ListOptions{})
if err != nil {
utilruntime.HandleError(fmt.Errorf("can't list Jobs: %v", err))
return
}
js := jl.Items
glog.V(4).Infof("Found %d jobs", len(js))
sjl, err := jm.kubeClient.BatchV1beta1().CronJobs(metav1.NamespaceAll).List(metav1.ListOptions{})
if err != nil {
utilruntime.HandleError(fmt.Errorf("can't list CronJobs: %v", err))
return
}
sjs := sjl.Items
glog.V(4).Infof("Found %d cronjobs", len(sjs))
jobsBySj := groupJobsByParent(js)
glog.V(4).Infof("Found %d groups", len(jobsBySj))
for _, sj := range sjs {
syncOne(&sj, jobsBySj[sj.UID], time.Now(), jm.jobControl, jm.sjControl, jm.podControl, jm.recorder)
cleanupFinishedJobs(&sj, jobsBySj[sj.UID], jm.jobControl, jm.sjControl, jm.podControl, jm.recorder)
}
}
// cleanupFinishedJobs cleans up finished jobs created by a CronJob
func cleanupFinishedJobs(sj *batchv1beta1.CronJob, js []batchv1.Job, jc jobControlInterface,
sjc sjControlInterface, pc podControlInterface, recorder record.EventRecorder) {
// If neither limit is set, there is no need to do anything.
if sj.Spec.FailedJobsHistoryLimit == nil && sj.Spec.SuccessfulJobsHistoryLimit == nil {
return
}
failedJobs := []batchv1.Job{}
successfulJobs := []batchv1.Job{}
for _, job := range js {
isFinished, finishedStatus := getFinishedStatus(&job)
if isFinished && finishedStatus == batchv1.JobComplete {
successfulJobs = append(successfulJobs, job)
} else if isFinished && finishedStatus == batchv1.JobFailed {
failedJobs = append(failedJobs, job)
}
}
if sj.Spec.SuccessfulJobsHistoryLimit != nil {
removeOldestJobs(sj,
successfulJobs,
jc,
pc,
*sj.Spec.SuccessfulJobsHistoryLimit,
recorder)
}
if sj.Spec.FailedJobsHistoryLimit != nil {
removeOldestJobs(sj,
failedJobs,
jc,
pc,
*sj.Spec.FailedJobsHistoryLimit,
recorder)
}
// Update the CronJob, in case jobs were removed from the list.
if _, err := sjc.UpdateStatus(sj); err != nil {
nameForLog := fmt.Sprintf("%s/%s", sj.Namespace, sj.Name)
glog.Infof("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err)
}
}
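// Illustrative only, not part of the original file: the two limits that gate
// cleanupFinishedJobs are optional int32 fields on the CronJob spec; the
// values here are arbitrary.
//
//	three, one := int32(3), int32(1)
//	sj.Spec.SuccessfulJobsHistoryLimit = &three // keep the 3 newest successes
//	sj.Spec.FailedJobsHistoryLimit = &one       // keep only the newest failure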
// removeOldestJobs removes the oldest jobs from a list of jobs
func removeOldestJobs(sj *batchv1beta1.CronJob, js []batchv1.Job, jc jobControlInterface,
pc podControlInterface, maxJobs int32, recorder record.EventRecorder) {
numToDelete := len(js) - int(maxJobs)
if numToDelete <= 0 {
return
}
nameForLog := fmt.Sprintf("%s/%s", sj.Namespace, sj.Name)
glog.V(4).Infof("Cleaning up %d/%d jobs from %s", numToDelete, len(js), nameForLog)
sort.Sort(byJobStartTime(js))
for i := 0; i < numToDelete; i++ {
glog.V(4).Infof("Removing job %s from %s", js[i].Name, nameForLog)
deleteJob(sj, &js[i], jc, pc, recorder, "history limit reached")
}
}
// syncOne reconciles a CronJob with a list of any Jobs that it created.
// All known jobs created by "sj" should be included in "js".
// The current time is passed in to facilitate testing.
// It has no receiver, to facilitate testing.
func syncOne(sj *batchv1beta1.CronJob, js []batchv1.Job, now time.Time, jc jobControlInterface, sjc sjControlInterface, pc podControlInterface, recorder record.EventRecorder) {
nameForLog := fmt.Sprintf("%s/%s", sj.Namespace, sj.Name)
childrenJobs := make(map[types.UID]bool)
for _, j := range js {
childrenJobs[j.ObjectMeta.UID] = true
found := inActiveList(*sj, j.ObjectMeta.UID)
if !found && !IsJobFinished(&j) {
recorder.Eventf(sj, v1.EventTypeWarning, "UnexpectedJob", "Saw a job that the controller did not create or forgot: %v", j.Name)
// We found an unfinished job that has us as the parent, but it is not in our Active list.
// This could happen if we crashed right after creating the Job and before updating the status,
// or if our jobs list is newer than our sj status after a relist, or if someone intentionally created
// a job that they wanted us to adopt.
// TODO: maybe handle the adoption case? Concurrency/suspend rules will not apply in that case, obviously, since we can't
// stop users from creating jobs if they have permission. It is assumed that if a
// user has permission to create a job within a namespace, then they have permission to make any scheduledJob
// in the same namespace "adopt" that job. ReplicaSets and their Pods work the same way.
// TBS: how to update sj.Status.LastScheduleTime if the adopted job is newer than any we knew about?
} else if found && IsJobFinished(&j) {
deleteFromActiveList(sj, j.ObjectMeta.UID)
// TODO: event to call out failure vs success.
recorder.Eventf(sj, v1.EventTypeNormal, "SawCompletedJob", "Saw completed job: %v", j.Name)
}
}
// Remove any job reference from the active list if the corresponding job does not exist any more.
// Otherwise, the cronjob may be stuck in active mode forever even though there is no matching
// job running.
for _, j := range sj.Status.Active {
if found := childrenJobs[j.UID]; !found {
recorder.Eventf(sj, v1.EventTypeNormal, "MissingJob", "Active job went missing: %v", j.Name)
deleteFromActiveList(sj, j.UID)
}
}
updatedSJ, err := sjc.UpdateStatus(sj)
if err != nil {
glog.Errorf("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err)
return
}
*sj = *updatedSJ
if sj.DeletionTimestamp != nil {
// The CronJob is being deleted.
// Don't do anything other than updating status.
return
}
if sj.Spec.Suspend != nil && *sj.Spec.Suspend {
glog.V(4).Infof("Not starting job for %s because it is suspended", nameForLog)
return
}
times, err := getRecentUnmetScheduleTimes(*sj, now)
if err != nil {
recorder.Eventf(sj, v1.EventTypeWarning, "FailedNeedsStart", "Cannot determine if job needs to be started: %v", err)
glog.Errorf("Cannot determine if %s needs to be started: %v", nameForLog, err)
return
}
// TODO: handle multiple unmet start times, from oldest to newest, updating status as needed.
if len(times) == 0 {
glog.V(4).Infof("No unmet start times for %s", nameForLog)
return
}
if len(times) > 1 {
glog.V(4).Infof("Multiple unmet start times for %s so only starting last one", nameForLog)
}
scheduledTime := times[len(times)-1]
tooLate := false
if sj.Spec.StartingDeadlineSeconds != nil {
tooLate = scheduledTime.Add(time.Second * time.Duration(*sj.Spec.StartingDeadlineSeconds)).Before(now)
}
if tooLate {
glog.V(4).Infof("Missed starting window for %s", nameForLog)
recorder.Eventf(sj, v1.EventTypeWarning, "MissSchedule", "Missed scheduled time to start a job: %s", scheduledTime.Format(time.RFC1123Z))
// TODO: Since we don't set LastScheduleTime when not scheduling, we are going to keep noticing
// the miss every cycle. In order to avoid sending multiple events, and to avoid processing
// the sj again and again, we could set a Status.LastMissedTime when we notice a miss.
// Then, when we call getRecentUnmetScheduleTimes, we can take max(creationTimestamp,
// Status.LastScheduleTime, Status.LastMissedTime), so that we won't generate
// an event the next time we process it, and so that the user looking at the
// status can easily see that there was a missed execution.
return
}
if sj.Spec.ConcurrencyPolicy == batchv1beta1.ForbidConcurrent && len(sj.Status.Active) > 0 {
// Regardless which source of information we use for the set of active jobs,
// there is some risk that we won't see an active job when there is one.
// (because we haven't seen the status update to the SJ or the created pod).
// So it is theoretically possible to have concurrency with Forbid.
// As long as the invocations are "far enough apart in time", this usually won't happen.
//
// TODO: for Forbid, we could use the same name for every execution, as a lock.
// With replace, we could use a name that is deterministic per execution time.
// But that would mean that you could not inspect prior successes or failures of Forbid jobs.
glog.V(4).Infof("Not starting job for %s because of prior execution still running and concurrency policy is Forbid", nameForLog)
return
}
if sj.Spec.ConcurrencyPolicy == batchv1beta1.ReplaceConcurrent {
for _, j := range sj.Status.Active {
// TODO: this should be replaced with server side job deletion
// currently this mimics JobReaper from pkg/kubectl/stop.go
glog.V(4).Infof("Deleting job %s of %s that was still running at next scheduled start time", j.Name, nameForLog)
job, err := jc.GetJob(j.Namespace, j.Name)
if err != nil {
recorder.Eventf(sj, v1.EventTypeWarning, "FailedGet", "Get job: %v", err)
return
}
if !deleteJob(sj, job, jc, pc, recorder, "") {
return
}
}
}
jobReq, err := getJobFromTemplate(sj, scheduledTime)
if err != nil {
glog.Errorf("Unable to make Job from template in %s: %v", nameForLog, err)
return
}
jobResp, err := jc.CreateJob(sj.Namespace, jobReq)
if err != nil {
recorder.Eventf(sj, v1.EventTypeWarning, "FailedCreate", "Error creating job: %v", err)
return
}
glog.V(4).Infof("Created Job %s for %s", jobResp.Name, nameForLog)
recorder.Eventf(sj, v1.EventTypeNormal, "SuccessfulCreate", "Created job %v", jobResp.Name)
// ------------------------------------------------------------------ //
// If this process restarts at this point (after posting a job, but
// before updating the status), then we might try to start the job again
// next time. Actually, if we relist the SJs and Jobs on the next
// iteration of syncAll, we might not see our own status update, and
// then post one again. So, we need to use the job name as a lock to
// prevent us from making the job twice (name the job with a hash of its
// scheduled time).
// Add the just-started job to the status list.
ref, err := getRef(jobResp)
if err != nil {
glog.V(2).Infof("Unable to make object reference for job for %s", nameForLog)
} else {
sj.Status.Active = append(sj.Status.Active, *ref)
}
sj.Status.LastScheduleTime = &metav1.Time{Time: scheduledTime}
if _, err := sjc.UpdateStatus(sj); err != nil {
glog.Infof("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err)
}
return
}
// deleteJob reaps a job, deleting the job, its pods, and the reference in the active list
func deleteJob(sj *batchv1beta1.CronJob, job *batchv1.Job, jc jobControlInterface,
pc podControlInterface, recorder record.EventRecorder, reason string) bool {
// TODO: this should be replaced with server side job deletion
// currently this mimics JobReaper from pkg/kubectl/stop.go
nameForLog := fmt.Sprintf("%s/%s", sj.Namespace, sj.Name)
// scale job down to 0
if *job.Spec.Parallelism != 0 {
zero := int32(0)
var err error
job.Spec.Parallelism = &zero
job, err = jc.UpdateJob(job.Namespace, job)
if err != nil {
recorder.Eventf(sj, v1.EventTypeWarning, "FailedUpdate", "Update job: %v", err)
return false
}
}
// remove all pods...
selector, _ := metav1.LabelSelectorAsSelector(job.Spec.Selector)
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := pc.ListPods(job.Namespace, options)
if err != nil {
recorder.Eventf(sj, v1.EventTypeWarning, "FailedList", "List job-pods: %v", err)
return false
}
errList := []error{}
for _, pod := range podList.Items {
glog.V(2).Infof("CronJob controller is deleting Pod %v/%v", pod.Namespace, pod.Name)
if err := pc.DeletePod(pod.Namespace, pod.Name); err != nil {
// ignores the error when the pod isn't found
if !errors.IsNotFound(err) {
errList = append(errList, err)
}
}
}
if len(errList) != 0 {
recorder.Eventf(sj, v1.EventTypeWarning, "FailedDelete", "Deleted job-pods: %v", utilerrors.NewAggregate(errList))
return false
}
// ... the job itself...
if err := jc.DeleteJob(job.Namespace, job.Name); err != nil {
recorder.Eventf(sj, v1.EventTypeWarning, "FailedDelete", "Deleted job: %v", err)
glog.Errorf("Error deleting job %s from %s: %v", job.Name, nameForLog, err)
return false
}
// ... and its reference from active list
deleteFromActiveList(sj, job.ObjectMeta.UID)
recorder.Eventf(sj, v1.EventTypeNormal, "SuccessfulDelete", "Deleted job %v", job.Name)
return true
}
func getRef(object runtime.Object) (*v1.ObjectReference, error) {
return ref.GetReference(scheme.Scheme, object)
}

View File

@ -1,782 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cronjob
import (
"errors"
"strconv"
"strings"
"testing"
"time"
batchv1 "k8s.io/api/batch/v1"
batchV1beta1 "k8s.io/api/batch/v1beta1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
// For the cronjob controller to do conversions.
_ "k8s.io/kubernetes/pkg/apis/batch/install"
_ "k8s.io/kubernetes/pkg/apis/core/install"
)
var (
// schedule is hourly on the hour
onTheHour string = "0 * * * ?"
errorSchedule string = "obvious error schedule"
)
func justBeforeTheHour() time.Time {
T1, err := time.Parse(time.RFC3339, "2016-05-19T09:59:00Z")
if err != nil {
panic("test setup error")
}
return T1
}
func topOfTheHour() time.Time {
T1, err := time.Parse(time.RFC3339, "2016-05-19T10:00:00Z")
if err != nil {
panic("test setup error")
}
return T1
}
func justAfterTheHour() time.Time {
T1, err := time.Parse(time.RFC3339, "2016-05-19T10:01:00Z")
if err != nil {
panic("test setup error")
}
return T1
}
func weekAfterTheHour() time.Time {
T1, err := time.Parse(time.RFC3339, "2016-05-26T10:00:00Z")
if err != nil {
panic("test setup error")
}
return T1
}
func justBeforeThePriorHour() time.Time {
T1, err := time.Parse(time.RFC3339, "2016-05-19T08:59:00Z")
if err != nil {
panic("test setup error")
}
return T1
}
func justAfterThePriorHour() time.Time {
T1, err := time.Parse(time.RFC3339, "2016-05-19T09:01:00Z")
if err != nil {
panic("test setup error")
}
return T1
}
func startTimeStringToTime(startTime string) time.Time {
T1, err := time.Parse(time.RFC3339, startTime)
if err != nil {
panic("test setup error")
}
return T1
}
// returns a cronJob with some fields filled in.
func cronJob() batchV1beta1.CronJob {
return batchV1beta1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: "mycronjob",
Namespace: "snazzycats",
UID: types.UID("1a2b3c"),
SelfLink: "/apis/batch/v1beta1/namespaces/snazzycats/cronjobs/mycronjob",
CreationTimestamp: metav1.Time{Time: justBeforeTheHour()},
},
Spec: batchV1beta1.CronJobSpec{
Schedule: "* * * * ?",
ConcurrencyPolicy: batchV1beta1.AllowConcurrent,
JobTemplate: batchV1beta1.JobTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"a": "b"},
Annotations: map[string]string{"x": "y"},
},
Spec: jobSpec(),
},
},
}
}
func jobSpec() batchv1.JobSpec {
one := int32(1)
return batchv1.JobSpec{
Parallelism: &one,
Completions: &one,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Image: "foo/bar"},
},
},
},
}
}
func newJob(UID string) batchv1.Job {
return batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
UID: types.UID(UID),
Name: "foobar",
Namespace: metav1.NamespaceDefault,
SelfLink: "/apis/batch/v1/namespaces/snazzycats/jobs/myjob",
},
Spec: jobSpec(),
}
}
var (
shortDead int64 = 10
mediumDead int64 = 2 * 60 * 60
longDead int64 = 1000000
noDead int64 = -12345
A batchV1beta1.ConcurrencyPolicy = batchV1beta1.AllowConcurrent
f batchV1beta1.ConcurrencyPolicy = batchV1beta1.ForbidConcurrent
R batchV1beta1.ConcurrencyPolicy = batchV1beta1.ReplaceConcurrent
T bool = true
F bool = false
)
func TestSyncOne_RunOrNot(t *testing.T) {
// Check expectations on deadline parameters
if shortDead/60/60 >= 1 {
t.Errorf("shortDead should be less than one hour")
}
if mediumDead/60/60 < 1 || mediumDead/60/60 >= 24 {
t.Errorf("mediumDead should be between one hour and one day")
}
if longDead/60/60/24 < 10 {
t.Errorf("longDead should be at least ten days")
}
testCases := map[string]struct {
// sj spec
concurrencyPolicy batchV1beta1.ConcurrencyPolicy
suspend bool
schedule string
deadline int64
// sj status
ranPreviously bool
stillActive bool
// environment
now time.Time
// expectations
expectCreate bool
expectDelete bool
expectActive int
expectedWarnings int
}{
"never ran, not valid schedule, A": {A, F, errorSchedule, noDead, F, F, justBeforeTheHour(), F, F, 0, 1},
"never ran, not valid schedule, F": {f, F, errorSchedule, noDead, F, F, justBeforeTheHour(), F, F, 0, 1},
"never ran, not valid schedule, R": {f, F, errorSchedule, noDead, F, F, justBeforeTheHour(), F, F, 0, 1},
"never ran, not time, A": {A, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, 0, 0},
"never ran, not time, F": {f, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, 0, 0},
"never ran, not time, R": {R, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, 0, 0},
"never ran, is time, A": {A, F, onTheHour, noDead, F, F, justAfterTheHour(), T, F, 1, 0},
"never ran, is time, F": {f, F, onTheHour, noDead, F, F, justAfterTheHour(), T, F, 1, 0},
"never ran, is time, R": {R, F, onTheHour, noDead, F, F, justAfterTheHour(), T, F, 1, 0},
"never ran, is time, suspended": {A, T, onTheHour, noDead, F, F, justAfterTheHour(), F, F, 0, 0},
"never ran, is time, past deadline": {A, F, onTheHour, shortDead, F, F, justAfterTheHour(), F, F, 0, 0},
"never ran, is time, not past deadline": {A, F, onTheHour, longDead, F, F, justAfterTheHour(), T, F, 1, 0},
"prev ran but done, not time, A": {A, F, onTheHour, noDead, T, F, justBeforeTheHour(), F, F, 0, 0},
"prev ran but done, not time, F": {f, F, onTheHour, noDead, T, F, justBeforeTheHour(), F, F, 0, 0},
"prev ran but done, not time, R": {R, F, onTheHour, noDead, T, F, justBeforeTheHour(), F, F, 0, 0},
"prev ran but done, is time, A": {A, F, onTheHour, noDead, T, F, justAfterTheHour(), T, F, 1, 0},
"prev ran but done, is time, F": {f, F, onTheHour, noDead, T, F, justAfterTheHour(), T, F, 1, 0},
"prev ran but done, is time, R": {R, F, onTheHour, noDead, T, F, justAfterTheHour(), T, F, 1, 0},
"prev ran but done, is time, suspended": {A, T, onTheHour, noDead, T, F, justAfterTheHour(), F, F, 0, 0},
"prev ran but done, is time, past deadline": {A, F, onTheHour, shortDead, T, F, justAfterTheHour(), F, F, 0, 0},
"prev ran but done, is time, not past deadline": {A, F, onTheHour, longDead, T, F, justAfterTheHour(), T, F, 1, 0},
"still active, not time, A": {A, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, F, 1, 0},
"still active, not time, F": {f, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, F, 1, 0},
"still active, not time, R": {R, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, F, 1, 0},
"still active, is time, A": {A, F, onTheHour, noDead, T, T, justAfterTheHour(), T, F, 2, 0},
"still active, is time, F": {f, F, onTheHour, noDead, T, T, justAfterTheHour(), F, F, 1, 0},
"still active, is time, R": {R, F, onTheHour, noDead, T, T, justAfterTheHour(), T, T, 1, 0},
"still active, is time, suspended": {A, T, onTheHour, noDead, T, T, justAfterTheHour(), F, F, 1, 0},
"still active, is time, past deadline": {A, F, onTheHour, shortDead, T, T, justAfterTheHour(), F, F, 1, 0},
"still active, is time, not past deadline": {A, F, onTheHour, longDead, T, T, justAfterTheHour(), T, F, 2, 0},
// Controller should fail to schedule these, as there are too many missed starting times
// and either no deadline or a too long deadline.
"prev ran but done, long overdue, not past deadline, A": {A, F, onTheHour, longDead, T, F, weekAfterTheHour(), F, F, 0, 1},
"prev ran but done, long overdue, not past deadline, R": {R, F, onTheHour, longDead, T, F, weekAfterTheHour(), F, F, 0, 1},
"prev ran but done, long overdue, not past deadline, F": {f, F, onTheHour, longDead, T, F, weekAfterTheHour(), F, F, 0, 1},
"prev ran but done, long overdue, no deadline, A": {A, F, onTheHour, noDead, T, F, weekAfterTheHour(), F, F, 0, 1},
"prev ran but done, long overdue, no deadline, R": {R, F, onTheHour, noDead, T, F, weekAfterTheHour(), F, F, 0, 1},
"prev ran but done, long overdue, no deadline, F": {f, F, onTheHour, noDead, T, F, weekAfterTheHour(), F, F, 0, 1},
"prev ran but done, long overdue, past medium deadline, A": {A, F, onTheHour, mediumDead, T, F, weekAfterTheHour(), T, F, 1, 0},
"prev ran but done, long overdue, past short deadline, A": {A, F, onTheHour, shortDead, T, F, weekAfterTheHour(), T, F, 1, 0},
"prev ran but done, long overdue, past medium deadline, R": {R, F, onTheHour, mediumDead, T, F, weekAfterTheHour(), T, F, 1, 0},
"prev ran but done, long overdue, past short deadline, R": {R, F, onTheHour, shortDead, T, F, weekAfterTheHour(), T, F, 1, 0},
"prev ran but done, long overdue, past medium deadline, F": {f, F, onTheHour, mediumDead, T, F, weekAfterTheHour(), T, F, 1, 0},
"prev ran but done, long overdue, past short deadline, F": {f, F, onTheHour, shortDead, T, F, weekAfterTheHour(), T, F, 1, 0},
}
for name, tc := range testCases {
sj := cronJob()
sj.Spec.ConcurrencyPolicy = tc.concurrencyPolicy
sj.Spec.Suspend = &tc.suspend
sj.Spec.Schedule = tc.schedule
if tc.deadline != noDead {
sj.Spec.StartingDeadlineSeconds = &tc.deadline
}
var (
job *batchv1.Job
err error
)
js := []batchv1.Job{}
if tc.ranPreviously {
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: justBeforeThePriorHour()}
sj.Status.LastScheduleTime = &metav1.Time{Time: justAfterThePriorHour()}
job, err = getJobFromTemplate(&sj, sj.Status.LastScheduleTime.Time)
if err != nil {
t.Fatalf("%s: nexpected error creating a job from template: %v", name, err)
}
job.UID = "1234"
job.Namespace = ""
if tc.stillActive {
sj.Status.Active = []v1.ObjectReference{{UID: job.UID}}
js = append(js, *job)
}
} else {
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: justBeforeTheHour()}
if tc.stillActive {
t.Errorf("%s: test setup error: this case makes no sense", name)
}
}
jc := &fakeJobControl{Job: job}
sjc := &fakeSJControl{}
pc := &fakePodControl{}
recorder := record.NewFakeRecorder(10)
syncOne(&sj, js, tc.now, jc, sjc, pc, recorder)
expectedCreates := 0
if tc.expectCreate {
expectedCreates = 1
}
if len(jc.Jobs) != expectedCreates {
t.Errorf("%s: expected %d job started, actually %v", name, expectedCreates, len(jc.Jobs))
}
for i := range jc.Jobs {
job := &jc.Jobs[i]
controllerRef := metav1.GetControllerOf(job)
if controllerRef == nil {
t.Errorf("%s: expected job to have ControllerRef: %#v", name, job)
} else {
if got, want := controllerRef.APIVersion, "batch/v1beta1"; got != want {
t.Errorf("%s: controllerRef.APIVersion = %q, want %q", name, got, want)
}
if got, want := controllerRef.Kind, "CronJob"; got != want {
t.Errorf("%s: controllerRef.Kind = %q, want %q", name, got, want)
}
if got, want := controllerRef.Name, sj.Name; got != want {
t.Errorf("%s: controllerRef.Name = %q, want %q", name, got, want)
}
if got, want := controllerRef.UID, sj.UID; got != want {
t.Errorf("%s: controllerRef.UID = %q, want %q", name, got, want)
}
if controllerRef.Controller == nil || *controllerRef.Controller != true {
t.Errorf("%s: controllerRef.Controller is not set to true", name)
}
}
}
expectedDeletes := 0
if tc.expectDelete {
expectedDeletes = 1
}
if len(jc.DeleteJobName) != expectedDeletes {
t.Errorf("%s: expected %d job deleted, actually %v", name, expectedDeletes, len(jc.DeleteJobName))
}
// Status update happens once when ranging through the job list, and again if a job is created.
expectUpdates := 1
expectedEvents := 0
if tc.expectCreate {
expectedEvents++
expectUpdates++
}
if tc.expectDelete {
expectedEvents++
}
expectedEvents += tc.expectedWarnings
if len(recorder.Events) != expectedEvents {
t.Errorf("%s: expected %d event, actually %v", name, expectedEvents, len(recorder.Events))
}
numWarnings := 0
for i := 1; i <= len(recorder.Events); i++ {
e := <-recorder.Events
if strings.HasPrefix(e, v1.EventTypeWarning) {
numWarnings++
}
}
if numWarnings != tc.expectedWarnings {
t.Errorf("%s: expected %d warnings, actually %v", name, tc.expectedWarnings, numWarnings)
}
if tc.expectActive != len(sjc.Updates[expectUpdates-1].Status.Active) {
t.Errorf("%s: expected Active size %d, got %d", name, tc.expectActive, len(sjc.Updates[expectUpdates-1].Status.Active))
}
}
}
type CleanupJobSpec struct {
StartTime string
IsFinished bool
IsSuccessful bool
ExpectDelete bool
IsStillInActiveList bool // only when IsFinished is set
}
func TestCleanupFinishedJobs_DeleteOrNot(t *testing.T) {
limitThree := int32(3)
limitTwo := int32(2)
limitOne := int32(1)
limitZero := int32(0)
// Starting times are assumed to be sorted by increasing start time
// in all the test cases
testCases := map[string]struct {
jobSpecs []CleanupJobSpec
now time.Time
successfulJobsHistoryLimit *int32
failedJobsHistoryLimit *int32
expectActive int
}{
"success. job limit reached": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, T, F},
{"2016-05-19T05:00:00Z", T, T, T, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", F, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), &limitTwo, &limitOne, 1},
"success. jobs not processed by Sync yet": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, T, F},
{"2016-05-19T05:00:00Z", T, T, T, T},
{"2016-05-19T06:00:00Z", T, T, F, T},
{"2016-05-19T07:00:00Z", T, T, F, T},
{"2016-05-19T08:00:00Z", F, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, T},
}, justBeforeTheHour(), &limitTwo, &limitOne, 4},
"failed job limit reached": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, F, T, F},
{"2016-05-19T05:00:00Z", T, F, T, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", T, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), &limitTwo, &limitTwo, 0},
"success. job limit set to zero": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, T, F},
{"2016-05-19T05:00:00Z", T, F, T, F},
{"2016-05-19T06:00:00Z", T, T, T, F},
{"2016-05-19T07:00:00Z", T, T, T, F},
{"2016-05-19T08:00:00Z", F, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), &limitZero, &limitOne, 1},
"failed job limit set to zero": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, F, F},
{"2016-05-19T05:00:00Z", T, F, T, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", F, F, F, F},
{"2016-05-19T09:00:00Z", T, F, T, F},
}, justBeforeTheHour(), &limitThree, &limitZero, 1},
"no limits reached": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, F, F},
{"2016-05-19T05:00:00Z", T, F, F, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", T, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), &limitThree, &limitThree, 0},
// This test case should trigger the short-circuit
"limits disabled": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, F, F},
{"2016-05-19T05:00:00Z", T, F, F, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", T, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), nil, nil, 0},
"success limit disabled": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, F, F},
{"2016-05-19T05:00:00Z", T, F, F, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", T, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), nil, &limitThree, 0},
"failure limit disabled": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, T, F, F},
{"2016-05-19T05:00:00Z", T, F, F, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", T, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), &limitThree, nil, 0},
"no limits reached because still active": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", F, F, F, F},
{"2016-05-19T05:00:00Z", F, F, F, F},
{"2016-05-19T06:00:00Z", F, F, F, F},
{"2016-05-19T07:00:00Z", F, F, F, F},
{"2016-05-19T08:00:00Z", F, F, F, F},
{"2016-05-19T09:00:00Z", F, F, F, F},
}, justBeforeTheHour(), &limitZero, &limitZero, 6},
"failed list pod err": {
[]CleanupJobSpec{
{"2016-05-19T04:00:00Z", T, F, F, F},
{"2016-05-19T05:00:00Z", T, F, F, F},
{"2016-05-19T06:00:00Z", T, T, F, F},
{"2016-05-19T07:00:00Z", T, T, F, F},
{"2016-05-19T08:00:00Z", T, F, F, F},
{"2016-05-19T09:00:00Z", T, F, F, F},
}, justBeforeTheHour(), &limitZero, &limitZero, 0},
}
for name, tc := range testCases {
sj := cronJob()
suspend := false
sj.Spec.ConcurrencyPolicy = f
sj.Spec.Suspend = &suspend
sj.Spec.Schedule = onTheHour
sj.Spec.SuccessfulJobsHistoryLimit = tc.successfulJobsHistoryLimit
sj.Spec.FailedJobsHistoryLimit = tc.failedJobsHistoryLimit
var (
job *batchv1.Job
err error
)
// Set consistent timestamps for the CronJob
if len(tc.jobSpecs) != 0 {
firstTime := startTimeStringToTime(tc.jobSpecs[0].StartTime)
lastTime := startTimeStringToTime(tc.jobSpecs[len(tc.jobSpecs)-1].StartTime)
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: firstTime}
sj.Status.LastScheduleTime = &metav1.Time{Time: lastTime}
} else {
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: justBeforeTheHour()}
}
// Create jobs
js := []batchv1.Job{}
jobsToDelete := sets.NewString()
sj.Status.Active = []v1.ObjectReference{}
for i, spec := range tc.jobSpecs {
job, err = getJobFromTemplate(&sj, startTimeStringToTime(spec.StartTime))
if err != nil {
t.Fatalf("%s: unexpected error creating a job from template: %v", name, err)
}
job.UID = types.UID(strconv.Itoa(i))
job.Namespace = ""
if spec.IsFinished {
var conditionType batchv1.JobConditionType
if spec.IsSuccessful {
conditionType = batchv1.JobComplete
} else {
conditionType = batchv1.JobFailed
}
condition := batchv1.JobCondition{Type: conditionType, Status: v1.ConditionTrue}
job.Status.Conditions = append(job.Status.Conditions, condition)
if spec.IsStillInActiveList {
sj.Status.Active = append(sj.Status.Active, v1.ObjectReference{UID: job.UID})
}
} else {
if spec.IsSuccessful || spec.IsStillInActiveList {
t.Errorf("%s: test setup error: this case makes no sense", name)
}
sj.Status.Active = append(sj.Status.Active, v1.ObjectReference{UID: job.UID})
}
js = append(js, *job)
if spec.ExpectDelete {
jobsToDelete.Insert(job.Name)
}
}
jc := &fakeJobControl{Job: job}
pc := &fakePodControl{}
sjc := &fakeSJControl{}
recorder := record.NewFakeRecorder(10)
if name == "failed list pod err" {
pc.Err = errors.New("fakePodControl err")
}
cleanupFinishedJobs(&sj, js, jc, sjc, pc, recorder)
// Check we have actually deleted the correct jobs
if len(jc.DeleteJobName) != len(jobsToDelete) {
t.Errorf("%s: expected %d job deleted, actually %d", name, len(jobsToDelete), len(jc.DeleteJobName))
} else {
jcDeleteJobName := sets.NewString(jc.DeleteJobName...)
if !jcDeleteJobName.Equal(jobsToDelete) {
t.Errorf("%s: expected jobs: %v deleted, actually: %v deleted", name, jobsToDelete, jcDeleteJobName)
}
}
// Check for events
expectedEvents := len(jobsToDelete)
if name == "failed list pod err" {
expectedEvents = len(tc.jobSpecs)
}
if len(recorder.Events) != expectedEvents {
t.Errorf("%s: expected %d event, actually %v", name, expectedEvents, len(recorder.Events))
}
// Check for jobs still in active list
numActive := 0
if len(sjc.Updates) != 0 {
numActive = len(sjc.Updates[len(sjc.Updates)-1].Status.Active)
}
if tc.expectActive != numActive {
t.Errorf("%s: expected Active size %d, got %d", name, tc.expectActive, numActive)
}
}
}
// TODO: simulation where the controller randomly doesn't run, and randomly has errors starting jobs or deleting jobs,
// but over time, all jobs run as expected (assuming Allow and no deadline).
// TestSyncOne_Status tests sj.UpdateStatus in syncOne
func TestSyncOne_Status(t *testing.T) {
finishedJob := newJob("1")
finishedJob.Status.Conditions = append(finishedJob.Status.Conditions, batchv1.JobCondition{Type: batchv1.JobComplete, Status: v1.ConditionTrue})
unexpectedJob := newJob("2")
missingJob := newJob("3")
testCases := map[string]struct {
// sj spec
concurrencyPolicy batchV1beta1.ConcurrencyPolicy
suspend bool
schedule string
deadline int64
// sj status
ranPreviously bool
hasFinishedJob bool
// environment
now time.Time
hasUnexpectedJob bool
hasMissingJob bool
beingDeleted bool
// expectations
expectCreate bool
expectDelete bool
}{
"never ran, not time, A": {A, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, F, F, F},
"never ran, not time, F": {f, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, F, F, F},
"never ran, not time, R": {R, F, onTheHour, noDead, F, F, justBeforeTheHour(), F, F, F, F, F},
"never ran, is time, A": {A, F, onTheHour, noDead, F, F, justAfterTheHour(), F, F, F, T, F},
"never ran, is time, F": {f, F, onTheHour, noDead, F, F, justAfterTheHour(), F, F, F, T, F},
"never ran, is time, R": {R, F, onTheHour, noDead, F, F, justAfterTheHour(), F, F, F, T, F},
"never ran, is time, deleting": {A, F, onTheHour, noDead, F, F, justAfterTheHour(), F, F, T, F, F},
"never ran, is time, suspended": {A, T, onTheHour, noDead, F, F, justAfterTheHour(), F, F, F, F, F},
"never ran, is time, past deadline": {A, F, onTheHour, shortDead, F, F, justAfterTheHour(), F, F, F, F, F},
"never ran, is time, not past deadline": {A, F, onTheHour, longDead, F, F, justAfterTheHour(), F, F, F, T, F},
"prev ran but done, not time, A": {A, F, onTheHour, noDead, T, F, justBeforeTheHour(), F, F, F, F, F},
"prev ran but done, not time, finished job, A": {A, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, F, F, F, F},
"prev ran but done, not time, unexpected job, A": {A, F, onTheHour, noDead, T, F, justBeforeTheHour(), T, F, F, F, F},
"prev ran but done, not time, missing job, A": {A, F, onTheHour, noDead, T, F, justBeforeTheHour(), F, T, F, F, F},
"prev ran but done, not time, missing job, unexpected job, A": {A, F, onTheHour, noDead, T, F, justBeforeTheHour(), T, T, F, F, F},
"prev ran but done, not time, finished job, unexpected job, A": {A, F, onTheHour, noDead, T, T, justBeforeTheHour(), T, F, F, F, F},
"prev ran but done, not time, finished job, missing job, A": {A, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, T, F, F, F},
"prev ran but done, not time, finished job, missing job, unexpected job, A": {A, F, onTheHour, noDead, T, T, justBeforeTheHour(), T, T, F, F, F},
"prev ran but done, not time, finished job, F": {f, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, F, F, F, F},
"prev ran but done, not time, missing job, F": {f, F, onTheHour, noDead, T, F, justBeforeTheHour(), F, T, F, F, F},
"prev ran but done, not time, finished job, missing job, F": {f, F, onTheHour, noDead, T, T, justBeforeTheHour(), F, T, F, F, F},
"prev ran but done, not time, unexpected job, R": {R, F, onTheHour, noDead, T, F, justBeforeTheHour(), T, F, F, F, F},
"prev ran but done, is time, A": {A, F, onTheHour, noDead, T, F, justAfterTheHour(), F, F, F, T, F},
"prev ran but done, is time, finished job, A": {A, F, onTheHour, noDead, T, T, justAfterTheHour(), F, F, F, T, F},
"prev ran but done, is time, unexpected job, A": {A, F, onTheHour, noDead, T, F, justAfterTheHour(), T, F, F, T, F},
"prev ran but done, is time, finished job, unexpected job, A": {A, F, onTheHour, noDead, T, T, justAfterTheHour(), T, F, F, T, F},
"prev ran but done, is time, F": {f, F, onTheHour, noDead, T, F, justAfterTheHour(), F, F, F, T, F},
"prev ran but done, is time, finished job, F": {f, F, onTheHour, noDead, T, T, justAfterTheHour(), F, F, F, T, F},
"prev ran but done, is time, unexpected job, F": {f, F, onTheHour, noDead, T, F, justAfterTheHour(), T, F, F, T, F},
"prev ran but done, is time, finished job, unexpected job, F": {f, F, onTheHour, noDead, T, T, justAfterTheHour(), T, F, F, T, F},
"prev ran but done, is time, R": {R, F, onTheHour, noDead, T, F, justAfterTheHour(), F, F, F, T, F},
"prev ran but done, is time, finished job, R": {R, F, onTheHour, noDead, T, T, justAfterTheHour(), F, F, F, T, F},
"prev ran but done, is time, unexpected job, R": {R, F, onTheHour, noDead, T, F, justAfterTheHour(), T, F, F, T, F},
"prev ran but done, is time, finished job, unexpected job, R": {R, F, onTheHour, noDead, T, T, justAfterTheHour(), T, F, F, T, F},
"prev ran but done, is time, deleting": {A, F, onTheHour, noDead, T, F, justAfterTheHour(), F, F, T, F, F},
"prev ran but done, is time, suspended": {A, T, onTheHour, noDead, T, F, justAfterTheHour(), F, F, F, F, F},
"prev ran but done, is time, finished job, suspended": {A, T, onTheHour, noDead, T, T, justAfterTheHour(), F, F, F, F, F},
"prev ran but done, is time, unexpected job, suspended": {A, T, onTheHour, noDead, T, F, justAfterTheHour(), T, F, F, F, F},
"prev ran but done, is time, finished job, unexpected job, suspended": {A, T, onTheHour, noDead, T, T, justAfterTheHour(), T, F, F, F, F},
"prev ran but done, is time, past deadline": {A, F, onTheHour, shortDead, T, F, justAfterTheHour(), F, F, F, F, F},
"prev ran but done, is time, finished job, past deadline": {A, F, onTheHour, shortDead, T, T, justAfterTheHour(), F, F, F, F, F},
"prev ran but done, is time, unexpected job, past deadline": {A, F, onTheHour, shortDead, T, F, justAfterTheHour(), T, F, F, F, F},
"prev ran but done, is time, finished job, unexpected job, past deadline": {A, F, onTheHour, shortDead, T, T, justAfterTheHour(), T, F, F, F, F},
"prev ran but done, is time, not past deadline": {A, F, onTheHour, longDead, T, F, justAfterTheHour(), F, F, F, T, F},
"prev ran but done, is time, finished job, not past deadline": {A, F, onTheHour, longDead, T, T, justAfterTheHour(), F, F, F, T, F},
"prev ran but done, is time, unexpected job, not past deadline": {A, F, onTheHour, longDead, T, F, justAfterTheHour(), T, F, F, T, F},
"prev ran but done, is time, finished job, unexpected job, not past deadline": {A, F, onTheHour, longDead, T, T, justAfterTheHour(), T, F, F, T, F},
}
for name, tc := range testCases {
// Setup the test
sj := cronJob()
sj.Spec.ConcurrencyPolicy = tc.concurrencyPolicy
sj.Spec.Suspend = &tc.suspend
sj.Spec.Schedule = tc.schedule
if tc.deadline != noDead {
sj.Spec.StartingDeadlineSeconds = &tc.deadline
}
if tc.ranPreviously {
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: justBeforeThePriorHour()}
sj.Status.LastScheduleTime = &metav1.Time{Time: justAfterThePriorHour()}
} else {
if tc.hasFinishedJob || tc.hasUnexpectedJob || tc.hasMissingJob {
t.Errorf("%s: test setup error: this case makes no sense", name)
}
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: justBeforeTheHour()}
}
jobs := []batchv1.Job{}
if tc.hasFinishedJob {
ref, err := getRef(&finishedJob)
if err != nil {
t.Errorf("%s: test setup error: failed to get job's ref: %v.", name, err)
}
sj.Status.Active = []v1.ObjectReference{*ref}
jobs = append(jobs, finishedJob)
}
if tc.hasUnexpectedJob {
jobs = append(jobs, unexpectedJob)
}
if tc.hasMissingJob {
ref, err := getRef(&missingJob)
if err != nil {
t.Errorf("%s: test setup error: failed to get job's ref: %v.", name, err)
}
sj.Status.Active = append(sj.Status.Active, *ref)
}
if tc.beingDeleted {
timestamp := metav1.NewTime(tc.now)
sj.DeletionTimestamp = &timestamp
}
jc := &fakeJobControl{}
sjc := &fakeSJControl{}
pc := &fakePodControl{}
recorder := record.NewFakeRecorder(10)
// Run the code
syncOne(&sj, jobs, tc.now, jc, sjc, pc, recorder)
// Status update happens once when ranging through the job list, and again if a job is created.
expectUpdates := 1
// Events happen when there are unexpected or finished jobs, and upon job creation or deletion.
expectedEvents := 0
if tc.expectCreate {
expectUpdates++
expectedEvents++
}
if tc.expectDelete {
expectedEvents++
}
if tc.hasFinishedJob {
expectedEvents++
}
if tc.hasUnexpectedJob {
expectedEvents++
}
if tc.hasMissingJob {
expectedEvents++
}
if len(recorder.Events) != expectedEvents {
t.Errorf("%s: expected %d event, actually %v: %#v", name, expectedEvents, len(recorder.Events), recorder.Events)
}
if expectUpdates != len(sjc.Updates) {
t.Errorf("%s: expected %d status updates, actually %d", name, expectUpdates, len(sjc.Updates))
}
if tc.hasFinishedJob && inActiveList(sjc.Updates[0], finishedJob.UID) {
t.Errorf("%s: expected finished job removed from active list, actually active list = %#v", name, sjc.Updates[0].Status.Active)
}
if tc.hasUnexpectedJob && inActiveList(sjc.Updates[0], unexpectedJob.UID) {
t.Errorf("%s: expected unexpected job not added to active list, actually active list = %#v", name, sjc.Updates[0].Status.Active)
}
if tc.hasMissingJob && inActiveList(sjc.Updates[0], missingJob.UID) {
t.Errorf("%s: expected missing job to be removed from active list, actually active list = %#v", name, sjc.Updates[0].Status.Active)
}
if tc.expectCreate && !sjc.Updates[1].Status.LastScheduleTime.Time.Equal(topOfTheHour()) {
t.Errorf("%s: expected LastScheduleTime updated to %s, got %s", name, topOfTheHour(), sjc.Updates[1].Status.LastScheduleTime)
}
}
}

View File

@ -1,18 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cronjob contains the controller for CronJob objects.
package cronjob

View File

@ -1,252 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cronjob
import (
"fmt"
"sync"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
)
// sjControlInterface is an interface that knows how to update CronJob status,
// created as an interface to allow testing.
type sjControlInterface interface {
UpdateStatus(sj *batchv1beta1.CronJob) (*batchv1beta1.CronJob, error)
}
// realSJControl is the default implementation of sjControlInterface.
type realSJControl struct {
KubeClient clientset.Interface
}
var _ sjControlInterface = &realSJControl{}
func (c *realSJControl) UpdateStatus(sj *batchv1beta1.CronJob) (*batchv1beta1.CronJob, error) {
return c.KubeClient.BatchV1beta1().CronJobs(sj.Namespace).UpdateStatus(sj)
}
// fakeSJControl is a fake implementation of sjControlInterface for testing.
type fakeSJControl struct {
Updates []batchv1beta1.CronJob
}
var _ sjControlInterface = &fakeSJControl{}
func (c *fakeSJControl) UpdateStatus(sj *batchv1beta1.CronJob) (*batchv1beta1.CronJob, error) {
c.Updates = append(c.Updates, *sj)
return sj, nil
}
// ------------------------------------------------------------------ //
// jobControlInterface is an interface that knows how to add or delete jobs,
// created as an interface to allow testing.
type jobControlInterface interface {
// GetJob retrieves a Job.
GetJob(namespace, name string) (*batchv1.Job, error)
// CreateJob creates new Jobs according to the spec.
CreateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error)
// UpdateJob updates a Job.
UpdateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error)
// PatchJob patches a Job.
PatchJob(namespace string, name string, pt types.PatchType, data []byte, subresources ...string) (*batchv1.Job, error)
// DeleteJob deletes the Job identified by name.
// TODO: delete by UID?
DeleteJob(namespace string, name string) error
}
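Because callers depend on these narrow interfaces rather than on clientset.Interface directly, tests can substitute the fakes defined below for the real implementations. A minimal sketch of the pattern (illustrative only; deleteJobByName is a hypothetical helper, not part of this file):
func deleteJobByName(jc jobControlInterface, namespace, name string) error {
	// In production jc is realJobControl; in tests it is fakeJobControl,
	// whose DeleteJobName slice records the call for assertions.
	if err := jc.DeleteJob(namespace, name); err != nil {
		return fmt.Errorf("deleting job %s/%s: %v", namespace, name, err)
	}
	return nil
}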
// realJobControl is the default implementation of jobControlInterface.
type realJobControl struct {
KubeClient clientset.Interface
Recorder record.EventRecorder
}
var _ jobControlInterface = &realJobControl{}
func copyLabels(template *batchv1beta1.JobTemplateSpec) labels.Set {
l := make(labels.Set)
for k, v := range template.Labels {
l[k] = v
}
return l
}
func copyAnnotations(template *batchv1beta1.JobTemplateSpec) labels.Set {
a := make(labels.Set)
for k, v := range template.Annotations {
a[k] = v
}
return a
}
func (r realJobControl) GetJob(namespace, name string) (*batchv1.Job, error) {
return r.KubeClient.BatchV1().Jobs(namespace).Get(name, metav1.GetOptions{})
}
func (r realJobControl) UpdateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) {
return r.KubeClient.BatchV1().Jobs(namespace).Update(job)
}
func (r realJobControl) PatchJob(namespace string, name string, pt types.PatchType, data []byte, subresources ...string) (*batchv1.Job, error) {
return r.KubeClient.BatchV1().Jobs(namespace).Patch(name, pt, data, subresources...)
}
func (r realJobControl) CreateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) {
return r.KubeClient.BatchV1().Jobs(namespace).Create(job)
}
func (r realJobControl) DeleteJob(namespace string, name string) error {
return r.KubeClient.BatchV1().Jobs(namespace).Delete(name, nil)
}
type fakeJobControl struct {
sync.Mutex
Job *batchv1.Job
Jobs []batchv1.Job
DeleteJobName []string
Err error
UpdateJobName []string
PatchJobName []string
Patches [][]byte
}
var _ jobControlInterface = &fakeJobControl{}
func (f *fakeJobControl) CreateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) {
f.Lock()
defer f.Unlock()
if f.Err != nil {
return nil, f.Err
}
job.SelfLink = fmt.Sprintf("/api/batch/v1/namespaces/%s/jobs/%s", namespace, job.Name)
f.Jobs = append(f.Jobs, *job)
job.UID = "test-uid"
return job, nil
}
func (f *fakeJobControl) GetJob(namespace, name string) (*batchv1.Job, error) {
f.Lock()
defer f.Unlock()
if f.Err != nil {
return nil, f.Err
}
return f.Job, nil
}
func (f *fakeJobControl) UpdateJob(namespace string, job *batchv1.Job) (*batchv1.Job, error) {
f.Lock()
defer f.Unlock()
if f.Err != nil {
return nil, f.Err
}
f.UpdateJobName = append(f.UpdateJobName, job.Name)
return job, nil
}
func (f *fakeJobControl) PatchJob(namespace string, name string, pt types.PatchType, data []byte, subresources ...string) (*batchv1.Job, error) {
f.Lock()
defer f.Unlock()
if f.Err != nil {
return nil, f.Err
}
f.PatchJobName = append(f.PatchJobName, name)
f.Patches = append(f.Patches, data)
// We don't have anything to return. Just return something non-nil.
return &batchv1.Job{}, nil
}
func (f *fakeJobControl) DeleteJob(namespace string, name string) error {
f.Lock()
defer f.Unlock()
if f.Err != nil {
return f.Err
}
f.DeleteJobName = append(f.DeleteJobName, name)
return nil
}
func (f *fakeJobControl) Clear() {
f.Lock()
defer f.Unlock()
f.DeleteJobName = []string{}
f.Jobs = []batchv1.Job{}
f.Err = nil
}
// ------------------------------------------------------------------ //
// podControlInterface is an interface that knows how to list or delete pods,
// created as an interface to allow testing.
type podControlInterface interface {
// ListPods lists pods.
ListPods(namespace string, opts metav1.ListOptions) (*v1.PodList, error)
// DeletePod deletes the pod identified by name.
// TODO: delete by UID?
DeletePod(namespace string, name string) error
}
// realPodControl is the default implementation of podControlInterface.
type realPodControl struct {
KubeClient clientset.Interface
Recorder record.EventRecorder
}
var _ podControlInterface = &realPodControl{}
func (r realPodControl) ListPods(namespace string, opts metav1.ListOptions) (*v1.PodList, error) {
return r.KubeClient.CoreV1().Pods(namespace).List(opts)
}
func (r realPodControl) DeletePod(namespace string, name string) error {
return r.KubeClient.CoreV1().Pods(namespace).Delete(name, nil)
}
type fakePodControl struct {
sync.Mutex
Pods []v1.Pod
DeletePodName []string
Err error
}
var _ podControlInterface = &fakePodControl{}
func (f *fakePodControl) ListPods(namespace string, opts metav1.ListOptions) (*v1.PodList, error) {
f.Lock()
defer f.Unlock()
if f.Err != nil {
return nil, f.Err
}
return &v1.PodList{Items: f.Pods}, nil
}
func (f *fakePodControl) DeletePod(namespace string, name string) error {
f.Lock()
defer f.Unlock()
if f.Err != nil {
return f.Err
}
f.DeletePodName = append(f.DeletePodName, name)
return nil
}

View File

@ -1,231 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cronjob
import (
"fmt"
"time"
"github.com/golang/glog"
"github.com/robfig/cron"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
ref "k8s.io/client-go/tools/reference"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)
// Utilities for dealing with Jobs and CronJobs and time.
func inActiveList(sj batchv1beta1.CronJob, uid types.UID) bool {
for _, j := range sj.Status.Active {
if j.UID == uid {
return true
}
}
return false
}
func deleteFromActiveList(sj *batchv1beta1.CronJob, uid types.UID) {
if sj == nil {
return
}
newActive := []v1.ObjectReference{}
for _, j := range sj.Status.Active {
if j.UID != uid {
newActive = append(newActive, j)
}
}
sj.Status.Active = newActive
}
// getParentUIDFromJob extracts the UID of the job's parent and reports whether it was found.
func getParentUIDFromJob(j batchv1.Job) (types.UID, bool) {
controllerRef := metav1.GetControllerOf(&j)
if controllerRef == nil {
return types.UID(""), false
}
if controllerRef.Kind != "CronJob" {
glog.V(4).Infof("Job with non-CronJob parent, name %s namespace %s", j.Name, j.Namespace)
return types.UID(""), false
}
return controllerRef.UID, true
}
// groupJobsByParent groups jobs into a map keyed by the job parent UID (e.g. scheduledJob).
// It has no receiver, to facilitate testing.
func groupJobsByParent(js []batchv1.Job) map[types.UID][]batchv1.Job {
jobsBySj := make(map[types.UID][]batchv1.Job)
for _, job := range js {
parentUID, found := getParentUIDFromJob(job)
if !found {
glog.V(4).Infof("Unable to get parent uid from job %s in namespace %s", job.Name, job.Namespace)
continue
}
jobsBySj[parentUID] = append(jobsBySj[parentUID], job)
}
return jobsBySj
}
// getRecentUnmetScheduleTimes returns a slice of times (from oldest to latest) at which a Job should have started but did not.
//
// If there are too many (>100) unstarted times, just give up and return an empty slice.
// If there were missed times prior to the last known start time, then those are not returned.
func getRecentUnmetScheduleTimes(sj batchv1beta1.CronJob, now time.Time) ([]time.Time, error) {
starts := []time.Time{}
sched, err := cron.ParseStandard(sj.Spec.Schedule)
if err != nil {
return starts, fmt.Errorf("Unparseable schedule: %s : %s", sj.Spec.Schedule, err)
}
var earliestTime time.Time
if sj.Status.LastScheduleTime != nil {
earliestTime = sj.Status.LastScheduleTime.Time
} else {
// If none found, then this is either a recently created scheduledJob,
// or the active/completed info was somehow lost (contract for status
// in kubernetes says it may need to be recreated), or that we have
// started a job, but have not noticed it yet (distributed systems can
// have arbitrary delays). In any case, use the creation time of the
// CronJob as last known start time.
earliestTime = sj.ObjectMeta.CreationTimestamp.Time
}
if sj.Spec.StartingDeadlineSeconds != nil {
// Controller is not going to schedule anything below this point
schedulingDeadline := now.Add(-time.Second * time.Duration(*sj.Spec.StartingDeadlineSeconds))
if schedulingDeadline.After(earliestTime) {
earliestTime = schedulingDeadline
}
}
if earliestTime.After(now) {
return []time.Time{}, nil
}
for t := sched.Next(earliestTime); !t.After(now); t = sched.Next(t) {
starts = append(starts, t)
// An object might miss several starts. For example, if
// controller gets wedged on friday at 5:01pm when everyone has
// gone home, and someone comes in on tuesday AM and discovers
// the problem and restarts the controller, then all the hourly
// jobs, more than 80 of them for one hourly scheduledJob, should
// all start running with no further intervention (if the scheduledJob
// allows concurrency and late starts).
//
// However, if there is a bug somewhere, or incorrect clock
// on controller's server or apiservers (for setting creationTimestamp)
// then there could be so many missed start times (it could be off
// by decades or more), that it would eat up all the CPU and memory
// of this controller. In that case, we want to not try to list
// all the missed start times.
//
// I've somewhat arbitrarily picked 100, as more than 80,
// but less than "lots".
if len(starts) > 100 {
// We can't get the most recent times so just return an empty slice
return []time.Time{}, fmt.Errorf("Too many missed start time (> 100). Set or decrease .spec.startingDeadlineSeconds or check clock skew.")
}
}
return starts, nil
}
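For reference, a standalone sketch of the same enumeration using github.com/robfig/cron directly (illustrative only, not part of this file):
sched, err := cron.ParseStandard("0 * * * ?") // hourly on the hour
if err != nil {
	panic(err) // example-style handling; the controller returns the error instead
}
last := time.Date(2016, 5, 19, 8, 0, 0, 0, time.UTC) // last known start time
now := time.Date(2016, 5, 19, 10, 5, 0, 0, time.UTC)
var missed []time.Time
for t := sched.Next(last); !t.After(now); t = sched.Next(t) {
	missed = append(missed, t)
}
// missed now holds 09:00 and 10:00, the two unmet start times.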
// getJobFromTemplate makes a Job from a CronJob
func getJobFromTemplate(sj *batchv1beta1.CronJob, scheduledTime time.Time) (*batchv1.Job, error) {
labels := copyLabels(&sj.Spec.JobTemplate)
annotations := copyAnnotations(&sj.Spec.JobTemplate)
// Job names for a given nominal start time are deterministic, to avoid creating the same job twice.
name := fmt.Sprintf("%s-%d", sj.Name, getTimeHash(scheduledTime))
job := &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
Annotations: annotations,
Name: name,
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(sj, controllerKind)},
},
}
if err := legacyscheme.Scheme.Convert(&sj.Spec.JobTemplate.Spec, &job.Spec, nil); err != nil {
return nil, fmt.Errorf("unable to convert job template: %v", err)
}
return job, nil
}
// getTimeHash returns the Unix epoch time of scheduledTime.
func getTimeHash(scheduledTime time.Time) int64 {
return scheduledTime.Unix()
}
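So for a CronJob named "mycronjob" with a nominal start of 2016-05-19T10:00:00Z, the generated Job name is deterministic (sketch, using the functions above):
scheduledTime := time.Date(2016, 5, 19, 10, 0, 0, 0, time.UTC)
name := fmt.Sprintf("%s-%d", "mycronjob", getTimeHash(scheduledTime))
// name == "mycronjob-1463652000"; a retried sync for the same nominal start
// produces the same name, so the duplicate create fails rather than
// launching a second Job.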
// makeCreatedByRefJson makes a JSON string with an object reference for use in the "created-by" annotation value.
func makeCreatedByRefJson(object runtime.Object) (string, error) {
createdByRef, err := ref.GetReference(legacyscheme.Scheme, object)
if err != nil {
return "", fmt.Errorf("unable to get controller reference: %v", err)
}
// TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients
// would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment.
// We need to consistently handle this case of annotation versioning.
codec := legacyscheme.Codecs.LegacyCodec(schema.GroupVersion{Group: v1.GroupName, Version: "v1"})
createdByRefJson, err := runtime.Encode(codec, &v1.SerializedReference{
Reference: *createdByRef,
})
if err != nil {
return "", fmt.Errorf("unable to serialize controller reference: %v", err)
}
return string(createdByRefJson), nil
}
func getFinishedStatus(j *batchv1.Job) (bool, batchv1.JobConditionType) {
for _, c := range j.Status.Conditions {
if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == v1.ConditionTrue {
return true, c.Type
}
}
return false, ""
}
func IsJobFinished(j *batchv1.Job) bool {
isFinished, _ := getFinishedStatus(j)
return isFinished
}
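A Job counts as finished once it carries a Complete or Failed condition with status True; for example (sketch):
job := &batchv1.Job{}
job.Status.Conditions = append(job.Status.Conditions,
	batchv1.JobCondition{Type: batchv1.JobComplete, Status: v1.ConditionTrue})
// IsJobFinished(job) == true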
// byJobStartTime sorts a list of jobs by start timestamp, using their names as a tie breaker.
type byJobStartTime []batchv1.Job
func (o byJobStartTime) Len() int { return len(o) }
func (o byJobStartTime) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o byJobStartTime) Less(i, j int) bool {
if o[j].Status.StartTime == nil {
return o[i].Status.StartTime != nil
}
if o[i].Status.StartTime.Equal(o[j].Status.StartTime) {
return o[i].Name < o[j].Name
}
return o[i].Status.StartTime.Before(o[j].Status.StartTime)
}
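Callers sort with the standard library (sketch, assuming "sort" is imported); entries without a start time order last:
sort.Sort(byJobStartTime(jobs)) // jobs is a []batchv1.Job populated elsewhere
// jobs[0] is now the earliest-started job.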

View File

@ -1,378 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cronjob
import (
"strings"
"testing"
"time"
batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
func boolptr(b bool) *bool { return &b }
func TestGetJobFromTemplate(t *testing.T) {
// getJobFromTemplate() needs to take the job template and copy the labels and annotations
// and other fields, and add a created-by reference.
var one int64 = 1
var no bool = false
sj := batchv1beta1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: "mycronjob",
Namespace: "snazzycats",
UID: types.UID("1a2b3c"),
SelfLink: "/apis/batch/v1/namespaces/snazzycats/jobs/mycronjob",
},
Spec: batchv1beta1.CronJobSpec{
Schedule: "* * * * ?",
ConcurrencyPolicy: batchv1beta1.AllowConcurrent,
JobTemplate: batchv1beta1.JobTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"a": "b"},
Annotations: map[string]string{"x": "y"},
},
Spec: batchv1.JobSpec{
ActiveDeadlineSeconds: &one,
ManualSelector: &no,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Image: "foo/bar"},
},
},
},
},
},
},
}
var job *batchv1.Job
job, err := getJobFromTemplate(&sj, time.Time{})
if err != nil {
t.Errorf("Did not expect error: %s", err)
}
if !strings.HasPrefix(job.ObjectMeta.Name, "mycronjob-") {
t.Errorf("Wrong Name")
}
if len(job.ObjectMeta.Labels) != 1 {
t.Errorf("Wrong number of labels")
}
if len(job.ObjectMeta.Annotations) != 1 {
t.Errorf("Wrong number of annotations")
}
}
func TestGetParentUIDFromJob(t *testing.T) {
j := &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: "foobar",
Namespace: metav1.NamespaceDefault,
},
Spec: batchv1.JobSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Image: "foo/bar"},
},
},
},
},
Status: batchv1.JobStatus{
Conditions: []batchv1.JobCondition{{
Type: batchv1.JobComplete,
Status: v1.ConditionTrue,
}},
},
}
{
// Case 1: No ControllerRef
_, found := getParentUIDFromJob(*j)
if found {
t.Errorf("Unexpectedly found uid")
}
}
{
// Case 2: Has ControllerRef
j.ObjectMeta.SetOwnerReferences([]metav1.OwnerReference{
{
Kind: "CronJob",
UID: types.UID("5ef034e0-1890-11e6-8935-42010af0003e"),
Controller: boolptr(true),
},
})
expectedUID := types.UID("5ef034e0-1890-11e6-8935-42010af0003e")
uid, found := getParentUIDFromJob(*j)
if !found {
t.Errorf("Unexpectedly did not find uid")
} else if uid != expectedUID {
t.Errorf("Wrong UID: %v", uid)
}
}
}
func TestGroupJobsByParent(t *testing.T) {
uid1 := types.UID("11111111-1111-1111-1111-111111111111")
uid2 := types.UID("22222222-2222-2222-2222-222222222222")
uid3 := types.UID("33333333-3333-3333-3333-333333333333")
ownerReference1 := metav1.OwnerReference{
Kind: "CronJob",
UID: uid1,
Controller: boolptr(true),
}
ownerReference2 := metav1.OwnerReference{
Kind: "CronJob",
UID: uid2,
Controller: boolptr(true),
}
ownerReference3 := metav1.OwnerReference{
Kind: "CronJob",
UID: uid3,
Controller: boolptr(true),
}
{
// Case 1: There are no jobs and scheduledJobs
js := []batchv1.Job{}
jobsBySj := groupJobsByParent(js)
if len(jobsBySj) != 0 {
t.Errorf("Wrong number of items in map")
}
}
{
// Case 2: there is one controller with one job it created.
js := []batchv1.Job{
{ObjectMeta: metav1.ObjectMeta{Name: "a", Namespace: "x", OwnerReferences: []metav1.OwnerReference{ownerReference1}}},
}
jobsBySj := groupJobsByParent(js)
if len(jobsBySj) != 1 {
t.Errorf("Wrong number of items in map")
}
jobList1, found := jobsBySj[uid1]
if !found {
t.Errorf("Key not found")
}
if len(jobList1) != 1 {
t.Errorf("Wrong number of items in map")
}
}
{
// Case 3: Two namespaces; one has two jobs from one controller, the other has 3 jobs from two controllers.
// There are also two jobs with no controller owner reference.
js := []batchv1.Job{
{ObjectMeta: metav1.ObjectMeta{Name: "a", Namespace: "x", OwnerReferences: []metav1.OwnerReference{ownerReference1}}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", Namespace: "x", OwnerReferences: []metav1.OwnerReference{ownerReference2}}},
{ObjectMeta: metav1.ObjectMeta{Name: "c", Namespace: "x", OwnerReferences: []metav1.OwnerReference{ownerReference1}}},
{ObjectMeta: metav1.ObjectMeta{Name: "d", Namespace: "x", OwnerReferences: []metav1.OwnerReference{}}},
{ObjectMeta: metav1.ObjectMeta{Name: "a", Namespace: "y", OwnerReferences: []metav1.OwnerReference{ownerReference3}}},
{ObjectMeta: metav1.ObjectMeta{Name: "b", Namespace: "y", OwnerReferences: []metav1.OwnerReference{ownerReference3}}},
{ObjectMeta: metav1.ObjectMeta{Name: "d", Namespace: "y", OwnerReferences: []metav1.OwnerReference{}}},
}
jobsBySj := groupJobsByParent(js)
if len(jobsBySj) != 3 {
t.Errorf("Wrong number of items in map")
}
jobList1, found := jobsBySj[uid1]
if !found {
t.Errorf("Key not found")
}
if len(jobList1) != 2 {
t.Errorf("Wrong number of items in map")
}
jobList2, found := jobsBySj[uid2]
if !found {
t.Errorf("Key not found")
}
if len(jobList2) != 1 {
t.Errorf("Wrong number of items in map")
}
jobList3, found := jobsBySj[uid3]
if !found {
t.Errorf("Key not found")
}
if len(jobList3) != 2 {
t.Errorf("Wrong number of items in map")
}
}
}
func TestGetRecentUnmetScheduleTimes(t *testing.T) {
// schedule is hourly on the hour
schedule := "0 * * * ?"
// T1 is a scheduled start time of that schedule
T1, err := time.Parse(time.RFC3339, "2016-05-19T10:00:00Z")
if err != nil {
t.Errorf("test setup error: %v", err)
}
// T2 is a scheduled start time of that schedule after T1
T2, err := time.Parse(time.RFC3339, "2016-05-19T11:00:00Z")
if err != nil {
t.Errorf("test setup error: %v", err)
}
sj := batchv1beta1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: "mycronjob",
Namespace: metav1.NamespaceDefault,
UID: types.UID("1a2b3c"),
},
Spec: batchv1beta1.CronJobSpec{
Schedule: schedule,
ConcurrencyPolicy: batchv1beta1.AllowConcurrent,
JobTemplate: batchv1beta1.JobTemplateSpec{},
},
}
{
// Case 1: no known start times, and none needed yet.
// Creation time is before T1.
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-10 * time.Minute)}
// Current time is after the creation time, but before T1.
now := T1.Add(-7 * time.Minute)
times, err := getRecentUnmetScheduleTimes(sj, now)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(times) != 0 {
t.Errorf("expected no start times, got: %v", times)
}
}
{
// Case 2: no known start times, and one needed.
// Creation time is before T1.
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-10 * time.Minute)}
// Current time is after T1
now := T1.Add(2 * time.Second)
times, err := getRecentUnmetScheduleTimes(sj, now)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(times) != 1 {
t.Errorf("expected 1 start time, got: %v", times)
} else if !times[0].Equal(T1) {
t.Errorf("expected: %v, got: %v", T1, times[0])
}
}
{
// Case 3: known LastScheduleTime, no start needed.
// Creation time is before T1.
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-10 * time.Minute)}
// Status shows a start at the expected time.
sj.Status.LastScheduleTime = &metav1.Time{Time: T1}
// Current time is after T1
now := T1.Add(2 * time.Minute)
times, err := getRecentUnmetScheduleTimes(sj, now)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(times) != 0 {
t.Errorf("expected 0 start times, got: %v", times)
}
}
{
// Case 4: known LastScheduleTime, a start needed
// Creation time is before T1.
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-10 * time.Minute)}
// Status shows a start at the expected time.
sj.Status.LastScheduleTime = &metav1.Time{Time: T1}
// Current time is after T1 and after T2
now := T2.Add(5 * time.Minute)
times, err := getRecentUnmetScheduleTimes(sj, now)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(times) != 1 {
t.Errorf("expected 1 start times, got: %v", times)
} else if !times[0].Equal(T2) {
t.Errorf("expected: %v, got: %v", T1, times[0])
}
}
{
// Case 5: known LastScheduleTime, two starts needed
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-2 * time.Hour)}
sj.Status.LastScheduleTime = &metav1.Time{Time: T1.Add(-1 * time.Hour)}
// Current time is after T1 and after T2
now := T2.Add(5 * time.Minute)
times, err := getRecentUnmetScheduleTimes(sj, now)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(times) != 2 {
t.Errorf("expected 2 start times, got: %v", times)
} else {
if !times[0].Equal(T1) {
t.Errorf("expected: %v, got: %v", T1, times[0])
}
if !times[1].Equal(T2) {
t.Errorf("expected: %v, got: %v", T2, times[1])
}
}
}
{
// Case 6: now is way way ahead of last start time, and there is no deadline.
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-2 * time.Hour)}
sj.Status.LastScheduleTime = &metav1.Time{Time: T1.Add(-1 * time.Hour)}
now := T2.Add(10 * 24 * time.Hour)
_, err := getRecentUnmetScheduleTimes(sj, now)
if err == nil {
t.Errorf("expected an error")
}
}
{
// Case 7: now is way way ahead of last start time, but there is a short deadline.
sj.ObjectMeta.CreationTimestamp = metav1.Time{Time: T1.Add(-2 * time.Hour)}
sj.Status.LastScheduleTime = &metav1.Time{Time: T1.Add(-1 * time.Hour)}
now := T2.Add(10 * 24 * time.Hour)
// Deadline is short
deadline := int64(2 * 60 * 60)
sj.Spec.StartingDeadlineSeconds = &deadline
_, err := getRecentUnmetScheduleTimes(sj, now)
if err != nil {
t.Errorf("unexpected error")
}
}
}

View File

@ -1,110 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"daemon_controller.go",
"doc.go",
"update.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/daemon",
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/daemon/util:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/util/labels:go_default_library",
"//pkg/util/metrics:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/informers/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/integer:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"daemon_controller_test.go",
"update_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/securitycontext:go_default_library",
"//pkg/util/labels:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/controller/daemon/util:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,8 +0,0 @@
approvers:
- mikedanese
reviewers:
- janetkuo
- lukaszo
- mikedanese
- tnozicka
- k82cn

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,19 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package daemon contains logic for watching and synchronizing
// daemons.
package daemon // import "k8s.io/kubernetes/pkg/controller/daemon"

View File

@ -1,441 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package daemon
import (
"bytes"
"fmt"
"sort"
"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/util/rand"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/daemon/util"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
)
// rollingUpdate deletes old daemon set pods, making sure that no more than
// ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable pods are unavailable.
func (dsc *DaemonSetsController) rollingUpdate(ds *apps.DaemonSet, hash string) error {
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
if err != nil {
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
}
_, oldPods := dsc.getAllDaemonSetPods(ds, nodeToDaemonPods, hash)
maxUnavailable, numUnavailable, err := dsc.getUnavailableNumbers(ds, nodeToDaemonPods)
if err != nil {
return fmt.Errorf("Couldn't get unavailable numbers: %v", err)
}
oldAvailablePods, oldUnavailablePods := util.SplitByAvailablePods(ds.Spec.MinReadySeconds, oldPods)
// For oldPods, delete all pods that are not running.
var oldPodsToDelete []string
glog.V(4).Infof("Marking all unavailable old pods for deletion")
for _, pod := range oldUnavailablePods {
// Skip terminating pods. We won't delete them again
if pod.DeletionTimestamp != nil {
continue
}
glog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
oldPodsToDelete = append(oldPodsToDelete, pod.Name)
}
glog.V(4).Infof("Marking old pods for deletion")
for _, pod := range oldAvailablePods {
if numUnavailable >= maxUnavailable {
glog.V(4).Infof("Number of unavailable DaemonSet pods: %d, is equal to or exceeds allowed maximum: %d", numUnavailable, maxUnavailable)
break
}
glog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
oldPodsToDelete = append(oldPodsToDelete, pod.Name)
numUnavailable++
}
return dsc.syncNodes(ds, oldPodsToDelete, []string{}, hash)
}
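// exampleRollingUpdateBudget is an illustrative sketch (not part of the
// original file): it shows the deletion budget rollingUpdate effectively
// works with above. Given maxUnavailable and the number of pods that are
// already unavailable, at most this many additional old available pods may
// be marked for deletion in a single pass.
func exampleRollingUpdateBudget(maxUnavailable, numUnavailable int) int {
	budget := maxUnavailable - numUnavailable
	if budget < 0 {
		budget = 0
	}
	return budget
}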
// constructHistory finds all histories controlled by the given DaemonSet, and
// updates the current history's revision number, or creates the current history if needed.
// It also deduplicates the current history and adds missing unique labels to existing histories.
func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps.ControllerRevision, old []*apps.ControllerRevision, err error) {
var histories []*apps.ControllerRevision
var currentHistories []*apps.ControllerRevision
histories, err = dsc.controlledHistories(ds)
if err != nil {
return nil, nil, err
}
for _, history := range histories {
// Add the unique label if it's not already added to the history
// We use history name instead of computing hash, so that we don't need to worry about hash collision
if _, ok := history.Labels[apps.DefaultDaemonSetUniqueLabelKey]; !ok {
toUpdate := history.DeepCopy()
toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = toUpdate.Name
history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(toUpdate)
if err != nil {
return nil, nil, err
}
}
// Compare histories with ds to separate cur and old history
found := false
found, err = Match(ds, history)
if err != nil {
return nil, nil, err
}
if found {
currentHistories = append(currentHistories, history)
} else {
old = append(old, history)
}
}
currRevision := maxRevision(old) + 1
switch len(currentHistories) {
case 0:
// Create a new history if the current one isn't found
cur, err = dsc.snapshot(ds, currRevision)
if err != nil {
return nil, nil, err
}
default:
cur, err = dsc.dedupCurHistories(ds, currentHistories)
if err != nil {
return nil, nil, err
}
// Update revision number if necessary
if cur.Revision < currRevision {
toUpdate := cur.DeepCopy()
toUpdate.Revision = currRevision
_, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(toUpdate)
if err != nil {
return nil, nil, err
}
}
}
return cur, old, err
}
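// exampleNextRevision is an illustrative sketch (not part of the original
// file): constructHistory numbers the current history one past the highest
// revision found among the non-current histories, so revisions stay strictly
// increasing.
func exampleNextRevision(old []*apps.ControllerRevision) int64 {
	return maxRevision(old) + 1
}
// cleanupHistory prunes old ControllerRevisions down to
// ds.Spec.RevisionHistoryLimit, skipping revisions that are still referenced
// by live pods.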
func (dsc *DaemonSetsController) cleanupHistory(ds *apps.DaemonSet, old []*apps.ControllerRevision) error {
nodesToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
if err != nil {
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
}
toKeep := int(*ds.Spec.RevisionHistoryLimit)
toKill := len(old) - toKeep
if toKill <= 0 {
return nil
}
// Find all hashes of live pods
liveHashes := make(map[string]bool)
for _, pods := range nodesToDaemonPods {
for _, pod := range pods {
if hash := pod.Labels[apps.DefaultDaemonSetUniqueLabelKey]; len(hash) > 0 {
liveHashes[hash] = true
}
}
}
// Find all live history with the above hashes
liveHistory := make(map[string]bool)
for _, history := range old {
if hash := history.Labels[apps.DefaultDaemonSetUniqueLabelKey]; liveHashes[hash] {
liveHistory[history.Name] = true
}
}
// Clean up old history from smallest to highest revision (from oldest to newest)
sort.Sort(historiesByRevision(old))
for _, history := range old {
if toKill <= 0 {
break
}
if liveHistory[history.Name] {
continue
}
// Clean up
err := dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Delete(history.Name, nil)
if err != nil {
return err
}
toKill--
}
return nil
}
// maxRevision returns the max revision number of the given list of histories
func maxRevision(histories []*apps.ControllerRevision) int64 {
max := int64(0)
for _, history := range histories {
if history.Revision > max {
max = history.Revision
}
}
return max
}
func (dsc *DaemonSetsController) dedupCurHistories(ds *apps.DaemonSet, curHistories []*apps.ControllerRevision) (*apps.ControllerRevision, error) {
if len(curHistories) == 1 {
return curHistories[0], nil
}
var maxRevision int64
var keepCur *apps.ControllerRevision
for _, cur := range curHistories {
if cur.Revision >= maxRevision {
keepCur = cur
maxRevision = cur.Revision
}
}
// Clean up duplicates and relabel pods
for _, cur := range curHistories {
if cur.Name == keepCur.Name {
continue
}
// Relabel pods before dedup
pods, err := dsc.getDaemonPods(ds)
if err != nil {
return nil, err
}
for _, pod := range pods {
if pod.Labels[apps.DefaultDaemonSetUniqueLabelKey] != keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey] {
toUpdate := pod.DeepCopy()
if toUpdate.Labels == nil {
toUpdate.Labels = make(map[string]string)
}
toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
_, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(toUpdate)
if err != nil {
return nil, err
}
}
}
// Remove duplicates
err = dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Delete(cur.Name, nil)
if err != nil {
return nil, err
}
}
return keepCur, nil
}
// controlledHistories returns all ControllerRevisions controlled by the given DaemonSet.
// This also reconciles ControllerRef by adopting/orphaning.
// Note that returned histories are pointers to objects in the cache.
// If you want to modify one, you need to deep-copy it first.
func (dsc *DaemonSetsController) controlledHistories(ds *apps.DaemonSet) ([]*apps.ControllerRevision, error) {
selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
if err != nil {
return nil, err
}
// List all histories to include those that don't match the selector anymore
// but have a ControllerRef pointing to the controller.
histories, err := dsc.historyLister.List(labels.Everything())
if err != nil {
return nil, err
}
// If any adoptions are attempted, we should first recheck for deletion with
// an uncached quorum read sometime after listing the histories (see #42639).
canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
fresh, err := dsc.kubeClient.ExtensionsV1beta1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if fresh.UID != ds.UID {
return nil, fmt.Errorf("original DaemonSet %v/%v is gone: got uid %v, wanted %v", ds.Namespace, ds.Name, fresh.UID, ds.UID)
}
return fresh, nil
})
// Use ControllerRefManager to adopt/orphan as needed.
cm := controller.NewControllerRevisionControllerRefManager(dsc.crControl, ds, selector, controllerKind, canAdoptFunc)
return cm.ClaimControllerRevisions(histories)
}
// Match checks whether the given DaemonSet's template matches the template stored in the given history.
func Match(ds *apps.DaemonSet, history *apps.ControllerRevision) (bool, error) {
patch, err := getPatch(ds)
if err != nil {
return false, err
}
return bytes.Equal(patch, history.Data.Raw), nil
}
// getPatch returns a strategic merge patch that can be applied to restore a DaemonSet to a
// previous version. If the returned error is nil the patch is valid. The current state that we save is just the
// PodSpecTemplate. We can modify this later to encompass more state (or less) and remain compatible with previously
// recorded patches.
func getPatch(ds *apps.DaemonSet) ([]byte, error) {
dsBytes, err := json.Marshal(ds)
if err != nil {
return nil, err
}
var raw map[string]interface{}
err = json.Unmarshal(dsBytes, &raw)
if err != nil {
return nil, err
}
objCopy := make(map[string]interface{})
specCopy := make(map[string]interface{})
// Create a patch of the DaemonSet that replaces spec.template
spec := raw["spec"].(map[string]interface{})
template := spec["template"].(map[string]interface{})
specCopy["template"] = template
template["$patch"] = "replace"
objCopy["spec"] = specCopy
patch, err := json.Marshal(objCopy)
return patch, err
}
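// examplePatchShape is an illustrative sketch (not part of the original
// file): getPatch keeps only spec.template and marks it with "$patch":
// "replace", so the resulting patch looks like
// {"spec":{"template":{"$patch":"replace", ...pod template fields...}}}
// and, when applied, swaps the whole pod template back to the saved one.
func examplePatchShape(ds *apps.DaemonSet) (string, error) {
	patch, err := getPatch(ds)
	return string(patch), err
}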
func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*apps.ControllerRevision, error) {
patch, err := getPatch(ds)
if err != nil {
return nil, err
}
hash := fmt.Sprint(controller.ComputeHash(&ds.Spec.Template, ds.Status.CollisionCount))
name := ds.Name + "-" + rand.SafeEncodeString(hash)
history := &apps.ControllerRevision{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: ds.Namespace,
Labels: labelsutil.CloneAndAddLabel(ds.Spec.Template.Labels, apps.DefaultDaemonSetUniqueLabelKey, hash),
Annotations: ds.Annotations,
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(ds, controllerKind)},
},
Data: runtime.RawExtension{Raw: patch},
Revision: revision,
}
history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Create(history)
if errors.IsAlreadyExists(err) {
// TODO: Is it okay to get from historyLister?
existedHistory, getErr := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Get(name, metav1.GetOptions{})
if getErr != nil {
return nil, getErr
}
// Check if we already created it
done, err := Match(ds, existedHistory)
if err != nil {
return nil, err
}
if done {
return existedHistory, nil
}
// Handle name collisions between different histories
// TODO: Is it okay to get from dsLister?
currDS, getErr := dsc.kubeClient.ExtensionsV1beta1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
if getErr != nil {
return nil, getErr
}
if currDS.Status.CollisionCount == nil {
currDS.Status.CollisionCount = new(int32)
}
*currDS.Status.CollisionCount++
_, updateErr := dsc.kubeClient.ExtensionsV1beta1().DaemonSets(ds.Namespace).UpdateStatus(currDS)
if updateErr != nil {
return nil, updateErr
}
glog.V(2).Infof("Found a hash collision for DaemonSet %q - bumping collisionCount to %d to resolve it", ds.Name, *currDS.Status.CollisionCount)
return nil, err
}
return history, err
}
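// exampleRevisionName is an illustrative sketch (not part of the original
// file) of how snapshot derives a ControllerRevision name: the pod-template
// hash incorporates Status.CollisionCount, so bumping the count after a
// collision yields a different name on the next sync.
func exampleRevisionName(ds *apps.DaemonSet) string {
	hash := fmt.Sprint(controller.ComputeHash(&ds.Spec.Template, ds.Status.CollisionCount))
	return ds.Name + "-" + rand.SafeEncodeString(hash)
}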
func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *apps.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod, hash string) ([]*v1.Pod, []*v1.Pod) {
var newPods []*v1.Pod
var oldPods []*v1.Pod
for _, pods := range nodeToDaemonPods {
for _, pod := range pods {
// If the returned error is not nil we have a parse error.
// The controller handles this via the hash.
generation, err := util.GetTemplateGeneration(ds)
if err != nil {
generation = nil
}
if util.IsPodUpdated(pod, hash, generation) {
newPods = append(newPods, pod)
} else {
oldPods = append(oldPods, pod)
}
}
}
return newPods, oldPods
}
func (dsc *DaemonSetsController) getUnavailableNumbers(ds *apps.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) (int, int, error) {
glog.V(4).Infof("Getting unavailable numbers")
// TODO: get nodeList once in syncDaemonSet and pass it to other functions
nodeList, err := dsc.nodeLister.List(labels.Everything())
if err != nil {
return -1, -1, fmt.Errorf("couldn't get list of nodes during rolling update of daemon set %#v: %v", ds, err)
}
var numUnavailable, desiredNumberScheduled int
for i := range nodeList {
node := nodeList[i]
wantToRun, _, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
if err != nil {
return -1, -1, err
}
if !wantToRun {
continue
}
desiredNumberScheduled++
daemonPods, exists := nodeToDaemonPods[node.Name]
if !exists {
numUnavailable++
continue
}
available := false
for _, pod := range daemonPods {
// For the purposes of update, we ensure that the pod is both available and not terminating.
if podutil.IsPodAvailable(pod, ds.Spec.MinReadySeconds, metav1.Now()) && pod.DeletionTimestamp == nil {
available = true
break
}
}
if !available {
numUnavailable++
}
}
maxUnavailable, err := intstrutil.GetValueFromIntOrPercent(ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, desiredNumberScheduled, true)
if err != nil {
return -1, -1, fmt.Errorf("Invalid value for MaxUnavailable: %v", err)
}
glog.V(4).Infof(" DaemonSet %s/%s, maxUnavailable: %d, numUnavailable: %d", ds.Namespace, ds.Name, maxUnavailable, numUnavailable)
return maxUnavailable, numUnavailable, nil
}
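// exampleMaxUnavailable is an illustrative sketch (not part of the original
// file): getUnavailableNumbers resolves percentage values against the number
// of nodes that should run a daemon pod, rounding up. Here "50%" of 5
// desired pods yields 3.
func exampleMaxUnavailable() (int, error) {
	maxUnavailable := intstrutil.FromString("50%")
	return intstrutil.GetValueFromIntOrPercent(&maxUnavailable, 5, true)
}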
type historiesByRevision []*apps.ControllerRevision
func (h historiesByRevision) Len() int { return len(h) }
func (h historiesByRevision) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h historiesByRevision) Less(i, j int) bool {
return h[i].Revision < h[j].Revision
}


@@ -1,307 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package daemon
import (
"testing"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
func TestDaemonSetUpdatesPods(t *testing.T) {
ds := newDaemonSet("foo")
manager, podControl, _, err := newTestController(ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
maxUnavailable := 2
addNodes(manager.nodeStore, 0, 5, nil)
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
markPodsReady(podControl.podStore)
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
intStr := intstr.FromInt(maxUnavailable)
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
manager.dsStore.Update(ds)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
markPodsReady(podControl.podStore)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
markPodsReady(podControl.podStore)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1, 0)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
markPodsReady(podControl.podStore)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
clearExpectations(t, manager, ds, podControl)
}
func TestDaemonSetUpdatesWhenNewPosIsNotReady(t *testing.T) {
ds := newDaemonSet("foo")
manager, podControl, _, err := newTestController(ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
maxUnavailable := 3
addNodes(manager.nodeStore, 0, 5, nil)
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
markPodsReady(podControl.podStore)
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
intStr := intstr.FromInt(maxUnavailable)
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
manager.dsStore.Update(ds)
// new pods are not ready, so numUnavailable == maxUnavailable
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
clearExpectations(t, manager, ds, podControl)
}
func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) {
ds := newDaemonSet("foo")
manager, podControl, _, err := newTestController(ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
maxUnavailable := 3
addNodes(manager.nodeStore, 0, 5, nil)
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
intStr := intstr.FromInt(maxUnavailable)
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
manager.dsStore.Update(ds)
// all old pods are unavailable, so they should be removed
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 5, 0)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
clearExpectations(t, manager, ds, podControl)
}
func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) {
ds := newDaemonSet("foo")
manager, podControl, _, err := newTestController(ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
maxUnavailable := 3
addNodes(manager.nodeStore, 0, 5, nil)
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
intStr := intstr.FromInt(maxUnavailable)
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
manager.dsStore.Update(ds)
// template is not changed, so no pod should be removed
clearExpectations(t, manager, ds, podControl)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
clearExpectations(t, manager, ds, podControl)
}
func TestGetUnavailableNumbers(t *testing.T) {
cases := []struct {
name string
Manager *daemonSetsController
ds *apps.DaemonSet
nodeToPods map[string][]*v1.Pod
maxUnavailable int
numUnavailable int
Err error
}{
{
name: "No nodes",
Manager: func() *daemonSetsController {
manager, _, _, err := newTestController()
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
return manager
}(),
ds: func() *apps.DaemonSet {
ds := newDaemonSet("x")
intStr := intstr.FromInt(0)
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
return ds
}(),
nodeToPods: make(map[string][]*v1.Pod),
maxUnavailable: 0,
numUnavailable: 0,
},
{
name: "Two nodes with ready pods",
Manager: func() *daemonSetsController {
manager, _, _, err := newTestController()
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
addNodes(manager.nodeStore, 0, 2, nil)
return manager
}(),
ds: func() *apps.DaemonSet {
ds := newDaemonSet("x")
intStr := intstr.FromInt(1)
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
return ds
}(),
nodeToPods: func() map[string][]*v1.Pod {
mapping := make(map[string][]*v1.Pod)
pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
markPodReady(pod0)
markPodReady(pod1)
mapping["node-0"] = []*v1.Pod{pod0}
mapping["node-1"] = []*v1.Pod{pod1}
return mapping
}(),
maxUnavailable: 1,
numUnavailable: 0,
},
{
name: "Two nodes, one node without pods",
Manager: func() *daemonSetsController {
manager, _, _, err := newTestController()
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
addNodes(manager.nodeStore, 0, 2, nil)
return manager
}(),
ds: func() *apps.DaemonSet {
ds := newDaemonSet("x")
intStr := intstr.FromInt(0)
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
return ds
}(),
nodeToPods: func() map[string][]*v1.Pod {
mapping := make(map[string][]*v1.Pod)
pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
markPodReady(pod0)
mapping["node-0"] = []*v1.Pod{pod0}
return mapping
}(),
maxUnavailable: 0,
numUnavailable: 1,
},
{
name: "Two nodes with pods, MaxUnavailable in percents",
Manager: func() *daemonSetsController {
manager, _, _, err := newTestController()
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
addNodes(manager.nodeStore, 0, 2, nil)
return manager
}(),
ds: func() *apps.DaemonSet {
ds := newDaemonSet("x")
intStr := intstr.FromString("50%")
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
return ds
}(),
nodeToPods: func() map[string][]*v1.Pod {
mapping := make(map[string][]*v1.Pod)
pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
markPodReady(pod0)
markPodReady(pod1)
mapping["node-0"] = []*v1.Pod{pod0}
mapping["node-1"] = []*v1.Pod{pod1}
return mapping
}(),
maxUnavailable: 1,
numUnavailable: 0,
},
{
name: "Two nodes with pods, MaxUnavailable in percents, pod terminating",
Manager: func() *daemonSetsController {
manager, _, _, err := newTestController()
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
addNodes(manager.nodeStore, 0, 2, nil)
return manager
}(),
ds: func() *apps.DaemonSet {
ds := newDaemonSet("x")
intStr := intstr.FromString("50%")
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
return ds
}(),
nodeToPods: func() map[string][]*v1.Pod {
mapping := make(map[string][]*v1.Pod)
pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
now := metav1.Now()
markPodReady(pod0)
markPodReady(pod1)
pod1.DeletionTimestamp = &now
mapping["node-0"] = []*v1.Pod{pod0}
mapping["node-1"] = []*v1.Pod{pod1}
return mapping
}(),
maxUnavailable: 1,
numUnavailable: 1,
},
}
for _, c := range cases {
c.Manager.dsStore.Add(c.ds)
maxUnavailable, numUnavailable, err := c.Manager.getUnavailableNumbers(c.ds, c.nodeToPods)
if err != nil && c.Err != nil {
if c.Err != err {
t.Errorf("Test case: %s. Expected error: %v but got: %v", c.name, c.Err, err)
}
} else if err != nil {
t.Errorf("Test case: %s. Unexpected error: %v", c.name, err)
} else if maxUnavailable != c.maxUnavailable || numUnavailable != c.numUnavailable {
t.Errorf("Test case: %s. Wrong values. maxUnavailable: %d, expected: %d, numUnavailable: %d. expected: %d", c.name, maxUnavailable, c.maxUnavailable, numUnavailable, c.numUnavailable)
}
}
}


@@ -1,55 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = ["daemonset_util.go"],
importpath = "k8s.io/kubernetes/pkg/controller/daemon/util",
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = ["daemonset_util_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/api/testapi:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
],
)


@@ -1,250 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"strconv"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
)
// GetTemplateGeneration gets the template generation associated with a v1.DaemonSet by extracting it from the
// deprecated annotation. If no annotation is found, nil is returned. If the annotation is found but fails to
// parse, nil is returned along with an error. If the generation can be parsed from the annotation, a pointer
// to the parsed int64 value is returned.
func GetTemplateGeneration(ds *apps.DaemonSet) (*int64, error) {
annotation, found := ds.Annotations[apps.DeprecatedTemplateGeneration]
if !found {
return nil, nil
}
generation, err := strconv.ParseInt(annotation, 10, 64)
if err != nil {
return nil, err
}
return &generation, nil
}
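// exampleTemplateGeneration is an illustrative sketch (not part of the
// original file): a DaemonSet carrying the deprecated annotation with value
// "3" resolves to a pointer to int64(3); a DaemonSet without the annotation
// resolves to nil with no error.
func exampleTemplateGeneration() (*int64, error) {
	ds := &apps.DaemonSet{}
	ds.Annotations = map[string]string{apps.DeprecatedTemplateGeneration: "3"}
	return GetTemplateGeneration(ds)
}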
// CreatePodTemplate returns a copy of the provided template, adding a label that
// contains the templateGeneration (for backward compatibility) and a label that
// contains the hash of the provided template, and setting the default daemon tolerations.
func CreatePodTemplate(template v1.PodTemplateSpec, generation *int64, hash string) v1.PodTemplateSpec {
newTemplate := *template.DeepCopy()
// DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
// Add infinite toleration for taint notReady:NoExecute here
// to survive taint-based eviction enforced by NodeController
// when node turns not ready.
v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
Key: algorithm.TaintNodeNotReady,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
})
// DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
// Add infinite toleration for taint unreachable:NoExecute here
// to survive taint-based eviction enforced by NodeController
// when node turns unreachable.
v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
Key: algorithm.TaintNodeUnreachable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
})
// According to TaintNodesByCondition feature, all DaemonSet pods should tolerate
// MemoryPressure and DiskPressure taints, and the critical pods should tolerate
// OutOfDisk taint.
v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
Key: algorithm.TaintNodeDiskPressure,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
})
v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
Key: algorithm.TaintNodeMemoryPressure,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
})
// TODO(#48843) OutOfDisk taints will be removed in 1.10
if utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) &&
kubelettypes.IsCritical(newTemplate.Namespace, newTemplate.Annotations) {
v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
Key: algorithm.TaintNodeOutOfDisk,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
})
}
if newTemplate.ObjectMeta.Labels == nil {
newTemplate.ObjectMeta.Labels = make(map[string]string)
}
if generation != nil {
newTemplate.ObjectMeta.Labels[extensions.DaemonSetTemplateGenerationKey] = fmt.Sprint(*generation)
}
// TODO: do we need to validate if the DaemonSet is RollingUpdate or not?
if len(hash) > 0 {
newTemplate.ObjectMeta.Labels[extensions.DefaultDaemonSetUniqueLabelKey] = hash
}
return newTemplate
}
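// exampleCreatePodTemplate is an illustrative sketch (not part of the
// original file); the generation and hash are made-up example values. The
// returned copy carries both the legacy template-generation label and the
// hash label, while the input template is left unmodified.
func exampleCreatePodTemplate(template v1.PodTemplateSpec) v1.PodTemplateSpec {
	generation := int64(2)
	return CreatePodTemplate(template, &generation, "abc123")
}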
// IsPodUpdated checks if pod contains label value that either matches templateGeneration or hash
func IsPodUpdated(pod *v1.Pod, hash string, dsTemplateGeneration *int64) bool {
// Compare with hash to see if the pod is updated, need to maintain backward compatibility of templateGeneration
templateMatches := dsTemplateGeneration != nil &&
pod.Labels[extensions.DaemonSetTemplateGenerationKey] == fmt.Sprint(*dsTemplateGeneration)
hashMatches := len(hash) > 0 && pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey] == hash
return hashMatches || templateMatches
}
// SplitByAvailablePods splits provided daemon set pods by availability
func SplitByAvailablePods(minReadySeconds int32, pods []*v1.Pod) ([]*v1.Pod, []*v1.Pod) {
unavailablePods := []*v1.Pod{}
availablePods := []*v1.Pod{}
for _, pod := range pods {
if podutil.IsPodAvailable(pod, minReadySeconds, metav1.Now()) {
availablePods = append(availablePods, pod)
} else {
unavailablePods = append(unavailablePods, pod)
}
}
return availablePods, unavailablePods
}
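// exampleSplitByAvailablePods is an illustrative sketch (not part of the
// original file): availability is judged against the current time, so a pod
// that became ready less than ds.Spec.MinReadySeconds ago still lands in the
// unavailable slice.
func exampleSplitByAvailablePods(ds *apps.DaemonSet, pods []*v1.Pod) (available, unavailable []*v1.Pod) {
	available, unavailable = SplitByAvailablePods(ds.Spec.MinReadySeconds, pods)
	return available, unavailable
}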
// ReplaceDaemonSetPodNodeNameNodeAffinity replaces the RequiredDuringSchedulingIgnoredDuringExecution
// NodeAffinity of the given affinity with a new NodeAffinity that selects the given nodeName.
// Note that this function assumes that no NodeAffinity conflicts with the selected nodeName.
func ReplaceDaemonSetPodNodeNameNodeAffinity(affinity *v1.Affinity, nodename string) *v1.Affinity {
nodeSelReq := v1.NodeSelectorRequirement{
Key: algorithm.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{nodename},
}
nodeSelector := &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchFields: []v1.NodeSelectorRequirement{nodeSelReq},
},
},
}
if affinity == nil {
return &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: nodeSelector,
},
}
}
if affinity.NodeAffinity == nil {
affinity.NodeAffinity = &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: nodeSelector,
}
return affinity
}
nodeAffinity := affinity.NodeAffinity
if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = nodeSelector
return affinity
}
// Replace node selector with the new one.
nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = []v1.NodeSelectorTerm{
{
MatchFields: []v1.NodeSelectorRequirement{nodeSelReq},
},
}
return affinity
}
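// exampleNodeNameAffinity is an illustrative sketch (not part of the
// original file): starting from a nil affinity, the helper returns a
// required node affinity that pins the pod to "node-1" through the
// metadata.name field selector.
func exampleNodeNameAffinity() *v1.Affinity {
	return ReplaceDaemonSetPodNodeNameNodeAffinity(nil, "node-1")
}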
// AppendNoScheduleTolerationIfNotExist appends the unschedulable toleration to the given
// tolerations if it is not already present; otherwise, the tolerations are returned unchanged.
func AppendNoScheduleTolerationIfNotExist(tolerations []v1.Toleration) []v1.Toleration {
unschedulableToleration := v1.Toleration{
Key: algorithm.TaintNodeUnschedulable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
}
unschedulableTaintExist := false
for _, t := range tolerations {
if apiequality.Semantic.DeepEqual(t, unschedulableToleration) {
unschedulableTaintExist = true
break
}
}
if !unschedulableTaintExist {
tolerations = append(tolerations, unschedulableToleration)
}
return tolerations
}
// GetTargetNodeName gets the target node name of a DaemonSet pod. If `.spec.NodeName` is not
// empty, it is returned; otherwise, the node name is retrieved from the pod's NodeAffinity.
// An error is returned if the node name cannot be determined from either.
func GetTargetNodeName(pod *v1.Pod) (string, error) {
if len(pod.Spec.NodeName) != 0 {
return pod.Spec.NodeName, nil
}
// If ScheduleDaemonSetPods was enabled before, retrieve node name of unscheduled pods from NodeAffinity
if pod.Spec.Affinity == nil ||
pod.Spec.Affinity.NodeAffinity == nil ||
pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
return "", fmt.Errorf("no spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution for pod %s/%s",
pod.Namespace, pod.Name)
}
terms := pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
if len(terms) < 1 {
return "", fmt.Errorf("no nodeSelectorTerms in requiredDuringSchedulingIgnoredDuringExecution of pod %s/%s",
pod.Namespace, pod.Name)
}
for _, term := range terms {
for _, exp := range term.MatchFields {
if exp.Key == algorithm.NodeFieldSelectorKeyNodeName &&
exp.Operator == v1.NodeSelectorOpIn {
if len(exp.Values) != 1 {
return "", fmt.Errorf("the matchFields value of '%s' is not unique for pod %s/%s",
algorithm.NodeFieldSelectorKeyNodeName, pod.Namespace, pod.Name)
}
return exp.Values[0], nil
}
}
}
return "", fmt.Errorf("no node name found for pod %s/%s", pod.Namespace, pod.Name)
}
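// exampleGetTargetNodeName is an illustrative sketch (not part of the
// original file): a pod pinned by the node-name affinity above resolves to
// "node-1" even though spec.nodeName is empty.
func exampleGetTargetNodeName() (string, error) {
	pod := &v1.Pod{}
	pod.Spec.Affinity = ReplaceDaemonSetPodNodeNameNodeAffinity(nil, "node-1")
	return GetTargetNodeName(pod)
}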


@@ -1,599 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"reflect"
"testing"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/features"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
)
func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{APIVersion: testapi.Extensions.GroupVersion().String()},
ObjectMeta: metav1.ObjectMeta{
Labels: label,
Namespace: metav1.NamespaceDefault,
},
Spec: v1.PodSpec{
NodeName: nodeName,
Containers: []v1.Container{
{
Image: "foo/bar",
},
},
},
}
pod.Name = podName
return pod
}
func TestIsPodUpdated(t *testing.T) {
templateGeneration := int64Ptr(12345)
badGeneration := int64Ptr(12350)
hash := "55555"
labels := map[string]string{extensions.DaemonSetTemplateGenerationKey: fmt.Sprint(*templateGeneration), extensions.DefaultDaemonSetUniqueLabelKey: hash}
labelsNoHash := map[string]string{extensions.DaemonSetTemplateGenerationKey: fmt.Sprint(*templateGeneration)}
tests := []struct {
test string
templateGeneration *int64
pod *v1.Pod
hash string
isUpdated bool
}{
{
"templateGeneration and hash both match",
templateGeneration,
newPod("pod1", "node1", labels),
hash,
true,
},
{
"templateGeneration matches, hash doesn't",
templateGeneration,
newPod("pod1", "node1", labels),
hash + "123",
true,
},
{
"templateGeneration matches, no hash label, has hash",
templateGeneration,
newPod("pod1", "node1", labelsNoHash),
hash,
true,
},
{
"templateGeneration matches, no hash label, no hash",
templateGeneration,
newPod("pod1", "node1", labelsNoHash),
"",
true,
},
{
"templateGeneration matches, has hash label, no hash",
templateGeneration,
newPod("pod1", "node1", labels),
"",
true,
},
{
"templateGeneration doesn't match, hash does",
badGeneration,
newPod("pod1", "node1", labels),
hash,
true,
},
{
"templateGeneration and hash don't match",
badGeneration,
newPod("pod1", "node1", labels),
hash + "123",
false,
},
{
"empty labels, no hash",
templateGeneration,
newPod("pod1", "node1", map[string]string{}),
"",
false,
},
{
"empty labels",
templateGeneration,
newPod("pod1", "node1", map[string]string{}),
hash,
false,
},
{
"no labels",
templateGeneration,
newPod("pod1", "node1", nil),
hash,
false,
},
}
for _, test := range tests {
updated := IsPodUpdated(test.pod, test.hash, test.templateGeneration)
if updated != test.isUpdated {
t.Errorf("%s: IsPodUpdated returned wrong value. Expected %t, got %t", test.test, test.isUpdated, updated)
}
}
}
func TestCreatePodTemplate(t *testing.T) {
tests := []struct {
templateGeneration *int64
hash string
expectUniqueLabel bool
}{
{int64Ptr(1), "", false},
{int64Ptr(2), "3242341807", true},
}
for _, test := range tests {
podTemplateSpec := v1.PodTemplateSpec{}
newPodTemplate := CreatePodTemplate(podTemplateSpec, test.templateGeneration, test.hash)
val, exists := newPodTemplate.ObjectMeta.Labels[extensions.DaemonSetTemplateGenerationKey]
if !exists || val != fmt.Sprint(*test.templateGeneration) {
t.Errorf("Expected podTemplateSpec to have generation label value: %d, got: %s", *test.templateGeneration, val)
}
val, exists = newPodTemplate.ObjectMeta.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
if test.expectUniqueLabel && (!exists || val != test.hash) {
t.Errorf("Expected podTemplateSpec to have hash label value: %s, got: %s", test.hash, val)
}
if !test.expectUniqueLabel && exists {
t.Errorf("Expected podTemplateSpec to have no hash label, got: %s", val)
}
}
}
func int64Ptr(i int) *int64 {
li := int64(i)
return &li
}
func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
tests := []struct {
affinity *v1.Affinity
hostname string
expected *v1.Affinity
}{
{
affinity: nil,
hostname: "host_1",
expected: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchFields: []v1.NodeSelectorRequirement{
{
Key: algorithm.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_1"},
},
},
},
},
},
},
},
},
{
affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: kubeletapis.LabelHostname,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_1"},
},
},
},
},
},
},
},
hostname: "host_1",
expected: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchFields: []v1.NodeSelectorRequirement{
{
Key: algorithm.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_1"},
},
},
},
},
},
},
},
},
{
affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
{
Preference: v1.NodeSelectorTerm{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: kubeletapis.LabelHostname,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_1"},
},
},
},
},
},
},
},
hostname: "host_1",
expected: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
{
Preference: v1.NodeSelectorTerm{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: kubeletapis.LabelHostname,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_1"},
},
},
},
},
},
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchFields: []v1.NodeSelectorRequirement{
{
Key: algorithm.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_1"},
},
},
},
},
},
},
},
},
{
affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchFields: []v1.NodeSelectorRequirement{
{
Key: algorithm.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_1", "host_2"},
},
},
},
},
},
},
},
hostname: "host_1",
expected: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchFields: []v1.NodeSelectorRequirement{
{
Key: algorithm.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_1"},
},
},
},
},
},
},
},
},
{
affinity: nil,
hostname: "host_1",
expected: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchFields: []v1.NodeSelectorRequirement{
{
Key: algorithm.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_1"},
},
},
},
},
},
},
},
},
{
affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "hostname",
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_1"},
},
},
},
{
MatchFields: []v1.NodeSelectorRequirement{
{
Key: algorithm.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_2"},
},
},
},
},
},
},
},
hostname: "host_1",
expected: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchFields: []v1.NodeSelectorRequirement{
{
Key: algorithm.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_1"},
},
},
},
},
},
},
},
},
{
affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchFields: []v1.NodeSelectorRequirement{
{
Key: algorithm.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpNotIn,
Values: []string{"host_2"},
},
},
},
},
},
},
},
hostname: "host_1",
expected: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchFields: []v1.NodeSelectorRequirement{
{
Key: algorithm.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_1"},
},
},
},
},
},
},
},
},
{
affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchFields: []v1.NodeSelectorRequirement{
{
// NOTE: Only `metadata.name` is valid key in `MatchFields` in 1.11;
// added this case for compatibility: the feature works as normal
// when new Keys introduced.
Key: "metadata.foo",
Operator: v1.NodeSelectorOpIn,
Values: []string{"bar"},
},
},
},
},
},
},
},
hostname: "host_1",
expected: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchFields: []v1.NodeSelectorRequirement{
{
Key: algorithm.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_1"},
},
},
},
},
},
},
},
},
}
for i, test := range tests {
got := ReplaceDaemonSetPodNodeNameNodeAffinity(test.affinity, test.hostname)
if !reflect.DeepEqual(test.expected, got) {
t.Errorf("Failed to append NodeAffinity in case %d, got: %v, expected: %v",
i, got, test.expected)
}
}
}
func forEachFeatureGate(t *testing.T, tf func(t *testing.T), gates ...utilfeature.Feature) {
for _, fg := range gates {
func() {
enabled := utilfeature.DefaultFeatureGate.Enabled(fg)
defer func() {
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled))
}()
for _, f := range []bool{true, false} {
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f))
t.Run(fmt.Sprintf("%v (%t)", fg, f), tf)
}
}()
}
}
func TestGetTargetNodeName(t *testing.T) {
testFun := func(t *testing.T) {
tests := []struct {
pod *v1.Pod
nodeName string
expectedErr bool
}{
{
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod1",
Namespace: "default",
},
Spec: v1.PodSpec{
NodeName: "node-1",
},
},
nodeName: "node-1",
},
{
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod2",
Namespace: "default",
},
Spec: v1.PodSpec{
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchFields: []v1.NodeSelectorRequirement{
{
Key: algorithm.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"node-1"},
},
},
},
},
},
},
},
},
},
nodeName: "node-1",
},
{
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod3",
Namespace: "default",
},
Spec: v1.PodSpec{
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchFields: []v1.NodeSelectorRequirement{
{
Key: algorithm.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"node-1", "node-2"},
},
},
},
},
},
},
},
},
},
expectedErr: true,
},
{
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod4",
Namespace: "default",
},
Spec: v1.PodSpec{},
},
expectedErr: true,
},
}
for _, test := range tests {
got, err := GetTargetNodeName(test.pod)
if test.expectedErr != (err != nil) {
t.Errorf("Unexpected error, expectedErr: %v, err: %v", test.expectedErr, err)
} else if !test.expectedErr {
if test.nodeName != got {
t.Errorf("Failed to get target node name, got: %v, expected: %v", got, test.nodeName)
}
}
}
}
forEachFeatureGate(t, testFun, features.ScheduleDaemonSetPods)
}


@@ -1,105 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"deployment_controller.go",
"progress.go",
"recreate.go",
"rollback.go",
"rolling.go",
"sync.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/deployment",
deps = [
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/util/labels:go_default_library",
"//pkg/util/metrics:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/integer:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"deployment_controller_test.go",
"progress_test.go",
"recreate_test.go",
"rolling_test.go",
"sync_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/apps/install:go_default_library",
"//pkg/apis/authentication/install:go_default_library",
"//pkg/apis/authorization/install:go_default_library",
"//pkg/apis/autoscaling/install:go_default_library",
"//pkg/apis/batch/install:go_default_library",
"//pkg/apis/certificates/install:go_default_library",
"//pkg/apis/core/install:go_default_library",
"//pkg/apis/policy/install:go_default_library",
"//pkg/apis/rbac/install:go_default_library",
"//pkg/apis/settings/install:go_default_library",
"//pkg/apis/storage/install:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/controller/deployment/util:all-srcs",
],
tags = ["automanaged"],
)


@@ -1,11 +0,0 @@
approvers:
- janetkuo
- nikhiljindal
- kargakis
- mfojtik
reviewers:
- janetkuo
- nikhiljindal
- kargakis
- mfojtik
- tnozicka


@@ -1,647 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package deployment contains all the logic for handling Kubernetes Deployments.
// It implements a set of strategies (rolling, recreate) for deploying an application,
// the means to roll back to previous versions, proportional scaling for mitigating
// risk, cleanup policy, and other useful features of Deployments.
package deployment
import (
"fmt"
"reflect"
"time"
"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
appsinformers "k8s.io/client-go/informers/apps/v1"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
appslisters "k8s.io/client-go/listers/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/util/metrics"
)
const (
// maxRetries is the number of times a deployment will be retried before it is dropped out of the queue.
// With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the times
// a deployment is going to be requeued:
//
// 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
maxRetries = 15
)
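// exampleRequeueDelays is an illustrative sketch (not part of the original
// file): it reproduces the backoff sequence listed above for the default
// item rate limiter (base delay 5ms, factor 2), ending at roughly 82s for
// the fifteenth retry.
func exampleRequeueDelays() []time.Duration {
	delays := make([]time.Duration, 0, maxRetries)
	for i := uint(0); i < maxRetries; i++ {
		delays = append(delays, 5*time.Millisecond*time.Duration(1<<i))
	}
	return delays
}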
// controllerKind contains the schema.GroupVersionKind for this controller type.
var controllerKind = apps.SchemeGroupVersion.WithKind("Deployment")
// DeploymentController is responsible for synchronizing Deployment objects stored
// in the system with actual running replica sets and pods.
type DeploymentController struct {
// rsControl is used for adopting/releasing replica sets.
rsControl controller.RSControlInterface
client clientset.Interface
eventRecorder record.EventRecorder
// To allow injection of syncDeployment for testing.
syncHandler func(dKey string) error
// used for unit testing
enqueueDeployment func(deployment *apps.Deployment)
// dLister can list/get deployments from the shared informer's store
dLister appslisters.DeploymentLister
// rsLister can list/get replica sets from the shared informer's store
rsLister appslisters.ReplicaSetLister
// podLister can list/get pods from the shared informer's store
podLister corelisters.PodLister
// dListerSynced returns true if the Deployment store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
dListerSynced cache.InformerSynced
// rsListerSynced returns true if the ReplicaSet store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
rsListerSynced cache.InformerSynced
// podListerSynced returns true if the pod store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
podListerSynced cache.InformerSynced
// Deployments that need to be synced
queue workqueue.RateLimitingInterface
}
// NewDeploymentController creates a new DeploymentController.
func NewDeploymentController(dInformer appsinformers.DeploymentInformer, rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) (*DeploymentController, error) {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil {
if err := metrics.RegisterMetricAndTrackRateLimiterUsage("deployment_controller", client.CoreV1().RESTClient().GetRateLimiter()); err != nil {
return nil, err
}
}
dc := &DeploymentController{
client: client,
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "deployment-controller"}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "deployment"),
}
dc.rsControl = controller.RealRSControl{
KubeClient: client,
Recorder: dc.eventRecorder,
}
dInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: dc.addDeployment,
UpdateFunc: dc.updateDeployment,
// This will enter the sync loop and no-op, because the deployment has been deleted from the store.
DeleteFunc: dc.deleteDeployment,
})
rsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: dc.addReplicaSet,
UpdateFunc: dc.updateReplicaSet,
DeleteFunc: dc.deleteReplicaSet,
})
podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
DeleteFunc: dc.deletePod,
})
dc.syncHandler = dc.syncDeployment
dc.enqueueDeployment = dc.enqueue
dc.dLister = dInformer.Lister()
dc.rsLister = rsInformer.Lister()
dc.podLister = podInformer.Lister()
dc.dListerSynced = dInformer.Informer().HasSynced
dc.rsListerSynced = rsInformer.Informer().HasSynced
dc.podListerSynced = podInformer.Informer().HasSynced
return dc, nil
}
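// exampleRunDeploymentController is an illustrative sketch (not part of the
// original file) of wiring and running the controller; the informers,
// client, and stop channel are assumed to come from the caller, and the
// worker count of 2 is a made-up example value.
func exampleRunDeploymentController(dInformer appsinformers.DeploymentInformer, rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface, stopCh <-chan struct{}) error {
	dc, err := NewDeploymentController(dInformer, rsInformer, podInformer, client)
	if err != nil {
		return err
	}
	// Run blocks until stopCh is closed.
	dc.Run(2, stopCh)
	return nil
}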
// Run begins watching and syncing.
func (dc *DeploymentController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer dc.queue.ShutDown()
glog.Infof("Starting deployment controller")
defer glog.Infof("Shutting down deployment controller")
if !controller.WaitForCacheSync("deployment", stopCh, dc.dListerSynced, dc.rsListerSynced, dc.podListerSynced) {
return
}
for i := 0; i < workers; i++ {
go wait.Until(dc.worker, time.Second, stopCh)
}
<-stopCh
}
func (dc *DeploymentController) addDeployment(obj interface{}) {
d := obj.(*apps.Deployment)
glog.V(4).Infof("Adding deployment %s", d.Name)
dc.enqueueDeployment(d)
}
func (dc *DeploymentController) updateDeployment(old, cur interface{}) {
oldD := old.(*apps.Deployment)
curD := cur.(*apps.Deployment)
glog.V(4).Infof("Updating deployment %s", oldD.Name)
dc.enqueueDeployment(curD)
}
func (dc *DeploymentController) deleteDeployment(obj interface{}) {
d, ok := obj.(*apps.Deployment)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return
}
d, ok = tombstone.Obj.(*apps.Deployment)
if !ok {
utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a Deployment %#v", obj))
return
}
}
glog.V(4).Infof("Deleting deployment %s", d.Name)
dc.enqueueDeployment(d)
}
// addReplicaSet enqueues the deployment that manages a ReplicaSet when the ReplicaSet is created.
func (dc *DeploymentController) addReplicaSet(obj interface{}) {
rs := obj.(*apps.ReplicaSet)
if rs.DeletionTimestamp != nil {
// On a restart of the controller manager, it's possible for an object to
// show up in a state that is already pending deletion.
dc.deleteReplicaSet(rs)
return
}
// If it has a ControllerRef, that's all that matters.
if controllerRef := metav1.GetControllerOf(rs); controllerRef != nil {
d := dc.resolveControllerRef(rs.Namespace, controllerRef)
if d == nil {
return
}
glog.V(4).Infof("ReplicaSet %s added.", rs.Name)
dc.enqueueDeployment(d)
return
}
// Otherwise, it's an orphan. Get a list of all matching Deployments and sync
// them to see if anyone wants to adopt it.
ds := dc.getDeploymentsForReplicaSet(rs)
if len(ds) == 0 {
return
}
glog.V(4).Infof("Orphan ReplicaSet %s added.", rs.Name)
for _, d := range ds {
dc.enqueueDeployment(d)
}
}
// getDeploymentsForReplicaSet returns a list of Deployments that potentially
// match a ReplicaSet.
func (dc *DeploymentController) getDeploymentsForReplicaSet(rs *apps.ReplicaSet) []*apps.Deployment {
deployments, err := dc.dLister.GetDeploymentsForReplicaSet(rs)
if err != nil || len(deployments) == 0 {
return nil
}
// Because all ReplicaSets belonging to a deployment should have a unique label key,
// there should never be more than one deployment returned by the above method.
// If that happens we should probably dynamically repair the situation by ultimately
// trying to clean up one of the controllers; for now we log a warning and return them all.
if len(deployments) > 1 {
// ControllerRef will ensure we don't do anything crazy, but more than one
// item in this list nevertheless constitutes user error.
glog.V(4).Infof("user error! more than one deployment is selecting replica set %s/%s with labels: %#v, returning %s/%s",
rs.Namespace, rs.Name, rs.Labels, deployments[0].Namespace, deployments[0].Name)
}
return deployments
}
// updateReplicaSet figures out what deployment(s) manage a ReplicaSet when the ReplicaSet
// is updated and wakes them up. If the ControllerRef of the ReplicaSet has changed, we need to
// awaken both the old and new deployments. old and cur must be *apps.ReplicaSet
// types.
func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) {
curRS := cur.(*apps.ReplicaSet)
oldRS := old.(*apps.ReplicaSet)
if curRS.ResourceVersion == oldRS.ResourceVersion {
// Periodic resync will send update events for all known replica sets.
// Two different versions of the same replica set will always have different RVs.
return
}
curControllerRef := metav1.GetControllerOf(curRS)
oldControllerRef := metav1.GetControllerOf(oldRS)
controllerRefChanged := !reflect.DeepEqual(curControllerRef, oldControllerRef)
if controllerRefChanged && oldControllerRef != nil {
// The ControllerRef was changed. Sync the old controller, if any.
if d := dc.resolveControllerRef(oldRS.Namespace, oldControllerRef); d != nil {
dc.enqueueDeployment(d)
}
}
// If it has a ControllerRef, that's all that matters.
if curControllerRef != nil {
d := dc.resolveControllerRef(curRS.Namespace, curControllerRef)
if d == nil {
return
}
glog.V(4).Infof("ReplicaSet %s updated.", curRS.Name)
dc.enqueueDeployment(d)
return
}
// Otherwise, it's an orphan. If anything changed, sync matching controllers
// to see if anyone wants to adopt it now.
labelChanged := !reflect.DeepEqual(curRS.Labels, oldRS.Labels)
if labelChanged || controllerRefChanged {
ds := dc.getDeploymentsForReplicaSet(curRS)
if len(ds) == 0 {
return
}
glog.V(4).Infof("Orphan ReplicaSet %s updated.", curRS.Name)
for _, d := range ds {
dc.enqueueDeployment(d)
}
}
}
// deleteReplicaSet enqueues the deployment that manages a ReplicaSet when
// the ReplicaSet is deleted. obj could be an *apps.ReplicaSet, or
// a DeletedFinalStateUnknown marker item.
func (dc *DeploymentController) deleteReplicaSet(obj interface{}) {
rs, ok := obj.(*apps.ReplicaSet)
// When a delete is dropped, the relist will notice a ReplicaSet in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the ReplicaSet
// changed labels the new deployment will not be woken up till the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return
}
rs, ok = tombstone.Obj.(*apps.ReplicaSet)
if !ok {
utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a ReplicaSet %#v", obj))
return
}
}
controllerRef := metav1.GetControllerOf(rs)
if controllerRef == nil {
// No controller should care about orphans being deleted.
return
}
d := dc.resolveControllerRef(rs.Namespace, controllerRef)
if d == nil {
return
}
glog.V(4).Infof("ReplicaSet %s deleted.", rs.Name)
dc.enqueueDeployment(d)
}
// deletePod will enqueue a Recreate Deployment once all of its pods have stopped running.
func (dc *DeploymentController) deletePod(obj interface{}) {
pod, ok := obj.(*v1.Pod)
// When a delete is dropped, the relist will notice a pod in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the Pod
// changed labels the new deployment will not be woken up till the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return
}
pod, ok = tombstone.Obj.(*v1.Pod)
if !ok {
utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a pod %#v", obj))
return
}
}
glog.V(4).Infof("Pod %s deleted.", pod.Name)
if d := dc.getDeploymentForPod(pod); d != nil && d.Spec.Strategy.Type == apps.RecreateDeploymentStrategyType {
// Sync if this Deployment now has no more Pods.
rsList, err := util.ListReplicaSets(d, util.RsListFromClient(dc.client.AppsV1()))
if err != nil {
return
}
podMap, err := dc.getPodMapForDeployment(d, rsList)
if err != nil {
return
}
numPods := 0
for _, podList := range podMap {
numPods += len(podList.Items)
}
if numPods == 0 {
dc.enqueueDeployment(d)
}
}
}
func (dc *DeploymentController) enqueue(deployment *apps.Deployment) {
key, err := controller.KeyFunc(deployment)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", deployment, err))
return
}
dc.queue.Add(key)
}
func (dc *DeploymentController) enqueueRateLimited(deployment *apps.Deployment) {
key, err := controller.KeyFunc(deployment)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", deployment, err))
return
}
dc.queue.AddRateLimited(key)
}
// enqueueAfter will enqueue a deployment after the provided amount of time.
func (dc *DeploymentController) enqueueAfter(deployment *apps.Deployment, after time.Duration) {
key, err := controller.KeyFunc(deployment)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", deployment, err))
return
}
dc.queue.AddAfter(key, after)
}
// getDeploymentForPod returns the deployment managing the given Pod.
func (dc *DeploymentController) getDeploymentForPod(pod *v1.Pod) *apps.Deployment {
// Find the owning replica set
var rs *apps.ReplicaSet
var err error
controllerRef := metav1.GetControllerOf(pod)
if controllerRef == nil {
// No controller owns this Pod.
return nil
}
if controllerRef.Kind != apps.SchemeGroupVersion.WithKind("ReplicaSet").Kind {
// Not a pod owned by a replica set.
return nil
}
rs, err = dc.rsLister.ReplicaSets(pod.Namespace).Get(controllerRef.Name)
if err != nil || rs.UID != controllerRef.UID {
glog.V(4).Infof("Cannot get replicaset %q for pod %q: %v", controllerRef.Name, pod.Name, err)
return nil
}
// Now find the Deployment that owns that ReplicaSet.
controllerRef = metav1.GetControllerOf(rs)
if controllerRef == nil {
return nil
}
return dc.resolveControllerRef(rs.Namespace, controllerRef)
}
// resolveControllerRef returns the controller referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching controller
// of the correct Kind.
func (dc *DeploymentController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *apps.Deployment {
// We can't look up by UID, so look up by Name and then verify UID.
// Don't even try to look up by Name if it's the wrong Kind.
if controllerRef.Kind != controllerKind.Kind {
return nil
}
d, err := dc.dLister.Deployments(namespace).Get(controllerRef.Name)
if err != nil {
return nil
}
if d.UID != controllerRef.UID {
// The controller we found with this Name is not the same one that the
// ControllerRef points to.
return nil
}
return d
}
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (dc *DeploymentController) worker() {
for dc.processNextWorkItem() {
}
}
func (dc *DeploymentController) processNextWorkItem() bool {
key, quit := dc.queue.Get()
if quit {
return false
}
defer dc.queue.Done(key)
err := dc.syncHandler(key.(string))
dc.handleErr(err, key)
return true
}
func (dc *DeploymentController) handleErr(err error, key interface{}) {
if err == nil {
dc.queue.Forget(key)
return
}
if dc.queue.NumRequeues(key) < maxRetries {
glog.V(2).Infof("Error syncing deployment %v: %v", key, err)
dc.queue.AddRateLimited(key)
return
}
utilruntime.HandleError(err)
glog.V(2).Infof("Dropping deployment %q out of the queue: %v", key, err)
dc.queue.Forget(key)
}
// getReplicaSetsForDeployment uses ControllerRefManager to reconcile
// ControllerRef by adopting and orphaning.
// It returns the list of ReplicaSets that this Deployment should manage.
func (dc *DeploymentController) getReplicaSetsForDeployment(d *apps.Deployment) ([]*apps.ReplicaSet, error) {
// List all ReplicaSets to find those we own but that no longer match our
// selector. They will be orphaned by ClaimReplicaSets().
rsList, err := dc.rsLister.ReplicaSets(d.Namespace).List(labels.Everything())
if err != nil {
return nil, err
}
deploymentSelector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
if err != nil {
return nil, fmt.Errorf("deployment %s/%s has invalid label selector: %v", d.Namespace, d.Name, err)
}
// If any adoptions are attempted, we should first recheck for deletion with
// an uncached quorum read sometime after listing ReplicaSets (see #42639).
canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
fresh, err := dc.client.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if fresh.UID != d.UID {
return nil, fmt.Errorf("original Deployment %v/%v is gone: got uid %v, wanted %v", d.Namespace, d.Name, fresh.UID, d.UID)
}
return fresh, nil
})
cm := controller.NewReplicaSetControllerRefManager(dc.rsControl, d, deploymentSelector, controllerKind, canAdoptFunc)
return cm.ClaimReplicaSets(rsList)
}
// getPodMapForDeployment returns the Pods managed by a Deployment.
//
// It returns a map from ReplicaSet UID to a list of Pods controlled by that RS,
// according to the Pod's ControllerRef.
func (dc *DeploymentController) getPodMapForDeployment(d *apps.Deployment, rsList []*apps.ReplicaSet) (map[types.UID]*v1.PodList, error) {
// Get all Pods that potentially belong to this Deployment.
selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
if err != nil {
return nil, err
}
pods, err := dc.podLister.Pods(d.Namespace).List(selector)
if err != nil {
return nil, err
}
// Group Pods by their controller (if it's in rsList).
podMap := make(map[types.UID]*v1.PodList, len(rsList))
for _, rs := range rsList {
podMap[rs.UID] = &v1.PodList{}
}
for _, pod := range pods {
// Do not ignore inactive Pods because Recreate Deployments need to verify that no
// Pods from older versions are running before spinning up new Pods.
controllerRef := metav1.GetControllerOf(pod)
if controllerRef == nil {
continue
}
// Only append if we care about this UID.
if podList, ok := podMap[controllerRef.UID]; ok {
podList.Items = append(podList.Items, *pod)
}
}
return podMap, nil
}
// syncDeployment will sync the deployment with the given key.
// This function is not meant to be invoked concurrently with the same key.
func (dc *DeploymentController) syncDeployment(key string) error {
startTime := time.Now()
glog.V(4).Infof("Started syncing deployment %q (%v)", key, startTime)
defer func() {
glog.V(4).Infof("Finished syncing deployment %q (%v)", key, time.Since(startTime))
}()
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return err
}
deployment, err := dc.dLister.Deployments(namespace).Get(name)
if errors.IsNotFound(err) {
glog.V(2).Infof("Deployment %v has been deleted", key)
return nil
}
if err != nil {
return err
}
// Deep-copy otherwise we are mutating our cache.
// TODO: Deep-copy only when needed.
d := deployment.DeepCopy()
everything := metav1.LabelSelector{}
if reflect.DeepEqual(d.Spec.Selector, &everything) {
dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.")
if d.Status.ObservedGeneration < d.Generation {
d.Status.ObservedGeneration = d.Generation
dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
}
return nil
}
// List ReplicaSets owned by this Deployment, while reconciling ControllerRef
// through adoption/orphaning.
rsList, err := dc.getReplicaSetsForDeployment(d)
if err != nil {
return err
}
// List all Pods owned by this Deployment, grouped by their ReplicaSet.
// Current uses of the podMap are:
//
// * check if a Pod is labeled correctly with the pod-template-hash label.
// * check that no old Pods are running in the middle of Recreate Deployments.
podMap, err := dc.getPodMapForDeployment(d, rsList)
if err != nil {
return err
}
if d.DeletionTimestamp != nil {
return dc.syncStatusOnly(d, rsList, podMap)
}
// Update deployment conditions with an Unknown condition when pausing/resuming
// a deployment. In this way, we can be sure that we won't time out when a user
// resumes a Deployment with a set progressDeadlineSeconds.
if err = dc.checkPausedConditions(d); err != nil {
return err
}
if d.Spec.Paused {
return dc.sync(d, rsList, podMap)
}
// rollback is not re-entrant in case the underlying replica sets are updated with a new
// revision, so we should ensure that we don't proceed to update replica sets until
// the deployment has cleaned up its rollback spec in subsequent enqueues.
if getRollbackTo(d) != nil {
return dc.rollback(d, rsList, podMap)
}
scalingEvent, err := dc.isScalingEvent(d, rsList, podMap)
if err != nil {
return err
}
if scalingEvent {
return dc.sync(d, rsList, podMap)
}
switch d.Spec.Strategy.Type {
case apps.RecreateDeploymentStrategyType:
return dc.rolloutRecreate(d, rsList, podMap)
case apps.RollingUpdateDeploymentStrategyType:
return dc.rolloutRolling(d, rsList, podMap)
}
return fmt.Errorf("unexpected deployment strategy type: %s", d.Spec.Strategy.Type)
}
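// NOTE: the sketch below is not part of the original file. It shows the key
// round-trip that connects the enqueue helpers to syncDeployment: the workqueue
// carries plain "namespace/name" strings, which controller.KeyFunc produces and
// cache.SplitMetaNamespaceKey takes apart on the worker side. The function name
// is made up; it uses only identifiers already used in this file.
func keyRoundTripSketch(d *apps.Deployment) {
	key, err := controller.KeyFunc(d) // e.g. "default/foo"
	if err != nil {
		return
	}
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return
	}
	glog.V(4).Infof("key %q splits into namespace %q and name %q", key, namespace, name)
}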


@ -1,987 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"strconv"
"testing"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/record"
_ "k8s.io/kubernetes/pkg/apis/apps/install"
_ "k8s.io/kubernetes/pkg/apis/authentication/install"
_ "k8s.io/kubernetes/pkg/apis/authorization/install"
_ "k8s.io/kubernetes/pkg/apis/autoscaling/install"
_ "k8s.io/kubernetes/pkg/apis/batch/install"
_ "k8s.io/kubernetes/pkg/apis/certificates/install"
_ "k8s.io/kubernetes/pkg/apis/core/install"
_ "k8s.io/kubernetes/pkg/apis/policy/install"
_ "k8s.io/kubernetes/pkg/apis/rbac/install"
_ "k8s.io/kubernetes/pkg/apis/settings/install"
_ "k8s.io/kubernetes/pkg/apis/storage/install"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/deployment/util"
)
var (
alwaysReady = func() bool { return true }
noTimestamp = metav1.Time{}
)
func rs(name string, replicas int, selector map[string]string, timestamp metav1.Time) *apps.ReplicaSet {
return &apps.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: name,
CreationTimestamp: timestamp,
Namespace: metav1.NamespaceDefault,
},
Spec: apps.ReplicaSetSpec{
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: &metav1.LabelSelector{MatchLabels: selector},
Template: v1.PodTemplateSpec{},
},
}
}
func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map[string]string) *apps.ReplicaSet {
rs := rs(name, specReplicas, selector, noTimestamp)
rs.Status = apps.ReplicaSetStatus{
Replicas: int32(statusReplicas),
}
return rs
}
func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSurge, maxUnavailable *intstr.IntOrString, selector map[string]string) *apps.Deployment {
d := apps.Deployment{
TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: name,
Namespace: metav1.NamespaceDefault,
Annotations: make(map[string]string),
},
Spec: apps.DeploymentSpec{
Strategy: apps.DeploymentStrategy{
Type: apps.RollingUpdateDeploymentStrategyType,
RollingUpdate: &apps.RollingUpdateDeployment{
MaxUnavailable: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(),
MaxSurge: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(),
},
},
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: &metav1.LabelSelector{MatchLabels: selector},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: selector,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "foo/bar",
},
},
},
},
RevisionHistoryLimit: revisionHistoryLimit,
},
}
if maxSurge != nil {
d.Spec.Strategy.RollingUpdate.MaxSurge = maxSurge
}
if maxUnavailable != nil {
d.Spec.Strategy.RollingUpdate.MaxUnavailable = maxUnavailable
}
return &d
}
func newReplicaSet(d *apps.Deployment, name string, replicas int) *apps.ReplicaSet {
return &apps.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: name,
UID: uuid.NewUUID(),
Namespace: metav1.NamespaceDefault,
Labels: d.Spec.Selector.MatchLabels,
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(d, controllerKind)},
},
Spec: apps.ReplicaSetSpec{
Selector: d.Spec.Selector,
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Template: d.Spec.Template,
},
}
}
func getKey(d *apps.Deployment, t *testing.T) string {
if key, err := controller.KeyFunc(d); err != nil {
t.Errorf("Unexpected error getting key for deployment %v: %v", d.Name, err)
return ""
} else {
return key
}
}
type fixture struct {
t *testing.T
client *fake.Clientset
// Objects to put in the store.
dLister []*apps.Deployment
rsLister []*apps.ReplicaSet
podLister []*v1.Pod
// Actions expected to happen on the client. Objects from here are also
// preloaded into NewSimpleFake.
actions []core.Action
objects []runtime.Object
}
func (f *fixture) expectGetDeploymentAction(d *apps.Deployment) {
action := core.NewGetAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d.Name)
f.actions = append(f.actions, action)
}
func (f *fixture) expectUpdateDeploymentStatusAction(d *apps.Deployment) {
action := core.NewUpdateAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d)
action.Subresource = "status"
f.actions = append(f.actions, action)
}
func (f *fixture) expectUpdateDeploymentAction(d *apps.Deployment) {
action := core.NewUpdateAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d)
f.actions = append(f.actions, action)
}
func (f *fixture) expectCreateRSAction(rs *apps.ReplicaSet) {
f.actions = append(f.actions, core.NewCreateAction(schema.GroupVersionResource{Resource: "replicasets"}, rs.Namespace, rs))
}
func newFixture(t *testing.T) *fixture {
f := &fixture{}
f.t = t
f.objects = []runtime.Object{}
return f
}
func (f *fixture) newController() (*DeploymentController, informers.SharedInformerFactory, error) {
f.client = fake.NewSimpleClientset(f.objects...)
informers := informers.NewSharedInformerFactory(f.client, controller.NoResyncPeriodFunc())
c, err := NewDeploymentController(informers.Apps().V1().Deployments(), informers.Apps().V1().ReplicaSets(), informers.Core().V1().Pods(), f.client)
if err != nil {
return nil, nil, err
}
c.eventRecorder = &record.FakeRecorder{}
c.dListerSynced = alwaysReady
c.rsListerSynced = alwaysReady
c.podListerSynced = alwaysReady
for _, d := range f.dLister {
informers.Apps().V1().Deployments().Informer().GetIndexer().Add(d)
}
for _, rs := range f.rsLister {
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
}
for _, pod := range f.podLister {
informers.Core().V1().Pods().Informer().GetIndexer().Add(pod)
}
return c, informers, nil
}
func (f *fixture) runExpectError(deploymentName string, startInformers bool) {
f.run_(deploymentName, startInformers, true)
}
func (f *fixture) run(deploymentName string) {
f.run_(deploymentName, true, false)
}
func (f *fixture) run_(deploymentName string, startInformers bool, expectError bool) {
c, informers, err := f.newController()
if err != nil {
f.t.Fatalf("error creating Deployment controller: %v", err)
}
if startInformers {
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
}
err = c.syncDeployment(deploymentName)
if !expectError && err != nil {
f.t.Errorf("error syncing deployment: %v", err)
} else if expectError && err == nil {
f.t.Error("expected error syncing deployment, got nil")
}
actions := filterInformerActions(f.client.Actions())
for i, action := range actions {
if len(f.actions) < i+1 {
f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.actions), actions[i:])
break
}
expectedAction := f.actions[i]
if !(expectedAction.Matches(action.GetVerb(), action.GetResource().Resource) && action.GetSubresource() == expectedAction.GetSubresource()) {
f.t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expectedAction, action)
continue
}
}
if len(f.actions) > len(actions) {
f.t.Errorf("%d additional expected actions:%+v", len(f.actions)-len(actions), f.actions[len(actions):])
}
}
func filterInformerActions(actions []core.Action) []core.Action {
ret := []core.Action{}
for _, action := range actions {
if len(action.GetNamespace()) == 0 &&
(action.Matches("list", "pods") ||
action.Matches("list", "deployments") ||
action.Matches("list", "replicasets") ||
action.Matches("watch", "pods") ||
action.Matches("watch", "deployments") ||
action.Matches("watch", "replicasets")) {
continue
}
ret = append(ret, action)
}
return ret
}
func TestSyncDeploymentCreatesReplicaSet(t *testing.T) {
f := newFixture(t)
d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
f.dLister = append(f.dLister, d)
f.objects = append(f.objects, d)
rs := newReplicaSet(d, "deploymentrs-4186632231", 1)
f.expectCreateRSAction(rs)
f.expectUpdateDeploymentStatusAction(d)
f.expectUpdateDeploymentStatusAction(d)
f.run(getKey(d, t))
}
func TestSyncDeploymentDontDoAnythingDuringDeletion(t *testing.T) {
f := newFixture(t)
d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
now := metav1.Now()
d.DeletionTimestamp = &now
f.dLister = append(f.dLister, d)
f.objects = append(f.objects, d)
f.expectUpdateDeploymentStatusAction(d)
f.run(getKey(d, t))
}
func TestSyncDeploymentDeletionRace(t *testing.T) {
f := newFixture(t)
d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
d2 := *d
// Lister (cache) says NOT deleted.
f.dLister = append(f.dLister, d)
// Bare client says it IS deleted. This should be presumed more up-to-date.
now := metav1.Now()
d2.DeletionTimestamp = &now
f.objects = append(f.objects, &d2)
// The recheck is only triggered if a matching orphan exists.
rs := newReplicaSet(d, "rs1", 1)
rs.OwnerReferences = nil
f.objects = append(f.objects, rs)
f.rsLister = append(f.rsLister, rs)
// Expect to only recheck DeletionTimestamp.
f.expectGetDeploymentAction(d)
// Sync should fail and requeue to let cache catch up.
// Don't start informers, since we don't want cache to catch up for this test.
f.runExpectError(getKey(d, t), false)
}
// issue: https://github.com/kubernetes/kubernetes/issues/23218
func TestDontSyncDeploymentsWithEmptyPodSelector(t *testing.T) {
f := newFixture(t)
d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
d.Spec.Selector = &metav1.LabelSelector{}
f.dLister = append(f.dLister, d)
f.objects = append(f.objects, d)
// Normally there should be a status update to sync observedGeneration but the fake
// deployment has no generation set so there is no action happening here.
f.run(getKey(d, t))
}
func TestReentrantRollback(t *testing.T) {
f := newFixture(t)
d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
d.Annotations = map[string]string{util.RevisionAnnotation: "2"}
setRollbackTo(d, &extensions.RollbackConfig{Revision: 0})
f.dLister = append(f.dLister, d)
rs1 := newReplicaSet(d, "deploymentrs-old", 0)
rs1.Annotations = map[string]string{util.RevisionAnnotation: "1"}
one := int64(1)
rs1.Spec.Template.Spec.TerminationGracePeriodSeconds = &one
rs1.Spec.Selector.MatchLabels[apps.DefaultDeploymentUniqueLabelKey] = "hash"
rs2 := newReplicaSet(d, "deploymentrs-new", 1)
rs2.Annotations = map[string]string{util.RevisionAnnotation: "2"}
rs2.Spec.Selector.MatchLabels[apps.DefaultDeploymentUniqueLabelKey] = "hash"
f.rsLister = append(f.rsLister, rs1, rs2)
f.objects = append(f.objects, d, rs1, rs2)
// Rollback is done here
f.expectUpdateDeploymentAction(d)
// Expect no update on replica sets though
f.run(getKey(d, t))
}
// TestPodDeletionEnqueuesRecreateDeployment ensures that the deletion of a pod
// will requeue a Recreate deployment iff there is no other pod returned from the
// client.
func TestPodDeletionEnqueuesRecreateDeployment(t *testing.T) {
f := newFixture(t)
foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
foo.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType
rs := newReplicaSet(foo, "foo-1", 1)
pod := generatePodFromRS(rs)
f.dLister = append(f.dLister, foo)
f.rsLister = append(f.rsLister, rs)
f.objects = append(f.objects, foo, rs)
c, _, err := f.newController()
if err != nil {
t.Fatalf("error creating Deployment controller: %v", err)
}
enqueued := false
c.enqueueDeployment = func(d *apps.Deployment) {
if d.Name == "foo" {
enqueued = true
}
}
c.deletePod(pod)
if !enqueued {
t.Errorf("expected deployment %q to be queued after pod deletion", foo.Name)
}
}
// TestPodDeletionDoesntEnqueueRecreateDeployment ensures that the deletion of a pod
// will not requeue a Recreate deployment if there are other pods returned from the
// client.
func TestPodDeletionDoesntEnqueueRecreateDeployment(t *testing.T) {
f := newFixture(t)
foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
foo.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType
rs1 := newReplicaSet(foo, "foo-1", 1)
rs2 := newReplicaSet(foo, "foo-1", 1)
pod1 := generatePodFromRS(rs1)
pod2 := generatePodFromRS(rs2)
f.dLister = append(f.dLister, foo)
// Let's pretend this is a different pod. The gist is that the pod lister needs to
// return a non-empty list.
pod2.Name = "pod2"
f.podLister = append(f.podLister, pod1, pod2)
c, _, err := f.newController()
if err != nil {
t.Fatalf("error creating Deployment controller: %v", err)
}
enqueued := false
c.enqueueDeployment = func(d *apps.Deployment) {
if d.Name == "foo" {
enqueued = true
}
}
c.deletePod(pod1)
if enqueued {
t.Errorf("expected deployment %q not to be queued after pod deletion", foo.Name)
}
}
// TestPodDeletionPartialReplicaSetOwnershipEnqueueRecreateDeployment ensures that
// the deletion of a pod will requeue a Recreate deployment iff there is no other
// pod returned from the client in the case where a deployment has multiple replica
// sets, some of which have empty owner references.
func TestPodDeletionPartialReplicaSetOwnershipEnqueueRecreateDeployment(t *testing.T) {
f := newFixture(t)
foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
foo.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType
rs1 := newReplicaSet(foo, "foo-1", 1)
rs2 := newReplicaSet(foo, "foo-2", 2)
rs2.OwnerReferences = nil
pod := generatePodFromRS(rs1)
f.dLister = append(f.dLister, foo)
f.rsLister = append(f.rsLister, rs1, rs2)
f.objects = append(f.objects, foo, rs1, rs2)
c, _, err := f.newController()
if err != nil {
t.Fatalf("error creating Deployment controller: %v", err)
}
enqueued := false
c.enqueueDeployment = func(d *apps.Deployment) {
if d.Name == "foo" {
enqueued = true
}
}
c.deletePod(pod)
if !enqueued {
t.Errorf("expected deployment %q to be queued after pod deletion", foo.Name)
}
}
// TestPodDeletionPartialReplicaSetOwnershipDoesntEnqueueRecreateDeployment ensures that the
// deletion of a pod will not requeue a Recreate deployment if there are other pods
// returned from the client in the case where a deployment has multiple replica sets,
// some of which have empty owner references.
func TestPodDeletionPartialReplicaSetOwnershipDoesntEnqueueRecreateDeployment(t *testing.T) {
f := newFixture(t)
foo := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
foo.Spec.Strategy.Type = apps.RecreateDeploymentStrategyType
rs1 := newReplicaSet(foo, "foo-1", 1)
rs2 := newReplicaSet(foo, "foo-2", 2)
rs2.OwnerReferences = nil
pod := generatePodFromRS(rs1)
f.dLister = append(f.dLister, foo)
f.rsLister = append(f.rsLister, rs1, rs2)
f.objects = append(f.objects, foo, rs1, rs2)
// Let's pretend this is a different pod. The gist is that the pod lister needs to
// return a non-empty list.
f.podLister = append(f.podLister, pod)
c, _, err := f.newController()
if err != nil {
t.Fatalf("error creating Deployment controller: %v", err)
}
enqueued := false
c.enqueueDeployment = func(d *apps.Deployment) {
if d.Name == "foo" {
enqueued = true
}
}
c.deletePod(pod)
if enqueued {
t.Errorf("expected deployment %q not to be queued after pod deletion", foo.Name)
}
}
func TestGetReplicaSetsForDeployment(t *testing.T) {
f := newFixture(t)
// Two Deployments with same labels.
d1 := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
d2 := newDeployment("bar", 1, nil, nil, nil, map[string]string{"foo": "bar"})
// Two ReplicaSets that match labels for both Deployments,
// but have ControllerRefs to make ownership explicit.
rs1 := newReplicaSet(d1, "rs1", 1)
rs2 := newReplicaSet(d2, "rs2", 1)
f.dLister = append(f.dLister, d1, d2)
f.rsLister = append(f.rsLister, rs1, rs2)
f.objects = append(f.objects, d1, d2, rs1, rs2)
// Start the fixture.
c, informers, err := f.newController()
if err != nil {
t.Fatalf("error creating Deployment controller: %v", err)
}
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
rsList, err := c.getReplicaSetsForDeployment(d1)
if err != nil {
t.Fatalf("getReplicaSetsForDeployment() error: %v", err)
}
rsNames := []string{}
for _, rs := range rsList {
rsNames = append(rsNames, rs.Name)
}
if len(rsNames) != 1 || rsNames[0] != rs1.Name {
t.Errorf("getReplicaSetsForDeployment() = %v, want [%v]", rsNames, rs1.Name)
}
rsList, err = c.getReplicaSetsForDeployment(d2)
if err != nil {
t.Fatalf("getReplicaSetsForDeployment() error: %v", err)
}
rsNames = []string{}
for _, rs := range rsList {
rsNames = append(rsNames, rs.Name)
}
if len(rsNames) != 1 || rsNames[0] != rs2.Name {
t.Errorf("getReplicaSetsForDeployment() = %v, want [%v]", rsNames, rs2.Name)
}
}
func TestGetReplicaSetsForDeploymentAdoptRelease(t *testing.T) {
f := newFixture(t)
d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
// RS with matching labels, but orphaned. Should be adopted and returned.
rsAdopt := newReplicaSet(d, "rsAdopt", 1)
rsAdopt.OwnerReferences = nil
// RS with matching ControllerRef, but wrong labels. Should be released.
rsRelease := newReplicaSet(d, "rsRelease", 1)
rsRelease.Labels = map[string]string{"foo": "notbar"}
f.dLister = append(f.dLister, d)
f.rsLister = append(f.rsLister, rsAdopt, rsRelease)
f.objects = append(f.objects, d, rsAdopt, rsRelease)
// Start the fixture.
c, informers, err := f.newController()
if err != nil {
t.Fatalf("error creating Deployment controller: %v", err)
}
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
rsList, err := c.getReplicaSetsForDeployment(d)
if err != nil {
t.Fatalf("getReplicaSetsForDeployment() error: %v", err)
}
rsNames := []string{}
for _, rs := range rsList {
rsNames = append(rsNames, rs.Name)
}
if len(rsNames) != 1 || rsNames[0] != rsAdopt.Name {
t.Errorf("getReplicaSetsForDeployment() = %v, want [%v]", rsNames, rsAdopt.Name)
}
}
func TestGetPodMapForReplicaSets(t *testing.T) {
f := newFixture(t)
d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
rs1 := newReplicaSet(d, "rs1", 1)
rs2 := newReplicaSet(d, "rs2", 1)
// Add a Pod for each ReplicaSet.
pod1 := generatePodFromRS(rs1)
pod2 := generatePodFromRS(rs2)
// Add a Pod that has matching labels, but no ControllerRef.
pod3 := generatePodFromRS(rs1)
pod3.Name = "pod3"
pod3.OwnerReferences = nil
// Add a Pod that has matching labels and ControllerRef, but is inactive.
pod4 := generatePodFromRS(rs1)
pod4.Name = "pod4"
pod4.Status.Phase = v1.PodFailed
f.dLister = append(f.dLister, d)
f.rsLister = append(f.rsLister, rs1, rs2)
f.podLister = append(f.podLister, pod1, pod2, pod3, pod4)
f.objects = append(f.objects, d, rs1, rs2, pod1, pod2, pod3, pod4)
// Start the fixture.
c, informers, err := f.newController()
if err != nil {
t.Fatalf("error creating Deployment controller: %v", err)
}
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
podMap, err := c.getPodMapForDeployment(d, f.rsLister)
if err != nil {
t.Fatalf("getPodMapForDeployment() error: %v", err)
}
podCount := 0
for _, podList := range podMap {
podCount += len(podList.Items)
}
if got, want := podCount, 3; got != want {
t.Errorf("podCount = %v, want %v", got, want)
}
if got, want := len(podMap), 2; got != want {
t.Errorf("len(podMap) = %v, want %v", got, want)
}
if got, want := len(podMap[rs1.UID].Items), 2; got != want {
t.Errorf("len(podMap[rs1]) = %v, want %v", got, want)
}
expect := map[string]struct{}{"rs1-pod": {}, "pod4": {}}
for _, pod := range podMap[rs1.UID].Items {
if _, ok := expect[pod.Name]; !ok {
t.Errorf("unexpected pod name for rs1: %s", pod.Name)
}
}
if got, want := len(podMap[rs2.UID].Items), 1; got != want {
t.Errorf("len(podMap[rs2]) = %v, want %v", got, want)
}
if got, want := podMap[rs2.UID].Items[0].Name, "rs2-pod"; got != want {
t.Errorf("podMap[rs2] = [%v], want [%v]", got, want)
}
}
func TestAddReplicaSet(t *testing.T) {
f := newFixture(t)
d1 := newDeployment("d1", 1, nil, nil, nil, map[string]string{"foo": "bar"})
d2 := newDeployment("d2", 1, nil, nil, nil, map[string]string{"foo": "bar"})
// Two ReplicaSets that match labels for both Deployments,
// but have ControllerRefs to make ownership explicit.
rs1 := newReplicaSet(d1, "rs1", 1)
rs2 := newReplicaSet(d2, "rs2", 1)
f.dLister = append(f.dLister, d1, d2)
f.objects = append(f.objects, d1, d2, rs1, rs2)
// Create the fixture but don't start it,
// so nothing happens in the background.
dc, _, err := f.newController()
if err != nil {
t.Fatalf("error creating Deployment controller: %v", err)
}
dc.addReplicaSet(rs1)
if got, want := dc.queue.Len(), 1; got != want {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
key, done := dc.queue.Get()
if key == nil || done {
t.Fatalf("failed to enqueue controller for rs %v", rs1.Name)
}
expectedKey, _ := controller.KeyFunc(d1)
if got, want := key.(string), expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}
dc.addReplicaSet(rs2)
if got, want := dc.queue.Len(), 1; got != want {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
key, done = dc.queue.Get()
if key == nil || done {
t.Fatalf("failed to enqueue controller for rs %v", rs2.Name)
}
expectedKey, _ = controller.KeyFunc(d2)
if got, want := key.(string), expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}
}
func TestAddReplicaSetOrphan(t *testing.T) {
f := newFixture(t)
// 2 will match the RS, 1 won't.
d1 := newDeployment("d1", 1, nil, nil, nil, map[string]string{"foo": "bar"})
d2 := newDeployment("d2", 1, nil, nil, nil, map[string]string{"foo": "bar"})
d3 := newDeployment("d3", 1, nil, nil, nil, map[string]string{"foo": "bar"})
d3.Spec.Selector.MatchLabels = map[string]string{"foo": "notbar"}
// Make the RS an orphan. Expect matching Deployments to be queued.
rs := newReplicaSet(d1, "rs1", 1)
rs.OwnerReferences = nil
f.dLister = append(f.dLister, d1, d2, d3)
f.objects = append(f.objects, d1, d2, d3)
// Create the fixture but don't start it,
// so nothing happens in the background.
dc, _, err := f.newController()
if err != nil {
t.Fatalf("error creating Deployment controller: %v", err)
}
dc.addReplicaSet(rs)
if got, want := dc.queue.Len(), 2; got != want {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
}
func TestUpdateReplicaSet(t *testing.T) {
f := newFixture(t)
d1 := newDeployment("d1", 1, nil, nil, nil, map[string]string{"foo": "bar"})
d2 := newDeployment("d2", 1, nil, nil, nil, map[string]string{"foo": "bar"})
// Two ReplicaSets that match labels for both Deployments,
// but have ControllerRefs to make ownership explicit.
rs1 := newReplicaSet(d1, "rs1", 1)
rs2 := newReplicaSet(d2, "rs2", 1)
f.dLister = append(f.dLister, d1, d2)
f.rsLister = append(f.rsLister, rs1, rs2)
f.objects = append(f.objects, d1, d2, rs1, rs2)
// Create the fixture but don't start it,
// so nothing happens in the background.
dc, _, err := f.newController()
if err != nil {
t.Fatalf("error creating Deployment controller: %v", err)
}
prev := *rs1
next := *rs1
bumpResourceVersion(&next)
dc.updateReplicaSet(&prev, &next)
if got, want := dc.queue.Len(), 1; got != want {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
key, done := dc.queue.Get()
if key == nil || done {
t.Fatalf("failed to enqueue controller for rs %v", rs1.Name)
}
expectedKey, _ := controller.KeyFunc(d1)
if got, want := key.(string), expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}
prev = *rs2
next = *rs2
bumpResourceVersion(&next)
dc.updateReplicaSet(&prev, &next)
if got, want := dc.queue.Len(), 1; got != want {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
key, done = dc.queue.Get()
if key == nil || done {
t.Fatalf("failed to enqueue controller for rs %v", rs2.Name)
}
expectedKey, _ = controller.KeyFunc(d2)
if got, want := key.(string), expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}
}
func TestUpdateReplicaSetOrphanWithNewLabels(t *testing.T) {
f := newFixture(t)
d1 := newDeployment("d1", 1, nil, nil, nil, map[string]string{"foo": "bar"})
d2 := newDeployment("d2", 1, nil, nil, nil, map[string]string{"foo": "bar"})
// RS matches both, but is an orphan.
rs := newReplicaSet(d1, "rs1", 1)
rs.OwnerReferences = nil
f.dLister = append(f.dLister, d1, d2)
f.rsLister = append(f.rsLister, rs)
f.objects = append(f.objects, d1, d2, rs)
// Create the fixture but don't start it,
// so nothing happens in the background.
dc, _, err := f.newController()
if err != nil {
t.Fatalf("error creating Deployment controller: %v", err)
}
// Change labels and expect all matching controllers to queue.
prev := *rs
prev.Labels = map[string]string{"foo": "notbar"}
next := *rs
bumpResourceVersion(&next)
dc.updateReplicaSet(&prev, &next)
if got, want := dc.queue.Len(), 2; got != want {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
}
func TestUpdateReplicaSetChangeControllerRef(t *testing.T) {
f := newFixture(t)
d1 := newDeployment("d1", 1, nil, nil, nil, map[string]string{"foo": "bar"})
d2 := newDeployment("d2", 1, nil, nil, nil, map[string]string{"foo": "bar"})
rs := newReplicaSet(d1, "rs1", 1)
f.dLister = append(f.dLister, d1, d2)
f.rsLister = append(f.rsLister, rs)
f.objects = append(f.objects, d1, d2, rs)
// Create the fixture but don't start it,
// so nothing happens in the background.
dc, _, err := f.newController()
if err != nil {
t.Fatalf("error creating Deployment controller: %v", err)
}
// Change ControllerRef and expect both old and new to queue.
prev := *rs
prev.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(d2, controllerKind)}
next := *rs
bumpResourceVersion(&next)
dc.updateReplicaSet(&prev, &next)
if got, want := dc.queue.Len(), 2; got != want {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
}
func TestUpdateReplicaSetRelease(t *testing.T) {
f := newFixture(t)
d1 := newDeployment("d1", 1, nil, nil, nil, map[string]string{"foo": "bar"})
d2 := newDeployment("d2", 1, nil, nil, nil, map[string]string{"foo": "bar"})
rs := newReplicaSet(d1, "rs1", 1)
f.dLister = append(f.dLister, d1, d2)
f.rsLister = append(f.rsLister, rs)
f.objects = append(f.objects, d1, d2, rs)
// Create the fixture but don't start it,
// so nothing happens in the background.
dc, _, err := f.newController()
if err != nil {
t.Fatalf("error creating Deployment controller: %v", err)
}
// Remove ControllerRef and expect all matching controllers to sync the orphan.
prev := *rs
next := *rs
next.OwnerReferences = nil
bumpResourceVersion(&next)
dc.updateReplicaSet(&prev, &next)
if got, want := dc.queue.Len(), 2; got != want {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
}
func TestDeleteReplicaSet(t *testing.T) {
f := newFixture(t)
d1 := newDeployment("d1", 1, nil, nil, nil, map[string]string{"foo": "bar"})
d2 := newDeployment("d2", 1, nil, nil, nil, map[string]string{"foo": "bar"})
// Two ReplicaSets that match labels for both Deployments,
// but have ControllerRefs to make ownership explicit.
rs1 := newReplicaSet(d1, "rs1", 1)
rs2 := newReplicaSet(d2, "rs2", 1)
f.dLister = append(f.dLister, d1, d2)
f.rsLister = append(f.rsLister, rs1, rs2)
f.objects = append(f.objects, d1, d2, rs1, rs2)
// Create the fixture but don't start it,
// so nothing happens in the background.
dc, _, err := f.newController()
if err != nil {
t.Fatalf("error creating Deployment controller: %v", err)
}
dc.deleteReplicaSet(rs1)
if got, want := dc.queue.Len(), 1; got != want {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
key, done := dc.queue.Get()
if key == nil || done {
t.Fatalf("failed to enqueue controller for rs %v", rs1.Name)
}
expectedKey, _ := controller.KeyFunc(d1)
if got, want := key.(string), expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}
dc.deleteReplicaSet(rs2)
if got, want := dc.queue.Len(), 1; got != want {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
key, done = dc.queue.Get()
if key == nil || done {
t.Fatalf("failed to enqueue controller for rs %v", rs2.Name)
}
expectedKey, _ = controller.KeyFunc(d2)
if got, want := key.(string), expectedKey; got != want {
t.Errorf("queue.Get() = %v, want %v", got, want)
}
}
func TestDeleteReplicaSetOrphan(t *testing.T) {
f := newFixture(t)
d1 := newDeployment("d1", 1, nil, nil, nil, map[string]string{"foo": "bar"})
d2 := newDeployment("d2", 1, nil, nil, nil, map[string]string{"foo": "bar"})
// Make the RS an orphan. Expect matching Deployments to be queued.
rs := newReplicaSet(d1, "rs1", 1)
rs.OwnerReferences = nil
f.dLister = append(f.dLister, d1, d2)
f.rsLister = append(f.rsLister, rs)
f.objects = append(f.objects, d1, d2, rs)
// Create the fixture but don't start it,
// so nothing happens in the background.
dc, _, err := f.newController()
if err != nil {
t.Fatalf("error creating Deployment controller: %v", err)
}
dc.deleteReplicaSet(rs)
if got, want := dc.queue.Len(), 0; got != want {
t.Fatalf("queue.Len() = %v, want %v", got, want)
}
}
func bumpResourceVersion(obj metav1.Object) {
ver, _ := strconv.ParseInt(obj.GetResourceVersion(), 10, 32)
obj.SetResourceVersion(strconv.FormatInt(ver+1, 10))
}
// generatePodFromRS creates a pod, with the input ReplicaSet's selector and its template
func generatePodFromRS(rs *apps.ReplicaSet) *v1.Pod {
trueVar := true
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: rs.Name + "-pod",
Namespace: rs.Namespace,
Labels: rs.Spec.Selector.MatchLabels,
OwnerReferences: []metav1.OwnerReference{
{UID: rs.UID, APIVersion: "v1beta1", Kind: "ReplicaSet", Name: rs.Name, Controller: &trueVar},
},
},
Spec: rs.Spec.Template.Spec,
}
}


@ -1,198 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"fmt"
"reflect"
"time"
"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/controller/deployment/util"
)
// syncRolloutStatus updates the status of a deployment during a rollout. There are
// cases this helper will run in that cannot be prevented by the scaling detection,
// for example a resync of the deployment after it was scaled up. In those cases,
// we shouldn't try to estimate any progress.
func (dc *DeploymentController) syncRolloutStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, d *apps.Deployment) error {
newStatus := calculateStatus(allRSs, newRS, d)
// If there is no progressDeadlineSeconds set, remove any Progressing condition.
if d.Spec.ProgressDeadlineSeconds == nil {
util.RemoveDeploymentCondition(&newStatus, apps.DeploymentProgressing)
}
// If there is only one replica set that is active then that means we are not running
// a new rollout, and this is a resync where we don't need to estimate any progress.
currentCond := util.GetDeploymentCondition(d.Status, apps.DeploymentProgressing)
isCompleteDeployment := newStatus.Replicas == newStatus.UpdatedReplicas && currentCond != nil && currentCond.Reason == util.NewRSAvailableReason
// Check for progress only if there is a progress deadline set and the latest rollout
// hasn't completed yet.
if d.Spec.ProgressDeadlineSeconds != nil && !isCompleteDeployment {
switch {
case util.DeploymentComplete(d, &newStatus):
// Update the deployment conditions with a message for the new replica set that
// was successfully deployed. If the condition already exists, we ignore this update.
msg := fmt.Sprintf("Deployment %q has successfully progressed.", d.Name)
if newRS != nil {
msg = fmt.Sprintf("ReplicaSet %q has successfully progressed.", newRS.Name)
}
condition := util.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, util.NewRSAvailableReason, msg)
util.SetDeploymentCondition(&newStatus, *condition)
case util.DeploymentProgressing(d, &newStatus):
// If there is any progress made, continue by not checking if the deployment failed. This
// behavior emulates the rolling updater progressDeadline check.
msg := fmt.Sprintf("Deployment %q is progressing.", d.Name)
if newRS != nil {
msg = fmt.Sprintf("ReplicaSet %q is progressing.", newRS.Name)
}
condition := util.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, util.ReplicaSetUpdatedReason, msg)
// Update the current Progressing condition or add a new one if it doesn't exist.
// If a Progressing condition with status=true already exists, we should update
// everything but lastTransitionTime. SetDeploymentCondition already does that but
// it also does not update conditions when the reason of the new condition is the
// same as the old one. The Progressing condition is a special case because we want to
// update with the same reason and change just lastUpdateTime iff we notice any
// progress. That's why we handle it here.
if currentCond != nil {
if currentCond.Status == v1.ConditionTrue {
condition.LastTransitionTime = currentCond.LastTransitionTime
}
util.RemoveDeploymentCondition(&newStatus, apps.DeploymentProgressing)
}
util.SetDeploymentCondition(&newStatus, *condition)
case util.DeploymentTimedOut(d, &newStatus):
// Update the deployment with a timeout condition. If the condition already exists,
// we ignore this update.
msg := fmt.Sprintf("Deployment %q has timed out progressing.", d.Name)
if newRS != nil {
msg = fmt.Sprintf("ReplicaSet %q has timed out progressing.", newRS.Name)
}
condition := util.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionFalse, util.TimedOutReason, msg)
util.SetDeploymentCondition(&newStatus, *condition)
}
}
// Move failure conditions of all replica sets into deployment conditions. For now,
// only one failure condition is returned from getReplicaFailures.
if replicaFailureCond := dc.getReplicaFailures(allRSs, newRS); len(replicaFailureCond) > 0 {
// There will be only one ReplicaFailure condition on the replica set.
util.SetDeploymentCondition(&newStatus, replicaFailureCond[0])
} else {
util.RemoveDeploymentCondition(&newStatus, apps.DeploymentReplicaFailure)
}
// Do not update if there is nothing new to add.
if reflect.DeepEqual(d.Status, newStatus) {
// Requeue the deployment if required.
dc.requeueStuckDeployment(d, newStatus)
return nil
}
newDeployment := d
newDeployment.Status = newStatus
_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
return err
}
// getReplicaFailures will convert replica failure conditions from replica sets
// to deployment conditions.
func (dc *DeploymentController) getReplicaFailures(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) []apps.DeploymentCondition {
var conditions []apps.DeploymentCondition
if newRS != nil {
for _, c := range newRS.Status.Conditions {
if c.Type != apps.ReplicaSetReplicaFailure {
continue
}
conditions = append(conditions, util.ReplicaSetToDeploymentCondition(c))
}
}
// Return failures for the new replica set over failures from old replica sets.
if len(conditions) > 0 {
return conditions
}
for i := range allRSs {
rs := allRSs[i]
if rs == nil {
continue
}
for _, c := range rs.Status.Conditions {
if c.Type != apps.ReplicaSetReplicaFailure {
continue
}
conditions = append(conditions, util.ReplicaSetToDeploymentCondition(c))
}
}
return conditions
}
// used for unit testing
var nowFn = func() time.Time { return time.Now() }
// requeueStuckDeployment checks whether the provided deployment needs to be synced for a progress
// check. It returns the time after which the deployment will be requeued for the progress check, 0 if it
// will be requeued now, or -1 if it does not need to be requeued.
func (dc *DeploymentController) requeueStuckDeployment(d *apps.Deployment, newStatus apps.DeploymentStatus) time.Duration {
currentCond := util.GetDeploymentCondition(d.Status, apps.DeploymentProgressing)
// Can't estimate progress if there is no deadline in the spec or progressing condition in the current status.
if d.Spec.ProgressDeadlineSeconds == nil || currentCond == nil {
return time.Duration(-1)
}
// No need to estimate progress if the rollout is complete or already timed out.
if util.DeploymentComplete(d, &newStatus) || currentCond.Reason == util.TimedOutReason {
return time.Duration(-1)
}
// If there is no sign of progress at this point then there is a high chance that the
// deployment is stuck. We should resync this deployment at some point in the future[1]
// and check whether it has timed out. We definitely need this, otherwise we depend on the
// controller resync interval. See https://github.com/kubernetes/kubernetes/issues/34458.
//
// [1] ProgressingCondition.LastUpdatedTime + progressDeadlineSeconds - time.Now()
//
// For example, if a Deployment updated its Progressing condition 3 minutes ago and has a
// deadline of 10 minutes, it would need to be resynced for a progress check after 7 minutes.
//
// lastUpdated: 00:00:00
// now: 00:03:00
// progressDeadlineSeconds: 600 (10 minutes)
//
// lastUpdated + progressDeadlineSeconds - now => 00:00:00 + 00:10:00 - 00:03:00 => 07:00
after := currentCond.LastUpdateTime.Time.Add(time.Duration(*d.Spec.ProgressDeadlineSeconds) * time.Second).Sub(nowFn())
// If the remaining time is less than a second, then requeue the deployment immediately.
// Make it rate-limited so we stay on the safe side; eventually the Deployment should
// transition either to a Complete or to a TimedOut condition.
if after < time.Second {
glog.V(4).Infof("Queueing up deployment %q for a progress check now", d.Name)
dc.enqueueRateLimited(d)
return time.Duration(0)
}
glog.V(4).Infof("Queueing up deployment %q for a progress check after %ds", d.Name, int(after.Seconds()))
// Add a second to avoid milliseconds skew in AddAfter.
// See https://github.com/kubernetes/kubernetes/issues/39785#issuecomment-279959133 for more info.
dc.enqueueAfter(d, after+time.Second)
return after
}
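// NOTE: the sketch below is not part of the original file. It works through the
// requeue arithmetic above with the numbers from the comment: lastUpdated 00:00:00,
// now 00:03:00, progressDeadlineSeconds 600, so the deployment should be requeued
// for a progress check after 7 minutes. The function name is made up; it only
// needs the "time" import already present in this file.
func requeueArithmeticSketch() time.Duration {
	lastUpdated := time.Date(2017, 2, 15, 0, 0, 0, 0, time.UTC)
	now := lastUpdated.Add(3 * time.Minute)
	progressDeadline := 600 * time.Second
	// lastUpdated + progressDeadlineSeconds - now => 7m0s
	return lastUpdated.Add(progressDeadline).Sub(now)
}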


@ -1,345 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"testing"
"time"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/controller/deployment/util"
)
func newDeploymentStatus(replicas, updatedReplicas, availableReplicas int32) apps.DeploymentStatus {
return apps.DeploymentStatus{
Replicas: replicas,
UpdatedReplicas: updatedReplicas,
AvailableReplicas: availableReplicas,
}
}
// assumes the returned deployment is always observed - no need to test that here.
func currentDeployment(pds *int32, replicas, statusReplicas, updatedReplicas, availableReplicas int32, conditions []apps.DeploymentCondition) *apps.Deployment {
d := &apps.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "progress-test",
},
Spec: apps.DeploymentSpec{
ProgressDeadlineSeconds: pds,
Replicas: &replicas,
Strategy: apps.DeploymentStrategy{
Type: apps.RecreateDeploymentStrategyType,
},
},
Status: newDeploymentStatus(statusReplicas, updatedReplicas, availableReplicas),
}
d.Status.Conditions = conditions
return d
}
// helper to create RS with given availableReplicas
func newRSWithAvailable(name string, specReplicas, statusReplicas, availableReplicas int) *apps.ReplicaSet {
rs := rs(name, specReplicas, nil, metav1.Time{})
rs.Status = apps.ReplicaSetStatus{
Replicas: int32(statusReplicas),
AvailableReplicas: int32(availableReplicas),
}
return rs
}
func TestRequeueStuckDeployment(t *testing.T) {
pds := int32(60)
failed := []apps.DeploymentCondition{
{
Type: apps.DeploymentProgressing,
Status: v1.ConditionFalse,
Reason: util.TimedOutReason,
},
}
stuck := []apps.DeploymentCondition{
{
Type: apps.DeploymentProgressing,
Status: v1.ConditionTrue,
LastUpdateTime: metav1.Date(2017, 2, 15, 18, 49, 00, 00, time.UTC),
},
}
tests := []struct {
name string
d *apps.Deployment
status apps.DeploymentStatus
nowFn func() time.Time
expected time.Duration
}{
{
name: "no progressDeadlineSeconds specified",
d: currentDeployment(nil, 4, 3, 3, 2, nil),
status: newDeploymentStatus(3, 3, 2),
expected: time.Duration(-1),
},
{
name: "no progressing condition found",
d: currentDeployment(&pds, 4, 3, 3, 2, nil),
status: newDeploymentStatus(3, 3, 2),
expected: time.Duration(-1),
},
{
name: "complete deployment does not need to be requeued",
d: currentDeployment(&pds, 3, 3, 3, 3, nil),
status: newDeploymentStatus(3, 3, 3),
expected: time.Duration(-1),
},
{
name: "already failed deployment does not need to be requeued",
d: currentDeployment(&pds, 3, 3, 3, 0, failed),
status: newDeploymentStatus(3, 3, 0),
expected: time.Duration(-1),
},
{
name: "stuck deployment - 30s",
d: currentDeployment(&pds, 3, 3, 3, 1, stuck),
status: newDeploymentStatus(3, 3, 1),
nowFn: func() time.Time { return metav1.Date(2017, 2, 15, 18, 49, 30, 00, time.UTC).Time },
expected: 30 * time.Second,
},
{
name: "stuck deployment - 1s",
d: currentDeployment(&pds, 3, 3, 3, 1, stuck),
status: newDeploymentStatus(3, 3, 1),
nowFn: func() time.Time { return metav1.Date(2017, 2, 15, 18, 49, 59, 00, time.UTC).Time },
expected: 1 * time.Second,
},
{
name: "failed deployment - less than a second => now",
d: currentDeployment(&pds, 3, 3, 3, 1, stuck),
status: newDeploymentStatus(3, 3, 1),
nowFn: func() time.Time { return metav1.Date(2017, 2, 15, 18, 49, 59, 1, time.UTC).Time },
expected: time.Duration(0),
},
{
name: "failed deployment - now",
d: currentDeployment(&pds, 3, 3, 3, 1, stuck),
status: newDeploymentStatus(3, 3, 1),
nowFn: func() time.Time { return metav1.Date(2017, 2, 15, 18, 50, 00, 00, time.UTC).Time },
expected: time.Duration(0),
},
{
name: "failed deployment - 1s after deadline",
d: currentDeployment(&pds, 3, 3, 3, 1, stuck),
status: newDeploymentStatus(3, 3, 1),
nowFn: func() time.Time { return metav1.Date(2017, 2, 15, 18, 50, 01, 00, time.UTC).Time },
expected: time.Duration(0),
},
{
name: "failed deployment - 60s after deadline",
d: currentDeployment(&pds, 3, 3, 3, 1, stuck),
status: newDeploymentStatus(3, 3, 1),
nowFn: func() time.Time { return metav1.Date(2017, 2, 15, 18, 51, 00, 00, time.UTC).Time },
expected: time.Duration(0),
},
}
dc := &DeploymentController{
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "doesnt-matter"),
}
dc.enqueueDeployment = dc.enqueue
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if test.nowFn != nil {
nowFn = test.nowFn
}
got := dc.requeueStuckDeployment(test.d, test.status)
if got != test.expected {
t.Errorf("%s: got duration: %v, expected duration: %v", test.name, got, test.expected)
}
})
}
}
func TestSyncRolloutStatus(t *testing.T) {
pds := int32(60)
testTime := metav1.Date(2017, 2, 15, 18, 49, 00, 00, time.UTC)
failedTimedOut := apps.DeploymentCondition{
Type: apps.DeploymentProgressing,
Status: v1.ConditionFalse,
Reason: util.TimedOutReason,
}
newRSAvailable := apps.DeploymentCondition{
Type: apps.DeploymentProgressing,
Status: v1.ConditionTrue,
Reason: util.NewRSAvailableReason,
LastUpdateTime: testTime,
LastTransitionTime: testTime,
}
replicaSetUpdated := apps.DeploymentCondition{
Type: apps.DeploymentProgressing,
Status: v1.ConditionTrue,
Reason: util.ReplicaSetUpdatedReason,
LastUpdateTime: testTime,
LastTransitionTime: testTime,
}
tests := []struct {
name string
d *apps.Deployment
allRSs []*apps.ReplicaSet
newRS *apps.ReplicaSet
conditionType apps.DeploymentConditionType
conditionStatus v1.ConditionStatus
conditionReason string
lastUpdate metav1.Time
lastTransition metav1.Time
}{
{
name: "General: remove Progressing condition and do not estimate progress if deployment has no Progress Deadline",
d: currentDeployment(nil, 3, 2, 2, 2, []apps.DeploymentCondition{replicaSetUpdated}),
allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)},
newRS: newRSWithAvailable("foo", 3, 2, 2),
},
{
name: "General: do not estimate progress of deployment with only one active ReplicaSet",
d: currentDeployment(&pds, 3, 3, 3, 3, []apps.DeploymentCondition{newRSAvailable}),
allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 3, 3, 3)},
conditionType: apps.DeploymentProgressing,
conditionStatus: v1.ConditionTrue,
conditionReason: util.NewRSAvailableReason,
lastUpdate: testTime,
lastTransition: testTime,
},
{
name:            "DeploymentProgressing: don't update lastTransitionTime if deployment already has Progressing=True",
d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{replicaSetUpdated}),
allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)},
newRS: newRSWithAvailable("foo", 3, 2, 2),
conditionType: apps.DeploymentProgressing,
conditionStatus: v1.ConditionTrue,
conditionReason: util.ReplicaSetUpdatedReason,
lastTransition: testTime,
},
{
name: "DeploymentProgressing: update everything if deployment has Progressing=False",
d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{failedTimedOut}),
allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)},
newRS: newRSWithAvailable("foo", 3, 2, 2),
conditionType: apps.DeploymentProgressing,
conditionStatus: v1.ConditionTrue,
conditionReason: util.ReplicaSetUpdatedReason,
},
{
name: "DeploymentProgressing: create Progressing condition if it does not exist",
d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{}),
allRSs: []*apps.ReplicaSet{newRSWithAvailable("bar", 0, 1, 1)},
newRS: newRSWithAvailable("foo", 3, 2, 2),
conditionType: apps.DeploymentProgressing,
conditionStatus: v1.ConditionTrue,
conditionReason: util.ReplicaSetUpdatedReason,
},
{
name:            "DeploymentComplete: don't update lastTransitionTime if deployment already has Progressing=True",
d: currentDeployment(&pds, 3, 3, 3, 3, []apps.DeploymentCondition{replicaSetUpdated}),
allRSs: []*apps.ReplicaSet{},
newRS: newRSWithAvailable("foo", 3, 3, 3),
conditionType: apps.DeploymentProgressing,
conditionStatus: v1.ConditionTrue,
conditionReason: util.NewRSAvailableReason,
lastTransition: testTime,
},
{
name: "DeploymentComplete: update everything if deployment has Progressing=False",
d: currentDeployment(&pds, 3, 3, 3, 3, []apps.DeploymentCondition{failedTimedOut}),
allRSs: []*apps.ReplicaSet{},
newRS: newRSWithAvailable("foo", 3, 3, 3),
conditionType: apps.DeploymentProgressing,
conditionStatus: v1.ConditionTrue,
conditionReason: util.NewRSAvailableReason,
},
{
name: "DeploymentComplete: create Progressing condition if it does not exist",
d: currentDeployment(&pds, 3, 3, 3, 3, []apps.DeploymentCondition{}),
allRSs: []*apps.ReplicaSet{},
newRS: newRSWithAvailable("foo", 3, 3, 3),
conditionType: apps.DeploymentProgressing,
conditionStatus: v1.ConditionTrue,
conditionReason: util.NewRSAvailableReason,
},
{
name: "DeploymentComplete: defend against NPE when newRS=nil",
d: currentDeployment(&pds, 0, 3, 3, 3, []apps.DeploymentCondition{replicaSetUpdated}),
allRSs: []*apps.ReplicaSet{newRSWithAvailable("foo", 0, 0, 0)},
conditionType: apps.DeploymentProgressing,
conditionStatus: v1.ConditionTrue,
conditionReason: util.NewRSAvailableReason,
},
{
name: "DeploymentTimedOut: update status if rollout exceeds Progress Deadline",
d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{replicaSetUpdated}),
allRSs: []*apps.ReplicaSet{},
newRS: newRSWithAvailable("foo", 3, 2, 2),
conditionType: apps.DeploymentProgressing,
conditionStatus: v1.ConditionFalse,
conditionReason: util.TimedOutReason,
},
{
name: "DeploymentTimedOut: do not update status if deployment has existing timedOut condition",
d: currentDeployment(&pds, 3, 2, 2, 2, []apps.DeploymentCondition{failedTimedOut}),
allRSs: []*apps.ReplicaSet{},
newRS: newRSWithAvailable("foo", 3, 2, 2),
conditionType: apps.DeploymentProgressing,
conditionStatus: v1.ConditionFalse,
conditionReason: util.TimedOutReason,
lastUpdate: testTime,
lastTransition: testTime,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fake := fake.Clientset{}
dc := &DeploymentController{
client: &fake,
}
if test.newRS != nil {
test.allRSs = append(test.allRSs, test.newRS)
}
err := dc.syncRolloutStatus(test.allRSs, test.newRS, test.d)
if err != nil {
t.Error(err)
}
newCond := util.GetDeploymentCondition(test.d.Status, test.conditionType)
switch {
case newCond == nil:
if test.d.Spec.ProgressDeadlineSeconds != nil {
t.Errorf("%s: expected deployment condition: %s", test.name, test.conditionType)
}
case newCond.Status != test.conditionStatus || newCond.Reason != test.conditionReason:
t.Errorf("%s: DeploymentProgressing has status %s with reason %s. Expected %s with %s.", test.name, newCond.Status, newCond.Reason, test.conditionStatus, test.conditionReason)
case !test.lastUpdate.IsZero() && test.lastUpdate != testTime:
t.Errorf("%s: LastUpdateTime was changed to %s but expected %s;", test.name, test.lastUpdate, testTime)
case !test.lastTransition.IsZero() && test.lastTransition != testTime:
t.Errorf("%s: LastTransitionTime was changed to %s but expected %s;", test.name, test.lastTransition, testTime)
}
})
}
}

View File

@ -1,129 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/deployment/util"
)
// rolloutRecreate implements the logic for recreating a replica set.
func (dc *DeploymentController) rolloutRecreate(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
// Don't create a new RS if one doesn't already exist, so that we avoid scaling up before scaling down.
newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
if err != nil {
return err
}
allRSs := append(oldRSs, newRS)
activeOldRSs := controller.FilterActiveReplicaSets(oldRSs)
// scale down old replica sets.
scaledDown, err := dc.scaleDownOldReplicaSetsForRecreate(activeOldRSs, d)
if err != nil {
return err
}
if scaledDown {
// Update DeploymentStatus.
return dc.syncRolloutStatus(allRSs, newRS, d)
}
// Do not process a deployment when it has old pods running.
if oldPodsRunning(newRS, oldRSs, podMap) {
return dc.syncRolloutStatus(allRSs, newRS, d)
}
// If we need to create a new RS, create it now.
if newRS == nil {
newRS, oldRSs, err = dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, true)
if err != nil {
return err
}
allRSs = append(oldRSs, newRS)
}
// scale up new replica set.
if _, err := dc.scaleUpNewReplicaSetForRecreate(newRS, d); err != nil {
return err
}
if util.DeploymentComplete(d, &d.Status) {
if err := dc.cleanupDeployment(oldRSs, d); err != nil {
return err
}
}
// Sync deployment status.
return dc.syncRolloutStatus(allRSs, newRS, d)
}
// scaleDownOldReplicaSetsForRecreate scales down old replica sets when deployment strategy is "Recreate".
func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
scaled := false
for i := range oldRSs {
rs := oldRSs[i]
// Scaling not required.
if *(rs.Spec.Replicas) == 0 {
continue
}
scaledRS, updatedRS, err := dc.scaleReplicaSetAndRecordEvent(rs, 0, deployment)
if err != nil {
return false, err
}
if scaledRS {
oldRSs[i] = updatedRS
scaled = true
}
}
return scaled, nil
}
// oldPodsRunning returns whether there are old pods running, or whether any of the old ReplicaSets thinks it is running pods.
func oldPodsRunning(newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) bool {
if oldPods := util.GetActualReplicaCountForReplicaSets(oldRSs); oldPods > 0 {
return true
}
for rsUID, podList := range podMap {
// If the pods belong to the new ReplicaSet, ignore.
if newRS != nil && newRS.UID == rsUID {
continue
}
for _, pod := range podList.Items {
switch pod.Status.Phase {
case v1.PodFailed, v1.PodSucceeded:
// Don't count pods in terminal state.
continue
case v1.PodUnknown:
// This happens in situations such as when a node is temporarily disconnected from the cluster.
// If we can't be sure that a pod is not running, we have to count it.
return true
default:
// Pod is not in terminal phase.
return true
}
}
}
return false
}
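// A minimal illustrative sketch (hypothetical, not part of this package): the
// phase classification used by oldPodsRunning above. Only Failed and Succeeded
// are terminal; Unknown must be counted as possibly running, to stay safe.
func examplePhaseCountsAsRunning(phase v1.PodPhase) bool {
switch phase {
case v1.PodFailed, v1.PodSucceeded:
return false // terminal state, safe to ignore
default:
return true // Pending, Running, Unknown: treat as (possibly) running
}
}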
// scaleUpNewReplicaSetForRecreate scales up new replica set when deployment strategy is "Recreate".
func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, *(deployment.Spec.Replicas), deployment)
return scaled, err
}

View File

@ -1,246 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"fmt"
"testing"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/controller"
)
func TestScaleDownOldReplicaSets(t *testing.T) {
tests := []struct {
oldRSSizes []int
d *apps.Deployment
}{
{
oldRSSizes: []int{3},
d: newDeployment("foo", 3, nil, nil, nil, map[string]string{"foo": "bar"}),
},
}
for i := range tests {
t.Logf("running scenario %d", i)
test := tests[i]
var oldRSs []*apps.ReplicaSet
var expected []runtime.Object
for n, size := range test.oldRSSizes {
rs := newReplicaSet(test.d, fmt.Sprintf("%s-%d", test.d.Name, n), size)
oldRSs = append(oldRSs, rs)
rsCopy := rs.DeepCopy()
zero := int32(0)
rsCopy.Spec.Replicas = &zero
expected = append(expected, rsCopy)
if *(oldRSs[n].Spec.Replicas) == *(expected[n].(*apps.ReplicaSet).Spec.Replicas) {
t.Errorf("broken test - original and expected RS have the same size")
}
}
kc := fake.NewSimpleClientset(expected...)
informers := informers.NewSharedInformerFactory(kc, controller.NoResyncPeriodFunc())
c, err := NewDeploymentController(informers.Apps().V1().Deployments(), informers.Apps().V1().ReplicaSets(), informers.Core().V1().Pods(), kc)
if err != nil {
t.Fatalf("error creating Deployment controller: %v", err)
}
c.eventRecorder = &record.FakeRecorder{}
c.scaleDownOldReplicaSetsForRecreate(oldRSs, test.d)
for j := range oldRSs {
rs := oldRSs[j]
if *rs.Spec.Replicas != 0 {
t.Errorf("rs %q has non-zero replicas", rs.Name)
}
}
}
}
func TestOldPodsRunning(t *testing.T) {
tests := []struct {
name string
newRS *apps.ReplicaSet
oldRSs []*apps.ReplicaSet
podMap map[types.UID]*v1.PodList
hasOldPodsRunning bool
}{
{
name: "no old RSs",
hasOldPodsRunning: false,
},
{
name: "old RSs with running pods",
oldRSs: []*apps.ReplicaSet{rsWithUID("some-uid"), rsWithUID("other-uid")},
podMap: podMapWithUIDs([]string{"some-uid", "other-uid"}),
hasOldPodsRunning: true,
},
{
name: "old RSs without pods but with non-zero status replicas",
oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 1, nil)},
hasOldPodsRunning: true,
},
{
name: "old RSs without pods or non-zero status replicas",
oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
hasOldPodsRunning: false,
},
{
name: "old RSs with zero status replicas but pods in terminal state are present",
oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
podMap: map[types.UID]*v1.PodList{
"uid-1": {
Items: []v1.Pod{
{
Status: v1.PodStatus{
Phase: v1.PodFailed,
},
},
{
Status: v1.PodStatus{
Phase: v1.PodSucceeded,
},
},
},
},
},
hasOldPodsRunning: false,
},
{
name: "old RSs with zero status replicas but pod in unknown phase present",
oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
podMap: map[types.UID]*v1.PodList{
"uid-1": {
Items: []v1.Pod{
{
Status: v1.PodStatus{
Phase: v1.PodUnknown,
},
},
},
},
},
hasOldPodsRunning: true,
},
{
name: "old RSs with zero status replicas with pending pod present",
oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
podMap: map[types.UID]*v1.PodList{
"uid-1": {
Items: []v1.Pod{
{
Status: v1.PodStatus{
Phase: v1.PodPending,
},
},
},
},
},
hasOldPodsRunning: true,
},
{
name: "old RSs with zero status replicas with running pod present",
oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
podMap: map[types.UID]*v1.PodList{
"uid-1": {
Items: []v1.Pod{
{
Status: v1.PodStatus{
Phase: v1.PodRunning,
},
},
},
},
},
hasOldPodsRunning: true,
},
{
name: "old RSs with zero status replicas but pods in terminal state and pending are present",
oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
podMap: map[types.UID]*v1.PodList{
"uid-1": {
Items: []v1.Pod{
{
Status: v1.PodStatus{
Phase: v1.PodFailed,
},
},
{
Status: v1.PodStatus{
Phase: v1.PodSucceeded,
},
},
},
},
"uid-2": {
Items: []v1.Pod{},
},
"uid-3": {
Items: []v1.Pod{
{
Status: v1.PodStatus{
Phase: v1.PodPending,
},
},
},
},
},
hasOldPodsRunning: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if expected, got := test.hasOldPodsRunning, oldPodsRunning(test.newRS, test.oldRSs, test.podMap); expected != got {
t.Errorf("%s: expected %t, got %t", test.name, expected, got)
}
})
}
}
func rsWithUID(uid string) *apps.ReplicaSet {
d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
rs := newReplicaSet(d, fmt.Sprintf("foo-%s", uid), 0)
rs.UID = types.UID(uid)
return rs
}
func podMapWithUIDs(uids []string) map[types.UID]*v1.PodList {
podMap := make(map[types.UID]*v1.PodList)
for _, uid := range uids {
podMap[types.UID(uid)] = &v1.PodList{
Items: []v1.Pod{
{ /* supposedly a pod */ },
{ /* supposedly another pod */ },
},
}
}
return podMap
}

View File

@ -1,148 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"fmt"
"strconv"
"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/types"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
)
// rollback rolls the deployment back to the specified revision. In any case, it cleans up the rollback spec.
func (dc *DeploymentController) rollback(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
newRS, allOldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, true)
if err != nil {
return err
}
allRSs := append(allOldRSs, newRS)
rollbackTo := getRollbackTo(d)
// If the rollback revision is 0, roll back to the last revision.
if rollbackTo.Revision == 0 {
if rollbackTo.Revision = deploymentutil.LastRevision(allRSs); rollbackTo.Revision == 0 {
// If we still can't find the last revision, give up on the rollback.
dc.emitRollbackWarningEvent(d, deploymentutil.RollbackRevisionNotFound, "Unable to find last revision.")
return dc.updateDeploymentAndClearRollbackTo(d)
}
}
for _, rs := range allRSs {
v, err := deploymentutil.Revision(rs)
if err != nil {
glog.V(4).Infof("Unable to extract revision from deployment's replica set %q: %v", rs.Name, err)
continue
}
if v == rollbackTo.Revision {
glog.V(4).Infof("Found replica set %q with desired revision %d", rs.Name, v)
// rollback by copying podTemplate.Spec from the replica set
// revision number will be incremented during the next getAllReplicaSetsAndSyncRevision call
// no-op if the spec matches current deployment's podTemplate.Spec
performedRollback, err := dc.rollbackToTemplate(d, rs)
if performedRollback && err == nil {
dc.emitRollbackNormalEvent(d, fmt.Sprintf("Rolled back deployment %q to revision %d", d.Name, rollbackTo.Revision))
}
return err
}
}
dc.emitRollbackWarningEvent(d, deploymentutil.RollbackRevisionNotFound, "Unable to find the revision to roll back to.")
// Give up on the rollback.
return dc.updateDeploymentAndClearRollbackTo(d)
}
// rollbackToTemplate compares the templates of the provided deployment and replica set and
// updates the deployment with the replica set template in case they are different. It also
// cleans up the rollback spec so subsequent requeues of the deployment won't end up in here.
func (dc *DeploymentController) rollbackToTemplate(d *apps.Deployment, rs *apps.ReplicaSet) (bool, error) {
performedRollback := false
if !deploymentutil.EqualIgnoreHash(&d.Spec.Template, &rs.Spec.Template) {
glog.V(4).Infof("Rolling back deployment %q to template spec %+v", d.Name, rs.Spec.Template.Spec)
deploymentutil.SetFromReplicaSetTemplate(d, rs.Spec.Template)
// Set the RS (the old RS we're rolling back to) annotations back on the deployment;
// otherwise, the deployment's current annotations (which should match the current new RS's) would be copied to the RS after the rollback.
//
// For example,
// A Deployment has old RS1 with annotation {change-cause:create}, and new RS2 {change-cause:edit}.
// Note that both annotations are copied from Deployment, and the Deployment should be annotated {change-cause:edit} as well.
// Now, rolling back the Deployment to RS1, we should update the Deployment's pod template and also copy the annotation from RS1.
// Deployment is now annotated {change-cause:create}, and we have new RS1 {change-cause:create}, old RS2 {change-cause:edit}.
//
// If we don't copy the annotations back from RS to deployment on rollback, the Deployment will stay as {change-cause:edit},
// and new RS1 becomes {change-cause:edit} (copied from deployment after rollback), old RS2 {change-cause:edit}, which is not correct.
deploymentutil.SetDeploymentAnnotationsTo(d, rs)
performedRollback = true
} else {
glog.V(4).Infof("Rolling back to a revision that contains the same template as current deployment %q, skipping rollback...", d.Name)
eventMsg := fmt.Sprintf("The rollback revision contains the same template as current deployment %q", d.Name)
dc.emitRollbackWarningEvent(d, deploymentutil.RollbackTemplateUnchanged, eventMsg)
}
return performedRollback, dc.updateDeploymentAndClearRollbackTo(d)
}
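// A minimal illustrative sketch (hypothetical, not part of this package): the
// change-cause example from the comment above, reduced to plain maps. Rolling
// back to RS1 must leave the Deployment annotated with RS1's change-cause, not
// the current new RS's.
func exampleAnnotationCopyBack() map[string]string {
deployment := map[string]string{"kubernetes.io/change-cause": "edit"} // same as the current new RS2
rs1 := map[string]string{"kubernetes.io/change-cause": "create"} // the old RS we roll back to
for k, v := range rs1 {
deployment[k] = v // copy RS1's annotations back onto the Deployment
}
return deployment // now {"kubernetes.io/change-cause": "create"}
}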
func (dc *DeploymentController) emitRollbackWarningEvent(d *apps.Deployment, reason, message string) {
dc.eventRecorder.Eventf(d, v1.EventTypeWarning, reason, message)
}
func (dc *DeploymentController) emitRollbackNormalEvent(d *apps.Deployment, message string) {
dc.eventRecorder.Eventf(d, v1.EventTypeNormal, deploymentutil.RollbackDone, message)
}
// updateDeploymentAndClearRollbackTo sets .spec.rollbackTo to nil and updates the input deployment.
// It is assumed that the caller will have updated the deployment template appropriately (in case
// we want to roll back).
func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *apps.Deployment) error {
glog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name)
setRollbackTo(d, nil)
_, err := dc.client.AppsV1().Deployments(d.Namespace).Update(d)
return err
}
// TODO: Remove this when extensions/v1beta1 and apps/v1beta1 Deployment are dropped.
func getRollbackTo(d *apps.Deployment) *extensions.RollbackConfig {
// Extract the annotation used for round-tripping the deprecated RollbackTo field.
revision := d.Annotations[apps.DeprecatedRollbackTo]
if revision == "" {
return nil
}
revision64, err := strconv.ParseInt(revision, 10, 64)
if err != nil {
// If it's invalid, ignore it.
return nil
}
return &extensions.RollbackConfig{
Revision: revision64,
}
}
// TODO: Remove this when extensions/v1beta1 and apps/v1beta1 Deployment are dropped.
func setRollbackTo(d *apps.Deployment, rollbackTo *extensions.RollbackConfig) {
if rollbackTo == nil {
delete(d.Annotations, apps.DeprecatedRollbackTo)
return
}
if d.Annotations == nil {
d.Annotations = make(map[string]string)
}
d.Annotations[apps.DeprecatedRollbackTo] = strconv.FormatInt(rollbackTo.Revision, 10)
}
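// A minimal illustrative sketch (hypothetical, not part of this package):
// round-tripping the deprecated rollbackTo field through the annotation via
// the two helpers above.
func exampleRollbackToRoundTrip(d *apps.Deployment) int64 {
setRollbackTo(d, &extensions.RollbackConfig{Revision: 2}) // stores "2" under apps.DeprecatedRollbackTo
if rollbackTo := getRollbackTo(d); rollbackTo != nil {
return rollbackTo.Revision // 2
}
return 0 // annotation missing or unparseable
}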

View File

@ -1,235 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"fmt"
"sort"
"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/integer"
"k8s.io/kubernetes/pkg/controller"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
)
// rolloutRolling implements the logic for rolling a new replica set.
func (dc *DeploymentController) rolloutRolling(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, true)
if err != nil {
return err
}
allRSs := append(oldRSs, newRS)
// Scale up, if we can.
scaledUp, err := dc.reconcileNewReplicaSet(allRSs, newRS, d)
if err != nil {
return err
}
if scaledUp {
// Update DeploymentStatus
return dc.syncRolloutStatus(allRSs, newRS, d)
}
// Scale down, if we can.
scaledDown, err := dc.reconcileOldReplicaSets(allRSs, controller.FilterActiveReplicaSets(oldRSs), newRS, d)
if err != nil {
return err
}
if scaledDown {
// Update DeploymentStatus
return dc.syncRolloutStatus(allRSs, newRS, d)
}
if deploymentutil.DeploymentComplete(d, &d.Status) {
if err := dc.cleanupDeployment(oldRSs, d); err != nil {
return err
}
}
// Sync deployment status
return dc.syncRolloutStatus(allRSs, newRS, d)
}
func (dc *DeploymentController) reconcileNewReplicaSet(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
if *(newRS.Spec.Replicas) == *(deployment.Spec.Replicas) {
// Scaling not required.
return false, nil
}
if *(newRS.Spec.Replicas) > *(deployment.Spec.Replicas) {
// Scale down.
scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, *(deployment.Spec.Replicas), deployment)
return scaled, err
}
newReplicasCount, err := deploymentutil.NewRSNewReplicas(deployment, allRSs, newRS)
if err != nil {
return false, err
}
scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, newReplicasCount, deployment)
return scaled, err
}
func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*apps.ReplicaSet, oldRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployment *apps.Deployment) (bool, error) {
oldPodsCount := deploymentutil.GetReplicaCountForReplicaSets(oldRSs)
if oldPodsCount == 0 {
// Can't scale down further
return false, nil
}
allPodsCount := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
glog.V(4).Infof("New replica set %s/%s has %d available pods.", newRS.Namespace, newRS.Name, newRS.Status.AvailableReplicas)
maxUnavailable := deploymentutil.MaxUnavailable(*deployment)
// Check if we can scale down. We can scale down in the following 2 cases:
// * Some old replica sets have unhealthy replicas: we can safely scale down those unhealthy replicas, since that won't further
// increase unavailability.
// * The new replica set has scaled up and its replicas have become ready: then we can scale down old replica sets in a further step.
//
// maxScaledDown := allPodsCount - minAvailable - newReplicaSetPodsUnavailable
// take into account not only maxUnavailable and any surge pods that have been created, but also unavailable pods from
// the newRS, so that the unavailable pods from the newRS don't make us scale down old replica sets in a further
// step (which would increase unavailability). A worked sketch of this arithmetic follows the function.
//
// Concrete example:
//
// * 10 replicas
// * 2 maxUnavailable (absolute number, not percent)
// * 3 maxSurge (absolute number, not percent)
//
// case 1:
// * Deployment is updated, newRS is created with 3 replicas, oldRS is scaled down to 8, and newRS is scaled up to 5.
// * The new replica set pods crashloop and never become available.
// * allPodsCount is 13. minAvailable is 8. newRSPodsUnavailable is 5.
// * A node fails and causes one of the oldRS pods to become unavailable. However, 13 - 8 - 5 = 0, so the oldRS won't be scaled down.
// * The user notices the crashloop and does kubectl rollout undo to rollback.
// * newRSPodsUnavailable is 1, since we rolled back to the good replica set, so maxScaledDown = 13 - 8 - 1 = 4. 4 of the crashlooping pods will be scaled down.
// * The total number of pods will then be 9 and the newRS can be scaled up to 10.
//
// case 2:
// Same example, but pushing a new pod template instead of rolling back (aka "roll over"):
// * The new replica set created must start with 0 replicas because allPodsCount is already at 13.
// * However, newRSPodsUnavailable would also be 0, so the 2 old replica sets could be scaled down by 5 (13 - 8 - 0), which would then
// allow the new replica set to be scaled up by 5.
minAvailable := *(deployment.Spec.Replicas) - maxUnavailable
newRSUnavailablePodCount := *(newRS.Spec.Replicas) - newRS.Status.AvailableReplicas
maxScaledDown := allPodsCount - minAvailable - newRSUnavailablePodCount
if maxScaledDown <= 0 {
return false, nil
}
// Clean up unhealthy replicas first, otherwise unhealthy replicas will block deployment
// and cause timeout. See https://github.com/kubernetes/kubernetes/issues/16737
oldRSs, cleanupCount, err := dc.cleanupUnhealthyReplicas(oldRSs, deployment, maxScaledDown)
if err != nil {
return false, err
}
glog.V(4).Infof("Cleaned up unhealthy replicas from old RSes by %d", cleanupCount)
// Scale down old replica sets; we need to check maxUnavailable to ensure we can scale down.
allRSs = append(oldRSs, newRS)
scaledDownCount, err := dc.scaleDownOldReplicaSetsForRollingUpdate(allRSs, oldRSs, deployment)
if err != nil {
return false, err
}
glog.V(4).Infof("Scaled down old RSes of deployment %s by %d", deployment.Name, scaledDownCount)
totalScaledDown := cleanupCount + scaledDownCount
return totalScaledDown > 0, nil
}
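// A minimal illustrative sketch (hypothetical, not part of this package): the
// scale-down budget from case 1 above as plain arithmetic. With 13 pods in
// total, 8 required to stay available, and 5 unavailable newRS pods, no old
// pods may be scaled down.
func exampleMaxScaledDown() int32 {
var allPodsCount int32 = 13 // 8 old + 5 new
var minAvailable int32 = 8 // 10 replicas - 2 maxUnavailable
var newRSUnavailablePodCount int32 = 5 // all 5 new pods crashloop
return allPodsCount - minAvailable - newRSUnavailablePodCount // 0: nothing to scale down
}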
// cleanupUnhealthyReplicas will scale down old replica sets with unhealthy replicas, so that all unhealthy replicas will be deleted.
func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*apps.ReplicaSet, deployment *apps.Deployment, maxCleanupCount int32) ([]*apps.ReplicaSet, int32, error) {
sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))
// Safely scale down all old replica sets with unhealthy replicas. The replica set will sort the pods in the order
// not-ready < ready, unscheduled < scheduled, and pending < running. This ensures that unhealthy replicas will
// be deleted first and won't increase unavailability.
totalScaledDown := int32(0)
for i, targetRS := range oldRSs {
if totalScaledDown >= maxCleanupCount {
break
}
if *(targetRS.Spec.Replicas) == 0 {
// cannot scale down this replica set.
continue
}
glog.V(4).Infof("Found %d available pods in old RS %s/%s", targetRS.Status.AvailableReplicas, targetRS.Namespace, targetRS.Name)
if *(targetRS.Spec.Replicas) == targetRS.Status.AvailableReplicas {
// no unhealthy replicas found, no scaling required.
continue
}
scaledDownCount := int32(integer.IntMin(int(maxCleanupCount-totalScaledDown), int(*(targetRS.Spec.Replicas)-targetRS.Status.AvailableReplicas)))
newReplicasCount := *(targetRS.Spec.Replicas) - scaledDownCount
if newReplicasCount > *(targetRS.Spec.Replicas) {
return nil, 0, fmt.Errorf("when cleaning up unhealthy replicas, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, *(targetRS.Spec.Replicas), newReplicasCount)
}
_, updatedOldRS, err := dc.scaleReplicaSetAndRecordEvent(targetRS, newReplicasCount, deployment)
if err != nil {
return nil, totalScaledDown, err
}
totalScaledDown += scaledDownCount
oldRSs[i] = updatedOldRS
}
return oldRSs, totalScaledDown, nil
}
// scaleDownOldReplicaSetsForRollingUpdate scales down old replica sets when deployment strategy is "RollingUpdate".
// It needs to check maxUnavailable to ensure availability.
func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs []*apps.ReplicaSet, oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) (int32, error) {
maxUnavailable := deploymentutil.MaxUnavailable(*deployment)
// Check if we can scale down.
minAvailable := *(deployment.Spec.Replicas) - maxUnavailable
// Find the number of available pods.
availablePodCount := deploymentutil.GetAvailableReplicaCountForReplicaSets(allRSs)
if availablePodCount <= minAvailable {
// Cannot scale down.
return 0, nil
}
glog.V(4).Infof("Found %d available pods in deployment %s, scaling down old RSes", availablePodCount, deployment.Name)
sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))
totalScaledDown := int32(0)
totalScaleDownCount := availablePodCount - minAvailable
for _, targetRS := range oldRSs {
if totalScaledDown >= totalScaleDownCount {
// No further scaling required.
break
}
if *(targetRS.Spec.Replicas) == 0 {
// cannot scale down this ReplicaSet.
continue
}
// Scale down.
scaleDownCount := int32(integer.IntMin(int(*(targetRS.Spec.Replicas)), int(totalScaleDownCount-totalScaledDown)))
newReplicasCount := *(targetRS.Spec.Replicas) - scaleDownCount
if newReplicasCount > *(targetRS.Spec.Replicas) {
return 0, fmt.Errorf("when scaling down old RS, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, *(targetRS.Spec.Replicas), newReplicasCount)
}
_, _, err := dc.scaleReplicaSetAndRecordEvent(targetRS, newReplicasCount, deployment)
if err != nil {
return totalScaledDown, err
}
totalScaledDown += scaleDownCount
}
return totalScaledDown, nil
}
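// A minimal illustrative sketch (hypothetical, not part of this package): how
// the loop above spends its scale-down budget across old replica sets, using
// the same integer.IntMin clamping so no replica set goes below zero replicas.
func exampleSpendScaleDownBudget(oldSizes []int32, budget int32) []int32 {
totalScaledDown := int32(0)
for i, size := range oldSizes {
if totalScaledDown >= budget {
break // budget exhausted
}
step := int32(integer.IntMin(int(size), int(budget-totalScaledDown)))
oldSizes[i] = size - step
totalScaledDown += step
}
return oldSizes
}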

View File

@ -1,379 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"testing"
apps "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/record"
)
func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
tests := []struct {
deploymentReplicas int
maxSurge intstr.IntOrString
oldReplicas int
newReplicas int
scaleExpected bool
expectedNewReplicas int
}{
{
// Should not scale up.
deploymentReplicas: 10,
maxSurge: intstr.FromInt(0),
oldReplicas: 10,
newReplicas: 0,
scaleExpected: false,
},
{
deploymentReplicas: 10,
maxSurge: intstr.FromInt(2),
oldReplicas: 10,
newReplicas: 0,
scaleExpected: true,
expectedNewReplicas: 2,
},
{
deploymentReplicas: 10,
maxSurge: intstr.FromInt(2),
oldReplicas: 5,
newReplicas: 0,
scaleExpected: true,
expectedNewReplicas: 7,
},
{
deploymentReplicas: 10,
maxSurge: intstr.FromInt(2),
oldReplicas: 10,
newReplicas: 2,
scaleExpected: false,
},
{
// Should scale down.
deploymentReplicas: 10,
maxSurge: intstr.FromInt(2),
oldReplicas: 2,
newReplicas: 11,
scaleExpected: true,
expectedNewReplicas: 10,
},
}
for i := range tests {
test := tests[i]
t.Logf("executing scenario %d", i)
newRS := rs("foo-v2", test.newReplicas, nil, noTimestamp)
oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp)
allRSs := []*apps.ReplicaSet{newRS, oldRS}
maxUnavailable := intstr.FromInt(0)
deployment := newDeployment("foo", test.deploymentReplicas, nil, &test.maxSurge, &maxUnavailable, map[string]string{"foo": "bar"})
fake := fake.Clientset{}
controller := &DeploymentController{
client: &fake,
eventRecorder: &record.FakeRecorder{},
}
scaled, err := controller.reconcileNewReplicaSet(allRSs, newRS, deployment)
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
}
if !test.scaleExpected {
if scaled || len(fake.Actions()) > 0 {
t.Errorf("unexpected scaling: %v", fake.Actions())
}
continue
}
if test.scaleExpected && !scaled {
t.Errorf("expected scaling to occur")
continue
}
if len(fake.Actions()) != 1 {
t.Errorf("expected 1 action during scale, got: %v", fake.Actions())
continue
}
updated := fake.Actions()[0].(core.UpdateAction).GetObject().(*apps.ReplicaSet)
if e, a := test.expectedNewReplicas, int(*(updated.Spec.Replicas)); e != a {
t.Errorf("expected update to %d replicas, got %d", e, a)
}
}
}
func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
tests := []struct {
deploymentReplicas int
maxUnavailable intstr.IntOrString
oldReplicas int
newReplicas int
readyPodsFromOldRS int
readyPodsFromNewRS int
scaleExpected bool
expectedOldReplicas int
}{
{
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(0),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRS: 10,
readyPodsFromNewRS: 0,
scaleExpected: true,
expectedOldReplicas: 9,
},
{
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRS: 10,
readyPodsFromNewRS: 0,
scaleExpected: true,
expectedOldReplicas: 8,
},
{ // expect unhealthy replicas from old replica sets to be cleaned up
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRS: 8,
readyPodsFromNewRS: 0,
scaleExpected: true,
expectedOldReplicas: 8,
},
{ // expect 1 unhealthy replica from old replica sets to be cleaned up, and 1 ready pod to be scaled down
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRS: 9,
readyPodsFromNewRS: 0,
scaleExpected: true,
expectedOldReplicas: 8,
},
{ // the unavailable pods from the newRS would not make us scale down old RSs in a further step
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
oldReplicas: 8,
newReplicas: 2,
readyPodsFromOldRS: 8,
readyPodsFromNewRS: 0,
scaleExpected: false,
},
}
for i := range tests {
test := tests[i]
t.Logf("executing scenario %d", i)
newSelector := map[string]string{"foo": "new"}
oldSelector := map[string]string{"foo": "old"}
newRS := rs("foo-new", test.newReplicas, newSelector, noTimestamp)
newRS.Status.AvailableReplicas = int32(test.readyPodsFromNewRS)
oldRS := rs("foo-old", test.oldReplicas, oldSelector, noTimestamp)
oldRS.Status.AvailableReplicas = int32(test.readyPodsFromOldRS)
oldRSs := []*apps.ReplicaSet{oldRS}
allRSs := []*apps.ReplicaSet{oldRS, newRS}
maxSurge := intstr.FromInt(0)
deployment := newDeployment("foo", test.deploymentReplicas, nil, &maxSurge, &test.maxUnavailable, newSelector)
fakeClientset := fake.Clientset{}
controller := &DeploymentController{
client: &fakeClientset,
eventRecorder: &record.FakeRecorder{},
}
scaled, err := controller.reconcileOldReplicaSets(allRSs, oldRSs, newRS, deployment)
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
}
if !test.scaleExpected && scaled {
t.Errorf("unexpected scaling: %v", fakeClientset.Actions())
}
if test.scaleExpected && !scaled {
t.Errorf("expected scaling to occur")
continue
}
}
}
func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {
tests := []struct {
oldReplicas int
readyPods int
unHealthyPods int
maxCleanupCount int
cleanupCountExpected int
}{
{
oldReplicas: 10,
readyPods: 8,
unHealthyPods: 2,
maxCleanupCount: 1,
cleanupCountExpected: 1,
},
{
oldReplicas: 10,
readyPods: 8,
unHealthyPods: 2,
maxCleanupCount: 3,
cleanupCountExpected: 2,
},
{
oldReplicas: 10,
readyPods: 8,
unHealthyPods: 2,
maxCleanupCount: 0,
cleanupCountExpected: 0,
},
{
oldReplicas: 10,
readyPods: 10,
unHealthyPods: 0,
maxCleanupCount: 3,
cleanupCountExpected: 0,
},
}
for i, test := range tests {
t.Logf("executing scenario %d", i)
oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp)
oldRS.Status.AvailableReplicas = int32(test.readyPods)
oldRSs := []*apps.ReplicaSet{oldRS}
maxSurge := intstr.FromInt(2)
maxUnavailable := intstr.FromInt(2)
deployment := newDeployment("foo", 10, nil, &maxSurge, &maxUnavailable, nil)
fakeClientset := fake.Clientset{}
controller := &DeploymentController{
client: &fakeClientset,
eventRecorder: &record.FakeRecorder{},
}
_, cleanupCount, err := controller.cleanupUnhealthyReplicas(oldRSs, deployment, int32(test.maxCleanupCount))
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
}
if int(cleanupCount) != test.cleanupCountExpected {
t.Errorf("expected %v unhealthy replicas been cleaned up, got %v", test.cleanupCountExpected, cleanupCount)
continue
}
}
}
func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing.T) {
tests := []struct {
deploymentReplicas int
maxUnavailable intstr.IntOrString
readyPods int
oldReplicas int
scaleExpected bool
expectedOldReplicas int
}{
{
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(0),
readyPods: 10,
oldReplicas: 10,
scaleExpected: true,
expectedOldReplicas: 9,
},
{
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
readyPods: 10,
oldReplicas: 10,
scaleExpected: true,
expectedOldReplicas: 8,
},
{
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
readyPods: 8,
oldReplicas: 10,
scaleExpected: false,
},
{
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
readyPods: 10,
oldReplicas: 0,
scaleExpected: false,
},
{
deploymentReplicas: 10,
maxUnavailable: intstr.FromInt(2),
readyPods: 1,
oldReplicas: 10,
scaleExpected: false,
},
}
for i := range tests {
test := tests[i]
t.Logf("executing scenario %d", i)
oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp)
oldRS.Status.AvailableReplicas = int32(test.readyPods)
allRSs := []*apps.ReplicaSet{oldRS}
oldRSs := []*apps.ReplicaSet{oldRS}
maxSurge := intstr.FromInt(0)
deployment := newDeployment("foo", test.deploymentReplicas, nil, &maxSurge, &test.maxUnavailable, map[string]string{"foo": "bar"})
fakeClientset := fake.Clientset{}
controller := &DeploymentController{
client: &fakeClientset,
eventRecorder: &record.FakeRecorder{},
}
scaled, err := controller.scaleDownOldReplicaSetsForRollingUpdate(allRSs, oldRSs, deployment)
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
}
if !test.scaleExpected {
if scaled != 0 {
t.Errorf("unexpected scaling: %v", fakeClientset.Actions())
}
continue
}
if test.scaleExpected && scaled == 0 {
t.Errorf("expected scaling to occur; actions: %v", fakeClientset.Actions())
continue
}
// There are both list and update actions logged, so extract the update
// action for verification.
var updateAction core.UpdateAction
for _, action := range fakeClientset.Actions() {
switch a := action.(type) {
case core.UpdateAction:
if updateAction != nil {
t.Errorf("expected only 1 update action; had %v and found %v", updateAction, a)
} else {
updateAction = a
}
}
}
if updateAction == nil {
t.Errorf("expected an update action")
continue
}
updated := updateAction.GetObject().(*apps.ReplicaSet)
if e, a := test.expectedOldReplicas, int(*(updated.Spec.Replicas)); e != a {
t.Errorf("expected update to %d replicas, got %d", e, a)
}
}
}

View File

@ -1,539 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"fmt"
"reflect"
"sort"
"strconv"
"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/kubernetes/pkg/controller"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
)
// syncStatusOnly only updates the Deployment's Status and doesn't take any mutating actions.
func (dc *DeploymentController) syncStatusOnly(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
if err != nil {
return err
}
allRSs := append(oldRSs, newRS)
return dc.syncDeploymentStatus(allRSs, newRS, d)
}
// sync is responsible for reconciling deployments on scaling events or when they
// are paused.
func (dc *DeploymentController) sync(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
if err != nil {
return err
}
if err := dc.scale(d, newRS, oldRSs); err != nil {
// If we get an error while trying to scale, the deployment will be requeued
// so we can abort this resync
return err
}
// Clean up the deployment when it's paused and no rollback is in flight.
if d.Spec.Paused && getRollbackTo(d) == nil {
if err := dc.cleanupDeployment(oldRSs, d); err != nil {
return err
}
}
allRSs := append(oldRSs, newRS)
return dc.syncDeploymentStatus(allRSs, newRS, d)
}
// checkPausedConditions checks if the given deployment is paused or not and adds an appropriate condition.
// These conditions are needed so that we won't accidentally report lack of progress for resumed deployments
// that were paused for longer than progressDeadlineSeconds.
func (dc *DeploymentController) checkPausedConditions(d *apps.Deployment) error {
if d.Spec.ProgressDeadlineSeconds == nil {
return nil
}
cond := deploymentutil.GetDeploymentCondition(d.Status, apps.DeploymentProgressing)
if cond != nil && cond.Reason == deploymentutil.TimedOutReason {
// If we have reported lack of progress, do not overwrite it with a paused condition.
return nil
}
pausedCondExists := cond != nil && cond.Reason == deploymentutil.PausedDeployReason
needsUpdate := false
if d.Spec.Paused && !pausedCondExists {
condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.PausedDeployReason, "Deployment is paused")
deploymentutil.SetDeploymentCondition(&d.Status, *condition)
needsUpdate = true
} else if !d.Spec.Paused && pausedCondExists {
condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.ResumedDeployReason, "Deployment is resumed")
deploymentutil.SetDeploymentCondition(&d.Status, *condition)
needsUpdate = true
}
if !needsUpdate {
return nil
}
var err error
d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
return err
}
// getAllReplicaSetsAndSyncRevision returns all the replica sets for the provided deployment (new and all old), with new RS's and deployment's revision updated.
//
// rsList should come from getReplicaSetsForDeployment(d).
// podMap should come from getPodMapForDeployment(d, rsList).
//
// 1. Get all old RSes this deployment targets, and calculate the max revision number among them (maxOldV).
// 2. Get new RS this deployment targets (whose pod template matches deployment's), and update new RS's revision number to (maxOldV + 1),
// only if its revision number is smaller than (maxOldV + 1). If this step fails, we'll update it in the next deployment sync loop.
// 3. Copy new RS's revision number to deployment (update deployment's revision). If this step fails, we'll update it in the next deployment sync loop.
//
// Note that currently the deployment controller is using caches to avoid querying the server for reads.
// This may lead to stale reads of replica sets, thus incorrect deployment status.
func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList, createIfNotExisted bool) (*apps.ReplicaSet, []*apps.ReplicaSet, error) {
_, allOldRSs := deploymentutil.FindOldReplicaSets(d, rsList)
// Get new replica set with the updated revision number
newRS, err := dc.getNewReplicaSet(d, rsList, allOldRSs, createIfNotExisted)
if err != nil {
return nil, nil, err
}
return newRS, allOldRSs, nil
}
// getNewReplicaSet returns a replica set that matches the intent of the given deployment. It returns nil if the new replica set doesn't exist yet.
// 1. Get existing new RS (the RS that the given deployment targets, whose pod template is the same as deployment's).
// 2. If there's existing new RS, update its revision number if it's smaller than (maxOldRevision + 1), where maxOldRevision is the max revision number among all old RSes.
// 3. If there's no existing new RS and createIfNotExisted is true, create one with appropriate revision number (maxOldRevision + 1) and replicas.
// Note that the pod-template-hash will be added to adopted RSes and pods.
func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, oldRSs []*apps.ReplicaSet, createIfNotExisted bool) (*apps.ReplicaSet, error) {
existingNewRS := deploymentutil.FindNewReplicaSet(d, rsList)
// Calculate the max revision number among all old RSes
maxOldRevision := deploymentutil.MaxRevision(oldRSs)
// Calculate revision number for this new replica set
newRevision := strconv.FormatInt(maxOldRevision+1, 10)
// The latest replica set exists. We need to sync its annotations (this includes copying all but
// annotationsToSkip from the parent deployment, and updating revision, desiredReplicas,
// and maxReplicas), and also update the revision annotation in the deployment with the
// latest revision.
if existingNewRS != nil {
rsCopy := existingNewRS.DeepCopy()
// Set existing new replica set's annotation
annotationsUpdated := deploymentutil.SetNewReplicaSetAnnotations(d, rsCopy, newRevision, true)
minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != d.Spec.MinReadySeconds
if annotationsUpdated || minReadySecondsNeedsUpdate {
rsCopy.Spec.MinReadySeconds = d.Spec.MinReadySeconds
return dc.client.AppsV1().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy)
}
// Use the revision in existingNewRS's annotation, since it was set previously.
needsUpdate := deploymentutil.SetDeploymentRevision(d, rsCopy.Annotations[deploymentutil.RevisionAnnotation])
// If no other Progressing condition has been recorded and we need to estimate the progress
// of this deployment, then progress tracking was likely just enabled for it. In that
// case we need to take into account the first time we noticed its new replica set.
cond := deploymentutil.GetDeploymentCondition(d.Status, apps.DeploymentProgressing)
if d.Spec.ProgressDeadlineSeconds != nil && cond == nil {
msg := fmt.Sprintf("Found new replica set %q", rsCopy.Name)
condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, deploymentutil.FoundNewRSReason, msg)
deploymentutil.SetDeploymentCondition(&d.Status, *condition)
needsUpdate = true
}
if needsUpdate {
var err error
if d, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d); err != nil {
return nil, err
}
}
return rsCopy, nil
}
if !createIfNotExisted {
return nil, nil
}
// new ReplicaSet does not exist, create one.
newRSTemplate := *d.Spec.Template.DeepCopy()
podTemplateSpecHash := fmt.Sprintf("%d", controller.ComputeHash(&newRSTemplate, d.Status.CollisionCount))
newRSTemplate.Labels = labelsutil.CloneAndAddLabel(d.Spec.Template.Labels, apps.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash)
// Add podTemplateHash label to selector.
newRSSelector := labelsutil.CloneSelectorAndAddLabel(d.Spec.Selector, apps.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash)
// Create new ReplicaSet
newRS := apps.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
// Make the name deterministic, to ensure idempotence
Name: d.Name + "-" + rand.SafeEncodeString(podTemplateSpecHash),
Namespace: d.Namespace,
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(d, controllerKind)},
Labels: newRSTemplate.Labels,
},
Spec: apps.ReplicaSetSpec{
Replicas: new(int32),
MinReadySeconds: d.Spec.MinReadySeconds,
Selector: newRSSelector,
Template: newRSTemplate,
},
}
allRSs := append(oldRSs, &newRS)
newReplicasCount, err := deploymentutil.NewRSNewReplicas(d, allRSs, &newRS)
if err != nil {
return nil, err
}
*(newRS.Spec.Replicas) = newReplicasCount
// Set new replica set's annotation
deploymentutil.SetNewReplicaSetAnnotations(d, &newRS, newRevision, false)
// Create the new ReplicaSet. If it already exists, then we need to check for possible
// hash collisions. If there is any other error, we need to report it in the status of
// the Deployment.
alreadyExists := false
createdRS, err := dc.client.AppsV1().ReplicaSets(d.Namespace).Create(&newRS)
switch {
// We may end up hitting this due to a slow cache or a fast resync of the Deployment.
case errors.IsAlreadyExists(err):
alreadyExists = true
// Fetch a copy of the ReplicaSet.
rs, rsErr := dc.rsLister.ReplicaSets(newRS.Namespace).Get(newRS.Name)
if rsErr != nil {
return nil, rsErr
}
// If the Deployment owns the ReplicaSet and the ReplicaSet's PodTemplateSpec is semantically
// deep equal to the PodTemplateSpec of the Deployment, it's the Deployment's new ReplicaSet.
// Otherwise, this is a hash collision and we need to increment the collisionCount field in
// the status of the Deployment and requeue to try the creation in the next sync.
controllerRef := metav1.GetControllerOf(rs)
if controllerRef != nil && controllerRef.UID == d.UID && deploymentutil.EqualIgnoreHash(&d.Spec.Template, &rs.Spec.Template) {
createdRS = rs
err = nil
break
}
// Matching ReplicaSet is not equal - increment the collisionCount in the DeploymentStatus
// and requeue the Deployment.
if d.Status.CollisionCount == nil {
d.Status.CollisionCount = new(int32)
}
preCollisionCount := *d.Status.CollisionCount
*d.Status.CollisionCount++
// Update the collisionCount for the Deployment and let it requeue by returning the original
// error.
_, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
if dErr == nil {
glog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
}
return nil, err
case err != nil:
msg := fmt.Sprintf("Failed to create new replica set %q: %v", newRS.Name, err)
if d.Spec.ProgressDeadlineSeconds != nil {
cond := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionFalse, deploymentutil.FailedRSCreateReason, msg)
deploymentutil.SetDeploymentCondition(&d.Status, *cond)
// We don't really care about this error at this point, since we have a bigger issue to report.
// TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account
// these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568
_, _ = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
}
dc.eventRecorder.Eventf(d, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg)
return nil, err
}
if !alreadyExists && newReplicasCount > 0 {
dc.eventRecorder.Eventf(d, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled up replica set %s to %d", createdRS.Name, newReplicasCount)
}
needsUpdate := deploymentutil.SetDeploymentRevision(d, newRevision)
if !alreadyExists && d.Spec.ProgressDeadlineSeconds != nil {
msg := fmt.Sprintf("Created new replica set %q", createdRS.Name)
condition := deploymentutil.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, deploymentutil.NewReplicaSetReason, msg)
deploymentutil.SetDeploymentCondition(&d.Status, *condition)
needsUpdate = true
}
if needsUpdate {
_, err = dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
}
return createdRS, err
}
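// Illustrative sketch: the collisionCount bump above works because the new
// ReplicaSet's name is derived from a hash of the pod template salted with
// collisionCount, so a higher counter yields a fresh name on the next sync.
// exampleCollisionHash is a hypothetical, simplified stand-in for the real
// hashing (it assumes "hash/fnv" and "encoding/binary" are imported).
func exampleCollisionHash(templateBytes []byte, collisionCount *int32) string {
	hasher := fnv.New32a()
	hasher.Write(templateBytes)
	if collisionCount != nil {
		// Salting with the counter changes the hash, and therefore the name.
		binary.Write(hasher, binary.LittleEndian, *collisionCount)
	}
	return fmt.Sprintf("%x", hasher.Sum32())
}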
// scale scales proportionally in order to mitigate risk. Otherwise, scaling up can increase the size
// of the new replica set and scaling down can decrease the sizes of the old ones, both of which would
// hasten rollout progress and could produce a higher proportion of unavailable replicas if there is a
// problem with the rolled-out template. scale should run only on scaling events or when a deployment
// is paused, not during the normal rollout process.
func (dc *DeploymentController) scale(deployment *apps.Deployment, newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet) error {
// If there is only one active replica set then we should scale that up to the full count of the
// deployment. If there is no active replica set, then we should scale up the newest replica set.
if activeOrLatest := deploymentutil.FindActiveOrLatest(newRS, oldRSs); activeOrLatest != nil {
if *(activeOrLatest.Spec.Replicas) == *(deployment.Spec.Replicas) {
return nil
}
_, _, err := dc.scaleReplicaSetAndRecordEvent(activeOrLatest, *(deployment.Spec.Replicas), deployment)
return err
}
// If the new replica set is saturated, old replica sets should be fully scaled down.
// This case handles replica set adoption while the new replica set is saturated.
if deploymentutil.IsSaturated(deployment, newRS) {
for _, old := range controller.FilterActiveReplicaSets(oldRSs) {
if _, _, err := dc.scaleReplicaSetAndRecordEvent(old, 0, deployment); err != nil {
return err
}
}
return nil
}
// There are old replica sets with pods and the new replica set is not saturated.
// We need to proportionally scale all replica sets (new and old) in case of a
// rolling deployment.
if deploymentutil.IsRollingUpdate(deployment) {
allRSs := controller.FilterActiveReplicaSets(append(oldRSs, newRS))
allRSsReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
allowedSize := int32(0)
if *(deployment.Spec.Replicas) > 0 {
allowedSize = *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
}
// Number of additional replicas that can be either added or removed from the total
// replicas count. These replicas should be distributed proportionally to the active
// replica sets.
deploymentReplicasToAdd := allowedSize - allRSsReplicas
// The additional replicas should be distributed proportionally amongst the active
// replica sets from the larger to the smaller in size replica set. Scaling direction
// drives what happens in case we are trying to scale replica sets of the same size.
// In such a case when scaling up, we should scale up newer replica sets first, and
// when scaling down, we should scale down older replica sets first.
var scalingOperation string
switch {
case deploymentReplicasToAdd > 0:
sort.Sort(controller.ReplicaSetsBySizeNewer(allRSs))
scalingOperation = "up"
case deploymentReplicasToAdd < 0:
sort.Sort(controller.ReplicaSetsBySizeOlder(allRSs))
scalingOperation = "down"
}
// Iterate over all active replica sets and estimate proportions for each of them.
// The absolute value of deploymentReplicasAdded should never exceed the absolute
// value of deploymentReplicasToAdd.
deploymentReplicasAdded := int32(0)
nameToSize := make(map[string]int32)
for i := range allRSs {
rs := allRSs[i]
// Estimate proportions if we have replicas to add, otherwise simply populate
// nameToSize with the current sizes for each replica set.
if deploymentReplicasToAdd != 0 {
proportion := deploymentutil.GetProportion(rs, *deployment, deploymentReplicasToAdd, deploymentReplicasAdded)
nameToSize[rs.Name] = *(rs.Spec.Replicas) + proportion
deploymentReplicasAdded += proportion
} else {
nameToSize[rs.Name] = *(rs.Spec.Replicas)
}
}
// Update all replica sets
for i := range allRSs {
rs := allRSs[i]
// Add/remove any leftovers to the largest replica set.
if i == 0 && deploymentReplicasToAdd != 0 {
leftover := deploymentReplicasToAdd - deploymentReplicasAdded
nameToSize[rs.Name] = nameToSize[rs.Name] + leftover
if nameToSize[rs.Name] < 0 {
nameToSize[rs.Name] = 0
}
}
// TODO: Use transactions when we have them.
if _, _, err := dc.scaleReplicaSet(rs, nameToSize[rs.Name], deployment, scalingOperation); err != nil {
// Return as soon as we fail, the deployment is requeued
return err
}
}
}
return nil
}
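// Worked sketch of the proportional math above, using simplified integer
// shares (the real GetProportion rounds against the max-replicas annotation):
// a deployment running 13 replicas across two replica sets is scaled to 15
// desired with maxSurge=3, so allowedSize is 18 and deploymentReplicasToAdd
// is +5, split in proportion to the sets' sizes with leftovers going to the
// largest. exampleProportionalScale is hypothetical and assumes "fmt" is imported.
func exampleProportionalScale() {
	allowedSize := int32(15 + 3) // spec.replicas + maxSurge
	sizes := []int32{8, 5}       // active replica sets, largest first
	total := sizes[0] + sizes[1] // 13 replicas today
	toAdd := allowedSize - total // +5 to distribute
	added := int32(0)
	proportions := make([]int32, len(sizes))
	for i, size := range sizes {
		proportions[i] = toAdd * size / total // simplified proportional share
		added += proportions[i]
	}
	proportions[0] += toAdd - added // leftover goes to the largest replica set
	for i := range sizes {
		sizes[i] += proportions[i]
	}
	fmt.Println(sizes) // [12 6]: all 18 allowed replicas distributed roughly 8:5
}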
func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *apps.ReplicaSet, newScale int32, deployment *apps.Deployment) (bool, *apps.ReplicaSet, error) {
// No need to scale
if *(rs.Spec.Replicas) == newScale {
return false, rs, nil
}
var scalingOperation string
if *(rs.Spec.Replicas) < newScale {
scalingOperation = "up"
} else {
scalingOperation = "down"
}
scaled, newRS, err := dc.scaleReplicaSet(rs, newScale, deployment, scalingOperation)
return scaled, newRS, err
}
func (dc *DeploymentController) scaleReplicaSet(rs *apps.ReplicaSet, newScale int32, deployment *apps.Deployment, scalingOperation string) (bool, *apps.ReplicaSet, error) {
sizeNeedsUpdate := *(rs.Spec.Replicas) != newScale
annotationsNeedUpdate := deploymentutil.ReplicasAnnotationsNeedUpdate(rs, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment))
scaled := false
var err error
if sizeNeedsUpdate || annotationsNeedUpdate {
rsCopy := rs.DeepCopy()
*(rsCopy.Spec.Replicas) = newScale
deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment))
rs, err = dc.client.AppsV1().ReplicaSets(rsCopy.Namespace).Update(rsCopy)
if err == nil && sizeNeedsUpdate {
scaled = true
dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale)
}
}
return scaled, rs, err
}
// cleanupDeployment is responsible for cleaning up a deployment, i.e. it retains all but the latest N old
// replica sets, where N=d.Spec.RevisionHistoryLimit. Old replica sets are older versions of a deployment's
// pod template that are kept around by default 1) for historical reasons and 2) for the ability to roll back a deployment.
func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, deployment *apps.Deployment) error {
if deployment.Spec.RevisionHistoryLimit == nil {
return nil
}
// Avoid deleting replica sets with a deletion timestamp set
aliveFilter := func(rs *apps.ReplicaSet) bool {
return rs != nil && rs.ObjectMeta.DeletionTimestamp == nil
}
cleanableRSes := controller.FilterReplicaSets(oldRSs, aliveFilter)
diff := int32(len(cleanableRSes)) - *deployment.Spec.RevisionHistoryLimit
if diff <= 0 {
return nil
}
sort.Sort(controller.ReplicaSetsByCreationTimestamp(cleanableRSes))
glog.V(4).Infof("Looking to cleanup old replica sets for deployment %q", deployment.Name)
for i := int32(0); i < diff; i++ {
rs := cleanableRSes[i]
// Avoid deleting replica sets with non-zero replica counts
if rs.Status.Replicas != 0 || *(rs.Spec.Replicas) != 0 || rs.Generation > rs.Status.ObservedGeneration || rs.DeletionTimestamp != nil {
continue
}
glog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name)
if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
// Return error instead of aggregating and continuing DELETEs on the theory
// that we may be overloading the api server.
return err
}
}
return nil
}
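// Illustrative sketch of the retention rule above: delete the oldest
// len(cleanable) - revisionHistoryLimit replica sets, skipping any that
// still have replicas. exampleRetention is a hypothetical helper; cleanable
// is assumed to be sorted oldest-first, mirroring ReplicaSetsByCreationTimestamp.
func exampleRetention(cleanable []string, revisionHistoryLimit int32) []string {
	diff := int32(len(cleanable)) - revisionHistoryLimit
	if diff <= 0 {
		return nil // nothing exceeds the history limit
	}
	return cleanable[:diff] // the oldest replica sets are deletion candidates
}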
// syncDeploymentStatus checks if the status is up-to-date and syncs it if necessary
func (dc *DeploymentController) syncDeploymentStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, d *apps.Deployment) error {
newStatus := calculateStatus(allRSs, newRS, d)
if reflect.DeepEqual(d.Status, newStatus) {
return nil
}
newDeployment := d
newDeployment.Status = newStatus
_, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment)
return err
}
// calculateStatus calculates the latest status for the provided deployment by looking into the provided replica sets.
func calculateStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, deployment *apps.Deployment) apps.DeploymentStatus {
availableReplicas := deploymentutil.GetAvailableReplicaCountForReplicaSets(allRSs)
totalReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
unavailableReplicas := totalReplicas - availableReplicas
// If unavailableReplicas is negative, then that means the Deployment has more available replicas running than
// desired, e.g. whenever it scales down. In such a case we should simply default unavailableReplicas to zero.
if unavailableReplicas < 0 {
unavailableReplicas = 0
}
status := apps.DeploymentStatus{
// TODO: Ensure that if we start retrying status updates, we won't pick up a new Generation value.
ObservedGeneration: deployment.Generation,
Replicas: deploymentutil.GetActualReplicaCountForReplicaSets(allRSs),
UpdatedReplicas: deploymentutil.GetActualReplicaCountForReplicaSets([]*apps.ReplicaSet{newRS}),
ReadyReplicas: deploymentutil.GetReadyReplicaCountForReplicaSets(allRSs),
AvailableReplicas: availableReplicas,
UnavailableReplicas: unavailableReplicas,
CollisionCount: deployment.Status.CollisionCount,
}
// Copy conditions one by one so we won't mutate the original object.
conditions := deployment.Status.Conditions
for i := range conditions {
status.Conditions = append(status.Conditions, conditions[i])
}
if availableReplicas >= *(deployment.Spec.Replicas)-deploymentutil.MaxUnavailable(*deployment) {
minAvailability := deploymentutil.NewDeploymentCondition(apps.DeploymentAvailable, v1.ConditionTrue, deploymentutil.MinimumReplicasAvailable, "Deployment has minimum availability.")
deploymentutil.SetDeploymentCondition(&status, *minAvailability)
} else {
noMinAvailability := deploymentutil.NewDeploymentCondition(apps.DeploymentAvailable, v1.ConditionFalse, deploymentutil.MinimumReplicasUnavailable, "Deployment does not have minimum availability.")
deploymentutil.SetDeploymentCondition(&status, *noMinAvailability)
}
return status
}
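// Illustrative sketch: the Available condition above reduces to a single
// comparison, available >= desired - maxUnavailable. With 10 desired
// replicas and maxUnavailable=2, 8 or more available replicas keep the
// deployment Available. exampleMinimumAvailability is a hypothetical helper.
func exampleMinimumAvailability(available, desired, maxUnavailable int32) bool {
	return available >= desired-maxUnavailable
}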
// isScalingEvent checks whether the provided deployment has been updated with a scaling event
// by looking at the desired-replicas annotation in the active replica sets of the deployment.
//
// rsList should come from getReplicaSetsForDeployment(d).
// podMap should come from getPodMapForDeployment(d, rsList).
func (dc *DeploymentController) isScalingEvent(d *apps.Deployment, rsList []*apps.ReplicaSet, podMap map[types.UID]*v1.PodList) (bool, error) {
newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
if err != nil {
return false, err
}
allRSs := append(oldRSs, newRS)
for _, rs := range controller.FilterActiveReplicaSets(allRSs) {
desired, ok := deploymentutil.GetDesiredReplicasAnnotation(rs)
if !ok {
continue
}
if desired != *(d.Spec.Replicas) {
return true, nil
}
}
return false, nil
}
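// Illustrative sketch: isScalingEvent boils down to comparing the
// desired-replicas annotation stamped on each active replica set with the
// deployment's current spec.replicas; any mismatch means the deployment was
// scaled rather than updated with a new template. exampleIsScalingEvent is a
// hypothetical helper over pre-extracted annotation values.
func exampleIsScalingEvent(annotatedDesired map[string]int32, specReplicas int32) bool {
	for _, desired := range annotatedDesired {
		if desired != specReplicas {
			return true // a scaling event happened since the annotation was written
		}
	}
	return false
}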


@ -1,435 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package deployment
import (
"testing"
"time"
apps "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
testclient "k8s.io/client-go/testing"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/controller"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
)
func intOrStrP(val int) *intstr.IntOrString {
intOrStr := intstr.FromInt(val)
return &intOrStr
}
func TestScale(t *testing.T) {
newTimestamp := metav1.Date(2016, 5, 20, 2, 0, 0, 0, time.UTC)
oldTimestamp := metav1.Date(2016, 5, 20, 1, 0, 0, 0, time.UTC)
olderTimestamp := metav1.Date(2016, 5, 20, 0, 0, 0, 0, time.UTC)
var updatedTemplate = func(replicas int) *apps.Deployment {
d := newDeployment("foo", replicas, nil, nil, nil, map[string]string{"foo": "bar"})
d.Spec.Template.Labels["another"] = "label"
return d
}
tests := []struct {
name string
deployment *apps.Deployment
oldDeployment *apps.Deployment
newRS *apps.ReplicaSet
oldRSs []*apps.ReplicaSet
expectedNew *apps.ReplicaSet
expectedOld []*apps.ReplicaSet
wasntUpdated map[string]bool
desiredReplicasAnnotations map[string]int32
}{
{
name: "normal scaling event: 10 -> 12",
deployment: newDeployment("foo", 12, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 10, nil, nil, nil, nil),
newRS: rs("foo-v1", 10, nil, newTimestamp),
oldRSs: []*apps.ReplicaSet{},
expectedNew: rs("foo-v1", 12, nil, newTimestamp),
expectedOld: []*apps.ReplicaSet{},
},
{
name: "normal scaling event: 10 -> 5",
deployment: newDeployment("foo", 5, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 10, nil, nil, nil, nil),
newRS: rs("foo-v1", 10, nil, newTimestamp),
oldRSs: []*apps.ReplicaSet{},
expectedNew: rs("foo-v1", 5, nil, newTimestamp),
expectedOld: []*apps.ReplicaSet{},
},
{
name: "proportional scaling: 5 -> 10",
deployment: newDeployment("foo", 10, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil),
newRS: rs("foo-v2", 2, nil, newTimestamp),
oldRSs: []*apps.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 4, nil, newTimestamp),
expectedOld: []*apps.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)},
},
{
name: "proportional scaling: 5 -> 3",
deployment: newDeployment("foo", 3, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil),
newRS: rs("foo-v2", 2, nil, newTimestamp),
oldRSs: []*apps.ReplicaSet{rs("foo-v1", 3, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 1, nil, newTimestamp),
expectedOld: []*apps.ReplicaSet{rs("foo-v1", 2, nil, oldTimestamp)},
},
{
name: "proportional scaling: 9 -> 4",
deployment: newDeployment("foo", 4, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 9, nil, nil, nil, nil),
newRS: rs("foo-v2", 8, nil, newTimestamp),
oldRSs: []*apps.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 4, nil, newTimestamp),
expectedOld: []*apps.ReplicaSet{rs("foo-v1", 0, nil, oldTimestamp)},
},
{
name: "proportional scaling: 7 -> 10",
deployment: newDeployment("foo", 10, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 7, nil, nil, nil, nil),
newRS: rs("foo-v3", 2, nil, newTimestamp),
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 3, nil, newTimestamp),
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 4, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)},
},
{
name: "proportional scaling: 13 -> 8",
deployment: newDeployment("foo", 8, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 13, nil, nil, nil, nil),
newRS: rs("foo-v3", 2, nil, newTimestamp),
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 8, nil, oldTimestamp), rs("foo-v1", 3, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 1, nil, newTimestamp),
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 5, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
},
// Scales up the new replica set.
{
name: "leftover distribution: 3 -> 4",
deployment: newDeployment("foo", 4, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 3, nil, nil, nil, nil),
newRS: rs("foo-v3", 1, nil, newTimestamp),
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 2, nil, newTimestamp),
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
},
// Scales down the older replica set.
{
name: "leftover distribution: 3 -> 2",
deployment: newDeployment("foo", 2, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 3, nil, nil, nil, nil),
newRS: rs("foo-v3", 1, nil, newTimestamp),
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 1, nil, newTimestamp),
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
},
// Scales up the latest replica set first.
{
name: "proportional scaling (no new rs): 4 -> 5",
deployment: newDeployment("foo", 5, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 4, nil, nil, nil, nil),
newRS: nil,
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
expectedNew: nil,
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 3, nil, oldTimestamp), rs("foo-v1", 2, nil, olderTimestamp)},
},
// Scales down to zero
{
name: "proportional scaling: 6 -> 0",
deployment: newDeployment("foo", 0, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 6, nil, nil, nil, nil),
newRS: rs("foo-v3", 3, nil, newTimestamp),
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 0, nil, newTimestamp),
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
},
// Scales up from zero
{
name: "proportional scaling: 0 -> 6",
deployment: newDeployment("foo", 6, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 6, nil, nil, nil, nil),
newRS: rs("foo-v3", 0, nil, newTimestamp),
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 6, nil, newTimestamp),
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 0, nil, oldTimestamp), rs("foo-v1", 0, nil, olderTimestamp)},
wasntUpdated: map[string]bool{"foo-v2": true, "foo-v1": true},
},
// Scenario: deployment.spec.replicas == 3 ( foo-v1.spec.replicas == foo-v2.spec.replicas == foo-v3.spec.replicas == 1 )
// Deployment is scaled to 5. foo-v3.spec.replicas and foo-v2.spec.replicas should increment by 1 but foo-v2 fails to
// update.
{
name: "failed rs update",
deployment: newDeployment("foo", 5, nil, nil, nil, nil),
oldDeployment: newDeployment("foo", 5, nil, nil, nil, nil),
newRS: rs("foo-v3", 2, nil, newTimestamp),
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 1, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
expectedNew: rs("foo-v3", 2, nil, newTimestamp),
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 2, nil, oldTimestamp), rs("foo-v1", 1, nil, olderTimestamp)},
wasntUpdated: map[string]bool{"foo-v3": true, "foo-v1": true},
desiredReplicasAnnotations: map[string]int32{"foo-v2": int32(3)},
},
{
name: "deployment with surge pods",
deployment: newDeployment("foo", 20, nil, intOrStrP(2), nil, nil),
oldDeployment: newDeployment("foo", 10, nil, intOrStrP(2), nil, nil),
newRS: rs("foo-v2", 6, nil, newTimestamp),
oldRSs: []*apps.ReplicaSet{rs("foo-v1", 6, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 11, nil, newTimestamp),
expectedOld: []*apps.ReplicaSet{rs("foo-v1", 11, nil, oldTimestamp)},
},
{
name: "change both surge and size",
deployment: newDeployment("foo", 50, nil, intOrStrP(6), nil, nil),
oldDeployment: newDeployment("foo", 10, nil, intOrStrP(3), nil, nil),
newRS: rs("foo-v2", 5, nil, newTimestamp),
oldRSs: []*apps.ReplicaSet{rs("foo-v1", 8, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 22, nil, newTimestamp),
expectedOld: []*apps.ReplicaSet{rs("foo-v1", 34, nil, oldTimestamp)},
},
{
name: "change both size and template",
deployment: updatedTemplate(14),
oldDeployment: newDeployment("foo", 10, nil, nil, nil, map[string]string{"foo": "bar"}),
newRS: nil,
oldRSs: []*apps.ReplicaSet{rs("foo-v2", 7, nil, newTimestamp), rs("foo-v1", 3, nil, oldTimestamp)},
expectedNew: nil,
expectedOld: []*apps.ReplicaSet{rs("foo-v2", 10, nil, newTimestamp), rs("foo-v1", 4, nil, oldTimestamp)},
},
{
name: "saturated but broken new replica set does not affect old pods",
deployment: newDeployment("foo", 2, nil, intOrStrP(1), intOrStrP(1), nil),
oldDeployment: newDeployment("foo", 2, nil, intOrStrP(1), intOrStrP(1), nil),
newRS: func() *apps.ReplicaSet {
rs := rs("foo-v2", 2, nil, newTimestamp)
rs.Status.AvailableReplicas = 0
return rs
}(),
oldRSs: []*apps.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},
expectedNew: rs("foo-v2", 2, nil, newTimestamp),
expectedOld: []*apps.ReplicaSet{rs("foo-v1", 1, nil, oldTimestamp)},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
_ = olderTimestamp
t.Log(test.name)
fake := fake.Clientset{}
dc := &DeploymentController{
client: &fake,
eventRecorder: &record.FakeRecorder{},
}
if test.newRS != nil {
desiredReplicas := *(test.oldDeployment.Spec.Replicas)
if desired, ok := test.desiredReplicasAnnotations[test.newRS.Name]; ok {
desiredReplicas = desired
}
deploymentutil.SetReplicasAnnotations(test.newRS, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment))
}
for i := range test.oldRSs {
rs := test.oldRSs[i]
if rs == nil {
continue
}
desiredReplicas := *(test.oldDeployment.Spec.Replicas)
if desired, ok := test.desiredReplicasAnnotations[rs.Name]; ok {
desiredReplicas = desired
}
deploymentutil.SetReplicasAnnotations(rs, desiredReplicas, desiredReplicas+deploymentutil.MaxSurge(*test.oldDeployment))
}
if err := dc.scale(test.deployment, test.newRS, test.oldRSs); err != nil {
t.Errorf("%s: unexpected error: %v", test.name, err)
return
}
// Construct the nameToSize map that will hold all the sizes we got out of the tests.
// Skip updating the map if a replica set wasn't updated, since there will be
// no update action for it.
nameToSize := make(map[string]int32)
if test.newRS != nil {
nameToSize[test.newRS.Name] = *(test.newRS.Spec.Replicas)
}
for i := range test.oldRSs {
rs := test.oldRSs[i]
nameToSize[rs.Name] = *(rs.Spec.Replicas)
}
// Get all the UPDATE actions and update nameToSize with all the updated sizes.
for _, action := range fake.Actions() {
rs := action.(testclient.UpdateAction).GetObject().(*apps.ReplicaSet)
if !test.wasntUpdated[rs.Name] {
nameToSize[rs.Name] = *(rs.Spec.Replicas)
}
}
if test.expectedNew != nil && test.newRS != nil && *(test.expectedNew.Spec.Replicas) != nameToSize[test.newRS.Name] {
t.Errorf("%s: expected new replicas: %d, got: %d", test.name, *(test.expectedNew.Spec.Replicas), nameToSize[test.newRS.Name])
return
}
if len(test.expectedOld) != len(test.oldRSs) {
t.Errorf("%s: expected %d old replica sets, got %d", test.name, len(test.expectedOld), len(test.oldRSs))
return
}
for n := range test.oldRSs {
rs := test.oldRSs[n]
expected := test.expectedOld[n]
if *(expected.Spec.Replicas) != nameToSize[rs.Name] {
t.Errorf("%s: expected old (%s) replicas: %d, got: %d", test.name, rs.Name, *(expected.Spec.Replicas), nameToSize[rs.Name])
}
}
})
}
}
func TestDeploymentController_cleanupDeployment(t *testing.T) {
selector := map[string]string{"foo": "bar"}
alreadyDeleted := newRSWithStatus("foo-1", 0, 0, selector)
now := metav1.Now()
alreadyDeleted.DeletionTimestamp = &now
tests := []struct {
oldRSs []*apps.ReplicaSet
revisionHistoryLimit int32
expectedDeletions int
}{
{
oldRSs: []*apps.ReplicaSet{
newRSWithStatus("foo-1", 0, 0, selector),
newRSWithStatus("foo-2", 0, 0, selector),
newRSWithStatus("foo-3", 0, 0, selector),
},
revisionHistoryLimit: 1,
expectedDeletions: 2,
},
{
// Only delete the replica set with Spec.Replicas = Status.Replicas = 0.
oldRSs: []*apps.ReplicaSet{
newRSWithStatus("foo-1", 0, 0, selector),
newRSWithStatus("foo-2", 0, 1, selector),
newRSWithStatus("foo-3", 1, 0, selector),
newRSWithStatus("foo-4", 1, 1, selector),
},
revisionHistoryLimit: 0,
expectedDeletions: 1,
},
{
oldRSs: []*apps.ReplicaSet{
newRSWithStatus("foo-1", 0, 0, selector),
newRSWithStatus("foo-2", 0, 0, selector),
},
revisionHistoryLimit: 0,
expectedDeletions: 2,
},
{
oldRSs: []*apps.ReplicaSet{
newRSWithStatus("foo-1", 1, 1, selector),
newRSWithStatus("foo-2", 1, 1, selector),
},
revisionHistoryLimit: 0,
expectedDeletions: 0,
},
{
oldRSs: []*apps.ReplicaSet{
alreadyDeleted,
},
revisionHistoryLimit: 0,
expectedDeletions: 0,
},
}
for i := range tests {
test := tests[i]
t.Logf("scenario %d", i)
fake := &fake.Clientset{}
informers := informers.NewSharedInformerFactory(fake, controller.NoResyncPeriodFunc())
controller, err := NewDeploymentController(informers.Apps().V1().Deployments(), informers.Apps().V1().ReplicaSets(), informers.Core().V1().Pods(), fake)
if err != nil {
t.Fatalf("error creating Deployment controller: %v", err)
}
controller.eventRecorder = &record.FakeRecorder{}
controller.dListerSynced = alwaysReady
controller.rsListerSynced = alwaysReady
controller.podListerSynced = alwaysReady
for _, rs := range test.oldRSs {
informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
}
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
d := newDeployment("foo", 1, &test.revisionHistoryLimit, nil, nil, map[string]string{"foo": "bar"})
controller.cleanupDeployment(test.oldRSs, d)
gotDeletions := 0
for _, action := range fake.Actions() {
if "delete" == action.GetVerb() {
gotDeletions++
}
}
if gotDeletions != test.expectedDeletions {
t.Errorf("expect %v old replica sets been deleted, but got %v", test.expectedDeletions, gotDeletions)
continue
}
}
}


@ -1,75 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"deployment_util.go",
"pod_util.go",
"replicaset_util.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/deployment/util",
deps = [
"//pkg/apis/extensions:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/util/labels:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/util/integer:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"deployment_util_test.go",
"hash_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/controller:go_default_library",
"//pkg/util/hash:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@ -1,920 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"sort"
"strconv"
"strings"
"time"
"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
"k8s.io/client-go/util/integer"
internalextensions "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/controller"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
)
const (
// RevisionAnnotation is the revision annotation of a deployment's replica sets which records its rollout sequence
RevisionAnnotation = "deployment.kubernetes.io/revision"
// RevisionHistoryAnnotation maintains the history of all old revisions that a replica set has served for a deployment.
RevisionHistoryAnnotation = "deployment.kubernetes.io/revision-history"
// DesiredReplicasAnnotation is the desired replicas for a deployment recorded as an annotation
// in its replica sets. Helps in separating scaling events from the rollout process and for
// determining if the new replica set for a deployment is really saturated.
DesiredReplicasAnnotation = "deployment.kubernetes.io/desired-replicas"
// MaxReplicasAnnotation is the maximum replicas a deployment can have at a given point, which
// is deployment.spec.replicas + maxSurge. Used by the underlying replica sets to estimate their
// proportions in case the deployment has surge replicas.
MaxReplicasAnnotation = "deployment.kubernetes.io/max-replicas"
// RollbackRevisionNotFound is the rollback event reason used when the revision to roll back to is not found
RollbackRevisionNotFound = "DeploymentRollbackRevisionNotFound"
// RollbackTemplateUnchanged is the rollback event reason used when the rollback template is unchanged
RollbackTemplateUnchanged = "DeploymentRollbackTemplateUnchanged"
// RollbackDone is the rollback event reason used when a rollback is done
RollbackDone = "DeploymentRollback"
// Reasons for deployment conditions
//
// Progressing:
//
// ReplicaSetUpdatedReason is added in a deployment when one of its replica sets is updated as part
// of the rollout process.
ReplicaSetUpdatedReason = "ReplicaSetUpdated"
// FailedRSCreateReason is added in a deployment when it cannot create a new replica set.
FailedRSCreateReason = "ReplicaSetCreateError"
// NewReplicaSetReason is added in a deployment when it creates a new replica set.
NewReplicaSetReason = "NewReplicaSetCreated"
// FoundNewRSReason is added in a deployment when it adopts an existing replica set.
FoundNewRSReason = "FoundNewReplicaSet"
// NewRSAvailableReason is added in a deployment when its newest replica set is made available,
// i.e. the number of new pods that have passed readiness checks and run for at least minReadySeconds
// is at least the minimum number of available pods that need to run for the deployment.
NewRSAvailableReason = "NewReplicaSetAvailable"
// TimedOutReason is added in a deployment when its newest replica set fails to show any progress
// within the given deadline (progressDeadlineSeconds).
TimedOutReason = "ProgressDeadlineExceeded"
// PausedDeployReason is added in a deployment when it is paused. Lack of progress shouldn't be
// estimated once a deployment is paused.
PausedDeployReason = "DeploymentPaused"
// ResumedDeployReason is added in a deployment when it is resumed. Useful for not accidentally failing
// deployments that were paused mid-rollout and are bounded by a deadline.
ResumedDeployReason = "DeploymentResumed"
//
// Available:
//
// MinimumReplicasAvailable is added in a deployment when it has its minimum replicas required available.
MinimumReplicasAvailable = "MinimumReplicasAvailable"
// MinimumReplicasUnavailable is added in a deployment when it doesn't have the minimum required replicas
// available.
MinimumReplicasUnavailable = "MinimumReplicasUnavailable"
)
// NewDeploymentCondition creates a new deployment condition.
func NewDeploymentCondition(condType apps.DeploymentConditionType, status v1.ConditionStatus, reason, message string) *apps.DeploymentCondition {
return &apps.DeploymentCondition{
Type: condType,
Status: status,
LastUpdateTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: reason,
Message: message,
}
}
// GetDeploymentCondition returns the condition with the provided type.
func GetDeploymentCondition(status apps.DeploymentStatus, condType apps.DeploymentConditionType) *apps.DeploymentCondition {
for i := range status.Conditions {
c := status.Conditions[i]
if c.Type == condType {
return &c
}
}
return nil
}
// SetDeploymentCondition updates the deployment to include the provided condition. If the condition that
// we are about to add already exists and has the same status and reason then we are not going to update.
func SetDeploymentCondition(status *apps.DeploymentStatus, condition apps.DeploymentCondition) {
currentCond := GetDeploymentCondition(*status, condition.Type)
if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason {
return
}
// Do not update lastTransitionTime if the status of the condition doesn't change.
if currentCond != nil && currentCond.Status == condition.Status {
condition.LastTransitionTime = currentCond.LastTransitionTime
}
newConditions := filterOutCondition(status.Conditions, condition.Type)
status.Conditions = append(newConditions, condition)
}
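// Usage sketch (hypothetical caller): a controller builds a condition with
// NewDeploymentCondition and installs it with SetDeploymentCondition. An
// unchanged status+reason pair is a no-op, and a status flip replaces the
// old condition with a fresh LastTransitionTime.
func exampleSetCondition(status *apps.DeploymentStatus) {
	cond := NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, ReplicaSetUpdatedReason, "rollout is progressing")
	SetDeploymentCondition(status, *cond)
}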
// RemoveDeploymentCondition removes the deployment condition with the provided type.
func RemoveDeploymentCondition(status *apps.DeploymentStatus, condType apps.DeploymentConditionType) {
status.Conditions = filterOutCondition(status.Conditions, condType)
}
// filterOutCondition returns a new slice of deployment conditions without conditions with the provided type.
func filterOutCondition(conditions []apps.DeploymentCondition, condType apps.DeploymentConditionType) []apps.DeploymentCondition {
var newConditions []apps.DeploymentCondition
for _, c := range conditions {
if c.Type == condType {
continue
}
newConditions = append(newConditions, c)
}
return newConditions
}
// ReplicaSetToDeploymentCondition converts a replica set condition into a deployment condition.
// Useful for promoting replica set failure conditions into deployments.
func ReplicaSetToDeploymentCondition(cond apps.ReplicaSetCondition) apps.DeploymentCondition {
return apps.DeploymentCondition{
Type: apps.DeploymentConditionType(cond.Type),
Status: cond.Status,
LastTransitionTime: cond.LastTransitionTime,
LastUpdateTime: cond.LastTransitionTime,
Reason: cond.Reason,
Message: cond.Message,
}
}
// SetDeploymentRevision updates the revision for a deployment.
func SetDeploymentRevision(deployment *apps.Deployment, revision string) bool {
updated := false
if deployment.Annotations == nil {
deployment.Annotations = make(map[string]string)
}
if deployment.Annotations[RevisionAnnotation] != revision {
deployment.Annotations[RevisionAnnotation] = revision
updated = true
}
return updated
}
// MaxRevision finds the highest revision in the replica sets
func MaxRevision(allRSs []*apps.ReplicaSet) int64 {
max := int64(0)
for _, rs := range allRSs {
if v, err := Revision(rs); err != nil {
// Skip replica sets whose revision information fails to parse
glog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
} else if v > max {
max = v
}
}
return max
}
// LastRevision finds the second max revision number in all replica sets (the last revision)
func LastRevision(allRSs []*apps.ReplicaSet) int64 {
max, secMax := int64(0), int64(0)
for _, rs := range allRSs {
if v, err := Revision(rs); err != nil {
// Skip replica sets whose revision information fails to parse
glog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
} else if v >= max {
secMax = max
max = v
} else if v > secMax {
secMax = v
}
}
return secMax
}
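// Illustrative sketch of the max/second-max scan used by MaxRevision and
// LastRevision above: over revisions [1, 4, 3, 2] it yields max=4 and
// secMax=3, so the last revision (the rollback target) is 3.
// exampleMaxAndSecondMax is a hypothetical helper over plain ints.
func exampleMaxAndSecondMax(revisions []int64) (max, secMax int64) {
	for _, v := range revisions {
		if v >= max {
			secMax, max = max, v // v is a new maximum; demote the old one
		} else if v > secMax {
			secMax = v
		}
	}
	return max, secMax
}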
// Revision returns the revision number of the input object.
func Revision(obj runtime.Object) (int64, error) {
acc, err := meta.Accessor(obj)
if err != nil {
return 0, err
}
v, ok := acc.GetAnnotations()[RevisionAnnotation]
if !ok {
return 0, nil
}
return strconv.ParseInt(v, 10, 64)
}
// SetNewReplicaSetAnnotations sets new replica set's annotations appropriately by updating its revision and
// copying required deployment annotations to it; it returns true if replica set's annotation is changed.
func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.ReplicaSet, newRevision string, exists bool) bool {
// First, copy deployment's annotations (except for apply and revision annotations)
annotationChanged := copyDeploymentAnnotationsToReplicaSet(deployment, newRS)
// Then, update replica set's revision annotation
if newRS.Annotations == nil {
newRS.Annotations = make(map[string]string)
}
oldRevision, ok := newRS.Annotations[RevisionAnnotation]
// The newRS's revision should be the greatest among all RSes. Usually, its revision number is newRevision (the max revision number
// of all old RSes + 1). However, it's possible that some of the old RSes are deleted after the newRS revision has been updated, and
// newRevision becomes smaller than the newRS's revision. We should only update the newRS revision when it is smaller than newRevision.
oldRevisionInt, err := strconv.ParseInt(oldRevision, 10, 64)
if err != nil {
if oldRevision != "" {
glog.Warningf("Updating replica set revision OldRevision not int %s", err)
return false
}
// If the RS annotation is empty then initialise it to 0
oldRevisionInt = 0
}
newRevisionInt, err := strconv.ParseInt(newRevision, 10, 64)
if err != nil {
glog.Warningf("Updating replica set revision NewRevision not int %s", err)
return false
}
if oldRevisionInt < newRevisionInt {
newRS.Annotations[RevisionAnnotation] = newRevision
annotationChanged = true
glog.V(4).Infof("Updating replica set %q revision to %s", newRS.Name, newRevision)
}
// If a revision annotation already existed and this replica set was updated with a new revision
// then that means we are rolling back to this replica set. We need to preserve the old revisions
// for historical information.
if ok && annotationChanged {
revisionHistoryAnnotation := newRS.Annotations[RevisionHistoryAnnotation]
oldRevisions := strings.Split(revisionHistoryAnnotation, ",")
if len(oldRevisions[0]) == 0 {
newRS.Annotations[RevisionHistoryAnnotation] = oldRevision
} else {
oldRevisions = append(oldRevisions, oldRevision)
newRS.Annotations[RevisionHistoryAnnotation] = strings.Join(oldRevisions, ",")
}
}
// If the new replica set is about to be created, we need to add replica annotations to it.
if !exists && SetReplicasAnnotations(newRS, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+MaxSurge(*deployment)) {
annotationChanged = true
}
return annotationChanged
}
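// Illustrative sketch of the revision-history bookkeeping above: when a
// rollback reuses an old replica set, the replaced revision is appended to
// the comma-separated history annotation, e.g. "" -> "1" -> "1,3".
// exampleAppendRevisionHistory is a hypothetical helper.
func exampleAppendRevisionHistory(history, oldRevision string) string {
	if history == "" {
		return oldRevision
	}
	return history + "," + oldRevision
}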
var annotationsToSkip = map[string]bool{
v1.LastAppliedConfigAnnotation: true,
RevisionAnnotation: true,
RevisionHistoryAnnotation: true,
DesiredReplicasAnnotation: true,
MaxReplicasAnnotation: true,
apps.DeprecatedRollbackTo: true,
}
// skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key
// TODO: How to decide which annotations should / should not be copied?
// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615
func skipCopyAnnotation(key string) bool {
return annotationsToSkip[key]
}
// copyDeploymentAnnotationsToReplicaSet copies deployment's annotations to replica set's annotations,
// and returns true if replica set's annotation is changed.
// Note that apply and revision annotations are not copied.
func copyDeploymentAnnotationsToReplicaSet(deployment *apps.Deployment, rs *apps.ReplicaSet) bool {
rsAnnotationsChanged := false
if rs.Annotations == nil {
rs.Annotations = make(map[string]string)
}
for k, v := range deployment.Annotations {
// newRS revision is updated automatically in getNewReplicaSet, and the deployment's revision number is then updated
// by copying its newRS revision number. We should not copy deployment's revision to its newRS, since the update of
// deployment revision number may fail (revision becomes stale) and the revision number in newRS is more reliable.
if skipCopyAnnotation(k) || rs.Annotations[k] == v {
continue
}
rs.Annotations[k] = v
rsAnnotationsChanged = true
}
return rsAnnotationsChanged
}
// SetDeploymentAnnotationsTo sets the deployment's annotations to the given RS's annotations.
// This action should be done if and only if the deployment is rolling back to this rs.
// Note that apply and revision annotations are not changed.
func SetDeploymentAnnotationsTo(deployment *apps.Deployment, rollbackToRS *apps.ReplicaSet) {
deployment.Annotations = getSkippedAnnotations(deployment.Annotations)
for k, v := range rollbackToRS.Annotations {
if !skipCopyAnnotation(k) {
deployment.Annotations[k] = v
}
}
}
func getSkippedAnnotations(annotations map[string]string) map[string]string {
skippedAnnotations := make(map[string]string)
for k, v := range annotations {
if skipCopyAnnotation(k) {
skippedAnnotations[k] = v
}
}
return skippedAnnotations
}
// FindActiveOrLatest returns the only active or the latest replica set in case there is at most one active
// replica set. If there are more active replica sets, then we should proportionally scale them.
func FindActiveOrLatest(newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet) *apps.ReplicaSet {
if newRS == nil && len(oldRSs) == 0 {
return nil
}
sort.Sort(sort.Reverse(controller.ReplicaSetsByCreationTimestamp(oldRSs)))
allRSs := controller.FilterActiveReplicaSets(append(oldRSs, newRS))
switch len(allRSs) {
case 0:
// If there is no active replica set then we should return the newest.
if newRS != nil {
return newRS
}
return oldRSs[0]
case 1:
return allRSs[0]
default:
return nil
}
}
// GetDesiredReplicasAnnotation returns the number of desired replicas
func GetDesiredReplicasAnnotation(rs *apps.ReplicaSet) (int32, bool) {
return getIntFromAnnotation(rs, DesiredReplicasAnnotation)
}
func getMaxReplicasAnnotation(rs *apps.ReplicaSet) (int32, bool) {
return getIntFromAnnotation(rs, MaxReplicasAnnotation)
}
func getIntFromAnnotation(rs *apps.ReplicaSet, annotationKey string) (int32, bool) {
annotationValue, ok := rs.Annotations[annotationKey]
if !ok {
return int32(0), false
}
intValue, err := strconv.Atoi(annotationValue)
if err != nil {
glog.V(2).Infof("Cannot convert the value %q with annotation key %q for the replica set %q", annotationValue, annotationKey, rs.Name)
return int32(0), false
}
return int32(intValue), true
}
// SetReplicasAnnotations sets the desiredReplicas and maxReplicas into the annotations
func SetReplicasAnnotations(rs *apps.ReplicaSet, desiredReplicas, maxReplicas int32) bool {
updated := false
if rs.Annotations == nil {
rs.Annotations = make(map[string]string)
}
desiredString := fmt.Sprintf("%d", desiredReplicas)
if hasString := rs.Annotations[DesiredReplicasAnnotation]; hasString != desiredString {
rs.Annotations[DesiredReplicasAnnotation] = desiredString
updated = true
}
maxString := fmt.Sprintf("%d", maxReplicas)
if hasString := rs.Annotations[MaxReplicasAnnotation]; hasString != maxString {
rs.Annotations[MaxReplicasAnnotation] = maxString
updated = true
}
return updated
}
// ReplicasAnnotationsNeedUpdate returns true if the desiredReplicas and maxReplicas annotations need to be updated
func ReplicasAnnotationsNeedUpdate(rs *apps.ReplicaSet, desiredReplicas, maxReplicas int32) bool {
if rs.Annotations == nil {
return true
}
desiredString := fmt.Sprintf("%d", desiredReplicas)
if hasString := rs.Annotations[DesiredReplicasAnnotation]; hasString != desiredString {
return true
}
maxString := fmt.Sprintf("%d", maxReplicas)
if hasString := rs.Annotations[MaxReplicasAnnotation]; hasString != maxString {
return true
}
return false
}
// MaxUnavailable returns the maximum unavailable pods a rolling deployment can take.
func MaxUnavailable(deployment apps.Deployment) int32 {
if !IsRollingUpdate(&deployment) || *(deployment.Spec.Replicas) == 0 {
return int32(0)
}
// Error caught by validation
_, maxUnavailable, _ := ResolveFenceposts(deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas))
if maxUnavailable > *deployment.Spec.Replicas {
return *deployment.Spec.Replicas
}
return maxUnavailable
}
// MinAvailable returns the minimum available pods of a given deployment
func MinAvailable(deployment *apps.Deployment) int32 {
if !IsRollingUpdate(deployment) {
return int32(0)
}
return *(deployment.Spec.Replicas) - MaxUnavailable(*deployment)
}
// MaxSurge returns the maximum surge pods a rolling deployment can take.
func MaxSurge(deployment apps.Deployment) int32 {
if !IsRollingUpdate(&deployment) {
return int32(0)
}
// Error caught by validation
maxSurge, _, _ := ResolveFenceposts(deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas))
return maxSurge
}
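// Worked sketch of the fencepost arithmetic that ResolveFenceposts performs
// for MaxUnavailable and MaxSurge above: percentages resolve against
// spec.replicas with maxSurge rounded up and maxUnavailable rounded down, so
// a 10-replica deployment with 25%/25% gets maxSurge=3 and maxUnavailable=2.
// exampleResolvePercents is a hypothetical, simplified helper.
func exampleResolvePercents(replicas int32, surgePercent, unavailablePercent int) (maxSurge, maxUnavailable int32) {
	maxSurge = int32((int(replicas)*surgePercent + 99) / 100)        // round up
	maxUnavailable = int32(int(replicas) * unavailablePercent / 100) // round down
	if maxSurge == 0 && maxUnavailable == 0 {
		// Both zero would deadlock a rolling update; fall back to allowing
		// one unavailable pod, as the real resolution does.
		maxUnavailable = 1
	}
	return maxSurge, maxUnavailable
}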
// GetProportion will estimate the proportion for the provided replica set using 1. the current size
// of the parent deployment, 2. the replica count that needs to be added to the replica sets of the
// deployment, and 3. the total replicas added to the replica sets of the deployment so far.
func GetProportion(rs *apps.ReplicaSet, d apps.Deployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 {
if rs == nil || *(rs.Spec.Replicas) == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded {
return int32(0)
}
rsFraction := getReplicaSetFraction(*rs, d)
allowed := deploymentReplicasToAdd - deploymentReplicasAdded
if deploymentReplicasToAdd > 0 {
// Use the minimum between the replica set fraction and the maximum allowed replicas
// when scaling up. This way we ensure we will not scale up more than the allowed
// replicas we can add.
return integer.Int32Min(rsFraction, allowed)
}
// Use the maximum between the replica set fraction and the maximum allowed replicas
// when scaling down. This way we ensure we will not scale down more than the allowed
// replicas we can remove.
return integer.Int32Max(rsFraction, allowed)
}
// getReplicaSetFraction estimates the fraction of replicas a replica set can have in
// 1. a scaling event during a rollout or 2. when scaling a paused deployment.
func getReplicaSetFraction(rs apps.ReplicaSet, d apps.Deployment) int32 {
// If we are scaling down to zero then the fraction of this replica set is its whole size (negative)
if *(d.Spec.Replicas) == int32(0) {
return -*(rs.Spec.Replicas)
}
deploymentReplicas := *(d.Spec.Replicas) + MaxSurge(d)
annotatedReplicas, ok := getMaxReplicasAnnotation(&rs)
if !ok {
// If we cannot find the annotation then fall back to the current deployment size. Note that this
// will not be an accurate proportion estimate if other replica sets have different values, which
// means that the deployment was scaled at some point, but we will at least stay within limits
// thanks to the min-max comparisons in GetProportion.
annotatedReplicas = d.Status.Replicas
}
// We should never proportionally scale up from zero which means rs.spec.replicas and annotatedReplicas
// will never be zero here.
newRSsize := (float64(*(rs.Spec.Replicas) * deploymentReplicas)) / float64(annotatedReplicas)
return integer.RoundToInt32(newRSsize) - *(rs.Spec.Replicas)
}
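// Worked sketch of the fraction math above: scaling a deployment from 10 to
// 15 replicas (no surge), a replica set currently at 6 with a max-replicas
// annotation of 10 gets round(6*15/10) - 6 = +3, keeping its 60% share.
// exampleFraction is a hypothetical helper and assumes "math" is imported.
func exampleFraction(rsReplicas, newDeploymentTotal, annotatedMax int32) int32 {
	newSize := float64(rsReplicas*newDeploymentTotal) / float64(annotatedMax)
	return int32(math.Round(newSize)) - rsReplicas
}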
// GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and ReplicaSetList from client interface.
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets includes all old replica sets.
// The third returned value is the new replica set, and it may be nil if it doesn't exist yet.
func GetAllReplicaSets(deployment *apps.Deployment, c appsclient.AppsV1Interface) ([]*apps.ReplicaSet, []*apps.ReplicaSet, *apps.ReplicaSet, error) {
rsList, err := ListReplicaSets(deployment, RsListFromClient(c))
if err != nil {
return nil, nil, nil, err
}
oldRSes, allOldRSes := FindOldReplicaSets(deployment, rsList)
newRS := FindNewReplicaSet(deployment, rsList)
return oldRSes, allOldRSes, newRS, nil
}
// GetOldReplicaSets returns the old replica sets targeted by the given Deployment; get PodList and ReplicaSetList from client interface.
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets includes all old replica sets.
func GetOldReplicaSets(deployment *apps.Deployment, c appsclient.AppsV1Interface) ([]*apps.ReplicaSet, []*apps.ReplicaSet, error) {
rsList, err := ListReplicaSets(deployment, RsListFromClient(c))
if err != nil {
return nil, nil, err
}
oldRSes, allOldRSes := FindOldReplicaSets(deployment, rsList)
return oldRSes, allOldRSes, nil
}
// GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface.
// Returns nil if the new replica set doesn't exist yet.
func GetNewReplicaSet(deployment *apps.Deployment, c appsclient.AppsV1Interface) (*apps.ReplicaSet, error) {
rsList, err := ListReplicaSets(deployment, RsListFromClient(c))
if err != nil {
return nil, err
}
return FindNewReplicaSet(deployment, rsList), nil
}
// RsListFromClient returns an rsListFunc that wraps the given client.
func RsListFromClient(c appsclient.AppsV1Interface) RsListFunc {
return func(namespace string, options metav1.ListOptions) ([]*apps.ReplicaSet, error) {
rsList, err := c.ReplicaSets(namespace).List(options)
if err != nil {
return nil, err
}
var ret []*apps.ReplicaSet
for i := range rsList.Items {
ret = append(ret, &rsList.Items[i])
}
return ret, err
}
}
// TODO: switch this to full namespacers
type RsListFunc func(string, metav1.ListOptions) ([]*apps.ReplicaSet, error)
type podListFunc func(string, metav1.ListOptions) (*v1.PodList, error)
// ListReplicaSets returns a slice of RSes the given deployment targets.
// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan),
// because only the controller itself should do that.
// However, it does filter out anything whose ControllerRef doesn't match.
func ListReplicaSets(deployment *apps.Deployment, getRSList RsListFunc) ([]*apps.ReplicaSet, error) {
// TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector
// should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830.
namespace := deployment.Namespace
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
if err != nil {
return nil, err
}
options := metav1.ListOptions{LabelSelector: selector.String()}
all, err := getRSList(namespace, options)
if err != nil {
return nil, err
}
// Only include those whose ControllerRef matches the Deployment.
owned := make([]*apps.ReplicaSet, 0, len(all))
for _, rs := range all {
if metav1.IsControlledBy(rs, deployment) {
owned = append(owned, rs)
}
}
return owned, nil
}
// ListReplicaSetsInternal is ListReplicaSets for internalextensions.
// TODO: Remove the duplicate when call sites are updated to ListReplicaSets.
func ListReplicaSetsInternal(deployment *internalextensions.Deployment, getRSList func(string, metav1.ListOptions) ([]*internalextensions.ReplicaSet, error)) ([]*internalextensions.ReplicaSet, error) {
namespace := deployment.Namespace
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
if err != nil {
return nil, err
}
options := metav1.ListOptions{LabelSelector: selector.String()}
all, err := getRSList(namespace, options)
if err != nil {
return nil, err
}
// Only include those whose ControllerRef matches the Deployment.
filtered := make([]*internalextensions.ReplicaSet, 0, len(all))
for _, rs := range all {
if metav1.IsControlledBy(rs, deployment) {
filtered = append(filtered, rs)
}
}
return filtered, nil
}
// ListPods returns a list of pods the given deployment targets.
// This needs a list of ReplicaSets for the Deployment,
// which can be found with ListReplicaSets().
// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan),
// because only the controller itself should do that.
// However, it does filter out anything whose ControllerRef doesn't match.
func ListPods(deployment *apps.Deployment, rsList []*apps.ReplicaSet, getPodList podListFunc) (*v1.PodList, error) {
namespace := deployment.Namespace
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
if err != nil {
return nil, err
}
options := metav1.ListOptions{LabelSelector: selector.String()}
all, err := getPodList(namespace, options)
if err != nil {
return all, err
}
// Only include those whose ControllerRef points to a ReplicaSet that is in
// turn owned by this Deployment.
rsMap := make(map[types.UID]bool, len(rsList))
for _, rs := range rsList {
rsMap[rs.UID] = true
}
owned := &v1.PodList{Items: make([]v1.Pod, 0, len(all.Items))}
for i := range all.Items {
pod := &all.Items[i]
controllerRef := metav1.GetControllerOf(pod)
if controllerRef != nil && rsMap[controllerRef.UID] {
owned.Items = append(owned.Items, *pod)
}
}
return owned, nil
}
// EqualIgnoreHash returns true if two given PodTemplateSpecs are equal, ignoring the difference in value of Labels[pod-template-hash].
// We ignore pod-template-hash because:
// 1. The hash result would be different upon podTemplateSpec API changes
// (e.g. the addition of a new field will cause the hash code to change)
// 2. The deployment template won't have hash labels
func EqualIgnoreHash(template1, template2 *v1.PodTemplateSpec) bool {
t1Copy := template1.DeepCopy()
t2Copy := template2.DeepCopy()
// Remove hash labels from template.Labels before comparing
delete(t1Copy.Labels, apps.DefaultDeploymentUniqueLabelKey)
delete(t2Copy.Labels, apps.DefaultDeploymentUniqueLabelKey)
return apiequality.Semantic.DeepEqual(t1Copy, t2Copy)
}
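// Usage sketch (hypothetical caller): two templates that differ only in the
// pod-template-hash label compare as equal, which is what lets
// FindNewReplicaSet below match a deployment to the replica set it owns.
func exampleEqualIgnoreHash() bool {
	t1 := v1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "web", apps.DefaultDeploymentUniqueLabelKey: "abc123"}}}
	t2 := v1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "web"}}}
	return EqualIgnoreHash(&t1, &t2) // true
}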
// FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template).
func FindNewReplicaSet(deployment *apps.Deployment, rsList []*apps.ReplicaSet) *apps.ReplicaSet {
sort.Sort(controller.ReplicaSetsByCreationTimestamp(rsList))
for i := range rsList {
if EqualIgnoreHash(&rsList[i].Spec.Template, &deployment.Spec.Template) {
// In rare cases, such as after cluster upgrades, a Deployment may end up
// having more than one new ReplicaSet with the same template as its own template,
// see https://github.com/kubernetes/kubernetes/issues/40415
// We deterministically choose the oldest new ReplicaSet.
return rsList[i]
}
}
// new ReplicaSet does not exist.
return nil
}
// FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given slice of RSes.
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets includes all old replica sets.
func FindOldReplicaSets(deployment *apps.Deployment, rsList []*apps.ReplicaSet) ([]*apps.ReplicaSet, []*apps.ReplicaSet) {
var requiredRSs []*apps.ReplicaSet
var allRSs []*apps.ReplicaSet
newRS := FindNewReplicaSet(deployment, rsList)
for _, rs := range rsList {
// Filter out new replica set
if newRS != nil && rs.UID == newRS.UID {
continue
}
allRSs = append(allRSs, rs)
if *(rs.Spec.Replicas) != 0 {
requiredRSs = append(requiredRSs, rs)
}
}
return requiredRSs, allRSs
}
// SetFromReplicaSetTemplate sets the desired PodTemplateSpec from a replica set template to the given deployment.
func SetFromReplicaSetTemplate(deployment *apps.Deployment, template v1.PodTemplateSpec) *apps.Deployment {
deployment.Spec.Template.ObjectMeta = template.ObjectMeta
deployment.Spec.Template.Spec = template.Spec
deployment.Spec.Template.ObjectMeta.Labels = labelsutil.CloneAndRemoveLabel(
deployment.Spec.Template.ObjectMeta.Labels,
apps.DefaultDeploymentUniqueLabelKey)
return deployment
}
// GetReplicaCountForReplicaSets returns the sum of Replicas of the given replica sets.
func GetReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 {
totalReplicas := int32(0)
for _, rs := range replicaSets {
if rs != nil {
totalReplicas += *(rs.Spec.Replicas)
}
}
return totalReplicas
}
// GetActualReplicaCountForReplicaSets returns the sum of actual replicas of the given replica sets.
func GetActualReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 {
totalActualReplicas := int32(0)
for _, rs := range replicaSets {
if rs != nil {
totalActualReplicas += rs.Status.Replicas
}
}
return totalActualReplicas
}
// GetReadyReplicaCountForReplicaSets returns the number of ready pods corresponding to the given replica sets.
func GetReadyReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 {
totalReadyReplicas := int32(0)
for _, rs := range replicaSets {
if rs != nil {
totalReadyReplicas += rs.Status.ReadyReplicas
}
}
return totalReadyReplicas
}
// GetAvailableReplicaCountForReplicaSets returns the number of available pods corresponding to the given replica sets.
func GetAvailableReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int32 {
totalAvailableReplicas := int32(0)
for _, rs := range replicaSets {
if rs != nil {
totalAvailableReplicas += rs.Status.AvailableReplicas
}
}
return totalAvailableReplicas
}
// IsRollingUpdate returns true if the strategy type is a rolling update.
func IsRollingUpdate(deployment *apps.Deployment) bool {
return deployment.Spec.Strategy.Type == apps.RollingUpdateDeploymentStrategyType
}
// DeploymentComplete considers a deployment to be complete once all of its desired replicas
// are updated and available, and no old pods are running.
func DeploymentComplete(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool {
return newStatus.UpdatedReplicas == *(deployment.Spec.Replicas) &&
newStatus.Replicas == *(deployment.Spec.Replicas) &&
newStatus.AvailableReplicas == *(deployment.Spec.Replicas) &&
newStatus.ObservedGeneration >= deployment.Generation
}
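// Worked example (sketch): with spec.replicas=3 and generation 2, a status of
// {UpdatedReplicas: 3, Replicas: 3, AvailableReplicas: 3, ObservedGeneration: 2}
// is complete; if even one old pod is still counted, it is not.
//
//	three := int32(3)
//	d := &apps.Deployment{Spec: apps.DeploymentSpec{Replicas: &three}}
//	d.Generation = 2
//	st := &apps.DeploymentStatus{UpdatedReplicas: 3, Replicas: 3, AvailableReplicas: 3, ObservedGeneration: 2}
//	DeploymentComplete(d, st) // true
//	st.Replicas = 4           // an old pod is still running
//	DeploymentComplete(d, st) // false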
// DeploymentProgressing reports progress for a deployment. Progress is estimated by comparing the
// current with the new status of the deployment that the controller is observing. More specifically,
// when new pods are scaled up or become ready or available, or old pods are scaled down, we
// consider the deployment to be progressing.
func DeploymentProgressing(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool {
oldStatus := deployment.Status
// Old replicas that need to be scaled down
oldStatusOldReplicas := oldStatus.Replicas - oldStatus.UpdatedReplicas
newStatusOldReplicas := newStatus.Replicas - newStatus.UpdatedReplicas
return (newStatus.UpdatedReplicas > oldStatus.UpdatedReplicas) ||
(newStatusOldReplicas < oldStatusOldReplicas) ||
newStatus.ReadyReplicas > deployment.Status.ReadyReplicas ||
newStatus.AvailableReplicas > deployment.Status.AvailableReplicas
}
// nowFn is a package variable so that unit tests can substitute a fake clock.
var nowFn = time.Now
// DeploymentTimedOut considers a deployment to have timed out once its Progressing condition
// is older than progressDeadlineSeconds, or once a Progressing condition with a TimedOutReason
// reason already exists.
func DeploymentTimedOut(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool {
if deployment.Spec.ProgressDeadlineSeconds == nil {
return false
}
// Look for the Progressing condition. If it doesn't exist, we have no base to estimate progress.
// If it's already set with a TimedOutReason reason, we have already timed out, no need to check
// again.
condition := GetDeploymentCondition(*newStatus, apps.DeploymentProgressing)
if condition == nil {
return false
}
// If the previous condition has been a successful rollout then we shouldn't try to
// estimate any progress. Scenario:
//
// * progressDeadlineSeconds is smaller than the difference between now and the time
// the last rollout finished in the past.
// * the creation of a new ReplicaSet triggers a resync of the Deployment prior to the
// cached copy of the Deployment getting updated with the status.condition that indicates
// the creation of the new ReplicaSet.
//
// The Deployment will be resynced and eventually its Progressing condition will catch
// up with the state of the world.
if condition.Reason == NewRSAvailableReason {
return false
}
if condition.Reason == TimedOutReason {
return true
}
// Look at the difference in seconds between now and the last time we reported any
// progress or tried to create a replica set, or resumed a paused deployment and
// compare against progressDeadlineSeconds.
from := condition.LastUpdateTime
now := nowFn()
delta := time.Duration(*deployment.Spec.ProgressDeadlineSeconds) * time.Second
timedOut := from.Add(delta).Before(now)
glog.V(4).Infof("Deployment %q timed out (%t) [last progress check: %v - now: %v]", deployment.Name, timedOut, from, now)
return timedOut
}
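// Sketch of how a unit test could stub nowFn to exercise DeploymentTimedOut
// deterministically (hypothetical test snippet, not part of the original file):
//
//	oldNow := nowFn
//	defer func() { nowFn = oldNow }()
//	base := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
//	nowFn = func() time.Time { return base.Add(10 * time.Minute) }
//	// With ProgressDeadlineSeconds=300 and a Progressing condition whose
//	// LastUpdateTime is base, the deployment is now past its deadline and
//	// DeploymentTimedOut returns true.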
// NewRSNewReplicas calculates the number of replicas a deployment's new RS should have.
// When one of the following is true, we're rolling out the deployment; otherwise, we're scaling it.
// 1) The new RS is saturated: newRS's replicas == deployment's replicas
// 2) Max number of pods allowed is reached: deployment's replicas + maxSurge == all RSs' replicas
func NewRSNewReplicas(deployment *apps.Deployment, allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) (int32, error) {
switch deployment.Spec.Strategy.Type {
case apps.RollingUpdateDeploymentStrategyType:
// Check if we can scale up.
maxSurge, err := intstrutil.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), true)
if err != nil {
return 0, err
}
// Find the total number of pods
currentPodCount := GetReplicaCountForReplicaSets(allRSs)
maxTotalPods := *(deployment.Spec.Replicas) + int32(maxSurge)
if currentPodCount >= maxTotalPods {
// Cannot scale up.
return *(newRS.Spec.Replicas), nil
}
// Scale up.
scaleUpCount := maxTotalPods - currentPodCount
// Do not exceed the number of desired replicas.
scaleUpCount = int32(integer.IntMin(int(scaleUpCount), int(*(deployment.Spec.Replicas)-*(newRS.Spec.Replicas))))
return *(newRS.Spec.Replicas) + scaleUpCount, nil
case apps.RecreateDeploymentStrategyType:
return *(deployment.Spec.Replicas), nil
default:
return 0, fmt.Errorf("deployment type %v isn't supported", deployment.Spec.Strategy.Type)
}
}
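// Worked example (sketch): with 10 desired replicas and maxSurge=25%, maxSurge
// resolves (rounding up) to 3, so maxTotalPods = 10 + 3 = 13. If all RSs
// currently total 11 pods and the new RS has 2 replicas, the new RS may grow by
// min(13-11, 10-2) = 2, i.e. NewRSNewReplicas returns 4. Under the Recreate
// strategy it simply returns the 10 desired replicas.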
// IsSaturated checks if the new replica set is saturated by comparing its size with its deployment size.
// Both the deployment and the replica set have to believe this replica set can own all of the desired
// replicas in the deployment and the annotation helps in achieving that. All pods of the ReplicaSet
// need to be available.
func IsSaturated(deployment *apps.Deployment, rs *apps.ReplicaSet) bool {
if rs == nil {
return false
}
desiredString := rs.Annotations[DesiredReplicasAnnotation]
desired, err := strconv.Atoi(desiredString)
if err != nil {
return false
}
return *(rs.Spec.Replicas) == *(deployment.Spec.Replicas) &&
int32(desired) == *(deployment.Spec.Replicas) &&
rs.Status.AvailableReplicas == *(deployment.Spec.Replicas)
}
// WaitForObservedDeployment polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration.
// Returns an error if polling times out.
func WaitForObservedDeployment(getDeploymentFunc func() (*apps.Deployment, error), desiredGeneration int64, interval, timeout time.Duration) error {
// TODO: This should take clientset.Interface when all code is updated to use clientset. Keeping it this way allows the function to be used by callers who have client.Interface.
return wait.PollImmediate(interval, timeout, func() (bool, error) {
deployment, err := getDeploymentFunc()
if err != nil {
return false, err
}
return deployment.Status.ObservedGeneration >= desiredGeneration, nil
})
}
// TODO: remove the duplicate
// WaitForObservedDeploymentInternal polls for the deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration.
// Returns an error if polling times out.
func WaitForObservedDeploymentInternal(getDeploymentFunc func() (*internalextensions.Deployment, error), desiredGeneration int64, interval, timeout time.Duration) error {
return wait.Poll(interval, timeout, func() (bool, error) {
deployment, err := getDeploymentFunc()
if err != nil {
return false, err
}
return deployment.Status.ObservedGeneration >= desiredGeneration, nil
})
}
// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one
// step. For example:
//
// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1)
// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1)
// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1)
// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1)
func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) {
surge, err := intstrutil.GetValueFromIntOrPercent(maxSurge, int(desired), true)
if err != nil {
return 0, 0, err
}
unavailable, err := intstrutil.GetValueFromIntOrPercent(maxUnavailable, int(desired), false)
if err != nil {
return 0, 0, err
}
if surge == 0 && unavailable == 0 {
// Validation should never allow the user to explicitly use zero values for both maxSurge
// and maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero.
// If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the
// theory that surge might not work due to quota.
unavailable = 1
}
return int32(surge), int32(unavailable), nil
}
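// Worked example (sketch): for desired=2, maxSurge="0%" and maxUnavailable="1%",
// surge rounds up to 0 and unavailable rounds down to 0, so the fallback bumps
// unavailable to 1.
//
//	maxSurge := intstrutil.FromString("0%")
//	maxUnavailable := intstrutil.FromString("1%")
//	surge, unavailable, _ := ResolveFenceposts(&maxSurge, &maxUnavailable, 2)
//	// surge == 0, unavailable == 1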

File diff suppressed because it is too large

View File

@ -1,145 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"encoding/json"
"hash/adler32"
"strconv"
"strings"
"testing"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/controller"
hashutil "k8s.io/kubernetes/pkg/util/hash"
)
var podSpec = `
{
"metadata": {
"creationTimestamp": null,
"labels": {
"app": "cats"
}
},
"spec": {
"containers": [
{
"name": "cats",
"image": "registry/test/cats:v0.@@VERSION@@.0",
"ports": [
{
"name": "http",
"containerPort": 9077,
"protocol": "TCP"
}
],
"env": [
{
"name": "DEPLOYMENT_ENVIRONMENT",
"value": "cats-stubbed-functional"
},
{
"name": "APP_NAME",
"value": "cats"
}
],
"resources": {
"limits": {
"cpu": "1",
"memory": "1Gi"
},
"requests": {
"cpu": "1",
"memory": "1Gi"
}
},
"livenessProbe": {
"httpGet": {
"path": "/private/status",
"port": 9077,
"scheme": "HTTP"
},
"initialDelaySeconds": 30,
"timeoutSeconds": 1,
"periodSeconds": 10,
"successThreshold": 1,
"failureThreshold": 3
},
"readinessProbe": {
"httpGet": {
"path": "/private/status",
"port": 9077,
"scheme": "HTTP"
},
"initialDelaySeconds": 1,
"timeoutSeconds": 1,
"periodSeconds": 10,
"successThreshold": 1,
"failureThreshold": 3
},
"terminationMessagePath": "/dev/termination-log",
"imagePullPolicy": "IfNotPresent"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"securityContext": {}
}
}
`
func TestPodTemplateSpecHash(t *testing.T) {
seenHashes := make(map[uint32]int)
for i := 0; i < 1000; i++ {
specJson := strings.Replace(podSpec, "@@VERSION@@", strconv.Itoa(i), 1)
spec := v1.PodTemplateSpec{}
if err := json.Unmarshal([]byte(specJson), &spec); err != nil {
t.Fatalf("Failed to unmarshal pod spec: %v", err)
}
hash := controller.ComputeHash(&spec, nil)
if v, ok := seenHashes[hash]; ok {
t.Errorf("Hash collision, old: %d new: %d", v, i)
break
}
seenHashes[hash] = i
}
}
func BenchmarkAdler(b *testing.B) {
spec := v1.PodTemplateSpec{}
if err := json.Unmarshal([]byte(podSpec), &spec); err != nil {
b.Fatalf("Failed to unmarshal pod spec: %v", err)
}
for i := 0; i < b.N; i++ {
getPodTemplateSpecOldHash(spec)
}
}
func getPodTemplateSpecOldHash(template v1.PodTemplateSpec) uint32 {
podTemplateSpecHasher := adler32.New()
hashutil.DeepHashObject(podTemplateSpecHasher, template)
return podTemplateSpecHasher.Sum32()
}
func BenchmarkFnv(b *testing.B) {
spec := v1.PodTemplateSpec{}
if err := json.Unmarshal([]byte(podSpec), &spec); err != nil {
b.Fatalf("Failed to unmarshal pod spec: %v", err)
}
for i := 0; i < b.N; i++ {
controller.ComputeHash(&spec, nil)
}
}

View File

@ -1,60 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"github.com/golang/glog"
"k8s.io/api/core/v1"
errorsutil "k8s.io/apimachinery/pkg/util/errors"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/util/retry"
)
// TODO: use client library instead when it starts to support update retries
// see https://github.com/kubernetes/kubernetes/issues/21479
type updatePodFunc func(pod *v1.Pod) error
// UpdatePodWithRetries updates a pod with the given applyUpdate function, retrying on update conflicts.
func UpdatePodWithRetries(podClient v1core.PodInterface, podLister corelisters.PodLister, namespace, name string, applyUpdate updatePodFunc) (*v1.Pod, error) {
var pod *v1.Pod
retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
var err error
pod, err = podLister.Pods(namespace).Get(name)
if err != nil {
return err
}
pod = pod.DeepCopy()
// Apply the update, then attempt to push it to the apiserver.
if applyErr := applyUpdate(pod); applyErr != nil {
return applyErr
}
pod, err = podClient.Update(pod)
return err
})
// Ignore the precondition violated error; this pod is already updated
// with the desired label.
if retryErr == errorsutil.ErrPreconditionViolated {
glog.V(4).Infof("Pod %s/%s precondition doesn't hold, skip updating it.", namespace, name)
retryErr = nil
}
return pod, retryErr
}
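// Usage sketch (hypothetical caller; podClient and podLister are assumed to be
// an initialized typed client and a synced lister):
//
//	pod, err := UpdatePodWithRetries(podClient, podLister, "default", "web-0",
//		func(p *v1.Pod) error {
//			if p.Labels == nil {
//				p.Labels = map[string]string{}
//			}
//			p.Labels["pod-template-hash"] = "12345"
//			return nil
//		})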

View File

@ -1,60 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"github.com/golang/glog"
apps "k8s.io/api/apps/v1"
errorsutil "k8s.io/apimachinery/pkg/util/errors"
appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
appslisters "k8s.io/client-go/listers/apps/v1"
"k8s.io/client-go/util/retry"
)
// TODO: use client library instead when it starts to support update retries
// see https://github.com/kubernetes/kubernetes/issues/21479
type updateRSFunc func(rs *apps.ReplicaSet) error
// UpdateRSWithRetries updates an RS with the given applyUpdate function, retrying on update conflicts.
func UpdateRSWithRetries(rsClient appsclient.ReplicaSetInterface, rsLister appslisters.ReplicaSetLister, namespace, name string, applyUpdate updateRSFunc) (*apps.ReplicaSet, error) {
var rs *apps.ReplicaSet
retryErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
var err error
rs, err = rsLister.ReplicaSets(namespace).Get(name)
if err != nil {
return err
}
rs = rs.DeepCopy()
// Apply the update, then attempt to push it to the apiserver.
if applyErr := applyUpdate(rs); applyErr != nil {
return applyErr
}
rs, err = rsClient.Update(rs)
return err
})
// Ignore the precondition violated error; in that case the RS was not updated.
if retryErr == errorsutil.ErrPreconditionViolated {
glog.V(4).Infof("Replica set %s/%s precondition doesn't hold, skip updating it.", namespace, name)
retryErr = nil
}
return rs, retryErr
}
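// Usage sketch (hypothetical caller; rsClient and rsLister are assumed to be
// initialized):
//
//	rs, err := UpdateRSWithRetries(rsClient, rsLister, "default", "web-abc123",
//		func(rs *apps.ReplicaSet) error {
//			three := int32(3)
//			rs.Spec.Replicas = &three
//			return nil
//		})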

View File

@ -1,77 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = ["disruption.go"],
importpath = "k8s.io/kubernetes/pkg/controller/disruption",
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/controller:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers/apps/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/informers/policy/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/listers/apps/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/listers/policy/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["disruption_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core/install:go_default_library",
"//pkg/controller:go_default_library",
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,2 +0,0 @@
reviewers:
- sig-apps-reviewers

View File

@ -1,743 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package disruption
import (
"fmt"
"reflect"
"time"
apps "k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
policy "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
appsinformers "k8s.io/client-go/informers/apps/v1beta1"
coreinformers "k8s.io/client-go/informers/core/v1"
extensionsinformers "k8s.io/client-go/informers/extensions/v1beta1"
policyinformers "k8s.io/client-go/informers/policy/v1beta1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
policyclientset "k8s.io/client-go/kubernetes/typed/policy/v1beta1"
appslisters "k8s.io/client-go/listers/apps/v1beta1"
corelisters "k8s.io/client-go/listers/core/v1"
extensionslisters "k8s.io/client-go/listers/extensions/v1beta1"
policylisters "k8s.io/client-go/listers/policy/v1beta1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller"
"github.com/golang/glog"
)
const statusUpdateRetries = 2
// DeletionTimeout sets maximum time from the moment a pod is added to DisruptedPods in PDB.Status
// to the time when the pod is expected to be seen by PDB controller as having been marked for deletion.
// If the pod was not marked for deletion during that time it is assumed that it won't be deleted at
// all and the corresponding entry can be removed from pdb.Status.DisruptedPods. It is assumed that
// pod/pdb apiserver to controller latency is relatively small (like 1-2sec) so the below value should
// be more than enough.
// If the controller is running on a different node it is important that the two nodes have
// synchronized clocks (via NTP, for example). Otherwise the PodDisruptionBudget controller may not provide enough
// protection against unwanted pod disruptions.
const DeletionTimeout = 2 * 60 * time.Second
type updater func(*policy.PodDisruptionBudget) error
type DisruptionController struct {
kubeClient clientset.Interface
pdbLister policylisters.PodDisruptionBudgetLister
pdbListerSynced cache.InformerSynced
podLister corelisters.PodLister
podListerSynced cache.InformerSynced
rcLister corelisters.ReplicationControllerLister
rcListerSynced cache.InformerSynced
rsLister extensionslisters.ReplicaSetLister
rsListerSynced cache.InformerSynced
dLister extensionslisters.DeploymentLister
dListerSynced cache.InformerSynced
ssLister appslisters.StatefulSetLister
ssListerSynced cache.InformerSynced
// PodDisruptionBudget keys that need to be synced.
queue workqueue.RateLimitingInterface
recheckQueue workqueue.DelayingInterface
broadcaster record.EventBroadcaster
recorder record.EventRecorder
getUpdater func() updater
}
// controllerAndScale is used to return (controller, scale) pairs from the
// controller finder functions.
type controllerAndScale struct {
types.UID
scale int32
}
// podControllerFinder is a function type that maps a pod to a list of
// controllers and their scale.
type podControllerFinder func(*v1.Pod) (*controllerAndScale, error)
func NewDisruptionController(
podInformer coreinformers.PodInformer,
pdbInformer policyinformers.PodDisruptionBudgetInformer,
rcInformer coreinformers.ReplicationControllerInformer,
rsInformer extensionsinformers.ReplicaSetInformer,
dInformer extensionsinformers.DeploymentInformer,
ssInformer appsinformers.StatefulSetInformer,
kubeClient clientset.Interface,
) *DisruptionController {
dc := &DisruptionController{
kubeClient: kubeClient,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "disruption"),
recheckQueue: workqueue.NewNamedDelayingQueue("disruption-recheck"),
broadcaster: record.NewBroadcaster(),
}
dc.recorder = dc.broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "controllermanager"})
dc.getUpdater = func() updater { return dc.writePdbStatus }
podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: dc.addPod,
UpdateFunc: dc.updatePod,
DeleteFunc: dc.deletePod,
})
dc.podLister = podInformer.Lister()
dc.podListerSynced = podInformer.Informer().HasSynced
pdbInformer.Informer().AddEventHandlerWithResyncPeriod(
cache.ResourceEventHandlerFuncs{
AddFunc: dc.addDb,
UpdateFunc: dc.updateDb,
DeleteFunc: dc.removeDb,
},
30*time.Second,
)
dc.pdbLister = pdbInformer.Lister()
dc.pdbListerSynced = pdbInformer.Informer().HasSynced
dc.rcLister = rcInformer.Lister()
dc.rcListerSynced = rcInformer.Informer().HasSynced
dc.rsLister = rsInformer.Lister()
dc.rsListerSynced = rsInformer.Informer().HasSynced
dc.dLister = dInformer.Lister()
dc.dListerSynced = dInformer.Informer().HasSynced
dc.ssLister = ssInformer.Lister()
dc.ssListerSynced = ssInformer.Informer().HasSynced
return dc
}
// TODO(mml): When controllerRef is implemented (#2210), we *could* simply
// return controllers without their scales, and access scale type-generically
// via the scale subresource. That may not be as much of a win as it sounds,
// however. We are accessing everything through the pkg/client/cache API that
// we have to set up and tune to the types we know we'll be accessing anyway,
// and we may well need further tweaks just to be able to access scale
// subresources.
func (dc *DisruptionController) finders() []podControllerFinder {
return []podControllerFinder{dc.getPodReplicationController, dc.getPodDeployment, dc.getPodReplicaSet,
dc.getPodStatefulSet}
}
var (
controllerKindRS = v1beta1.SchemeGroupVersion.WithKind("ReplicaSet")
controllerKindSS = apps.SchemeGroupVersion.WithKind("StatefulSet")
controllerKindRC = v1.SchemeGroupVersion.WithKind("ReplicationController")
controllerKindDep = v1beta1.SchemeGroupVersion.WithKind("Deployment")
)
// getPodReplicaSet finds the replica set owning the given pod, provided the replica set has no owning deployment.
func (dc *DisruptionController) getPodReplicaSet(pod *v1.Pod) (*controllerAndScale, error) {
controllerRef := metav1.GetControllerOf(pod)
if controllerRef == nil {
return nil, nil
}
if controllerRef.Kind != controllerKindRS.Kind {
return nil, nil
}
rs, err := dc.rsLister.ReplicaSets(pod.Namespace).Get(controllerRef.Name)
if err != nil {
// The only possible error is NotFound, which is ok here.
return nil, nil
}
if rs.UID != controllerRef.UID {
return nil, nil
}
controllerRef = metav1.GetControllerOf(rs)
if controllerRef != nil && controllerRef.Kind == controllerKindDep.Kind {
// Skip RS if it's controlled by a Deployment.
return nil, nil
}
return &controllerAndScale{rs.UID, *(rs.Spec.Replicas)}, nil
}
// getPodStatefulSet returns the statefulset managing the given pod.
func (dc *DisruptionController) getPodStatefulSet(pod *v1.Pod) (*controllerAndScale, error) {
controllerRef := metav1.GetControllerOf(pod)
if controllerRef == nil {
return nil, nil
}
if controllerRef.Kind != controllerKindSS.Kind {
return nil, nil
}
ss, err := dc.ssLister.StatefulSets(pod.Namespace).Get(controllerRef.Name)
if err != nil {
// The only possible error is NotFound, which is ok here.
return nil, nil
}
if ss.UID != controllerRef.UID {
return nil, nil
}
return &controllerAndScale{ss.UID, *(ss.Spec.Replicas)}, nil
}
// getPodDeployment finds the deployment managing the replica set that owns the given pod, if any.
func (dc *DisruptionController) getPodDeployment(pod *v1.Pod) (*controllerAndScale, error) {
controllerRef := metav1.GetControllerOf(pod)
if controllerRef == nil {
return nil, nil
}
if controllerRef.Kind != controllerKindRS.Kind {
return nil, nil
}
rs, err := dc.rsLister.ReplicaSets(pod.Namespace).Get(controllerRef.Name)
if err != nil {
// The only possible error is NotFound, which is ok here.
return nil, nil
}
if rs.UID != controllerRef.UID {
return nil, nil
}
controllerRef = metav1.GetControllerOf(rs)
if controllerRef == nil {
return nil, nil
}
if controllerRef.Kind != controllerKindDep.Kind {
return nil, nil
}
deployment, err := dc.dLister.Deployments(rs.Namespace).Get(controllerRef.Name)
if err != nil {
// The only possible error is NotFound, which is ok here.
return nil, nil
}
if deployment.UID != controllerRef.UID {
return nil, nil
}
return &controllerAndScale{deployment.UID, *(deployment.Spec.Replicas)}, nil
}
func (dc *DisruptionController) getPodReplicationController(pod *v1.Pod) (*controllerAndScale, error) {
controllerRef := metav1.GetControllerOf(pod)
if controllerRef == nil {
return nil, nil
}
if controllerRef.Kind != controllerKindRC.Kind {
return nil, nil
}
rc, err := dc.rcLister.ReplicationControllers(pod.Namespace).Get(controllerRef.Name)
if err != nil {
// The only possible error is NotFound, which is ok here.
return nil, nil
}
if rc.UID != controllerRef.UID {
return nil, nil
}
return &controllerAndScale{rc.UID, *(rc.Spec.Replicas)}, nil
}
func (dc *DisruptionController) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer dc.queue.ShutDown()
glog.Infof("Starting disruption controller")
defer glog.Infof("Shutting down disruption controller")
if !controller.WaitForCacheSync("disruption", stopCh, dc.podListerSynced, dc.pdbListerSynced, dc.rcListerSynced, dc.rsListerSynced, dc.dListerSynced, dc.ssListerSynced) {
return
}
if dc.kubeClient != nil {
glog.Infof("Sending events to api server.")
dc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: dc.kubeClient.CoreV1().Events("")})
} else {
glog.Infof("No api server defined - no events will be sent to API server.")
}
go wait.Until(dc.worker, time.Second, stopCh)
go wait.Until(dc.recheckWorker, time.Second, stopCh)
<-stopCh
}
func (dc *DisruptionController) addDb(obj interface{}) {
pdb := obj.(*policy.PodDisruptionBudget)
glog.V(4).Infof("add DB %q", pdb.Name)
dc.enqueuePdb(pdb)
}
func (dc *DisruptionController) updateDb(old, cur interface{}) {
// TODO(mml) ignore updates where 'old' is equivalent to 'cur'.
pdb := cur.(*policy.PodDisruptionBudget)
glog.V(4).Infof("update DB %q", pdb.Name)
dc.enqueuePdb(pdb)
}
func (dc *DisruptionController) removeDb(obj interface{}) {
pdb := obj.(*policy.PodDisruptionBudget)
glog.V(4).Infof("remove DB %q", pdb.Name)
dc.enqueuePdb(pdb)
}
func (dc *DisruptionController) addPod(obj interface{}) {
pod := obj.(*v1.Pod)
glog.V(4).Infof("addPod called on pod %q", pod.Name)
pdb := dc.getPdbForPod(pod)
if pdb == nil {
glog.V(4).Infof("No matching pdb for pod %q", pod.Name)
return
}
glog.V(4).Infof("addPod %q -> PDB %q", pod.Name, pdb.Name)
dc.enqueuePdb(pdb)
}
func (dc *DisruptionController) updatePod(old, cur interface{}) {
pod := cur.(*v1.Pod)
glog.V(4).Infof("updatePod called on pod %q", pod.Name)
pdb := dc.getPdbForPod(pod)
if pdb == nil {
glog.V(4).Infof("No matching pdb for pod %q", pod.Name)
return
}
glog.V(4).Infof("updatePod %q -> PDB %q", pod.Name, pdb.Name)
dc.enqueuePdb(pdb)
}
func (dc *DisruptionController) deletePod(obj interface{}) {
pod, ok := obj.(*v1.Pod)
// When a delete is dropped, the relist will notice a pod in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the pod
// changed labels the new ReplicaSet will not be woken up till the periodic
// resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("Couldn't get object from tombstone %+v", obj)
return
}
pod, ok = tombstone.Obj.(*v1.Pod)
if !ok {
glog.Errorf("Tombstone contained object that is not a pod %+v", obj)
return
}
}
glog.V(4).Infof("deletePod called on pod %q", pod.Name)
pdb := dc.getPdbForPod(pod)
if pdb == nil {
glog.V(4).Infof("No matching pdb for pod %q", pod.Name)
return
}
glog.V(4).Infof("deletePod %q -> PDB %q", pod.Name, pdb.Name)
dc.enqueuePdb(pdb)
}
func (dc *DisruptionController) enqueuePdb(pdb *policy.PodDisruptionBudget) {
key, err := controller.KeyFunc(pdb)
if err != nil {
glog.Errorf("Cound't get key for PodDisruptionBudget object %+v: %v", pdb, err)
return
}
dc.queue.Add(key)
}
func (dc *DisruptionController) enqueuePdbForRecheck(pdb *policy.PodDisruptionBudget, delay time.Duration) {
key, err := controller.KeyFunc(pdb)
if err != nil {
glog.Errorf("Cound't get key for PodDisruptionBudget object %+v: %v", pdb, err)
return
}
dc.recheckQueue.AddAfter(key, delay)
}
func (dc *DisruptionController) getPdbForPod(pod *v1.Pod) *policy.PodDisruptionBudget {
// GetPodPodDisruptionBudgets returns an error only if no
// PodDisruptionBudgets are found. We don't return that as an error to the
// caller.
pdbs, err := dc.pdbLister.GetPodPodDisruptionBudgets(pod)
if err != nil {
glog.V(4).Infof("No PodDisruptionBudgets found for pod %v, PodDisruptionBudget controller will avoid syncing.", pod.Name)
return nil
}
if len(pdbs) > 1 {
msg := fmt.Sprintf("Pod %q/%q matches multiple PodDisruptionBudgets. Chose %q arbitrarily.", pod.Namespace, pod.Name, pdbs[0].Name)
glog.Warning(msg)
dc.recorder.Event(pod, v1.EventTypeWarning, "MultiplePodDisruptionBudgets", msg)
}
return pdbs[0]
}
// getPodsForPdb returns the pods matching the PodDisruptionBudget's selector.
// IMPORTANT NOTE: the returned pods should NOT be modified.
func (dc *DisruptionController) getPodsForPdb(pdb *policy.PodDisruptionBudget) ([]*v1.Pod, error) {
sel, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector)
if err != nil {
return []*v1.Pod{}, err
}
if sel.Empty() {
return []*v1.Pod{}, nil
}
pods, err := dc.podLister.Pods(pdb.Namespace).List(sel)
if err != nil {
return []*v1.Pod{}, err
}
return pods, nil
}
func (dc *DisruptionController) worker() {
for dc.processNextWorkItem() {
}
}
func (dc *DisruptionController) processNextWorkItem() bool {
dKey, quit := dc.queue.Get()
if quit {
return false
}
defer dc.queue.Done(dKey)
err := dc.sync(dKey.(string))
if err == nil {
dc.queue.Forget(dKey)
return true
}
utilruntime.HandleError(fmt.Errorf("Error syncing PodDisruptionBudget %v, requeuing: %v", dKey.(string), err))
dc.queue.AddRateLimited(dKey)
return true
}
func (dc *DisruptionController) recheckWorker() {
for dc.processNextRecheckWorkItem() {
}
}
func (dc *DisruptionController) processNextRecheckWorkItem() bool {
dKey, quit := dc.recheckQueue.Get()
if quit {
return false
}
defer dc.recheckQueue.Done(dKey)
dc.queue.AddRateLimited(dKey)
return true
}
func (dc *DisruptionController) sync(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing PodDisruptionBudget %q (%v)", key, time.Since(startTime))
}()
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return err
}
pdb, err := dc.pdbLister.PodDisruptionBudgets(namespace).Get(name)
if errors.IsNotFound(err) {
glog.V(4).Infof("PodDisruptionBudget %q has been deleted", key)
return nil
}
if err != nil {
return err
}
if err := dc.trySync(pdb); err != nil {
glog.Errorf("Failed to sync pdb %s/%s: %v", pdb.Namespace, pdb.Name, err)
return dc.failSafe(pdb)
}
return nil
}
func (dc *DisruptionController) trySync(pdb *policy.PodDisruptionBudget) error {
pods, err := dc.getPodsForPdb(pdb)
if err != nil {
dc.recorder.Eventf(pdb, v1.EventTypeWarning, "NoPods", "Failed to get pods: %v", err)
return err
}
if len(pods) == 0 {
dc.recorder.Eventf(pdb, v1.EventTypeNormal, "NoPods", "No matching pods found")
}
expectedCount, desiredHealthy, err := dc.getExpectedPodCount(pdb, pods)
if err != nil {
dc.recorder.Eventf(pdb, v1.EventTypeWarning, "CalculateExpectedPodCountFailed", "Failed to calculate the number of expected pods: %v", err)
return err
}
currentTime := time.Now()
disruptedPods, recheckTime := dc.buildDisruptedPodMap(pods, pdb, currentTime)
currentHealthy := countHealthyPods(pods, disruptedPods, currentTime)
err = dc.updatePdbStatus(pdb, currentHealthy, desiredHealthy, expectedCount, disruptedPods)
if err == nil && recheckTime != nil {
// There is always at most one PDB waiting with a particular name in the queue,
// and each PDB in the queue is associated with the lowest timestamp
// that was supplied when a PDB with that name was added.
dc.enqueuePdbForRecheck(pdb, recheckTime.Sub(currentTime))
}
return err
}
func (dc *DisruptionController) getExpectedPodCount(pdb *policy.PodDisruptionBudget, pods []*v1.Pod) (expectedCount, desiredHealthy int32, err error) {
// TODO(davidopp): consider making expectedCount, and the rules about permitted
// controller configurations (specifically, treating it as an error if a pod
// covered by a PDB has 0 controllers or more than 1 controller), be handled the
// same way for integer and percentage minAvailable
if pdb.Spec.MaxUnavailable != nil {
expectedCount, err = dc.getExpectedScale(pdb, pods)
if err != nil {
return
}
var maxUnavailable int
maxUnavailable, err = intstr.GetValueFromIntOrPercent(pdb.Spec.MaxUnavailable, int(expectedCount), true)
if err != nil {
return
}
desiredHealthy = expectedCount - int32(maxUnavailable)
if desiredHealthy < 0 {
desiredHealthy = 0
}
} else if pdb.Spec.MinAvailable != nil {
if pdb.Spec.MinAvailable.Type == intstr.Int {
desiredHealthy = pdb.Spec.MinAvailable.IntVal
expectedCount = int32(len(pods))
} else if pdb.Spec.MinAvailable.Type == intstr.String {
expectedCount, err = dc.getExpectedScale(pdb, pods)
if err != nil {
return
}
var minAvailable int
minAvailable, err = intstr.GetValueFromIntOrPercent(pdb.Spec.MinAvailable, int(expectedCount), true)
if err != nil {
return
}
desiredHealthy = int32(minAvailable)
}
}
return
}
func (dc *DisruptionController) getExpectedScale(pdb *policy.PodDisruptionBudget, pods []*v1.Pod) (expectedCount int32, err error) {
// When the user specifies a fraction of pods that must be available, we
// use as the fraction's denominator
// SUM_{all c in C} scale(c)
// where C is the union of C_p1, C_p2, ..., C_pN
// and each C_pi is the set of controllers controlling the pod pi
// k8s only defines what happens when 0 or 1 controllers control a
// given pod. We explicitly exclude the 0-controller case here, and we
// report an error if we find a pod with more than 1 controller. Thus in
// practice each C_pi is a set of exactly 1 controller.
// A mapping from controllers to their scale.
controllerScale := map[types.UID]int32{}
// 1. Find the controller for each pod. If any pod has 0 controllers,
// that's an error. With ControllerRef, a pod can only have 1 controller.
for _, pod := range pods {
foundController := false
for _, finder := range dc.finders() {
var controllerNScale *controllerAndScale
controllerNScale, err = finder(pod)
if err != nil {
return
}
if controllerNScale != nil {
controllerScale[controllerNScale.UID] = controllerNScale.scale
foundController = true
break
}
}
if !foundController {
err = fmt.Errorf("found no controllers for pod %q", pod.Name)
dc.recorder.Event(pdb, v1.EventTypeWarning, "NoControllers", err.Error())
return
}
}
// 2. Add up all the controllers.
expectedCount = 0
for _, count := range controllerScale {
expectedCount += count
}
return
}
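// Worked example (sketch): three pods covered by the PDB, two owned by a
// ReplicaSet scaled to 5 and one by a StatefulSet scaled to 3. The two
// ReplicaSet pods resolve to the same controller UID, so controllerScale ends
// up as {rsUID: 5, ssUID: 3} and expectedCount = 5 + 3 = 8.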
func countHealthyPods(pods []*v1.Pod, disruptedPods map[string]metav1.Time, currentTime time.Time) (currentHealthy int32) {
for _, pod := range pods {
// Pod is being deleted.
if pod.DeletionTimestamp != nil {
continue
}
// Pod is expected to be deleted soon.
if disruptionTime, found := disruptedPods[pod.Name]; found && disruptionTime.Time.Add(DeletionTimeout).After(currentTime) {
continue
}
if podutil.IsPodReady(pod) {
currentHealthy++
}
}
return
}
// buildDisruptedPodMap rebuilds the DisruptedPods map, dropping entries for pods that no
// longer exist, are already being deleted, or were never deleted at all. It also returns
// the time at which this check should be repeated, if any.
func (dc *DisruptionController) buildDisruptedPodMap(pods []*v1.Pod, pdb *policy.PodDisruptionBudget, currentTime time.Time) (map[string]metav1.Time, *time.Time) {
disruptedPods := pdb.Status.DisruptedPods
result := make(map[string]metav1.Time)
var recheckTime *time.Time
if len(disruptedPods) == 0 {
return result, recheckTime
}
for _, pod := range pods {
if pod.DeletionTimestamp != nil {
// Already being deleted.
continue
}
disruptionTime, found := disruptedPods[pod.Name]
if !found {
// Pod not on the list.
continue
}
expectedDeletion := disruptionTime.Time.Add(DeletionTimeout)
if expectedDeletion.Before(currentTime) {
glog.V(1).Infof("Pod %s/%s was expected to be deleted at %s but it wasn't, updating pdb %s/%s",
pod.Namespace, pod.Name, disruptionTime.String(), pdb.Namespace, pdb.Name)
dc.recorder.Eventf(pod, v1.EventTypeWarning, "NotDeleted", "Pod was expected by PDB %s/%s to be deleted but it wasn't",
pdb.Namespace, pdb.Name)
} else {
if recheckTime == nil || expectedDeletion.Before(*recheckTime) {
recheckTime = &expectedDeletion
}
result[pod.Name] = disruptionTime
}
}
return result, recheckTime
}
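// Worked example (sketch): with DeletionTimeout = 2m, a pod recorded in
// DisruptedPods at 12:00:00 and still present (no DeletionTimestamp) when the
// controller syncs at 12:01:00 stays in the map and yields a recheckTime of
// 12:02:00; if it is still present at 12:03:00 the entry is dropped and a
// NotDeleted warning event is emitted instead.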
// failSafe is an attempt to at least update the PodDisruptionsAllowed field to
// 0 if everything else has failed. This is one place we
// implement the "fail open" part of the design since if we manage to update
// this field correctly, we will prevent the /evict handler from approving an
// eviction when it may be unsafe to do so.
func (dc *DisruptionController) failSafe(pdb *policy.PodDisruptionBudget) error {
newPdb := pdb.DeepCopy()
newPdb.Status.PodDisruptionsAllowed = 0
return dc.getUpdater()(newPdb)
}
func (dc *DisruptionController) updatePdbStatus(pdb *policy.PodDisruptionBudget, currentHealthy, desiredHealthy, expectedCount int32,
disruptedPods map[string]metav1.Time) error {
// We require expectedCount to be > 0 so that PDBs which currently match no
// pods are in a safe state when their first pods appear but this controller
// has not updated their status yet. This isn't the only race, but it's a
// common one that's easy to detect.
disruptionsAllowed := currentHealthy - desiredHealthy
if expectedCount <= 0 || disruptionsAllowed <= 0 {
disruptionsAllowed = 0
}
if pdb.Status.CurrentHealthy == currentHealthy &&
pdb.Status.DesiredHealthy == desiredHealthy &&
pdb.Status.ExpectedPods == expectedCount &&
pdb.Status.PodDisruptionsAllowed == disruptionsAllowed &&
reflect.DeepEqual(pdb.Status.DisruptedPods, disruptedPods) &&
pdb.Status.ObservedGeneration == pdb.Generation {
return nil
}
newPdb := pdb.DeepCopy()
newPdb.Status = policy.PodDisruptionBudgetStatus{
CurrentHealthy: currentHealthy,
DesiredHealthy: desiredHealthy,
ExpectedPods: expectedCount,
PodDisruptionsAllowed: disruptionsAllowed,
DisruptedPods: disruptedPods,
ObservedGeneration: pdb.Generation,
}
return dc.getUpdater()(newPdb)
}
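// Worked example (sketch): currentHealthy=4 and desiredHealthy=3 give
// disruptionsAllowed=1; currentHealthy=3 gives 0; and if expectedCount is 0
// (no controllers counted yet) disruptionsAllowed is forced to 0 regardless,
// which is the safe default described above.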
// refresh tries to re-GET the given PDB. If there are any errors, it just
// returns the old PDB. Intended to be used in a retry loop where it runs a
// bounded number of times.
func refresh(pdbClient policyclientset.PodDisruptionBudgetInterface, pdb *policy.PodDisruptionBudget) *policy.PodDisruptionBudget {
newPdb, err := pdbClient.Get(pdb.Name, metav1.GetOptions{})
if err == nil {
return newPdb
} else {
return pdb
}
}
func (dc *DisruptionController) writePdbStatus(pdb *policy.PodDisruptionBudget) error {
pdbClient := dc.kubeClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace)
st := pdb.Status
var err error
for i, pdb := 0, pdb; i < statusUpdateRetries; i, pdb = i+1, refresh(pdbClient, pdb) {
pdb.Status = st
if _, err = pdbClient.UpdateStatus(pdb); err == nil {
break
}
}
return err
}

View File

@ -1,739 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package disruption
import (
"fmt"
"reflect"
"runtime/debug"
"testing"
"time"
apps "k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
policy "k8s.io/api/policy/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
_ "k8s.io/kubernetes/pkg/apis/core/install"
"k8s.io/kubernetes/pkg/controller"
"github.com/Azure/go-autorest/autorest/to"
)
type pdbStates map[string]policy.PodDisruptionBudget
var alwaysReady = func() bool { return true }
func (ps *pdbStates) Set(pdb *policy.PodDisruptionBudget) error {
key, err := controller.KeyFunc(pdb)
if err != nil {
return err
}
(*ps)[key] = *pdb.DeepCopy()
return nil
}
func (ps *pdbStates) Get(key string) policy.PodDisruptionBudget {
return (*ps)[key]
}
func (ps *pdbStates) VerifyPdbStatus(t *testing.T, key string, disruptionsAllowed, currentHealthy, desiredHealthy, expectedPods int32,
disruptedPodMap map[string]metav1.Time) {
actualPDB := ps.Get(key)
expectedStatus := policy.PodDisruptionBudgetStatus{
PodDisruptionsAllowed: disruptionsAllowed,
CurrentHealthy: currentHealthy,
DesiredHealthy: desiredHealthy,
ExpectedPods: expectedPods,
DisruptedPods: disruptedPodMap,
ObservedGeneration: actualPDB.Generation,
}
actualStatus := actualPDB.Status
if !reflect.DeepEqual(actualStatus, expectedStatus) {
debug.PrintStack()
t.Fatalf("PDB %q status mismatch. Expected %+v but got %+v.", key, expectedStatus, actualStatus)
}
}
func (ps *pdbStates) VerifyDisruptionAllowed(t *testing.T, key string, disruptionsAllowed int32) {
pdb := ps.Get(key)
if pdb.Status.PodDisruptionsAllowed != disruptionsAllowed {
debug.PrintStack()
t.Fatalf("PodDisruptionAllowed mismatch for PDB %q. Expected %v but got %v.", key, disruptionsAllowed, pdb.Status.PodDisruptionsAllowed)
}
}
type disruptionController struct {
*DisruptionController
podStore cache.Store
pdbStore cache.Store
rcStore cache.Store
rsStore cache.Store
dStore cache.Store
ssStore cache.Store
}
func newFakeDisruptionController() (*disruptionController, *pdbStates) {
ps := &pdbStates{}
informerFactory := informers.NewSharedInformerFactory(nil, controller.NoResyncPeriodFunc())
dc := NewDisruptionController(
informerFactory.Core().V1().Pods(),
informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Extensions().V1beta1().ReplicaSets(),
informerFactory.Extensions().V1beta1().Deployments(),
informerFactory.Apps().V1beta1().StatefulSets(),
nil,
)
dc.getUpdater = func() updater { return ps.Set }
dc.podListerSynced = alwaysReady
dc.pdbListerSynced = alwaysReady
dc.rcListerSynced = alwaysReady
dc.rsListerSynced = alwaysReady
dc.dListerSynced = alwaysReady
dc.ssListerSynced = alwaysReady
return &disruptionController{
dc,
informerFactory.Core().V1().Pods().Informer().GetStore(),
informerFactory.Policy().V1beta1().PodDisruptionBudgets().Informer().GetStore(),
informerFactory.Core().V1().ReplicationControllers().Informer().GetStore(),
informerFactory.Extensions().V1beta1().ReplicaSets().Informer().GetStore(),
informerFactory.Extensions().V1beta1().Deployments().Informer().GetStore(),
informerFactory.Apps().V1beta1().StatefulSets().Informer().GetStore(),
}, ps
}
func fooBar() map[string]string {
return map[string]string{"foo": "bar"}
}
func newSel(labels map[string]string) *metav1.LabelSelector {
return &metav1.LabelSelector{MatchLabels: labels}
}
func newSelFooBar() *metav1.LabelSelector {
return newSel(map[string]string{"foo": "bar"})
}
func newMinAvailablePodDisruptionBudget(t *testing.T, minAvailable intstr.IntOrString) (*policy.PodDisruptionBudget, string) {
pdb := &policy.PodDisruptionBudget{
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "18",
},
Spec: policy.PodDisruptionBudgetSpec{
MinAvailable: &minAvailable,
Selector: newSelFooBar(),
},
}
pdbName, err := controller.KeyFunc(pdb)
if err != nil {
t.Fatalf("Unexpected error naming pdb %q: %v", pdb.Name, err)
}
return pdb, pdbName
}
func newMaxUnavailablePodDisruptionBudget(t *testing.T, maxUnavailable intstr.IntOrString) (*policy.PodDisruptionBudget, string) {
pdb := &policy.PodDisruptionBudget{
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "18",
},
Spec: policy.PodDisruptionBudgetSpec{
MaxUnavailable: &maxUnavailable,
Selector: newSelFooBar(),
},
}
pdbName, err := controller.KeyFunc(pdb)
if err != nil {
t.Fatalf("Unexpected error naming pdb %q: %v", pdb.Name, err)
}
return pdb, pdbName
}
func updatePodOwnerToRc(t *testing.T, pod *v1.Pod, rc *v1.ReplicationController) {
var controllerReference metav1.OwnerReference
var trueVar = true
controllerReference = metav1.OwnerReference{UID: rc.UID, APIVersion: controllerKindRC.GroupVersion().String(), Kind: controllerKindRC.Kind, Name: rc.Name, Controller: &trueVar}
pod.OwnerReferences = append(pod.OwnerReferences, controllerReference)
}
func updatePodOwnerToRs(t *testing.T, pod *v1.Pod, rs *extensions.ReplicaSet) {
var controllerReference metav1.OwnerReference
var trueVar = true
controllerReference = metav1.OwnerReference{UID: rs.UID, APIVersion: controllerKindRS.GroupVersion().String(), Kind: controllerKindRS.Kind, Name: rs.Name, Controller: &trueVar}
pod.OwnerReferences = append(pod.OwnerReferences, controllerReference)
}
func updatePodOwnerToSs(t *testing.T, pod *v1.Pod, ss *apps.StatefulSet) {
var controllerReference metav1.OwnerReference
var trueVar = true
controllerReference = metav1.OwnerReference{UID: ss.UID, APIVersion: controllerKindSS.GroupVersion().String(), Kind: controllerKindSS.Kind, Name: ss.Name, Controller: &trueVar}
pod.OwnerReferences = append(pod.OwnerReferences, controllerReference)
}
func newPod(t *testing.T, name string) (*v1.Pod, string) {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Annotations: make(map[string]string),
Name: name,
Namespace: metav1.NamespaceDefault,
ResourceVersion: "18",
Labels: fooBar(),
},
Spec: v1.PodSpec{},
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{Type: v1.PodReady, Status: v1.ConditionTrue},
},
},
}
podName, err := controller.KeyFunc(pod)
if err != nil {
t.Fatalf("Unexpected error naming pod %q: %v", pod.Name, err)
}
return pod, podName
}
func newReplicationController(t *testing.T, size int32) (*v1.ReplicationController, string) {
rc := &v1.ReplicationController{
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "18",
Labels: fooBar(),
},
Spec: v1.ReplicationControllerSpec{
Replicas: &size,
Selector: fooBar(),
},
}
rcName, err := controller.KeyFunc(rc)
if err != nil {
t.Fatalf("Unexpected error naming RC %q", rc.Name)
}
return rc, rcName
}
func newDeployment(t *testing.T, size int32) (*extensions.Deployment, string) {
d := &extensions.Deployment{
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "18",
Labels: fooBar(),
},
Spec: extensions.DeploymentSpec{
Replicas: &size,
Selector: newSelFooBar(),
},
}
dName, err := controller.KeyFunc(d)
if err != nil {
t.Fatalf("Unexpected error naming Deployment %q: %v", d.Name, err)
}
return d, dName
}
func newReplicaSet(t *testing.T, size int32) (*extensions.ReplicaSet, string) {
rs := &extensions.ReplicaSet{
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "18",
Labels: fooBar(),
},
Spec: extensions.ReplicaSetSpec{
Replicas: &size,
Selector: newSelFooBar(),
},
}
rsName, err := controller.KeyFunc(rs)
if err != nil {
t.Fatalf("Unexpected error naming ReplicaSet %q: %v", rs.Name, err)
}
return rs, rsName
}
func newStatefulSet(t *testing.T, size int32) (*apps.StatefulSet, string) {
ss := &apps.StatefulSet{
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
Namespace: metav1.NamespaceDefault,
ResourceVersion: "18",
Labels: fooBar(),
},
Spec: apps.StatefulSetSpec{
Replicas: &size,
Selector: newSelFooBar(),
},
}
ssName, err := controller.KeyFunc(ss)
if err != nil {
t.Fatalf("Unexpected error naming StatefulSet %q: %v", ss.Name, err)
}
return ss, ssName
}
func update(t *testing.T, store cache.Store, obj interface{}) {
if err := store.Update(obj); err != nil {
t.Fatalf("Could not add %+v to %+v: %v", obj, store, err)
}
}
func add(t *testing.T, store cache.Store, obj interface{}) {
if err := store.Add(obj); err != nil {
t.Fatalf("Could not add %+v to %+v: %v", obj, store, err)
}
}
// Create one with no selector. Verify it matches 0 pods.
func TestNoSelector(t *testing.T) {
dc, ps := newFakeDisruptionController()
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(3))
pdb.Spec.Selector = &metav1.LabelSelector{}
pod, _ := newPod(t, "yo-yo-yo")
add(t, dc.pdbStore, pdb)
dc.sync(pdbName)
ps.VerifyPdbStatus(t, pdbName, 0, 0, 3, 0, map[string]metav1.Time{})
add(t, dc.podStore, pod)
dc.sync(pdbName)
ps.VerifyPdbStatus(t, pdbName, 0, 0, 3, 0, map[string]metav1.Time{})
}
// Verify that available/expected counts go up as we add pods, then verify that
// available count goes down when we make a pod unavailable.
func TestUnavailable(t *testing.T) {
dc, ps := newFakeDisruptionController()
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(3))
add(t, dc.pdbStore, pdb)
dc.sync(pdbName)
// Add three pods, verifying that the counts go up at each step.
pods := []*v1.Pod{}
for i := int32(0); i < 4; i++ {
ps.VerifyPdbStatus(t, pdbName, 0, i, 3, i, map[string]metav1.Time{})
pod, _ := newPod(t, fmt.Sprintf("yo-yo-yo %d", i))
pods = append(pods, pod)
add(t, dc.podStore, pod)
dc.sync(pdbName)
}
ps.VerifyPdbStatus(t, pdbName, 1, 4, 3, 4, map[string]metav1.Time{})
// Now set one pod as unavailable
pods[0].Status.Conditions = []v1.PodCondition{}
update(t, dc.podStore, pods[0])
dc.sync(pdbName)
// Verify expected update
ps.VerifyPdbStatus(t, pdbName, 0, 3, 3, 4, map[string]metav1.Time{})
}
// Verify that an integer MaxUnavailable won't
// allow a disruption for pods with no controller.
func TestIntegerMaxUnavailable(t *testing.T) {
dc, ps := newFakeDisruptionController()
pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(1))
add(t, dc.pdbStore, pdb)
dc.sync(pdbName)
// This verifies that when a PDB has 0 pods, disruptions are not allowed.
ps.VerifyDisruptionAllowed(t, pdbName, 0)
pod, _ := newPod(t, "naked")
add(t, dc.podStore, pod)
dc.sync(pdbName)
ps.VerifyDisruptionAllowed(t, pdbName, 0)
}
// Verify that an integer MaxUnavailable will recompute allowed disruptions when the scale of
// the selected pod's controller is modified.
func TestIntegerMaxUnavailableWithScaling(t *testing.T) {
dc, ps := newFakeDisruptionController()
pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(2))
add(t, dc.pdbStore, pdb)
rs, _ := newReplicaSet(t, 7)
add(t, dc.rsStore, rs)
pod, _ := newPod(t, "pod")
updatePodOwnerToRs(t, pod, rs)
add(t, dc.podStore, pod)
dc.sync(pdbName)
ps.VerifyPdbStatus(t, pdbName, 0, 1, 5, 7, map[string]metav1.Time{})
// Update scale of ReplicaSet and check PDB
rs.Spec.Replicas = to.Int32Ptr(5)
update(t, dc.rsStore, rs)
dc.sync(pdbName)
ps.VerifyPdbStatus(t, pdbName, 0, 1, 3, 5, map[string]metav1.Time{})
}
// Create a pod with no controller, and verify that a PDB with a percentage
// specified won't allow a disruption.
func TestNakedPod(t *testing.T) {
dc, ps := newFakeDisruptionController()
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("28%"))
add(t, dc.pdbStore, pdb)
dc.sync(pdbName)
// This verifies that when a PDB has 0 pods, disruptions are not allowed.
ps.VerifyDisruptionAllowed(t, pdbName, 0)
pod, _ := newPod(t, "naked")
add(t, dc.podStore, pod)
dc.sync(pdbName)
ps.VerifyDisruptionAllowed(t, pdbName, 0)
}
// Verify that we count the scale of a ReplicaSet even when it has no Deployment.
func TestReplicaSet(t *testing.T) {
dc, ps := newFakeDisruptionController()
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("20%"))
add(t, dc.pdbStore, pdb)
rs, _ := newReplicaSet(t, 10)
add(t, dc.rsStore, rs)
pod, _ := newPod(t, "pod")
updatePodOwnerToRs(t, pod, rs)
add(t, dc.podStore, pod)
dc.sync(pdbName)
ps.VerifyPdbStatus(t, pdbName, 0, 1, 2, 10, map[string]metav1.Time{})
}
// Verify that having multiple controllers doesn't allow the PDB to permit disruptions.
func TestMultipleControllers(t *testing.T) {
const rcCount = 2
const podCount = 2
dc, ps := newFakeDisruptionController()
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("1%"))
add(t, dc.pdbStore, pdb)
pods := []*v1.Pod{}
for i := 0; i < podCount; i++ {
pod, _ := newPod(t, fmt.Sprintf("pod %d", i))
pods = append(pods, pod)
add(t, dc.podStore, pod)
}
dc.sync(pdbName)
// No controllers yet => no disruption allowed
ps.VerifyDisruptionAllowed(t, pdbName, 0)
rc, _ := newReplicationController(t, 1)
rc.Name = "rc 1"
for i := 0; i < podCount; i++ {
updatePodOwnerToRc(t, pods[i], rc)
}
add(t, dc.rcStore, rc)
dc.sync(pdbName)
// One RC and 200%>1% healthy => disruption allowed
ps.VerifyDisruptionAllowed(t, pdbName, 1)
rc, _ = newReplicationController(t, 1)
rc.Name = "rc 2"
for i := 0; i < podCount; i++ {
updatePodOwnerToRc(t, pods[i], rc)
}
add(t, dc.rcStore, rc)
dc.sync(pdbName)
// 100%>1% healthy BUT two RCs => no disruption allowed
// TODO: Find out if this assert is still needed
//ps.VerifyDisruptionAllowed(t, pdbName, 0)
}
func TestReplicationController(t *testing.T) {
// The budget in this test matches foo=bar, but the RC and its pods match
// {foo=bar, baz=quux}. Later, when we add a rogue pod with only a foo=bar
// label, it will match the budget but have no controllers, which should
// trigger the controller to set PodDisruptionAllowed to false.
labels := map[string]string{
"foo": "bar",
"baz": "quux",
}
dc, ps := newFakeDisruptionController()
// 34% of 3 pods should round up to 2
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("34%"))
add(t, dc.pdbStore, pdb)
rc, _ := newReplicationController(t, 3)
rc.Spec.Selector = labels
add(t, dc.rcStore, rc)
dc.sync(pdbName)
// It starts out at 0 expected because, with no pods, the PDB doesn't know
// about the RC. This is a known bug. TODO(mml): file issue
ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]metav1.Time{})
pods := []*v1.Pod{}
for i := int32(0); i < 3; i++ {
pod, _ := newPod(t, fmt.Sprintf("foobar %d", i))
updatePodOwnerToRc(t, pod, rc)
pods = append(pods, pod)
pod.Labels = labels
add(t, dc.podStore, pod)
dc.sync(pdbName)
if i < 2 {
ps.VerifyPdbStatus(t, pdbName, 0, i+1, 2, 3, map[string]metav1.Time{})
} else {
ps.VerifyPdbStatus(t, pdbName, 1, 3, 2, 3, map[string]metav1.Time{})
}
}
rogue, _ := newPod(t, "rogue")
add(t, dc.podStore, rogue)
dc.sync(pdbName)
ps.VerifyDisruptionAllowed(t, pdbName, 0)
}
func TestStatefulSetController(t *testing.T) {
labels := map[string]string{
"foo": "bar",
"baz": "quux",
}
dc, ps := newFakeDisruptionController()
// 34% of 3 pods should round up to 2
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("34%"))
add(t, dc.pdbStore, pdb)
ss, _ := newStatefulSet(t, 3)
add(t, dc.ssStore, ss)
dc.sync(pdbName)
// It starts out at 0 expected because, with no pods, the PDB doesn't know
// about the SS. This is a known bug. TODO(mml): file issue
ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]metav1.Time{})
pods := []*v1.Pod{}
for i := int32(0); i < 3; i++ {
pod, _ := newPod(t, fmt.Sprintf("foobar %d", i))
updatePodOwnerToSs(t, pod, ss)
pods = append(pods, pod)
pod.Labels = labels
add(t, dc.podStore, pod)
dc.sync(pdbName)
if i < 2 {
ps.VerifyPdbStatus(t, pdbName, 0, i+1, 2, 3, map[string]metav1.Time{})
} else {
ps.VerifyPdbStatus(t, pdbName, 1, 3, 2, 3, map[string]metav1.Time{})
}
}
}
func TestTwoControllers(t *testing.T) {
// Most of this test is in verifying intermediate cases as we define the
// three controllers and create the pods.
rcLabels := map[string]string{
"foo": "bar",
"baz": "quux",
}
dLabels := map[string]string{
"foo": "bar",
"baz": "quuux",
}
dc, ps := newFakeDisruptionController()
// These constants are related, but I avoid calculating the correct values in
// code. If you update a parameter here, recalculate the correct values for
// all of them. Further down in the test, we use these to control loops, and
// that level of logic is enough complexity for me.
const collectionSize int32 = 11 // How big each collection is
const minAvailable string = "28%" // minAvailable we'll specify
const minimumOne int32 = 4 // integer minimum with one controller
const minimumTwo int32 = 7 // integer minimum with two controllers
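// As a sanity check on the constants above (assuming the percentage
// minAvailable is rounded up against the total expected pod count):
//   one controller:  ceil(0.28 * 11) = ceil(3.08) = 4  -> minimumOne
//   two controllers: ceil(0.28 * 22) = ceil(6.16) = 7  -> minimumTwo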
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("28%"))
add(t, dc.pdbStore, pdb)
rc, _ := newReplicationController(t, collectionSize)
rc.Spec.Selector = rcLabels
add(t, dc.rcStore, rc)
dc.sync(pdbName)
ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]metav1.Time{})
pods := []*v1.Pod{}
unavailablePods := collectionSize - minimumOne - 1
for i := int32(1); i <= collectionSize; i++ {
pod, _ := newPod(t, fmt.Sprintf("quux %d", i))
updatePodOwnerToRc(t, pod, rc)
pods = append(pods, pod)
pod.Labels = rcLabels
if i <= unavailablePods {
pod.Status.Conditions = []v1.PodCondition{}
}
add(t, dc.podStore, pod)
dc.sync(pdbName)
if i <= unavailablePods {
ps.VerifyPdbStatus(t, pdbName, 0, 0, minimumOne, collectionSize, map[string]metav1.Time{})
} else if i-unavailablePods <= minimumOne {
ps.VerifyPdbStatus(t, pdbName, 0, i-unavailablePods, minimumOne, collectionSize, map[string]metav1.Time{})
} else {
ps.VerifyPdbStatus(t, pdbName, 1, i-unavailablePods, minimumOne, collectionSize, map[string]metav1.Time{})
}
}
d, _ := newDeployment(t, collectionSize)
d.Spec.Selector = newSel(dLabels)
add(t, dc.dStore, d)
dc.sync(pdbName)
ps.VerifyPdbStatus(t, pdbName, 1, minimumOne+1, minimumOne, collectionSize, map[string]metav1.Time{})
rs, _ := newReplicaSet(t, collectionSize)
rs.Spec.Selector = newSel(dLabels)
rs.Labels = dLabels
add(t, dc.rsStore, rs)
dc.sync(pdbName)
ps.VerifyPdbStatus(t, pdbName, 1, minimumOne+1, minimumOne, collectionSize, map[string]metav1.Time{})
// By the end of this loop, the number of ready pods should be minimumTwo+2.
unavailablePods = 2*collectionSize - (minimumTwo + 2) - unavailablePods
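// Concretely, with collectionSize = 11: unavailablePods was 6 after the
// first loop, so this works out to 22 - 9 - 6 = 7 unready pods in the second
// collection, leaving minimumTwo+2 = 9 ready pods once this loop finishes.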
for i := int32(1); i <= collectionSize; i++ {
pod, _ := newPod(t, fmt.Sprintf("quuux %d", i))
updatePodOwnerToRs(t, pod, rs)
pods = append(pods, pod)
pod.Labels = dLabels
if i <= unavailablePods {
pod.Status.Conditions = []v1.PodCondition{}
}
add(t, dc.podStore, pod)
dc.sync(pdbName)
if i <= unavailablePods {
ps.VerifyPdbStatus(t, pdbName, 0, minimumOne+1, minimumTwo, 2*collectionSize, map[string]metav1.Time{})
} else if i-unavailablePods <= minimumTwo-(minimumOne+1) {
ps.VerifyPdbStatus(t, pdbName, 0, (minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize, map[string]metav1.Time{})
} else {
ps.VerifyPdbStatus(t, pdbName, i-unavailablePods-(minimumTwo-(minimumOne+1)),
(minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize, map[string]metav1.Time{})
}
}
// Now we verify we can bring down 1 pod and a disruption is still permitted,
// but if we bring down two, it's not. Then we make the pod ready again and
// verify that a disruption is permitted again.
ps.VerifyPdbStatus(t, pdbName, 2, 2+minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})
pods[collectionSize-1].Status.Conditions = []v1.PodCondition{}
update(t, dc.podStore, pods[collectionSize-1])
dc.sync(pdbName)
ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})
pods[collectionSize-2].Status.Conditions = []v1.PodCondition{}
update(t, dc.podStore, pods[collectionSize-2])
dc.sync(pdbName)
ps.VerifyPdbStatus(t, pdbName, 0, minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})
pods[collectionSize-1].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
update(t, dc.podStore, pods[collectionSize-1])
dc.sync(pdbName)
ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})
}
// Verify that syncing a PDB that doesn't exist returns no error.
func TestPDBNotExist(t *testing.T) {
dc, _ := newFakeDisruptionController()
pdb, _ := newMinAvailablePodDisruptionBudget(t, intstr.FromString("67%"))
add(t, dc.pdbStore, pdb)
if err := dc.sync("notExist"); err != nil {
t.Errorf("Unexpected error: %v, expect nil", err)
}
}
func TestUpdateDisruptedPods(t *testing.T) {
dc, ps := newFakeDisruptionController()
dc.recheckQueue = workqueue.NewNamedDelayingQueue("pdb-queue")
pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(1))
currentTime := time.Now()
pdb.Status.DisruptedPods = map[string]metav1.Time{
"p1": {Time: currentTime}, // Should be removed, pod deletion started.
"p2": {Time: currentTime.Add(-5 * time.Minute)}, // Should be removed, expired.
"p3": {Time: currentTime}, // Should remain, pod untouched.
"notthere": {Time: currentTime}, // Should be removed, pod deleted.
}
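// The -5 minute entry is "expired" relative to the disruption controller's
// deletion timeout (2 minutes at this point in the codebase); anything older
// than that timeout is pruned from DisruptedPods on the next sync.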
add(t, dc.pdbStore, pdb)
pod1, _ := newPod(t, "p1")
pod1.DeletionTimestamp = &metav1.Time{Time: time.Now()}
pod2, _ := newPod(t, "p2")
pod3, _ := newPod(t, "p3")
add(t, dc.podStore, pod1)
add(t, dc.podStore, pod2)
add(t, dc.podStore, pod3)
dc.sync(pdbName)
ps.VerifyPdbStatus(t, pdbName, 0, 1, 1, 3, map[string]metav1.Time{"p3": {Time: currentTime}})
}


@ -1,19 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package controller contains code for controllers (like the replication
// controller).
package controller // import "k8s.io/kubernetes/pkg/controller"


@ -1,75 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"endpoints_controller.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/endpoint",
deps = [
"//pkg/api/v1/endpoints:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/util/metrics:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["endpoints_controller_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/api/testapi:go_default_library",
"//pkg/api/v1/endpoints:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/controller:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@ -1,10 +0,0 @@
approvers:
- bowei
- MrHohn
- thockin
- matchstick
reviewers:
- bowei
- MrHohn
- thockin
- matchstick


@ -1,19 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package endpoint provides the EndpointController implementation
// to manage and sync service endpoints.
package endpoint // import "k8s.io/kubernetes/pkg/controller/endpoint"


@ -1,598 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package endpoint
import (
"fmt"
"reflect"
"strconv"
"time"
"k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/api/v1/endpoints"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/util/metrics"
"github.com/golang/glog"
)
const (
// maxRetries is the number of times a service will be retried before it is dropped out of the queue.
// With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the
// sequence of delays between successive queuings of a service.
//
// 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
maxRetries = 15
// An annotation on the Service denoting if the endpoints controller should
// go ahead and create endpoints for unready pods. This annotation is
// currently only used by StatefulSets, where we need the pod to be DNS
// resolvable during initialization and termination. In this situation we
// create a headless Service just for the StatefulSet, and clients shouldn't
// be using this Service for anything so unready endpoints don't matter.
// Endpoints of these Services retain their DNS records and continue
// receiving traffic for the Service from the moment the kubelet starts all
// containers in the pod and marks it "Running", till the kubelet stops all
// containers and deletes the pod from the apiserver.
// This field is deprecated. v1.Service.PublishNotReadyAddresses will replace it
// in subsequent releases. It will be removed no sooner than 1.13.
TolerateUnreadyEndpointsAnnotation = "service.alpha.kubernetes.io/tolerate-unready-endpoints"
)
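// For reference, the delays listed above follow 5ms * 2^(n-1) for the n-th
// requeue; e.g. the 15th and final attempt waits 5ms * 2^14 ≈ 82s, after
// which handleErr drops the service from the queue.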
var (
keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
)
// NewEndpointController returns a new *EndpointController.
func NewEndpointController(podInformer coreinformers.PodInformer, serviceInformer coreinformers.ServiceInformer,
endpointsInformer coreinformers.EndpointsInformer, client clientset.Interface) *EndpointController {
if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("endpoint_controller", client.CoreV1().RESTClient().GetRateLimiter())
}
e := &EndpointController{
client: client,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "endpoint"),
workerLoopPeriod: time.Second,
}
serviceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: e.enqueueService,
UpdateFunc: func(old, cur interface{}) {
e.enqueueService(cur)
},
DeleteFunc: e.enqueueService,
})
e.serviceLister = serviceInformer.Lister()
e.servicesSynced = serviceInformer.Informer().HasSynced
podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: e.addPod,
UpdateFunc: e.updatePod,
DeleteFunc: e.deletePod,
})
e.podLister = podInformer.Lister()
e.podsSynced = podInformer.Informer().HasSynced
e.endpointsLister = endpointsInformer.Lister()
e.endpointsSynced = endpointsInformer.Informer().HasSynced
return e
}
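// A minimal wiring sketch (illustrative only; factory, client and stopCh
// are assumed to exist in the caller, they are not part of this package):
//
//	factory := informers.NewSharedInformerFactory(client, 0)
//	e := NewEndpointController(factory.Core().V1().Pods(),
//		factory.Core().V1().Services(), factory.Core().V1().Endpoints(), client)
//	factory.Start(stopCh)
//	go e.Run(5, stopCh)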
// EndpointController manages selector-based service endpoints.
type EndpointController struct {
client clientset.Interface
// serviceLister is able to list/get services and is populated by the shared informer passed to
// NewEndpointController.
serviceLister corelisters.ServiceLister
// servicesSynced returns true if the service shared informer has been synced at least once.
// Added as a member to the struct to allow injection for testing.
servicesSynced cache.InformerSynced
// podLister is able to list/get pods and is populated by the shared informer passed to
// NewEndpointController.
podLister corelisters.PodLister
// podsSynced returns true if the pod shared informer has been synced at least once.
// Added as a member to the struct to allow injection for testing.
podsSynced cache.InformerSynced
// endpointsLister is able to list/get endpoints and is populated by the shared informer passed to
// NewEndpointController.
endpointsLister corelisters.EndpointsLister
// endpointsSynced returns true if the endpoints shared informer has been synced at least once.
// Added as a member to the struct to allow injection for testing.
endpointsSynced cache.InformerSynced
// Services that need to be updated. A channel is inappropriate here,
// because it allows services with lots of pods to be serviced much
// more often than services with few pods; it also would cause a
// service that's inserted multiple times to be processed more than
// necessary.
queue workqueue.RateLimitingInterface
// workerLoopPeriod is the time between worker runs. The workers process the queue of service and pod changes.
workerLoopPeriod time.Duration
}
// Run will not return until stopCh is closed. workers determines how many
// endpoints will be handled in parallel.
func (e *EndpointController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer e.queue.ShutDown()
glog.Infof("Starting endpoint controller")
defer glog.Infof("Shutting down endpoint controller")
if !controller.WaitForCacheSync("endpoint", stopCh, e.podsSynced, e.servicesSynced, e.endpointsSynced) {
return
}
for i := 0; i < workers; i++ {
go wait.Until(e.worker, e.workerLoopPeriod, stopCh)
}
go func() {
defer utilruntime.HandleCrash()
e.checkLeftoverEndpoints()
}()
<-stopCh
}
func (e *EndpointController) getPodServiceMemberships(pod *v1.Pod) (sets.String, error) {
set := sets.String{}
services, err := e.serviceLister.GetPodServices(pod)
if err != nil {
// don't log this error because GetPodServices returns an error
// in the common case where no services match the pod.
return set, nil
}
for i := range services {
key, err := keyFunc(services[i])
if err != nil {
return nil, err
}
set.Insert(key)
}
return set, nil
}
// When a pod is added, figure out what services it will be a member of and
// enqueue them. obj must have *v1.Pod type.
func (e *EndpointController) addPod(obj interface{}) {
pod := obj.(*v1.Pod)
services, err := e.getPodServiceMemberships(pod)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Unable to get pod %s/%s's service memberships: %v", pod.Namespace, pod.Name, err))
return
}
for key := range services {
e.queue.Add(key)
}
}
func podToEndpointAddress(pod *v1.Pod) *v1.EndpointAddress {
return &v1.EndpointAddress{
IP: pod.Status.PodIP,
NodeName: &pod.Spec.NodeName,
TargetRef: &v1.ObjectReference{
Kind: "Pod",
Namespace: pod.ObjectMeta.Namespace,
Name: pod.ObjectMeta.Name,
UID: pod.ObjectMeta.UID,
ResourceVersion: pod.ObjectMeta.ResourceVersion,
}}
}
func podChanged(oldPod, newPod *v1.Pod) bool {
// If the pod's deletion timestamp is set, remove endpoint from ready address.
if newPod.DeletionTimestamp != oldPod.DeletionTimestamp {
return true
}
// If the pod's readiness has changed, the associated endpoint address
// will move from the unready endpoints set to the ready endpoints.
// So for the purposes of an endpoint, a readiness change on a pod
// means we have a changed pod.
if podutil.IsPodReady(oldPod) != podutil.IsPodReady(newPod) {
return true
}
// Convert the pod to an EndpointAddress, clear inert fields,
// and see if they are the same.
newEndpointAddress := podToEndpointAddress(newPod)
oldEndpointAddress := podToEndpointAddress(oldPod)
// Ignore the ResourceVersion because it changes
// with every pod update. This allows the comparison to
// show equality if all other relevant fields match.
newEndpointAddress.TargetRef.ResourceVersion = ""
oldEndpointAddress.TargetRef.ResourceVersion = ""
if reflect.DeepEqual(newEndpointAddress, oldEndpointAddress) {
// The pod has not changed in any way that impacts the endpoints
return false
}
return true
}
func determineNeededServiceUpdates(oldServices, services sets.String, podChanged bool) sets.String {
if podChanged {
// if the pod changed, all services in both the old and new sets need to be updated
services = services.Union(oldServices)
} else {
// if only the labels changed, services not common to
// both the new and old service set (i.e. the symmetric
// difference) need to be updated
services = services.Difference(oldServices).Union(oldServices.Difference(services))
}
return services
}
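// For example, with a labels-only change (podChanged == false):
// oldServices = {"ns/a", "ns/b"} and services = {"ns/b", "ns/c"} yields
// {"ns/a", "ns/c"}; "ns/b" matched before and after, so it needs no update.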
// When a pod is updated, figure out what services it used to be a member of
// and what services it will be a member of, and enqueue the union of these.
// old and cur must be *v1.Pod types.
func (e *EndpointController) updatePod(old, cur interface{}) {
newPod := cur.(*v1.Pod)
oldPod := old.(*v1.Pod)
if newPod.ResourceVersion == oldPod.ResourceVersion {
// Periodic resync will send update events for all known pods.
// Two different versions of the same pod will always have different RVs.
return
}
podChangedFlag := podChanged(oldPod, newPod)
// Check if the pod labels (or hostname/subdomain) have changed,
// indicating a possible change in the service membership
labelsChanged := false
if !reflect.DeepEqual(newPod.Labels, oldPod.Labels) ||
!hostNameAndDomainAreEqual(newPod, oldPod) {
labelsChanged = true
}
// If both the pod and labels are unchanged, no update is needed
if !podChangedFlag && !labelsChanged {
return
}
services, err := e.getPodServiceMemberships(newPod)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Unable to get pod %v/%v's service memberships: %v", newPod.Namespace, newPod.Name, err))
return
}
if labelsChanged {
oldServices, err := e.getPodServiceMemberships(oldPod)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Unable to get pod %v/%v's service memberships: %v", oldPod.Namespace, oldPod.Name, err))
return
}
services = determineNeededServiceUpdates(oldServices, services, podChangedFlag)
}
for key := range services {
e.queue.Add(key)
}
}
func hostNameAndDomainAreEqual(pod1, pod2 *v1.Pod) bool {
return pod1.Spec.Hostname == pod2.Spec.Hostname &&
pod1.Spec.Subdomain == pod2.Spec.Subdomain
}
// When a pod is deleted, enqueue the services the pod used to be a member of.
// obj could be an *v1.Pod, or a DeletionFinalStateUnknown marker item.
func (e *EndpointController) deletePod(obj interface{}) {
if _, ok := obj.(*v1.Pod); ok {
// Enqueue all the services that the pod used to be a member
// of. This happens to be exactly the same thing we do when a
// pod is added.
e.addPod(obj)
return
}
// If we reached here it means the pod was deleted but its final state is unrecorded.
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return
}
pod, ok := tombstone.Obj.(*v1.Pod)
if !ok {
utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a Pod: %#v", obj))
return
}
glog.V(4).Infof("Enqueuing services of deleted pod %s/%s having final state unrecorded", pod.Namespace, pod.Name)
e.addPod(pod)
}
// obj could be an *v1.Service, or a DeletionFinalStateUnknown marker item.
func (e *EndpointController) enqueueService(obj interface{}) {
key, err := keyFunc(obj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
return
}
e.queue.Add(key)
}
// worker runs a worker thread that just dequeues items, processes them, and
// marks them done. You may run as many of these in parallel as you wish; the
// workqueue guarantees that they will not end up processing the same service
// at the same time.
func (e *EndpointController) worker() {
for e.processNextWorkItem() {
}
}
func (e *EndpointController) processNextWorkItem() bool {
eKey, quit := e.queue.Get()
if quit {
return false
}
defer e.queue.Done(eKey)
err := e.syncService(eKey.(string))
e.handleErr(err, eKey)
return true
}
func (e *EndpointController) handleErr(err error, key interface{}) {
if err == nil {
e.queue.Forget(key)
return
}
if e.queue.NumRequeues(key) < maxRetries {
glog.V(2).Infof("Error syncing endpoints for service %q, retrying. Error: %v", key, err)
e.queue.AddRateLimited(key)
return
}
glog.Warningf("Dropping service %q out of the queue: %v", key, err)
e.queue.Forget(key)
utilruntime.HandleError(err)
}
func (e *EndpointController) syncService(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Since(startTime))
}()
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return err
}
service, err := e.serviceLister.Services(namespace).Get(name)
if err != nil {
// Delete the corresponding endpoint, as the service has been deleted.
// TODO: Please note that this will delete an endpoint when a
// service is deleted. However, if we're down at the time when
// the service is deleted, we will miss that deletion, so this
// doesn't completely solve the problem. See #6877.
err = e.client.CoreV1().Endpoints(namespace).Delete(name, nil)
if err != nil && !errors.IsNotFound(err) {
return err
}
return nil
}
if service.Spec.Selector == nil {
// services without a selector receive no endpoints from this controller;
// these services will receive the endpoints that are created out-of-band via the REST API.
return nil
}
glog.V(5).Infof("About to update endpoints for service %q", key)
pods, err := e.podLister.Pods(service.Namespace).List(labels.Set(service.Spec.Selector).AsSelectorPreValidated())
if err != nil {
// Since we're getting stuff from a local cache, it is
// basically impossible to get this error.
return err
}
// If the user specified the older (deprecated) annotation, we have to respect it.
tolerateUnreadyEndpoints := service.Spec.PublishNotReadyAddresses
if v, ok := service.Annotations[TolerateUnreadyEndpointsAnnotation]; ok {
b, err := strconv.ParseBool(v)
if err == nil {
tolerateUnreadyEndpoints = b
} else {
utilruntime.HandleError(fmt.Errorf("Failed to parse annotation %v: %v", TolerateUnreadyEndpointsAnnotation, err))
}
}
subsets := []v1.EndpointSubset{}
var totalReadyEps int = 0
var totalNotReadyEps int = 0
for _, pod := range pods {
if len(pod.Status.PodIP) == 0 {
glog.V(5).Infof("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name)
continue
}
if !tolerateUnreadyEndpoints && pod.DeletionTimestamp != nil {
glog.V(5).Infof("Pod is being deleted %s/%s", pod.Namespace, pod.Name)
continue
}
epa := *podToEndpointAddress(pod)
hostname := pod.Spec.Hostname
if len(hostname) > 0 && pod.Spec.Subdomain == service.Name && service.Namespace == pod.Namespace {
epa.Hostname = hostname
}
// Allow headless service not to have ports.
if len(service.Spec.Ports) == 0 {
if service.Spec.ClusterIP == api.ClusterIPNone {
subsets, totalReadyEps, totalNotReadyEps = addEndpointSubset(subsets, pod, epa, nil, tolerateUnreadyEndpoints)
// No need to repack subsets for headless service without ports.
}
} else {
for i := range service.Spec.Ports {
servicePort := &service.Spec.Ports[i]
portName := servicePort.Name
portProto := servicePort.Protocol
portNum, err := podutil.FindPort(pod, servicePort)
if err != nil {
glog.V(4).Infof("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err)
continue
}
var readyEps, notReadyEps int
epp := &v1.EndpointPort{Name: portName, Port: int32(portNum), Protocol: portProto}
subsets, readyEps, notReadyEps = addEndpointSubset(subsets, pod, epa, epp, tolerateUnreadyEndpoints)
totalReadyEps = totalReadyEps + readyEps
totalNotReadyEps = totalNotReadyEps + notReadyEps
}
subsets = endpoints.RepackSubsets(subsets)
}
}
// See if there's actually an update here.
currentEndpoints, err := e.endpointsLister.Endpoints(service.Namespace).Get(service.Name)
if err != nil {
if errors.IsNotFound(err) {
currentEndpoints = &v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: service.Name,
Labels: service.Labels,
},
}
} else {
return err
}
}
createEndpoints := len(currentEndpoints.ResourceVersion) == 0
if !createEndpoints &&
apiequality.Semantic.DeepEqual(currentEndpoints.Subsets, subsets) &&
apiequality.Semantic.DeepEqual(currentEndpoints.Labels, service.Labels) {
glog.V(5).Infof("endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name)
return nil
}
newEndpoints := currentEndpoints.DeepCopy()
newEndpoints.Subsets = subsets
newEndpoints.Labels = service.Labels
if newEndpoints.Annotations == nil {
newEndpoints.Annotations = make(map[string]string)
}
glog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, totalReadyEps, totalNotReadyEps)
if createEndpoints {
// No previous endpoints, create them
_, err = e.client.CoreV1().Endpoints(service.Namespace).Create(newEndpoints)
} else {
// Pre-existing
_, err = e.client.CoreV1().Endpoints(service.Namespace).Update(newEndpoints)
}
if err != nil {
if createEndpoints && errors.IsForbidden(err) {
// A request is forbidden primarily for two reasons:
// 1. namespace is terminating, endpoint creation is not allowed by default.
// 2. policy is misconfigured, in which case no service would function anywhere.
// Given the frequency of 1, we log at a lower level.
glog.V(5).Infof("Forbidden from creating endpoints: %v", err)
}
return err
}
return nil
}
// checkLeftoverEndpoints lists all currently existing endpoints and adds their
// service to the queue. This will detect endpoints that exist with no
// corresponding service; these endpoints need to be deleted. We only need to
// do this once on startup, because in steady-state these are detected (but
// some stragglers could have been left behind if the endpoint controller
// reboots).
func (e *EndpointController) checkLeftoverEndpoints() {
list, err := e.endpointsLister.List(labels.Everything())
if err != nil {
utilruntime.HandleError(fmt.Errorf("Unable to list endpoints (%v); orphaned endpoints will not be cleaned up. (They're pretty harmless, but you can restart this component if you want another attempt made.)", err))
return
}
for _, ep := range list {
if _, ok := ep.Annotations[resourcelock.LeaderElectionRecordAnnotationKey]; ok {
// when there are multiple controller-manager instances,
// we observe that it will delete leader-election endpoints after 5min
// and cause re-election
// so skip the delete here
// as leader-election only have endpoints without service
continue
}
key, err := keyFunc(ep)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Unable to get key for endpoint %#v", ep))
continue
}
e.queue.Add(key)
}
}
func addEndpointSubset(subsets []v1.EndpointSubset, pod *v1.Pod, epa v1.EndpointAddress,
epp *v1.EndpointPort, tolerateUnreadyEndpoints bool) ([]v1.EndpointSubset, int, int) {
var readyEps int = 0
var notReadyEps int = 0
ports := []v1.EndpointPort{}
if epp != nil {
ports = append(ports, *epp)
}
if tolerateUnreadyEndpoints || podutil.IsPodReady(pod) {
subsets = append(subsets, v1.EndpointSubset{
Addresses: []v1.EndpointAddress{epa},
Ports: ports,
})
readyEps++
} else if shouldPodBeInEndpoints(pod) {
glog.V(5).Infof("Pod is out of service: %s/%s", pod.Namespace, pod.Name)
subsets = append(subsets, v1.EndpointSubset{
NotReadyAddresses: []v1.EndpointAddress{epa},
Ports: ports,
})
notReadyEps++
}
return subsets, readyEps, notReadyEps
}
func shouldPodBeInEndpoints(pod *v1.Pod) bool {
switch pod.Spec.RestartPolicy {
case v1.RestartPolicyNever:
return pod.Status.Phase != v1.PodFailed && pod.Status.Phase != v1.PodSucceeded
case v1.RestartPolicyOnFailure:
return pod.Status.Phase != v1.PodSucceeded
default:
return true
}
}
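// For example, a Failed pod with RestartPolicyOnFailure is still listed (its
// containers may be restarted), while a Succeeded pod is dropped entirely;
// with the default RestartPolicyAlways every phase stays in the endpoints.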

File diff suppressed because it is too large.


@ -1,91 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"errors.go",
"garbagecollector.go",
"graph.go",
"graph_builder.go",
"operations.go",
"patch.go",
"uid_cache.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/garbagecollector",
deps = [
"//pkg/controller:go_default_library",
"//pkg/controller/garbagecollector/metaonly:go_default_library",
"//pkg/util/reflector/prometheus:go_default_library",
"//pkg/util/workqueue/prometheus:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/golang/groupcache/lru:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["garbagecollector_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/core/install:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/controller/garbagecollector/metaonly:all-srcs",
],
tags = ["automanaged"],
)


@ -1,8 +0,0 @@
approvers:
- caesarxuchao
- lavalamp
- deads2k
reviewers:
- caesarxuchao
- lavalamp
- deads2k


@ -1,43 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package garbagecollector
import (
"fmt"
)
type restMappingError struct {
kind string
version string
}
func (r *restMappingError) Error() string {
versionKind := fmt.Sprintf("%s/%s", r.version, r.kind)
return fmt.Sprintf("unable to get REST mapping for %s.", versionKind)
}
// Message prints more details
func (r *restMappingError) Message() string {
versionKind := fmt.Sprintf("%s/%s", r.version, r.kind)
errMsg := fmt.Sprintf("unable to get REST mapping for %s. ", versionKind)
errMsg += fmt.Sprintf("If %s is an invalid resource, then you should manually remove ownerReferences that refer to %s objects.", versionKind, versionKind)
return errMsg
}
func newRESTMappingError(kind, version string) *restMappingError {
return &restMappingError{kind: kind, version: version}
}


@ -1,672 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package garbagecollector
import (
"fmt"
"reflect"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/informers"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/controller"
_ "k8s.io/kubernetes/pkg/util/reflector/prometheus" // for reflector metric registration
// install the prometheus plugin
_ "k8s.io/kubernetes/pkg/util/workqueue/prometheus"
// import known versions
_ "k8s.io/client-go/kubernetes"
)
const ResourceResyncTime time.Duration = 0
// GarbageCollector runs reflectors to watch for changes of managed API
// objects, funnels the results to a single-threaded dependencyGraphBuilder,
// which builds a graph caching the dependencies among objects. Triggered by the
// graph changes, the dependencyGraphBuilder enqueues objects that can
// potentially be garbage-collected to the `attemptToDelete` queue, and enqueues
// objects whose dependents need to be orphaned to the `attemptToOrphan` queue.
// The GarbageCollector has workers who consume these two queues, send requests
// to the API server to delete/update the objects accordingly.
// Note that having the dependencyGraphBuilder notify the garbage collector
// ensures that the garbage collector operates with a graph that is at least as
// up to date as the notification is sent.
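// A rough sketch of the flow described above:
//
//	monitors/informers -> graphChanges -> dependencyGraphBuilder
//	    -> attemptToDelete / attemptToOrphan -> GC workers -> API server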
type GarbageCollector struct {
restMapper resettableRESTMapper
dynamicClient dynamic.Interface
// garbage collector attempts to delete the items in attemptToDelete queue when the time is ripe.
attemptToDelete workqueue.RateLimitingInterface
// garbage collector attempts to orphan the dependents of the items in the attemptToOrphan queue, then deletes the items.
attemptToOrphan workqueue.RateLimitingInterface
dependencyGraphBuilder *GraphBuilder
// GC caches the owners that do not exist according to the API server.
absentOwnerCache *UIDCache
sharedInformers informers.SharedInformerFactory
workerLock sync.RWMutex
}
func NewGarbageCollector(
dynamicClient dynamic.Interface,
mapper resettableRESTMapper,
deletableResources map[schema.GroupVersionResource]struct{},
ignoredResources map[schema.GroupResource]struct{},
sharedInformers informers.SharedInformerFactory,
informersStarted <-chan struct{},
) (*GarbageCollector, error) {
attemptToDelete := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_attempt_to_delete")
attemptToOrphan := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_attempt_to_orphan")
absentOwnerCache := NewUIDCache(500)
gc := &GarbageCollector{
dynamicClient: dynamicClient,
restMapper: mapper,
attemptToDelete: attemptToDelete,
attemptToOrphan: attemptToOrphan,
absentOwnerCache: absentOwnerCache,
}
gb := &GraphBuilder{
dynamicClient: dynamicClient,
informersStarted: informersStarted,
restMapper: mapper,
graphChanges: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_graph_changes"),
uidToNode: &concurrentUIDToNode{
uidToNode: make(map[types.UID]*node),
},
attemptToDelete: attemptToDelete,
attemptToOrphan: attemptToOrphan,
absentOwnerCache: absentOwnerCache,
sharedInformers: sharedInformers,
ignoredResources: ignoredResources,
}
if err := gb.syncMonitors(deletableResources); err != nil {
utilruntime.HandleError(fmt.Errorf("failed to sync all monitors: %v", err))
}
gc.dependencyGraphBuilder = gb
return gc, nil
}
// resyncMonitors starts or stops resource monitors as needed to ensure that all
// (and only) those resources present in the map are monitored.
func (gc *GarbageCollector) resyncMonitors(deletableResources map[schema.GroupVersionResource]struct{}) error {
if err := gc.dependencyGraphBuilder.syncMonitors(deletableResources); err != nil {
return err
}
gc.dependencyGraphBuilder.startMonitors()
return nil
}
func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer gc.attemptToDelete.ShutDown()
defer gc.attemptToOrphan.ShutDown()
defer gc.dependencyGraphBuilder.graphChanges.ShutDown()
glog.Infof("Starting garbage collector controller")
defer glog.Infof("Shutting down garbage collector controller")
go gc.dependencyGraphBuilder.Run(stopCh)
if !controller.WaitForCacheSync("garbage collector", stopCh, gc.dependencyGraphBuilder.IsSynced) {
return
}
glog.Infof("Garbage collector: all resource monitors have synced. Proceeding to collect garbage")
// gc workers
for i := 0; i < workers; i++ {
go wait.Until(gc.runAttemptToDeleteWorker, 1*time.Second, stopCh)
go wait.Until(gc.runAttemptToOrphanWorker, 1*time.Second, stopCh)
}
<-stopCh
}
// resettableRESTMapper is a RESTMapper which is capable of resetting itself
// from discovery.
type resettableRESTMapper interface {
meta.RESTMapper
Reset()
}
// Sync periodically resyncs the garbage collector when new resources are
// observed from discovery. When new resources are detected, Sync will stop all
// GC workers, reset gc.restMapper, and resync the monitors.
//
// Note that discoveryClient should NOT be shared with gc.restMapper, otherwise
// the mapper's underlying discovery client will be unnecessarily reset during
// the course of detecting new resources.
func (gc *GarbageCollector) Sync(discoveryClient discovery.ServerResourcesInterface, period time.Duration, stopCh <-chan struct{}) {
oldResources := make(map[schema.GroupVersionResource]struct{})
wait.Until(func() {
// Get the current resource list from discovery.
newResources := GetDeletableResources(discoveryClient)
// This can occur if there is an internal error in GetDeletableResources.
if len(newResources) == 0 {
glog.V(2).Infof("no resources reported by discovery, skipping garbage collector sync")
return
}
// Decide whether discovery has reported a change.
if reflect.DeepEqual(oldResources, newResources) {
glog.V(5).Infof("no resource updates from discovery, skipping garbage collector sync")
return
}
// Ensure workers are paused to avoid processing events before informers
// have resynced.
gc.workerLock.Lock()
defer gc.workerLock.Unlock()
// Once we get here, we should not unpause workers until we've successfully synced
attempt := 0
wait.PollImmediateUntil(100*time.Millisecond, func() (bool, error) {
attempt++
// On a reattempt, check if available resources have changed
if attempt > 1 {
newResources = GetDeletableResources(discoveryClient)
if len(newResources) == 0 {
glog.V(2).Infof("no resources reported by discovery (attempt %d)", attempt)
return false, nil
}
}
glog.V(2).Infof("syncing garbage collector with updated resources from discovery (attempt %d): %s", attempt, printDiff(oldResources, newResources))
// Resetting the REST mapper will also invalidate the underlying discovery
// client. This is a leaky abstraction and assumes behavior about the REST
// mapper, but we'll deal with it for now.
gc.restMapper.Reset()
glog.V(4).Infof("reset restmapper")
// Perform the monitor resync and wait for controllers to report cache sync.
//
// NOTE: It's possible that newResources will diverge from the resources
// discovered by restMapper during the call to Reset, since they are
// distinct discovery clients invalidated at different times. For example,
// newResources may contain resources not returned in the restMapper's
// discovery call if the resources appeared in-between the calls. In that
// case, the restMapper will fail to map some of newResources until the next
// attempt.
if err := gc.resyncMonitors(newResources); err != nil {
utilruntime.HandleError(fmt.Errorf("failed to sync resource monitors (attempt %d): %v", attempt, err))
return false, nil
}
glog.V(4).Infof("resynced monitors")
// wait for caches to fill for a while (our sync period) before attempting to rediscover resources and retry syncing.
// this protects us from deadlocks where available resources changed and one of our informer caches will never fill.
// informers keep attempting to sync in the background, so retrying doesn't interrupt them.
// the call to resyncMonitors on the reattempt will no-op for resources that still exist.
// note that workers stay paused until we successfully resync.
if !controller.WaitForCacheSync("garbage collector", waitForStopOrTimeout(stopCh, period), gc.dependencyGraphBuilder.IsSynced) {
utilruntime.HandleError(fmt.Errorf("timed out waiting for dependency graph builder sync during GC sync (attempt %d)", attempt))
return false, nil
}
// success, break out of the loop
return true, nil
}, stopCh)
// Finally, keep track of our new state. Do this after all preceding steps
// have succeeded to ensure we'll retry on subsequent syncs if an error
// occurred.
oldResources = newResources
glog.V(2).Infof("synced garbage collector")
}, period, stopCh)
}
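// An illustrative caller (a sketch; discoveryClient, period and stopCh are
// assumed to come from the controller-manager wiring, e.g.):
//
//	go gc.Sync(clientset.Discovery(), 30*time.Second, stopCh)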
// printDiff returns a human-readable summary of what resources were added and removed
func printDiff(oldResources, newResources map[schema.GroupVersionResource]struct{}) string {
removed := sets.NewString()
for oldResource := range oldResources {
if _, ok := newResources[oldResource]; !ok {
removed.Insert(fmt.Sprintf("%+v", oldResource))
}
}
added := sets.NewString()
for newResource := range newResources {
if _, ok := oldResources[newResource]; !ok {
added.Insert(fmt.Sprintf("%+v", newResource))
}
}
return fmt.Sprintf("added: %v, removed: %v", added.List(), removed.List())
}
// waitForStopOrTimeout returns a stop channel that closes when the provided stop channel closes or when the specified timeout is reached
func waitForStopOrTimeout(stopCh <-chan struct{}, timeout time.Duration) <-chan struct{} {
stopChWithTimeout := make(chan struct{})
go func() {
select {
case <-stopCh:
case <-time.After(timeout):
}
close(stopChWithTimeout)
}()
return stopChWithTimeout
}
func (gc *GarbageCollector) IsSynced() bool {
return gc.dependencyGraphBuilder.IsSynced()
}
func (gc *GarbageCollector) runAttemptToDeleteWorker() {
for gc.attemptToDeleteWorker() {
}
}
func (gc *GarbageCollector) attemptToDeleteWorker() bool {
item, quit := gc.attemptToDelete.Get()
gc.workerLock.RLock()
defer gc.workerLock.RUnlock()
if quit {
return false
}
defer gc.attemptToDelete.Done(item)
n, ok := item.(*node)
if !ok {
utilruntime.HandleError(fmt.Errorf("expect *node, got %#v", item))
return true
}
err := gc.attemptToDeleteItem(n)
if err != nil {
if _, ok := err.(*restMappingError); ok {
// There are at least two ways this can happen:
// 1. The reference is to an object of a custom type that has not yet been
// recognized by gc.restMapper (this is a transient error).
// 2. The reference is to an invalid group/version. We don't currently
// have a way to distinguish this from a valid type we will recognize
// after the next discovery sync.
// For now, record the error and retry.
glog.V(5).Infof("error syncing item %s: %v", n, err)
} else {
utilruntime.HandleError(fmt.Errorf("error syncing item %s: %v", n, err))
}
// retry if garbage collection of an object failed.
gc.attemptToDelete.AddRateLimited(item)
} else if !n.isObserved() {
// requeue if item hasn't been observed via an informer event yet.
// otherwise a virtual node for an item added AND removed during watch reestablishment can get stuck in the graph and never removed.
// see https://issue.k8s.io/56121
glog.V(5).Infof("item %s hasn't been observed via informer yet", n.identity)
gc.attemptToDelete.AddRateLimited(item)
}
return true
}
// isDangling checks whether a reference points to an object that doesn't exist.
// If isDangling looks up the referenced object at the API server, it also
// returns its latest state.
func (gc *GarbageCollector) isDangling(reference metav1.OwnerReference, item *node) (
dangling bool, owner *unstructured.Unstructured, err error) {
if gc.absentOwnerCache.Has(reference.UID) {
glog.V(5).Infof("according to the absentOwnerCache, object %s's owner %s/%s, %s does not exist", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
return true, nil, nil
}
// TODO: we need to verify the reference resource is supported by the
// system. If it's not a valid resource, the garbage collector should i)
// ignore the reference when deciding if the object should be deleted, and
// ii) update the object to remove such references. This is to
// prevent objects having references to an old resource from being
// deleted during a cluster upgrade.
resource, namespaced, err := gc.apiResource(reference.APIVersion, reference.Kind)
if err != nil {
return false, nil, err
}
// TODO: It's only necessary to talk to the API server if the owner node
// is a "virtual" node. The local graph could lag behind the real
// status, but in practice, the difference is small.
owner, err = gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.identity.Namespace)).Get(reference.Name, metav1.GetOptions{})
switch {
case errors.IsNotFound(err):
gc.absentOwnerCache.Add(reference.UID)
glog.V(5).Infof("object %s's owner %s/%s, %s is not found", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
return true, nil, nil
case err != nil:
return false, nil, err
}
if owner.GetUID() != reference.UID {
glog.V(5).Infof("object %s's owner %s/%s, %s is not found, UID mismatch", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
gc.absentOwnerCache.Add(reference.UID)
return true, nil, nil
}
return false, owner, nil
}
// classifyReferences classifies the latestReferences into three categories:
// solid: the owner exists, and is not "waitingForDependentsDeletion"
// dangling: the owner does not exist
// waitingForDependentsDeletion: the owner exists, its deletionTimestamp is non-nil, and it has
// FinalizerDeletingDependents
// This function communicates with the server.
func (gc *GarbageCollector) classifyReferences(item *node, latestReferences []metav1.OwnerReference) (
solid, dangling, waitingForDependentsDeletion []metav1.OwnerReference, err error) {
for _, reference := range latestReferences {
isDangling, owner, err := gc.isDangling(reference, item)
if err != nil {
return nil, nil, nil, err
}
if isDangling {
dangling = append(dangling, reference)
continue
}
ownerAccessor, err := meta.Accessor(owner)
if err != nil {
return nil, nil, nil, err
}
if ownerAccessor.GetDeletionTimestamp() != nil && hasDeleteDependentsFinalizer(ownerAccessor) {
waitingForDependentsDeletion = append(waitingForDependentsDeletion, reference)
} else {
solid = append(solid, reference)
}
}
return solid, dangling, waitingForDependentsDeletion, nil
}
func ownerRefsToUIDs(refs []metav1.OwnerReference) []types.UID {
var ret []types.UID
for _, ref := range refs {
ret = append(ret, ref.UID)
}
return ret
}
func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
glog.V(2).Infof("processing item %s", item.identity)
// "being deleted" is an one-way trip to the final deletion. We'll just wait for the final deletion, and then process the object's dependents.
if item.isBeingDeleted() && !item.isDeletingDependents() {
glog.V(5).Infof("processing item %s returned at once, because its DeletionTimestamp is non-nil", item.identity)
return nil
}
// TODO: It's only necessary to talk to the API server if this is a
// "virtual" node. The local graph could lag behind the real status, but in
// practice, the difference is small.
latest, err := gc.getObject(item.identity)
switch {
case errors.IsNotFound(err):
// the GraphBuilder can add "virtual" node for an owner that doesn't
// exist yet, so we need to enqueue a virtual Delete event to remove
// the virtual node from GraphBuilder.uidToNode.
glog.V(5).Infof("item %v not found, generating a virtual delete event", item.identity)
gc.dependencyGraphBuilder.enqueueVirtualDeleteEvent(item.identity)
// since we're manually inserting a delete event to remove this node,
// we don't need to keep tracking it as a virtual node and requeueing in attemptToDelete
item.markObserved()
return nil
case err != nil:
return err
}
if latest.GetUID() != item.identity.UID {
glog.V(5).Infof("UID doesn't match, item %v not found, generating a virtual delete event", item.identity)
gc.dependencyGraphBuilder.enqueueVirtualDeleteEvent(item.identity)
// since we're manually inserting a delete event to remove this node,
// we don't need to keep tracking it as a virtual node and requeueing in attemptToDelete
item.markObserved()
return nil
}
// TODO: attemptToOrphanWorker() routine is similar. Consider merging
// attemptToOrphanWorker() into attemptToDeleteItem() as well.
if item.isDeletingDependents() {
return gc.processDeletingDependentsItem(item)
}
// compute if we should delete the item
ownerReferences := latest.GetOwnerReferences()
if len(ownerReferences) == 0 {
glog.V(2).Infof("object %s's doesn't have an owner, continue on next item", item.identity)
return nil
}
solid, dangling, waitingForDependentsDeletion, err := gc.classifyReferences(item, ownerReferences)
if err != nil {
return err
}
glog.V(5).Infof("classify references of %s.\nsolid: %#v\ndangling: %#v\nwaitingForDependentsDeletion: %#v\n", item.identity, solid, dangling, waitingForDependentsDeletion)
switch {
case len(solid) != 0:
glog.V(2).Infof("object %s has at least one existing owner: %#v, will not garbage collect", solid, item.identity)
if len(dangling) == 0 && len(waitingForDependentsDeletion) == 0 {
return nil
}
glog.V(2).Infof("remove dangling references %#v and waiting references %#v for object %s", dangling, waitingForDependentsDeletion, item.identity)
// waitingForDependentsDeletion needs to be deleted from the
// ownerReferences, otherwise the referenced objects will be stuck with
// the FinalizerDeletingDependents and never get deleted.
ownerUIDs := append(ownerRefsToUIDs(dangling), ownerRefsToUIDs(waitingForDependentsDeletion)...)
patch := deleteOwnerRefStrategicMergePatch(item.identity.UID, ownerUIDs...)
_, err = gc.patch(item, patch, func(n *node) ([]byte, error) {
return gc.deleteOwnerRefJSONMergePatch(n, ownerUIDs...)
})
return err
case len(waitingForDependentsDeletion) != 0 && item.dependentsLength() != 0:
deps := item.getDependents()
for _, dep := range deps {
if dep.isDeletingDependents() {
// this cycle detection has false positives; we need to
// apply a more rigorous detection if this turns out to be a
// problem.
// multiple workers run attemptToDeleteItem in parallel, so
// the cycle detection can fail in a race condition.
glog.V(2).Infof("processing object %s, some of its owners and its dependent [%s] have FinalizerDeletingDependents, to prevent potential cycle, its ownerReferences are going to be modified to be non-blocking, then the object is going to be deleted with Foreground", item.identity, dep.identity)
patch, err := item.unblockOwnerReferencesStrategicMergePatch()
if err != nil {
return err
}
if _, err := gc.patch(item, patch, gc.unblockOwnerReferencesJSONMergePatch); err != nil {
return err
}
break
}
}
glog.V(2).Infof("at least one owner of object %s has FinalizerDeletingDependents, and the object itself has dependents, so it is going to be deleted in Foreground", item.identity)
// the deletion event will be observed by the graphBuilder, so the item
// will be processed again in processDeletingDependentsItem. If it
// doesn't have dependents, the function will remove the
// FinalizerDeletingDependents from the item, resulting in the final
// deletion of the item.
policy := metav1.DeletePropagationForeground
return gc.deleteObject(item.identity, &policy)
default:
// item doesn't have any solid owner, so it needs to be garbage
// collected. Also, none of the item's owners is waiting for the deletion
// of the dependents, so set propagationPolicy based on existing finalizers.
var policy metav1.DeletionPropagation
switch {
case hasOrphanFinalizer(latest):
// if an existing orphan finalizer is already on the object, honor it.
policy = metav1.DeletePropagationOrphan
case hasDeleteDependentsFinalizer(latest):
// if an existing foreground finalizer is already on the object, honor it.
policy = metav1.DeletePropagationForeground
default:
// otherwise, default to background.
policy = metav1.DeletePropagationBackground
}
glog.V(2).Infof("delete object %s with propagation policy %s", item.identity, policy)
return gc.deleteObject(item.identity, &policy)
}
}
// process item that's waiting for its dependents to be deleted
func (gc *GarbageCollector) processDeletingDependentsItem(item *node) error {
blockingDependents := item.blockingDependents()
if len(blockingDependents) == 0 {
glog.V(2).Infof("remove DeleteDependents finalizer for item %s", item.identity)
return gc.removeFinalizer(item, metav1.FinalizerDeleteDependents)
}
for _, dep := range blockingDependents {
if !dep.isDeletingDependents() {
glog.V(2).Infof("adding %s to attemptToDelete, because its owner %s is deletingDependents", dep.identity, item.identity)
gc.attemptToDelete.Add(dep)
}
}
return nil
}
// dependents are copies of pointers to the owner's dependents, so they don't need to be locked.
func (gc *GarbageCollector) orphanDependents(owner objectReference, dependents []*node) error {
errCh := make(chan error, len(dependents))
wg := sync.WaitGroup{}
wg.Add(len(dependents))
for i := range dependents {
go func(dependent *node) {
defer wg.Done()
// the dependent.identity.UID is used as precondition
patch := deleteOwnerRefStrategicMergePatch(dependent.identity.UID, owner.UID)
_, err := gc.patch(dependent, patch, func(n *node) ([]byte, error) {
return gc.deleteOwnerRefJSONMergePatch(n, owner.UID)
})
// note that if the target ownerReference doesn't exist in the
// dependent, strategic merge patch will NOT return an error.
if err != nil && !errors.IsNotFound(err) {
errCh <- fmt.Errorf("orphaning %s failed, %v", dependent.identity, err)
}
}(dependents[i])
}
wg.Wait()
close(errCh)
var errorsSlice []error
for e := range errCh {
errorsSlice = append(errorsSlice, e)
}
if len(errorsSlice) != 0 {
return fmt.Errorf("failed to orphan dependents of owner %s, got errors: %s", owner, utilerrors.NewAggregate(errorsSlice).Error())
}
glog.V(5).Infof("successfully updated all dependents of owner %s", owner)
return nil
}
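// runAttemptToOrphanWorker loops attemptToOrphanWorker until the
// attemptToOrphan queue is shut down.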
func (gc *GarbageCollector) runAttemptToOrphanWorker() {
for gc.attemptToOrphanWorker() {
}
}
// attemptToOrphanWorker dequeues a node from the attemptToOrphan, then finds its
// dependents based on the graph maintained by the GC, then removes it from the
// OwnerReferences of its dependents, and finally updates the owner to remove
// the "Orphan" finalizer. The node is added back into the attemptToOrphan if any of
// these steps fail.
func (gc *GarbageCollector) attemptToOrphanWorker() bool {
item, quit := gc.attemptToOrphan.Get()
gc.workerLock.RLock()
defer gc.workerLock.RUnlock()
if quit {
return false
}
defer gc.attemptToOrphan.Done(item)
owner, ok := item.(*node)
if !ok {
utilruntime.HandleError(fmt.Errorf("expect *node, got %#v", item))
return true
}
// we don't need to lock each element, because they never get updated
owner.dependentsLock.RLock()
dependents := make([]*node, 0, len(owner.dependents))
for dependent := range owner.dependents {
dependents = append(dependents, dependent)
}
owner.dependentsLock.RUnlock()
err := gc.orphanDependents(owner.identity, dependents)
if err != nil {
utilruntime.HandleError(fmt.Errorf("orphanDependents for %s failed with %v", owner.identity, err))
gc.attemptToOrphan.AddRateLimited(item)
return true
}
// update the owner, remove "orphaningFinalizer" from its finalizers list
err = gc.removeFinalizer(owner, metav1.FinalizerOrphanDependents)
if err != nil {
utilruntime.HandleError(fmt.Errorf("removeOrphanFinalizer for %s failed with %v", owner.identity, err))
gc.attemptToOrphan.AddRateLimited(item)
}
return true
}
// *FOR TEST USE ONLY*
// GraphHasUID returns whether the GraphBuilder has any of the given UIDs
// stored in its uidToNode graph. It's useful for debugging.
// This method is used by integration tests.
func (gc *GarbageCollector) GraphHasUID(UIDs []types.UID) bool {
for _, u := range UIDs {
if _, ok := gc.dependencyGraphBuilder.uidToNode.Read(u); ok {
return true
}
}
return false
}
// GetDeletableResources returns all resources from discoveryClient that the
// garbage collector should recognize and work with. More specifically, all
// preferred resources which support the 'delete', 'list', and 'watch' verbs.
//
// All discovery errors are considered temporary. Upon encountering any error,
// GetDeletableResources will log and return any discovered resources it was
// able to process (which may be none).
func GetDeletableResources(discoveryClient discovery.ServerResourcesInterface) map[schema.GroupVersionResource]struct{} {
preferredResources, err := discoveryClient.ServerPreferredResources()
if err != nil {
if discovery.IsGroupDiscoveryFailedError(err) {
glog.Warningf("failed to discover some groups: %v", err.(*discovery.ErrGroupDiscoveryFailed).Groups)
} else {
glog.Warningf("failed to discover preferred resources: %v", err)
}
}
if preferredResources == nil {
return map[schema.GroupVersionResource]struct{}{}
}
// This is extracted from discovery.GroupVersionResources to allow tolerating
// failures on a per-resource basis.
deletableResources := discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"delete", "list", "watch"}}, preferredResources)
deletableGroupVersionResources := map[schema.GroupVersionResource]struct{}{}
for _, rl := range deletableResources {
gv, err := schema.ParseGroupVersion(rl.GroupVersion)
if err != nil {
glog.Warningf("ignoring invalid discovered resource %q: %v", rl.GroupVersion, err)
continue
}
for i := range rl.APIResources {
deletableGroupVersionResources[schema.GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: rl.APIResources[i].Name}] = struct{}{}
}
}
return deletableGroupVersionResources
}
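// Illustrative use (a sketch, not part of the original source): the GC's sync
// loop can periodically re-discover deletable resources and resync its
// monitors accordingly:
//
//	newResources := GetDeletableResources(discoveryClient)
//	if err := gc.resyncMonitors(newResources); err != nil {
//		utilruntime.HandleError(err)
//	}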

View File

@ -1,968 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package garbagecollector
import (
"fmt"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
_ "k8s.io/kubernetes/pkg/apis/core/install"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/api/legacyscheme"
)
type testRESTMapper struct {
meta.RESTMapper
}
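// Reset is a no-op; the static mappers used in these tests never change, but
// the garbage collector's sync loop expects a resettable RESTMapper.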
func (_ *testRESTMapper) Reset() {}
func TestGarbageCollectorConstruction(t *testing.T) {
config := &restclient.Config{}
tweakableRM := meta.NewDefaultRESTMapper(nil)
rm := &testRESTMapper{meta.MultiRESTMapper{tweakableRM, testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}}
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
podResource := map[schema.GroupVersionResource]struct{}{
{Version: "v1", Resource: "pods"}: {},
}
twoResources := map[schema.GroupVersionResource]struct{}{
{Version: "v1", Resource: "pods"}: {},
{Group: "tpr.io", Version: "v1", Resource: "unknown"}: {},
}
client := fake.NewSimpleClientset()
sharedInformers := informers.NewSharedInformerFactory(client, 0)
// No monitor will be constructed for the non-core resource, but the GC
// construction will not fail.
alwaysStarted := make(chan struct{})
close(alwaysStarted)
gc, err := NewGarbageCollector(dynamicClient, rm, twoResources, map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted)
if err != nil {
t.Fatal(err)
}
assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors))
// Make sure resource monitor syncing creates and stops resource monitors.
tweakableRM.Add(schema.GroupVersionKind{Group: "tpr.io", Version: "v1", Kind: "unknown"}, nil)
err = gc.resyncMonitors(twoResources)
if err != nil {
t.Errorf("Failed adding a monitor: %v", err)
}
assert.Equal(t, 2, len(gc.dependencyGraphBuilder.monitors))
err = gc.resyncMonitors(podResource)
if err != nil {
t.Errorf("Failed removing a monitor: %v", err)
}
assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors))
// Make sure the syncing mechanism also works after Run() has been called
stopCh := make(chan struct{})
defer close(stopCh)
go gc.Run(1, stopCh)
err = gc.resyncMonitors(twoResources)
if err != nil {
t.Errorf("Failed adding a monitor: %v", err)
}
assert.Equal(t, 2, len(gc.dependencyGraphBuilder.monitors))
err = gc.resyncMonitors(podResource)
if err != nil {
t.Errorf("Failed removing a monitor: %v", err)
}
assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors))
}
// fakeAction records information about requests to aid in testing.
type fakeAction struct {
method string
path string
query string
}
// String returns method=path to aid in testing
func (f *fakeAction) String() string {
return strings.Join([]string{f.method, f.path}, "=")
}
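// FakeResponse is a canned HTTP response (status code and body) served by
// fakeActionHandler for a given method+path.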
type FakeResponse struct {
statusCode int
content []byte
}
// fakeActionHandler holds a list of fakeActions received
type fakeActionHandler struct {
// statusCode and content returned by this handler for different method + path.
response map[string]FakeResponse
lock sync.Mutex
actions []fakeAction
}
// ServeHTTP logs the action that occurred and always returns the associated status code
func (f *fakeActionHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) {
func() {
f.lock.Lock()
defer f.lock.Unlock()
f.actions = append(f.actions, fakeAction{method: request.Method, path: request.URL.Path, query: request.URL.RawQuery})
fakeResponse, ok := f.response[request.Method+request.URL.Path]
if !ok {
fakeResponse.statusCode = 200
fakeResponse.content = []byte("{\"kind\": \"List\"}")
}
response.Header().Set("Content-Type", "application/json")
response.WriteHeader(fakeResponse.statusCode)
response.Write(fakeResponse.content)
}()
// This is to allow the fakeActionHandler to simulate a watch being opened
if strings.Contains(request.URL.RawQuery, "watch=true") {
hijacker, ok := response.(http.Hijacker)
if !ok {
return
}
connection, _, err := hijacker.Hijack()
if err != nil {
return
}
defer connection.Close()
time.Sleep(30 * time.Second)
}
}
// testServerAndClientConfig returns a server that listens and a config that can reference it
func testServerAndClientConfig(handler func(http.ResponseWriter, *http.Request)) (*httptest.Server, *restclient.Config) {
srv := httptest.NewServer(http.HandlerFunc(handler))
config := &restclient.Config{
Host: srv.URL,
}
return srv, config
}
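// garbageCollector bundles a GarbageCollector with the stop channel that
// drives its shared informers in these tests.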
type garbageCollector struct {
*GarbageCollector
stop chan struct{}
}
func setupGC(t *testing.T, config *restclient.Config) garbageCollector {
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
podResource := map[schema.GroupVersionResource]struct{}{{Version: "v1", Resource: "pods"}: {}}
client := fake.NewSimpleClientset()
sharedInformers := informers.NewSharedInformerFactory(client, 0)
alwaysStarted := make(chan struct{})
close(alwaysStarted)
gc, err := NewGarbageCollector(dynamicClient, &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}, podResource, ignoredResources, sharedInformers, alwaysStarted)
if err != nil {
t.Fatal(err)
}
stop := make(chan struct{})
go sharedInformers.Start(stop)
return garbageCollector{gc, stop}
}
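// getPod returns a Pod named podName in namespace ns1 with the given
// ownerReferences.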
func getPod(podName string, ownerReferences []metav1.OwnerReference) *v1.Pod {
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: "ns1",
OwnerReferences: ownerReferences,
},
}
}
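// serializeOrDie JSON-encodes object, failing the test on any error.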
func serializeOrDie(t *testing.T, object interface{}) []byte {
data, err := json.Marshal(object)
if err != nil {
t.Fatal(err)
}
return data
}
// test that the attemptToDeleteItem function makes the expected actions.
func TestAttemptToDeleteItem(t *testing.T) {
pod := getPod("ToBeDeletedPod", []metav1.OwnerReference{
{
Kind: "ReplicationController",
Name: "owner1",
UID: "123",
APIVersion: "v1",
},
})
testHandler := &fakeActionHandler{
response: map[string]FakeResponse{
"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/owner1": {
404,
[]byte{},
},
"GET" + "/api/v1/namespaces/ns1/pods/ToBeDeletedPod": {
200,
serializeOrDie(t, pod),
},
},
}
srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
defer srv.Close()
gc := setupGC(t, clientConfig)
defer close(gc.stop)
item := &node{
identity: objectReference{
OwnerReference: metav1.OwnerReference{
Kind: pod.Kind,
APIVersion: pod.APIVersion,
Name: pod.Name,
UID: pod.UID,
},
Namespace: pod.Namespace,
},
// owners are intentionally left empty. The attemptToDeleteItem routine should get the latest item from the server.
owners: nil,
}
err := gc.attemptToDeleteItem(item)
if err != nil {
t.Errorf("Unexpected Error: %v", err)
}
expectedActionSet := sets.NewString()
expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/replicationcontrollers/owner1")
expectedActionSet.Insert("DELETE=/api/v1/namespaces/ns1/pods/ToBeDeletedPod")
expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/pods/ToBeDeletedPod")
actualActionSet := sets.NewString()
for _, action := range testHandler.actions {
actualActionSet.Insert(action.String())
}
if !expectedActionSet.Equal(actualActionSet) {
t.Errorf("expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet,
actualActionSet, expectedActionSet.Difference(actualActionSet))
}
}
// verifyGraphInvariants verifies that all of a node's owners list the node as a
// dependent and vice versa. uidToNode has all the nodes in the graph.
func verifyGraphInvariants(scenario string, uidToNode map[types.UID]*node, t *testing.T) {
for myUID, node := range uidToNode {
for dependentNode := range node.dependents {
found := false
for _, owner := range dependentNode.owners {
if owner.UID == myUID {
found = true
break
}
}
if !found {
t.Errorf("scenario: %s: node %s has node %s as a dependent, but it's not present in the latter node's owners list", scenario, node.identity, dependentNode.identity)
}
}
for _, owner := range node.owners {
ownerNode, ok := uidToNode[owner.UID]
if !ok {
// It's possible that the owner node doesn't exist
continue
}
if _, ok := ownerNode.dependents[node]; !ok {
t.Errorf("node %s has node %s as an owner, but it's not present in the latter node's dependents list", node.identity, ownerNode.identity)
}
}
}
}
func createEvent(eventType eventType, selfUID string, owners []string) event {
var ownerReferences []metav1.OwnerReference
for i := 0; i < len(owners); i++ {
ownerReferences = append(ownerReferences, metav1.OwnerReference{UID: types.UID(owners[i])})
}
return event{
eventType: eventType,
obj: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: types.UID(selfUID),
OwnerReferences: ownerReferences,
},
},
}
}
func TestProcessEvent(t *testing.T) {
var testScenarios = []struct {
name string
// a series of events that will be supplied to the
// GraphBuilder.graphChanges.
events []event
}{
{
name: "test1",
events: []event{
createEvent(addEvent, "1", []string{}),
createEvent(addEvent, "2", []string{"1"}),
createEvent(addEvent, "3", []string{"1", "2"}),
},
},
{
name: "test2",
events: []event{
createEvent(addEvent, "1", []string{}),
createEvent(addEvent, "2", []string{"1"}),
createEvent(addEvent, "3", []string{"1", "2"}),
createEvent(addEvent, "4", []string{"2"}),
createEvent(deleteEvent, "2", []string{"doesn't matter"}),
},
},
{
name: "test3",
events: []event{
createEvent(addEvent, "1", []string{}),
createEvent(addEvent, "2", []string{"1"}),
createEvent(addEvent, "3", []string{"1", "2"}),
createEvent(addEvent, "4", []string{"3"}),
createEvent(updateEvent, "2", []string{"4"}),
},
},
{
name: "reverse test2",
events: []event{
createEvent(addEvent, "4", []string{"2"}),
createEvent(addEvent, "3", []string{"1", "2"}),
createEvent(addEvent, "2", []string{"1"}),
createEvent(addEvent, "1", []string{}),
createEvent(deleteEvent, "2", []string{"doesn't matter"}),
},
},
}
alwaysStarted := make(chan struct{})
close(alwaysStarted)
for _, scenario := range testScenarios {
dependencyGraphBuilder := &GraphBuilder{
informersStarted: alwaysStarted,
graphChanges: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
uidToNode: &concurrentUIDToNode{
uidToNodeLock: sync.RWMutex{},
uidToNode: make(map[types.UID]*node),
},
attemptToDelete: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
absentOwnerCache: NewUIDCache(2),
}
for i := 0; i < len(scenario.events); i++ {
dependencyGraphBuilder.graphChanges.Add(&scenario.events[i])
dependencyGraphBuilder.processGraphChanges()
verifyGraphInvariants(scenario.name, dependencyGraphBuilder.uidToNode.uidToNode, t)
}
}
}
// TestDependentsRace relies on Go's data race detector to check if there is
// a data race on the dependents field.
func TestDependentsRace(t *testing.T) {
gc := setupGC(t, &restclient.Config{})
defer close(gc.stop)
const updates = 100
owner := &node{dependents: make(map[*node]struct{})}
ownerUID := types.UID("owner")
gc.dependencyGraphBuilder.uidToNode.Write(owner)
go func() {
for i := 0; i < updates; i++ {
dependent := &node{}
gc.dependencyGraphBuilder.addDependentToOwners(dependent, []metav1.OwnerReference{{UID: ownerUID}})
gc.dependencyGraphBuilder.removeDependentFromOwners(dependent, []metav1.OwnerReference{{UID: ownerUID}})
}
}()
go func() {
gc.attemptToOrphan.Add(owner)
for i := 0; i < updates; i++ {
gc.attemptToOrphanWorker()
}
}()
}
// test that the list and watch functions correctly convert the ListOptions
func TestGCListWatcher(t *testing.T) {
testHandler := &fakeActionHandler{}
srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
defer srv.Close()
podResource := schema.GroupVersionResource{Version: "v1", Resource: "pods"}
dynamicClient, err := dynamic.NewForConfig(clientConfig)
if err != nil {
t.Fatal(err)
}
lw := listWatcher(dynamicClient, podResource)
lw.DisableChunking = true
if _, err := lw.Watch(metav1.ListOptions{ResourceVersion: "1"}); err != nil {
t.Fatal(err)
}
if _, err := lw.List(metav1.ListOptions{ResourceVersion: "1"}); err != nil {
t.Fatal(err)
}
if e, a := 2, len(testHandler.actions); e != a {
t.Errorf("expect %d requests, got %d", e, a)
}
if e, a := "resourceVersion=1&watch=true", testHandler.actions[0].query; e != a {
t.Errorf("expect %s, got %s", e, a)
}
if e, a := "resourceVersion=1", testHandler.actions[1].query; e != a {
t.Errorf("expect %s, got %s", e, a)
}
}
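// podToGCNode wraps a Pod in a graph node keyed by its object reference.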
func podToGCNode(pod *v1.Pod) *node {
return &node{
identity: objectReference{
OwnerReference: metav1.OwnerReference{
Kind: pod.Kind,
APIVersion: pod.APIVersion,
Name: pod.Name,
UID: pod.UID,
},
Namespace: pod.Namespace,
},
// owners are intentionally left empty. The attemptToDeleteItem routine should get the latest item from the server.
owners: nil,
}
}
func TestAbsentUIDCache(t *testing.T) {
rc1Pod1 := getPod("rc1Pod1", []metav1.OwnerReference{
{
Kind: "ReplicationController",
Name: "rc1",
UID: "1",
APIVersion: "v1",
},
})
rc1Pod2 := getPod("rc1Pod2", []metav1.OwnerReference{
{
Kind: "ReplicationController",
Name: "rc1",
UID: "1",
APIVersion: "v1",
},
})
rc2Pod1 := getPod("rc2Pod1", []metav1.OwnerReference{
{
Kind: "ReplicationController",
Name: "rc2",
UID: "2",
APIVersion: "v1",
},
})
rc3Pod1 := getPod("rc3Pod1", []metav1.OwnerReference{
{
Kind: "ReplicationController",
Name: "rc3",
UID: "3",
APIVersion: "v1",
},
})
testHandler := &fakeActionHandler{
response: map[string]FakeResponse{
"GET" + "/api/v1/namespaces/ns1/pods/rc1Pod1": {
200,
serializeOrDie(t, rc1Pod1),
},
"GET" + "/api/v1/namespaces/ns1/pods/rc1Pod2": {
200,
serializeOrDie(t, rc1Pod2),
},
"GET" + "/api/v1/namespaces/ns1/pods/rc2Pod1": {
200,
serializeOrDie(t, rc2Pod1),
},
"GET" + "/api/v1/namespaces/ns1/pods/rc3Pod1": {
200,
serializeOrDie(t, rc3Pod1),
},
"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/rc1": {
404,
[]byte{},
},
"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/rc2": {
404,
[]byte{},
},
"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/rc3": {
404,
[]byte{},
},
},
}
srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
defer srv.Close()
gc := setupGC(t, clientConfig)
defer close(gc.stop)
gc.absentOwnerCache = NewUIDCache(2)
gc.attemptToDeleteItem(podToGCNode(rc1Pod1))
gc.attemptToDeleteItem(podToGCNode(rc2Pod1))
// rc1 should already be in the cache, no request should be sent. rc1 should be promoted in the UIDCache
gc.attemptToDeleteItem(podToGCNode(rc1Pod2))
// after this call, rc2 should be evicted from the UIDCache
gc.attemptToDeleteItem(podToGCNode(rc3Pod1))
// check cache
if !gc.absentOwnerCache.Has(types.UID("1")) {
t.Errorf("expected rc1 to be in the cache")
}
if gc.absentOwnerCache.Has(types.UID("2")) {
t.Errorf("expected rc2 to not exist in the cache")
}
if !gc.absentOwnerCache.Has(types.UID("3")) {
t.Errorf("expected rc3 to be in the cache")
}
// check the request sent to the server
count := 0
for _, action := range testHandler.actions {
if action.String() == "GET=/api/v1/namespaces/ns1/replicationcontrollers/rc1" {
count++
}
}
if count != 1 {
t.Errorf("expected only 1 GET rc1 request, got %d", count)
}
}
func TestDeleteOwnerRefPatch(t *testing.T) {
original := v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "100",
OwnerReferences: []metav1.OwnerReference{
{UID: "1"},
{UID: "2"},
{UID: "3"},
},
},
}
originalData := serializeOrDie(t, original)
expected := v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "100",
OwnerReferences: []metav1.OwnerReference{
{UID: "1"},
},
},
}
patch := deleteOwnerRefStrategicMergePatch("100", "2", "3")
patched, err := strategicpatch.StrategicMergePatch(originalData, patch, v1.Pod{})
if err != nil {
t.Fatal(err)
}
var got v1.Pod
if err := json.Unmarshal(patched, &got); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(expected, got) {
t.Errorf("expected: %#v,\ngot: %#v", expected, got)
}
}
func TestUnblockOwnerReference(t *testing.T) {
trueVar := true
falseVar := false
original := v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "100",
OwnerReferences: []metav1.OwnerReference{
{UID: "1", BlockOwnerDeletion: &trueVar},
{UID: "2", BlockOwnerDeletion: &falseVar},
{UID: "3"},
},
},
}
originalData := serializeOrDie(t, original)
expected := v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "100",
OwnerReferences: []metav1.OwnerReference{
{UID: "1", BlockOwnerDeletion: &falseVar},
{UID: "2", BlockOwnerDeletion: &falseVar},
{UID: "3"},
},
},
}
accessor, err := meta.Accessor(&original)
if err != nil {
t.Fatal(err)
}
n := node{
owners: accessor.GetOwnerReferences(),
}
patch, err := n.unblockOwnerReferencesStrategicMergePatch()
if err != nil {
t.Fatal(err)
}
patched, err := strategicpatch.StrategicMergePatch(originalData, patch, v1.Pod{})
if err != nil {
t.Fatal(err)
}
var got v1.Pod
if err := json.Unmarshal(patched, &got); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(expected, got) {
t.Errorf("expected: %#v,\ngot: %#v", expected, got)
t.Errorf("expected: %#v,\ngot: %#v", expected.OwnerReferences, got.OwnerReferences)
for _, ref := range got.OwnerReferences {
t.Errorf("ref.UID=%s, ref.BlockOwnerDeletion=%v", ref.UID, *ref.BlockOwnerDeletion)
}
}
}
func TestOrphanDependentsFailure(t *testing.T) {
testHandler := &fakeActionHandler{
response: map[string]FakeResponse{
"PATCH" + "/api/v1/namespaces/ns1/pods/pod": {
409,
[]byte{},
},
},
}
srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
defer srv.Close()
gc := setupGC(t, clientConfig)
defer close(gc.stop)
dependents := []*node{
{
identity: objectReference{
OwnerReference: metav1.OwnerReference{
Kind: "Pod",
APIVersion: "v1",
Name: "pod",
},
Namespace: "ns1",
},
},
}
err := gc.orphanDependents(objectReference{}, dependents)
expected := `the server reported a conflict`
if err == nil || !strings.Contains(err.Error(), expected) {
if err != nil {
t.Errorf("expected error contains text %q, got %q", expected, err.Error())
} else {
t.Errorf("expected error contains text %q, got nil", expected)
}
}
}
// TestGetDeletableResources ensures GetDeletableResources always returns
// something usable regardless of discovery output.
func TestGetDeletableResources(t *testing.T) {
tests := map[string]struct {
serverResources []*metav1.APIResourceList
err error
deletableResources map[schema.GroupVersionResource]struct{}
}{
"no error": {
serverResources: []*metav1.APIResourceList{
{
// Valid GroupVersion
GroupVersion: "apps/v1",
APIResources: []metav1.APIResource{
{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
{Name: "services", Namespaced: true, Kind: "Service"},
},
},
{
// Invalid GroupVersion, should be ignored
GroupVersion: "foo//whatever",
APIResources: []metav1.APIResource{
{Name: "bars", Namespaced: true, Kind: "Bar", Verbs: metav1.Verbs{"delete", "list", "watch"}},
},
},
{
// Valid GroupVersion, missing required verbs, should be ignored
GroupVersion: "acme/v1",
APIResources: []metav1.APIResource{
{Name: "widgets", Namespaced: true, Kind: "Widget", Verbs: metav1.Verbs{"delete"}},
},
},
},
err: nil,
deletableResources: map[schema.GroupVersionResource]struct{}{
{Group: "apps", Version: "v1", Resource: "pods"}: {},
},
},
"nonspecific failure, includes usable results": {
serverResources: []*metav1.APIResourceList{
{
GroupVersion: "apps/v1",
APIResources: []metav1.APIResource{
{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
{Name: "services", Namespaced: true, Kind: "Service"},
},
},
},
err: fmt.Errorf("internal error"),
deletableResources: map[schema.GroupVersionResource]struct{}{
{Group: "apps", Version: "v1", Resource: "pods"}: {},
},
},
"partial discovery failure, includes usable results": {
serverResources: []*metav1.APIResourceList{
{
GroupVersion: "apps/v1",
APIResources: []metav1.APIResource{
{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
{Name: "services", Namespaced: true, Kind: "Service"},
},
},
},
err: &discovery.ErrGroupDiscoveryFailed{
Groups: map[schema.GroupVersion]error{
{Group: "foo", Version: "v1"}: fmt.Errorf("discovery failure"),
},
},
deletableResources: map[schema.GroupVersionResource]struct{}{
{Group: "apps", Version: "v1", Resource: "pods"}: {},
},
},
"discovery failure, no results": {
serverResources: nil,
err: fmt.Errorf("internal error"),
deletableResources: map[schema.GroupVersionResource]struct{}{},
},
}
for name, test := range tests {
t.Logf("testing %q", name)
client := &fakeServerResources{
PreferredResources: test.serverResources,
Error: test.err,
}
actual := GetDeletableResources(client)
if !reflect.DeepEqual(test.deletableResources, actual) {
t.Errorf("expected resources:\n%v\ngot:\n%v", test.deletableResources, actual)
}
}
}
// TestGarbageCollectorSync ensures that a discovery client error
// will not cause the garbage collector to block infinitely.
func TestGarbageCollectorSync(t *testing.T) {
serverResources := []*metav1.APIResourceList{
{
GroupVersion: "v1",
APIResources: []metav1.APIResource{
{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
},
},
}
unsyncableServerResources := []*metav1.APIResourceList{
{
GroupVersion: "v1",
APIResources: []metav1.APIResource{
{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
{Name: "secrets", Namespaced: true, Kind: "Secret", Verbs: metav1.Verbs{"delete", "list", "watch"}},
},
},
}
fakeDiscoveryClient := &fakeServerResources{
PreferredResources: serverResources,
Error: nil,
Lock: sync.Mutex{},
InterfaceUsedCount: 0,
}
testHandler := &fakeActionHandler{
response: map[string]FakeResponse{
"GET" + "/api/v1/pods": {
200,
[]byte("{}"),
},
"GET" + "/api/v1/secrets": {
404,
[]byte("{}"),
},
},
}
srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
defer srv.Close()
clientConfig.ContentConfig.NegotiatedSerializer = nil
client, err := kubernetes.NewForConfig(clientConfig)
if err != nil {
t.Fatal(err)
}
rm := &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}
dynamicClient, err := dynamic.NewForConfig(clientConfig)
if err != nil {
t.Fatal(err)
}
podResource := map[schema.GroupVersionResource]struct{}{
{Group: "", Version: "v1", Resource: "pods"}: {},
}
sharedInformers := informers.NewSharedInformerFactory(client, 0)
alwaysStarted := make(chan struct{})
close(alwaysStarted)
gc, err := NewGarbageCollector(dynamicClient, rm, podResource, map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted)
if err != nil {
t.Fatal(err)
}
stopCh := make(chan struct{})
defer close(stopCh)
go gc.Run(1, stopCh)
go gc.Sync(fakeDiscoveryClient, 10*time.Millisecond, stopCh)
// Wait until the sync discovers the initial resources
time.Sleep(1 * time.Second)
err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
if err != nil {
t.Fatalf("Expected garbagecollector.Sync to be running but it is blocked: %v", err)
}
// Simulate the discovery client returning an error
fakeDiscoveryClient.setPreferredResources(nil)
fakeDiscoveryClient.setError(fmt.Errorf("Error calling discoveryClient.ServerPreferredResources()"))
// Wait until sync discovers the change
time.Sleep(1 * time.Second)
// Remove the error from being returned and see if the garbage collector sync is still working
fakeDiscoveryClient.setPreferredResources(serverResources)
fakeDiscoveryClient.setError(nil)
err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
if err != nil {
t.Fatalf("Expected garbagecollector.Sync to still be running but it is blocked: %v", err)
}
// Simulate the discovery client returning a resource the restmapper can resolve, but will not sync caches
fakeDiscoveryClient.setPreferredResources(unsyncableServerResources)
fakeDiscoveryClient.setError(nil)
// Wait until sync discovers the change
time.Sleep(1 * time.Second)
// Put the resources back to normal and ensure garbage collector sync recovers
fakeDiscoveryClient.setPreferredResources(serverResources)
fakeDiscoveryClient.setError(nil)
err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
if err != nil {
t.Fatalf("Expected garbagecollector.Sync to still be running but it is blocked: %v", err)
}
}
func expectSyncNotBlocked(fakeDiscoveryClient *fakeServerResources, workerLock *sync.RWMutex) error {
before := fakeDiscoveryClient.getInterfaceUsedCount()
t := 1 * time.Second
time.Sleep(t)
after := fakeDiscoveryClient.getInterfaceUsedCount()
if before == after {
return fmt.Errorf("discoveryClient.ServerPreferredResources() called %d times over %v", after-before, t)
}
workerLockAcquired := make(chan struct{})
go func() {
workerLock.Lock()
workerLock.Unlock()
close(workerLockAcquired)
}()
select {
case <-workerLockAcquired:
return nil
case <-time.After(t):
return fmt.Errorf("workerLock blocked for at least %v", t)
}
}
type fakeServerResources struct {
PreferredResources []*metav1.APIResourceList
Error error
Lock sync.Mutex
InterfaceUsedCount int
}
func (_ *fakeServerResources) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) {
return nil, nil
}
func (_ *fakeServerResources) ServerResources() ([]*metav1.APIResourceList, error) {
return nil, nil
}
func (f *fakeServerResources) ServerPreferredResources() ([]*metav1.APIResourceList, error) {
f.Lock.Lock()
defer f.Lock.Unlock()
f.InterfaceUsedCount++
return f.PreferredResources, f.Error
}
func (f *fakeServerResources) setPreferredResources(resources []*metav1.APIResourceList) {
f.Lock.Lock()
defer f.Lock.Unlock()
f.PreferredResources = resources
}
func (f *fakeServerResources) setError(err error) {
f.Lock.Lock()
defer f.Lock.Unlock()
f.Error = err
}
func (f *fakeServerResources) getInterfaceUsedCount() int {
f.Lock.Lock()
defer f.Lock.Unlock()
return f.InterfaceUsedCount
}
func (_ *fakeServerResources) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) {
return nil, nil
}

View File

@ -1,181 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package garbagecollector
import (
"fmt"
"sync"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
type objectReference struct {
metav1.OwnerReference
// This is needed by the dynamic client
Namespace string
}
func (s objectReference) String() string {
return fmt.Sprintf("[%s/%s, namespace: %s, name: %s, uid: %s]", s.APIVersion, s.Kind, s.Namespace, s.Name, s.UID)
}
// The single-threaded GraphBuilder.processGraphChanges() is the sole writer of the
// nodes. The multi-threaded GarbageCollector.attemptToDeleteItem() reads the nodes.
// WARNING: node has different locks on different fields. setters and getters
// use the respective locks, so the return values of the getters can be
// inconsistent.
type node struct {
identity objectReference
// dependents will be read by the orphan() routine, so we need to protect it with a lock.
dependentsLock sync.RWMutex
// dependents are the nodes that have node.identity as a
// metadata.ownerReference.
dependents map[*node]struct{}
// this is set by processGraphChanges() if the object has non-nil DeletionTimestamp
// and has the FinalizerDeleteDependents.
deletingDependents bool
deletingDependentsLock sync.RWMutex
// this records if the object's deletionTimestamp is non-nil.
beingDeleted bool
beingDeletedLock sync.RWMutex
// this records if the object was constructed virtually and never observed via an informer event
virtual bool
virtualLock sync.RWMutex
// when processing an Update event, we need to compare the updated
// ownerReferences with the owners recorded in the graph.
owners []metav1.OwnerReference
}
// An object is on a one-way trip to its final deletion if it starts being
// deleted, so we only provide a function to set beingDeleted to true.
func (n *node) markBeingDeleted() {
n.beingDeletedLock.Lock()
defer n.beingDeletedLock.Unlock()
n.beingDeleted = true
}
func (n *node) isBeingDeleted() bool {
n.beingDeletedLock.RLock()
defer n.beingDeletedLock.RUnlock()
return n.beingDeleted
}
func (n *node) markObserved() {
n.virtualLock.Lock()
defer n.virtualLock.Unlock()
n.virtual = false
}
func (n *node) isObserved() bool {
n.virtualLock.RLock()
defer n.virtualLock.RUnlock()
return !n.virtual
}
func (n *node) markDeletingDependents() {
n.deletingDependentsLock.Lock()
defer n.deletingDependentsLock.Unlock()
n.deletingDependents = true
}
func (n *node) isDeletingDependents() bool {
n.deletingDependentsLock.RLock()
defer n.deletingDependentsLock.RUnlock()
return n.deletingDependents
}
func (ownerNode *node) addDependent(dependent *node) {
ownerNode.dependentsLock.Lock()
defer ownerNode.dependentsLock.Unlock()
ownerNode.dependents[dependent] = struct{}{}
}
func (ownerNode *node) deleteDependent(dependent *node) {
ownerNode.dependentsLock.Lock()
defer ownerNode.dependentsLock.Unlock()
delete(ownerNode.dependents, dependent)
}
func (ownerNode *node) dependentsLength() int {
ownerNode.dependentsLock.RLock()
defer ownerNode.dependentsLock.RUnlock()
return len(ownerNode.dependents)
}
// Note that this function does not provide any synchronization guarantees;
// items could be added to or removed from ownerNode.dependents the moment this
// function returns.
func (ownerNode *node) getDependents() []*node {
ownerNode.dependentsLock.RLock()
defer ownerNode.dependentsLock.RUnlock()
var ret []*node
for dep := range ownerNode.dependents {
ret = append(ret, dep)
}
return ret
}
// blockingDependents returns the dependents that are blocking the deletion of
// n, i.e., the dependents that have an ownerReference pointing to n with
// the BlockOwnerDeletion field of that ownerReference set to true.
// Note that this function does not provide any synchronization guarantees;
// items could be added to or removed from n.dependents the moment this
// function returns.
func (n *node) blockingDependents() []*node {
dependents := n.getDependents()
var ret []*node
for _, dep := range dependents {
for _, owner := range dep.owners {
if owner.UID == n.identity.UID && owner.BlockOwnerDeletion != nil && *owner.BlockOwnerDeletion {
ret = append(ret, dep)
}
}
}
return ret
}
// String renders node as a string using fmt. Acquires a read lock to ensure the
// reflective dump of dependents doesn't race with any concurrent writes.
func (n *node) String() string {
n.dependentsLock.RLock()
defer n.dependentsLock.RUnlock()
return fmt.Sprintf("%#v", n)
}
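// concurrentUIDToNode is a lock-guarded map from object UID to graph node.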
type concurrentUIDToNode struct {
uidToNodeLock sync.RWMutex
uidToNode map[types.UID]*node
}
func (m *concurrentUIDToNode) Write(node *node) {
m.uidToNodeLock.Lock()
defer m.uidToNodeLock.Unlock()
m.uidToNode[node.identity.UID] = node
}
func (m *concurrentUIDToNode) Read(uid types.UID) (*node, bool) {
m.uidToNodeLock.RLock()
defer m.uidToNodeLock.RUnlock()
n, ok := m.uidToNode[uid]
return n, ok
}
func (m *concurrentUIDToNode) Delete(uid types.UID) {
m.uidToNodeLock.Lock()
defer m.uidToNodeLock.Unlock()
delete(m.uidToNode, uid)
}

View File

@ -1,662 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package garbagecollector
import (
"fmt"
"reflect"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
)
type eventType int
func (e eventType) String() string {
switch e {
case addEvent:
return "add"
case updateEvent:
return "update"
case deleteEvent:
return "delete"
default:
return fmt.Sprintf("unknown(%d)", int(e))
}
}
const (
addEvent eventType = iota
updateEvent
deleteEvent
)
type event struct {
eventType eventType
obj interface{}
// the update event comes with an old object, but it's not used by the garbage collector.
oldObj interface{}
gvk schema.GroupVersionKind
}
// GraphBuilder: based on the events supplied by the informers, GraphBuilder updates
// uidToNode, a graph that caches the dependencies as we know them, and enqueues
// items into the attemptToDelete and attemptToOrphan queues.
type GraphBuilder struct {
restMapper meta.RESTMapper
// each monitor list/watches a resource, the results are funneled to the
// dependencyGraphBuilder
monitors monitors
monitorLock sync.RWMutex
// informersStarted is closed after all of the controllers have been initialized
// and are running. After that it is safe to start them here; before that it is not.
informersStarted <-chan struct{}
// stopCh drives shutdown. When a receive from it unblocks, monitors will shut down.
// This channel is also protected by monitorLock.
stopCh <-chan struct{}
// running tracks whether Run() has been called.
// it is protected by monitorLock.
running bool
dynamicClient dynamic.Interface
// monitors are the producer of the graphChanges queue; graphBuilder alters
// the in-memory graph according to the changes.
graphChanges workqueue.RateLimitingInterface
// uidToNode doesn't require a lock to protect, because only the
// single-threaded GraphBuilder.processGraphChanges() reads/writes it.
uidToNode *concurrentUIDToNode
// GraphBuilder is the producer of attemptToDelete and attemptToOrphan, GC is the consumer.
attemptToDelete workqueue.RateLimitingInterface
attemptToOrphan workqueue.RateLimitingInterface
// GraphBuilder and GC share the absentOwnerCache. Objects that are known to
// be non-existent are added to the cache.
absentOwnerCache *UIDCache
sharedInformers informers.SharedInformerFactory
ignoredResources map[schema.GroupResource]struct{}
}
// monitor runs a Controller with a local stop channel.
type monitor struct {
controller cache.Controller
store cache.Store
// stopCh stops Controller. If stopCh is nil, the monitor is considered to be
// not yet started.
stopCh chan struct{}
}
// Run is intended to be called in a goroutine. Calling it more than once is
// an error.
func (m *monitor) Run() {
m.controller.Run(m.stopCh)
}
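// monitors maps each watched resource to the monitor that list/watches it.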
type monitors map[schema.GroupVersionResource]*monitor
func listWatcher(client dynamic.Interface, resource schema.GroupVersionResource) *cache.ListWatch {
return &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
// We want to list this resource in all namespaces if it's namespace scoped, so not passing namespace is ok.
return client.Resource(resource).List(options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
// We want to list this resource in all namespaces if it's namespace scoped, so not passing namespace is ok.
return client.Resource(resource).Watch(options)
},
}
}
func (gb *GraphBuilder) controllerFor(resource schema.GroupVersionResource, kind schema.GroupVersionKind) (cache.Controller, cache.Store, error) {
handlers := cache.ResourceEventHandlerFuncs{
// add the event to the dependencyGraphBuilder's graphChanges.
AddFunc: func(obj interface{}) {
event := &event{
eventType: addEvent,
obj: obj,
gvk: kind,
}
gb.graphChanges.Add(event)
},
UpdateFunc: func(oldObj, newObj interface{}) {
// TODO: check if there are differences in the ownerRefs,
// finalizers, and DeletionTimestamp; if not, ignore the update.
event := &event{
eventType: updateEvent,
obj: newObj,
oldObj: oldObj,
gvk: kind,
}
gb.graphChanges.Add(event)
},
DeleteFunc: func(obj interface{}) {
// delta fifo may wrap the object in a cache.DeletedFinalStateUnknown, unwrap it
if deletedFinalStateUnknown, ok := obj.(cache.DeletedFinalStateUnknown); ok {
obj = deletedFinalStateUnknown.Obj
}
event := &event{
eventType: deleteEvent,
obj: obj,
gvk: kind,
}
gb.graphChanges.Add(event)
},
}
shared, err := gb.sharedInformers.ForResource(resource)
if err == nil {
glog.V(4).Infof("using a shared informer for resource %q, kind %q", resource.String(), kind.String())
// need to clone because it's from a shared cache
shared.Informer().AddEventHandlerWithResyncPeriod(handlers, ResourceResyncTime)
return shared.Informer().GetController(), shared.Informer().GetStore(), nil
} else {
glog.V(4).Infof("unable to use a shared informer for resource %q, kind %q: %v", resource.String(), kind.String(), err)
}
// TODO: consider consolidating these into one store.
glog.V(5).Infof("create storage for resource %s", resource)
store, monitor := cache.NewInformer(
listWatcher(gb.dynamicClient, resource),
nil,
ResourceResyncTime,
// don't need to clone because it's not from shared cache
handlers,
)
return monitor, store, nil
}
// syncMonitors rebuilds the monitor set according to the supplied resources,
// creating or deleting monitors as necessary. It will return any error
// encountered, but will make an attempt to create a monitor for each resource
// instead of immediately exiting on an error. It may be called before or after
// Run. Monitors are NOT started as part of the sync. To ensure all existing
// monitors are started, call startMonitors.
func (gb *GraphBuilder) syncMonitors(resources map[schema.GroupVersionResource]struct{}) error {
gb.monitorLock.Lock()
defer gb.monitorLock.Unlock()
toRemove := gb.monitors
if toRemove == nil {
toRemove = monitors{}
}
current := monitors{}
errs := []error{}
kept := 0
added := 0
for resource := range resources {
if _, ok := gb.ignoredResources[resource.GroupResource()]; ok {
continue
}
if m, ok := toRemove[resource]; ok {
current[resource] = m
delete(toRemove, resource)
kept++
continue
}
kind, err := gb.restMapper.KindFor(resource)
if err != nil {
errs = append(errs, fmt.Errorf("couldn't look up resource %q: %v", resource, err))
continue
}
c, s, err := gb.controllerFor(resource, kind)
if err != nil {
errs = append(errs, fmt.Errorf("couldn't start monitor for resource %q: %v", resource, err))
continue
}
current[resource] = &monitor{store: s, controller: c}
added++
}
gb.monitors = current
for _, monitor := range toRemove {
if monitor.stopCh != nil {
close(monitor.stopCh)
}
}
glog.V(4).Infof("synced monitors; added %d, kept %d, removed %d", added, kept, len(toRemove))
// NewAggregate returns nil if errs is 0-length
return utilerrors.NewAggregate(errs)
}
// startMonitors ensures the current set of monitors are running. Any newly
// started monitors will also cause shared informers to be started.
//
// If called before Run, startMonitors does nothing (as there is no stop channel
// to support monitor/informer execution).
func (gb *GraphBuilder) startMonitors() {
gb.monitorLock.Lock()
defer gb.monitorLock.Unlock()
if !gb.running {
return
}
// we're waiting until after the informer start that happens once all the controllers are initialized. This ensures
// that they don't get unexpected events on their work queues.
<-gb.informersStarted
monitors := gb.monitors
started := 0
for _, monitor := range monitors {
if monitor.stopCh == nil {
monitor.stopCh = make(chan struct{})
gb.sharedInformers.Start(gb.stopCh)
go monitor.Run()
started++
}
}
glog.V(4).Infof("started %d new monitors, %d currently running", started, len(monitors))
}
// IsSynced returns true if any monitors exist AND all those monitors'
// controllers HasSynced functions return true. This means IsSynced could return
// true at one time, and then later return false if all monitors were
// reconstructed.
func (gb *GraphBuilder) IsSynced() bool {
gb.monitorLock.Lock()
defer gb.monitorLock.Unlock()
if len(gb.monitors) == 0 {
glog.V(4).Info("garbage controller monitor not synced: no monitors")
return false
}
for resource, monitor := range gb.monitors {
if !monitor.controller.HasSynced() {
glog.V(4).Infof("garbage controller monitor not yet synced: %+v", resource)
return false
}
}
return true
}
// Run sets the stop channel and starts monitor execution until stopCh is
// closed. Any running monitors will be stopped before Run returns.
func (gb *GraphBuilder) Run(stopCh <-chan struct{}) {
glog.Infof("GraphBuilder running")
defer glog.Infof("GraphBuilder stopping")
// Set up the stop channel.
gb.monitorLock.Lock()
gb.stopCh = stopCh
gb.running = true
gb.monitorLock.Unlock()
// Start monitors and begin change processing until the stop channel is
// closed.
gb.startMonitors()
wait.Until(gb.runProcessGraphChanges, 1*time.Second, stopCh)
// Stop any running monitors.
gb.monitorLock.Lock()
defer gb.monitorLock.Unlock()
monitors := gb.monitors
stopped := 0
for _, monitor := range monitors {
if monitor.stopCh != nil {
stopped++
close(monitor.stopCh)
}
}
// reset monitors so that the graph builder can be safely re-run/synced.
gb.monitors = nil
glog.Infof("stopped %d of %d monitors", stopped, len(monitors))
}
var ignoredResources = map[schema.GroupResource]struct{}{
{Group: "extensions", Resource: "replicationcontrollers"}: {},
{Group: "", Resource: "bindings"}: {},
{Group: "", Resource: "componentstatuses"}: {},
{Group: "", Resource: "events"}: {},
{Group: "authentication.k8s.io", Resource: "tokenreviews"}: {},
{Group: "authorization.k8s.io", Resource: "subjectaccessreviews"}: {},
{Group: "authorization.k8s.io", Resource: "selfsubjectaccessreviews"}: {},
{Group: "authorization.k8s.io", Resource: "localsubjectaccessreviews"}: {},
{Group: "authorization.k8s.io", Resource: "selfsubjectrulesreviews"}: {},
}
// DefaultIgnoredResources returns the default set of resources that the garbage collector controller
// should ignore. This is exposed so downstream integrators can have access to the defaults, and add
// to them as necessary when constructing the controller.
func DefaultIgnoredResources() map[schema.GroupResource]struct{} {
return ignoredResources
}
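// Illustrative use (a sketch, not part of the original source): a downstream
// integrator extending the defaults before constructing the controller; the
// "example.io/widgets" resource is hypothetical. Note that the returned map
// is the package-level map, so additions mutate the shared default:
//
//	ignored := DefaultIgnoredResources()
//	ignored[schema.GroupResource{Group: "example.io", Resource: "widgets"}] = struct{}{}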
// enqueueVirtualDeleteEvent is used to add a virtual delete event to be processed for virtual nodes
// once it is determined they do not have backing objects in storage
func (gb *GraphBuilder) enqueueVirtualDeleteEvent(ref objectReference) {
gb.graphChanges.Add(&event{
eventType: deleteEvent,
obj: &metaonly.MetadataOnlyObject{
TypeMeta: metav1.TypeMeta{APIVersion: ref.APIVersion, Kind: ref.Kind},
ObjectMeta: metav1.ObjectMeta{Namespace: ref.Namespace, UID: ref.UID, Name: ref.Name},
},
})
}
// addDependentToOwners adds n to owners' dependents list. If the owner does not
// exist in the gb.uidToNode yet, a "virtual" node will be created to represent
// the owner. The "virtual" node will be enqueued to the attemptToDelete, so that
// attemptToDeleteItem() will verify if the owner exists according to the API server.
func (gb *GraphBuilder) addDependentToOwners(n *node, owners []metav1.OwnerReference) {
for _, owner := range owners {
ownerNode, ok := gb.uidToNode.Read(owner.UID)
if !ok {
// Create a "virtual" node in the graph for the owner if it doesn't
// exist in the graph yet.
ownerNode = &node{
identity: objectReference{
OwnerReference: owner,
Namespace: n.identity.Namespace,
},
dependents: make(map[*node]struct{}),
virtual: true,
}
glog.V(5).Infof("add virtual node.identity: %s\n\n", ownerNode.identity)
gb.uidToNode.Write(ownerNode)
}
ownerNode.addDependent(n)
if !ok {
// Enqueue the virtual node into attemptToDelete.
// The garbage processor will enqueue a virtual delete
// event to delete it from the graph if the API server confirms this
// owner doesn't exist.
gb.attemptToDelete.Add(ownerNode)
}
}
}
// insertNode inserts the node into gb.uidToNode; then it finds all owners as listed
// in n.owners, and adds the node to their dependents list.
func (gb *GraphBuilder) insertNode(n *node) {
gb.uidToNode.Write(n)
gb.addDependentToOwners(n, n.owners)
}
// removeDependentFromOwners removes n from the owners' dependents lists.
func (gb *GraphBuilder) removeDependentFromOwners(n *node, owners []metav1.OwnerReference) {
for _, owner := range owners {
ownerNode, ok := gb.uidToNode.Read(owner.UID)
if !ok {
continue
}
ownerNode.deleteDependent(n)
}
}
// removeNode removes the node from gb.uidToNode, then finds all
// owners as listed in n.owners, and removes n from their dependents lists.
func (gb *GraphBuilder) removeNode(n *node) {
gb.uidToNode.Delete(n.identity.UID)
gb.removeDependentFromOwners(n, n.owners)
}
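// ownerRefPair holds the old and new versions of an ownerReference whose
// fields changed between two observed states of an object.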
type ownerRefPair struct {
oldRef metav1.OwnerReference
newRef metav1.OwnerReference
}
// TODO: profile this function to see if a naive N^2 algorithm performs better
// when the number of references is small.
func referencesDiffs(old []metav1.OwnerReference, new []metav1.OwnerReference) (added []metav1.OwnerReference, removed []metav1.OwnerReference, changed []ownerRefPair) {
oldUIDToRef := make(map[string]metav1.OwnerReference)
for _, value := range old {
oldUIDToRef[string(value.UID)] = value
}
oldUIDSet := sets.StringKeySet(oldUIDToRef)
newUIDToRef := make(map[string]metav1.OwnerReference)
for _, value := range new {
newUIDToRef[string(value.UID)] = value
}
newUIDSet := sets.StringKeySet(newUIDToRef)
addedUID := newUIDSet.Difference(oldUIDSet)
removedUID := oldUIDSet.Difference(newUIDSet)
intersection := oldUIDSet.Intersection(newUIDSet)
for uid := range addedUID {
added = append(added, newUIDToRef[uid])
}
for uid := range removedUID {
removed = append(removed, oldUIDToRef[uid])
}
for uid := range intersection {
if !reflect.DeepEqual(oldUIDToRef[uid], newUIDToRef[uid]) {
changed = append(changed, ownerRefPair{oldRef: oldUIDToRef[uid], newRef: newUIDToRef[uid]})
}
}
return added, removed, changed
}
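// For example (a sketch): with old = [{UID: "1"}, {UID: "2", BlockOwnerDeletion: &t}]
// and new = [{UID: "2", BlockOwnerDeletion: &f}, {UID: "3"}], referencesDiffs
// returns added = [{UID: "3"}], removed = [{UID: "1"}], and changed containing
// the old/new pair for UID "2".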
// deletionStarts returns whether the object in the event has just transitioned to "being deleted".
func deletionStarts(oldObj interface{}, newAccessor metav1.Object) bool {
// The delta_fifo may combine the creation and update of the object into one
// event, so if there is no oldObj, we just return whether the newObj (via
// newAccessor) is being deleted.
if oldObj == nil {
return newAccessor.GetDeletionTimestamp() != nil
}
oldAccessor, err := meta.Accessor(oldObj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("cannot access oldObj: %v", err))
return false
}
return beingDeleted(newAccessor) && !beingDeleted(oldAccessor)
}
func beingDeleted(accessor metav1.Object) bool {
return accessor.GetDeletionTimestamp() != nil
}
func hasDeleteDependentsFinalizer(accessor metav1.Object) bool {
finalizers := accessor.GetFinalizers()
for _, finalizer := range finalizers {
if finalizer == metav1.FinalizerDeleteDependents {
return true
}
}
return false
}
func hasOrphanFinalizer(accessor metav1.Object) bool {
finalizers := accessor.GetFinalizers()
for _, finalizer := range finalizers {
if finalizer == metav1.FinalizerOrphanDependents {
return true
}
}
return false
}
// this function takes newAccessor directly because the caller already
// instantiates an accessor for the newObj.
func startsWaitingForDependentsDeleted(oldObj interface{}, newAccessor metav1.Object) bool {
return deletionStarts(oldObj, newAccessor) && hasDeleteDependentsFinalizer(newAccessor)
}
// this function takes newAccessor directly because the caller already
// instantiates an accessor for the newObj.
func startsWaitingForDependentsOrphaned(oldObj interface{}, newAccessor metav1.Object) bool {
return deletionStarts(oldObj, newAccessor) && hasOrphanFinalizer(newAccessor)
}
// if a blocking ownerReference pointing to an object is removed, or its
// BlockOwnerDeletion field is set to false, add the referenced object to the
// attemptToDelete queue.
func (gb *GraphBuilder) addUnblockedOwnersToDeleteQueue(removed []metav1.OwnerReference, changed []ownerRefPair) {
for _, ref := range removed {
if ref.BlockOwnerDeletion != nil && *ref.BlockOwnerDeletion {
node, found := gb.uidToNode.Read(ref.UID)
if !found {
glog.V(5).Infof("cannot find %s in uidToNode", ref.UID)
continue
}
gb.attemptToDelete.Add(node)
}
}
for _, c := range changed {
wasBlocked := c.oldRef.BlockOwnerDeletion != nil && *c.oldRef.BlockOwnerDeletion
		isUnblocked := c.newRef.BlockOwnerDeletion == nil || !*c.newRef.BlockOwnerDeletion
if wasBlocked && isUnblocked {
node, found := gb.uidToNode.Read(c.newRef.UID)
if !found {
glog.V(5).Infof("cannot find %s in uidToNode", c.newRef.UID)
continue
}
gb.attemptToDelete.Add(node)
}
}
}
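// Hypothetical example function, an illustrative sketch rather than part of the
// original file: the blocked -> unblocked transition that queues an owner above.
// Only a pointer to true blocks; a nil pointer and a pointer to false both count
// as unblocked.
func exampleUnblockedTransition() {
	blocked, unblocked := true, false
	oldRef := metav1.OwnerReference{UID: "uid-x", BlockOwnerDeletion: &blocked}
	for _, newRef := range []metav1.OwnerReference{
		{UID: "uid-x"},                                 // nil pointer -> unblocked
		{UID: "uid-x", BlockOwnerDeletion: &unblocked}, // explicit false -> unblocked
	} {
		wasBlocked := oldRef.BlockOwnerDeletion != nil && *oldRef.BlockOwnerDeletion
		isUnblocked := newRef.BlockOwnerDeletion == nil || !*newRef.BlockOwnerDeletion
		fmt.Println(wasBlocked && isUnblocked) // true for both entries
	}
}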
func (gb *GraphBuilder) processTransitions(oldObj interface{}, newAccessor metav1.Object, n *node) {
if startsWaitingForDependentsOrphaned(oldObj, newAccessor) {
glog.V(5).Infof("add %s to the attemptToOrphan", n.identity)
gb.attemptToOrphan.Add(n)
return
}
if startsWaitingForDependentsDeleted(oldObj, newAccessor) {
glog.V(2).Infof("add %s to the attemptToDelete, because it's waiting for its dependents to be deleted", n.identity)
		// if n was added as a "virtual" node, its deletingDependents field is not properly set, so always set it here.
n.markDeletingDependents()
for dep := range n.dependents {
gb.attemptToDelete.Add(dep)
}
gb.attemptToDelete.Add(n)
}
}
func (gb *GraphBuilder) runProcessGraphChanges() {
for gb.processGraphChanges() {
}
}
// processGraphChanges dequeues an event from graphChanges, updates the graph,
// and populates the attemptToDelete and attemptToOrphan queues.
func (gb *GraphBuilder) processGraphChanges() bool {
item, quit := gb.graphChanges.Get()
if quit {
return false
}
defer gb.graphChanges.Done(item)
event, ok := item.(*event)
if !ok {
utilruntime.HandleError(fmt.Errorf("expect a *event, got %v", item))
return true
}
obj := event.obj
accessor, err := meta.Accessor(obj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("cannot access obj: %v", err))
return true
}
glog.V(5).Infof("GraphBuilder process object: %s/%s, namespace %s, name %s, uid %s, event type %v", event.gvk.GroupVersion().String(), event.gvk.Kind, accessor.GetNamespace(), accessor.GetName(), string(accessor.GetUID()), event.eventType)
	// Check if the node already exists
existingNode, found := gb.uidToNode.Read(accessor.GetUID())
if found {
// this marks the node as having been observed via an informer event
// 1. this depends on graphChanges only containing add/update events from the actual informer
// 2. this allows things tracking virtual nodes' existence to stop polling and rely on informer events
existingNode.markObserved()
}
switch {
case (event.eventType == addEvent || event.eventType == updateEvent) && !found:
newNode := &node{
identity: objectReference{
OwnerReference: metav1.OwnerReference{
APIVersion: event.gvk.GroupVersion().String(),
Kind: event.gvk.Kind,
UID: accessor.GetUID(),
Name: accessor.GetName(),
},
Namespace: accessor.GetNamespace(),
},
dependents: make(map[*node]struct{}),
owners: accessor.GetOwnerReferences(),
deletingDependents: beingDeleted(accessor) && hasDeleteDependentsFinalizer(accessor),
beingDeleted: beingDeleted(accessor),
}
gb.insertNode(newNode)
		// the underlying delta_fifo may compress a creation and the start of a
		// deletion into one event, so further process the event to catch
		// deletion transitions.
gb.processTransitions(event.oldObj, accessor, newNode)
case (event.eventType == addEvent || event.eventType == updateEvent) && found:
// handle changes in ownerReferences
added, removed, changed := referencesDiffs(existingNode.owners, accessor.GetOwnerReferences())
if len(added) != 0 || len(removed) != 0 || len(changed) != 0 {
// check if the changed dependency graph unblock owners that are
// waiting for the deletion of their dependents.
gb.addUnblockedOwnersToDeleteQueue(removed, changed)
// update the node itself
existingNode.owners = accessor.GetOwnerReferences()
// Add the node to its new owners' dependent lists.
gb.addDependentToOwners(existingNode, added)
			// remove the node from the dependents list of owners that are no
			// longer in the node's owners list.
gb.removeDependentFromOwners(existingNode, removed)
}
if beingDeleted(accessor) {
existingNode.markBeingDeleted()
}
gb.processTransitions(event.oldObj, accessor, existingNode)
case event.eventType == deleteEvent:
if !found {
glog.V(5).Infof("%v doesn't exist in the graph, this shouldn't happen", accessor.GetUID())
return true
}
// removeNode updates the graph
gb.removeNode(existingNode)
existingNode.dependentsLock.RLock()
defer existingNode.dependentsLock.RUnlock()
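		// the deleted object still had dependents, so cache its UID as a
		// confirmed-absent owner; workers processing those dependents can then
		// treat references to it as dangling without re-querying the API server.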
if len(existingNode.dependents) > 0 {
gb.absentOwnerCache.Add(accessor.GetUID())
}
for dep := range existingNode.dependents {
gb.attemptToDelete.Add(dep)
}
for _, owner := range existingNode.owners {
ownerNode, found := gb.uidToNode.Read(owner.UID)
if !found || !ownerNode.isDeletingDependents() {
continue
}
			// this lets attemptToDeleteItem check whether all of the owner's
			// dependents are deleted; if so, the owner can be deleted.
gb.attemptToDelete.Add(ownerNode)
}
}
return true
}
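// Hypothetical example function, an illustrative sketch rather than part of the
// original file: runProcessGraphChanges returns only once graphChanges shuts
// down, so a caller would typically run it on its own goroutine and stop it by
// shutting the queue down (the package's real Run wiring lives elsewhere).
func exampleDriveGraphChanges(gb *GraphBuilder, stopCh <-chan struct{}) {
	go gb.runProcessGraphChanges()
	<-stopCh
	gb.graphChanges.ShutDown()
}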

View File

@ -1,32 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"types.go",
"zz_generated.deepcopy.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly",
deps = [
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

Some files were not shown because too many files have changed in this diff.