Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 18:43:34 +00:00

Commit: vendor files
vendor/k8s.io/kubernetes/pkg/controller/disruption/BUILD (generated, vendored, new file, +79)
@@ -0,0 +1,79 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = ["disruption.go"],
    importpath = "k8s.io/kubernetes/pkg/controller/disruption",
    deps = [
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/controller:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/apps/v1beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/informers/apps/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/informers/policy/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/listers/apps/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/listers/policy/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["disruption_test.go"],
    importpath = "k8s.io/kubernetes/pkg/controller/disruption",
    library = ":go_default_library",
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/apis/core/install:go_default_library",
        "//pkg/controller:go_default_library",
        "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
        "//vendor/k8s.io/api/apps/v1beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
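
The importpath declared above is what consumers compile against. Below is a minimal, hypothetical Go sketch (the package and variable names are invented for illustration only) that does nothing more than import the vendored package and reference one of its exported symbols, assuming this vendored tree is on the build path:

// Package pdbwiring is a hypothetical consumer used only to illustrate the
// import path declared in the BUILD rule above.
package pdbwiring

import (
    "k8s.io/kubernetes/pkg/controller/disruption"
)

// DeletionTimeout re-exports the controller's exported deletion timeout so the
// import is exercised; disruption.DeletionTimeout is defined in disruption.go below.
var DeletionTimeout = disruption.DeletionTimeout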
vendor/k8s.io/kubernetes/pkg/controller/disruption/OWNERS (generated, vendored, executable file, +2)
@@ -0,0 +1,2 @@
reviewers:
- sig-apps-reviewers
vendor/k8s.io/kubernetes/pkg/controller/disruption/disruption.go (generated, vendored, new file, +743)
@@ -0,0 +1,743 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package disruption

import (
    "fmt"
    "reflect"
    "time"

    apps "k8s.io/api/apps/v1beta1"
    "k8s.io/api/core/v1"
    "k8s.io/api/extensions/v1beta1"
    policy "k8s.io/api/policy/v1beta1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/intstr"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/wait"
    appsinformers "k8s.io/client-go/informers/apps/v1beta1"
    coreinformers "k8s.io/client-go/informers/core/v1"
    extensionsinformers "k8s.io/client-go/informers/extensions/v1beta1"
    policyinformers "k8s.io/client-go/informers/policy/v1beta1"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    v1core "k8s.io/client-go/kubernetes/typed/core/v1"
    policyclientset "k8s.io/client-go/kubernetes/typed/policy/v1beta1"
    appslisters "k8s.io/client-go/listers/apps/v1beta1"
    corelisters "k8s.io/client-go/listers/core/v1"
    extensionslisters "k8s.io/client-go/listers/extensions/v1beta1"
    policylisters "k8s.io/client-go/listers/policy/v1beta1"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/record"
    "k8s.io/client-go/util/workqueue"
    podutil "k8s.io/kubernetes/pkg/api/v1/pod"
    "k8s.io/kubernetes/pkg/controller"

    "github.com/golang/glog"
)

const statusUpdateRetries = 2

// DeletionTimeout sets the maximum time from the moment a pod is added to DisruptedPods in PDB.Status
// to the time when the pod is expected to be seen by the PDB controller as having been marked for deletion.
// If the pod was not marked for deletion during that time, it is assumed that it won't be deleted at
// all and the corresponding entry can be removed from pdb.Status.DisruptedPods. It is assumed that
// pod/pdb apiserver-to-controller latency is relatively small (like 1-2 sec), so the value below should
// be more than enough.
// If the controller is running on a different node, it is important that the two nodes have synced
// clocks (via NTP, for example). Otherwise the PodDisruptionBudget controller may not provide enough
// protection against unwanted pod disruptions.
const DeletionTimeout = 2 * 60 * time.Second

type updater func(*policy.PodDisruptionBudget) error

type DisruptionController struct {
    kubeClient clientset.Interface

    pdbLister       policylisters.PodDisruptionBudgetLister
    pdbListerSynced cache.InformerSynced

    podLister       corelisters.PodLister
    podListerSynced cache.InformerSynced

    rcLister       corelisters.ReplicationControllerLister
    rcListerSynced cache.InformerSynced

    rsLister       extensionslisters.ReplicaSetLister
    rsListerSynced cache.InformerSynced

    dLister       extensionslisters.DeploymentLister
    dListerSynced cache.InformerSynced

    ssLister       appslisters.StatefulSetLister
    ssListerSynced cache.InformerSynced

    // PodDisruptionBudget keys that need to be synced.
    queue        workqueue.RateLimitingInterface
    recheckQueue workqueue.DelayingInterface

    broadcaster record.EventBroadcaster
    recorder    record.EventRecorder

    getUpdater func() updater
}

// controllerAndScale is used to return (controller, scale) pairs from the
// controller finder functions.
type controllerAndScale struct {
    types.UID
    scale int32
}

// podControllerFinder is a function type that maps a pod to a list of
// controllers and their scale.
type podControllerFinder func(*v1.Pod) (*controllerAndScale, error)

func NewDisruptionController(
    podInformer coreinformers.PodInformer,
    pdbInformer policyinformers.PodDisruptionBudgetInformer,
    rcInformer coreinformers.ReplicationControllerInformer,
    rsInformer extensionsinformers.ReplicaSetInformer,
    dInformer extensionsinformers.DeploymentInformer,
    ssInformer appsinformers.StatefulSetInformer,
    kubeClient clientset.Interface,
) *DisruptionController {
    dc := &DisruptionController{
        kubeClient:   kubeClient,
        queue:        workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "disruption"),
        recheckQueue: workqueue.NewNamedDelayingQueue("disruption-recheck"),
        broadcaster:  record.NewBroadcaster(),
    }
    dc.recorder = dc.broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "controllermanager"})

    dc.getUpdater = func() updater { return dc.writePdbStatus }

    podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc:    dc.addPod,
        UpdateFunc: dc.updatePod,
        DeleteFunc: dc.deletePod,
    })
    dc.podLister = podInformer.Lister()
    dc.podListerSynced = podInformer.Informer().HasSynced

    pdbInformer.Informer().AddEventHandlerWithResyncPeriod(
        cache.ResourceEventHandlerFuncs{
            AddFunc:    dc.addDb,
            UpdateFunc: dc.updateDb,
            DeleteFunc: dc.removeDb,
        },
        30*time.Second,
    )
    dc.pdbLister = pdbInformer.Lister()
    dc.pdbListerSynced = pdbInformer.Informer().HasSynced

    dc.rcLister = rcInformer.Lister()
    dc.rcListerSynced = rcInformer.Informer().HasSynced

    dc.rsLister = rsInformer.Lister()
    dc.rsListerSynced = rsInformer.Informer().HasSynced

    dc.dLister = dInformer.Lister()
    dc.dListerSynced = dInformer.Informer().HasSynced

    dc.ssLister = ssInformer.Lister()
    dc.ssListerSynced = ssInformer.Informer().HasSynced

    return dc
}

// TODO(mml): When controllerRef is implemented (#2210), we *could* simply
// return controllers without their scales, and access scale type-generically
// via the scale subresource. That may not be as much of a win as it sounds,
// however. We are accessing everything through the pkg/client/cache API that
// we have to set up and tune to the types we know we'll be accessing anyway,
// and we may well need further tweaks just to be able to access scale
// subresources.
func (dc *DisruptionController) finders() []podControllerFinder {
    return []podControllerFinder{dc.getPodReplicationController, dc.getPodDeployment, dc.getPodReplicaSet,
        dc.getPodStatefulSet}
}

var (
    controllerKindRS  = v1beta1.SchemeGroupVersion.WithKind("ReplicaSet")
    controllerKindSS  = apps.SchemeGroupVersion.WithKind("StatefulSet")
    controllerKindRC  = v1.SchemeGroupVersion.WithKind("ReplicationController")
    controllerKindDep = v1beta1.SchemeGroupVersion.WithKind("Deployment")
)

// getPodReplicaSet finds a replicaset which has no matching deployments.
func (dc *DisruptionController) getPodReplicaSet(pod *v1.Pod) (*controllerAndScale, error) {
    controllerRef := metav1.GetControllerOf(pod)
    if controllerRef == nil {
        return nil, nil
    }
    if controllerRef.Kind != controllerKindRS.Kind {
        return nil, nil
    }
    rs, err := dc.rsLister.ReplicaSets(pod.Namespace).Get(controllerRef.Name)
    if err != nil {
        // The only possible error is NotFound, which is ok here.
        return nil, nil
    }
    if rs.UID != controllerRef.UID {
        return nil, nil
    }
    controllerRef = metav1.GetControllerOf(rs)
    if controllerRef != nil && controllerRef.Kind == controllerKindDep.Kind {
        // Skip RS if it's controlled by a Deployment.
        return nil, nil
    }
    return &controllerAndScale{rs.UID, *(rs.Spec.Replicas)}, nil
}

// getPodStatefulSet returns the statefulset managing the given pod.
func (dc *DisruptionController) getPodStatefulSet(pod *v1.Pod) (*controllerAndScale, error) {
    controllerRef := metav1.GetControllerOf(pod)
    if controllerRef == nil {
        return nil, nil
    }
    if controllerRef.Kind != controllerKindSS.Kind {
        return nil, nil
    }
    ss, err := dc.ssLister.StatefulSets(pod.Namespace).Get(controllerRef.Name)
    if err != nil {
        // The only possible error is NotFound, which is ok here.
        return nil, nil
    }
    if ss.UID != controllerRef.UID {
        return nil, nil
    }

    return &controllerAndScale{ss.UID, *(ss.Spec.Replicas)}, nil
}

// getPodDeployment finds the deployment for any replicaset which is being managed by a deployment.
func (dc *DisruptionController) getPodDeployment(pod *v1.Pod) (*controllerAndScale, error) {
    controllerRef := metav1.GetControllerOf(pod)
    if controllerRef == nil {
        return nil, nil
    }
    if controllerRef.Kind != controllerKindRS.Kind {
        return nil, nil
    }
    rs, err := dc.rsLister.ReplicaSets(pod.Namespace).Get(controllerRef.Name)
    if err != nil {
        // The only possible error is NotFound, which is ok here.
        return nil, nil
    }
    if rs.UID != controllerRef.UID {
        return nil, nil
    }
    controllerRef = metav1.GetControllerOf(rs)
    if controllerRef == nil {
        return nil, nil
    }
    if controllerRef.Kind != controllerKindDep.Kind {
        return nil, nil
    }
    deployment, err := dc.dLister.Deployments(rs.Namespace).Get(controllerRef.Name)
    if err != nil {
        // The only possible error is NotFound, which is ok here.
        return nil, nil
    }
    if deployment.UID != controllerRef.UID {
        return nil, nil
    }
    return &controllerAndScale{deployment.UID, *(deployment.Spec.Replicas)}, nil
}

func (dc *DisruptionController) getPodReplicationController(pod *v1.Pod) (*controllerAndScale, error) {
    controllerRef := metav1.GetControllerOf(pod)
    if controllerRef == nil {
        return nil, nil
    }
    if controllerRef.Kind != controllerKindRC.Kind {
        return nil, nil
    }
    rc, err := dc.rcLister.ReplicationControllers(pod.Namespace).Get(controllerRef.Name)
    if err != nil {
        // The only possible error is NotFound, which is ok here.
        return nil, nil
    }
    if rc.UID != controllerRef.UID {
        return nil, nil
    }
    return &controllerAndScale{rc.UID, *(rc.Spec.Replicas)}, nil
}

func (dc *DisruptionController) Run(stopCh <-chan struct{}) {
    defer utilruntime.HandleCrash()
    defer dc.queue.ShutDown()

    glog.Infof("Starting disruption controller")
    defer glog.Infof("Shutting down disruption controller")

    if !controller.WaitForCacheSync("disruption", stopCh, dc.podListerSynced, dc.pdbListerSynced, dc.rcListerSynced, dc.rsListerSynced, dc.dListerSynced, dc.ssListerSynced) {
        return
    }

    if dc.kubeClient != nil {
        glog.Infof("Sending events to api server.")
        dc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(dc.kubeClient.CoreV1().RESTClient()).Events("")})
    } else {
        glog.Infof("No api server defined - no events will be sent to API server.")
    }
    go wait.Until(dc.worker, time.Second, stopCh)
    go wait.Until(dc.recheckWorker, time.Second, stopCh)

    <-stopCh
}

func (dc *DisruptionController) addDb(obj interface{}) {
    pdb := obj.(*policy.PodDisruptionBudget)
    glog.V(4).Infof("add DB %q", pdb.Name)
    dc.enqueuePdb(pdb)
}

func (dc *DisruptionController) updateDb(old, cur interface{}) {
    // TODO(mml) ignore updates where 'old' is equivalent to 'cur'.
    pdb := cur.(*policy.PodDisruptionBudget)
    glog.V(4).Infof("update DB %q", pdb.Name)
    dc.enqueuePdb(pdb)
}

func (dc *DisruptionController) removeDb(obj interface{}) {
    pdb := obj.(*policy.PodDisruptionBudget)
    glog.V(4).Infof("remove DB %q", pdb.Name)
    dc.enqueuePdb(pdb)
}

func (dc *DisruptionController) addPod(obj interface{}) {
    pod := obj.(*v1.Pod)
    glog.V(4).Infof("addPod called on pod %q", pod.Name)
    pdb := dc.getPdbForPod(pod)
    if pdb == nil {
        glog.V(4).Infof("No matching pdb for pod %q", pod.Name)
        return
    }
    glog.V(4).Infof("addPod %q -> PDB %q", pod.Name, pdb.Name)
    dc.enqueuePdb(pdb)
}

func (dc *DisruptionController) updatePod(old, cur interface{}) {
    pod := cur.(*v1.Pod)
    glog.V(4).Infof("updatePod called on pod %q", pod.Name)
    pdb := dc.getPdbForPod(pod)
    if pdb == nil {
        glog.V(4).Infof("No matching pdb for pod %q", pod.Name)
        return
    }
    glog.V(4).Infof("updatePod %q -> PDB %q", pod.Name, pdb.Name)
    dc.enqueuePdb(pdb)
}

func (dc *DisruptionController) deletePod(obj interface{}) {
    pod, ok := obj.(*v1.Pod)
    // When a delete is dropped, the relist will notice a pod in the store not
    // in the list, leading to the insertion of a tombstone object which contains
    // the deleted key/value. Note that this value might be stale. If the pod
    // changed labels, the new ReplicaSet will not be woken up until the periodic
    // resync.
    if !ok {
        tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
        if !ok {
            glog.Errorf("Couldn't get object from tombstone %+v", obj)
            return
        }
        pod, ok = tombstone.Obj.(*v1.Pod)
        if !ok {
            glog.Errorf("Tombstone contained object that is not a pod %+v", obj)
            return
        }
    }
    glog.V(4).Infof("deletePod called on pod %q", pod.Name)
    pdb := dc.getPdbForPod(pod)
    if pdb == nil {
        glog.V(4).Infof("No matching pdb for pod %q", pod.Name)
        return
    }
    glog.V(4).Infof("deletePod %q -> PDB %q", pod.Name, pdb.Name)
    dc.enqueuePdb(pdb)
}

func (dc *DisruptionController) enqueuePdb(pdb *policy.PodDisruptionBudget) {
    key, err := controller.KeyFunc(pdb)
    if err != nil {
        glog.Errorf("Couldn't get key for PodDisruptionBudget object %+v: %v", pdb, err)
        return
    }
    dc.queue.Add(key)
}

func (dc *DisruptionController) enqueuePdbForRecheck(pdb *policy.PodDisruptionBudget, delay time.Duration) {
    key, err := controller.KeyFunc(pdb)
    if err != nil {
        glog.Errorf("Couldn't get key for PodDisruptionBudget object %+v: %v", pdb, err)
        return
    }
    dc.recheckQueue.AddAfter(key, delay)
}

func (dc *DisruptionController) getPdbForPod(pod *v1.Pod) *policy.PodDisruptionBudget {
    // GetPodPodDisruptionBudgets returns an error only if no
    // PodDisruptionBudgets are found. We don't return that as an error to the
    // caller.
    pdbs, err := dc.pdbLister.GetPodPodDisruptionBudgets(pod)
    if err != nil {
        glog.V(4).Infof("No PodDisruptionBudgets found for pod %v, PodDisruptionBudget controller will avoid syncing.", pod.Name)
        return nil
    }

    if len(pdbs) > 1 {
        msg := fmt.Sprintf("Pod %q/%q matches multiple PodDisruptionBudgets. Chose %q arbitrarily.", pod.Namespace, pod.Name, pdbs[0].Name)
        glog.Warning(msg)
        dc.recorder.Event(pod, v1.EventTypeWarning, "MultiplePodDisruptionBudgets", msg)
    }
    return pdbs[0]
}

// This function returns pods using the PodDisruptionBudget object.
// IMPORTANT NOTE: the returned pods should NOT be modified.
func (dc *DisruptionController) getPodsForPdb(pdb *policy.PodDisruptionBudget) ([]*v1.Pod, error) {
    sel, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector)
    if err != nil {
        return []*v1.Pod{}, err
    }
    if sel.Empty() {
        return []*v1.Pod{}, nil
    }
    pods, err := dc.podLister.Pods(pdb.Namespace).List(sel)
    if err != nil {
        return []*v1.Pod{}, err
    }
    return pods, nil
}

func (dc *DisruptionController) worker() {
    for dc.processNextWorkItem() {
    }
}

func (dc *DisruptionController) processNextWorkItem() bool {
    dKey, quit := dc.queue.Get()
    if quit {
        return false
    }
    defer dc.queue.Done(dKey)

    err := dc.sync(dKey.(string))
    if err == nil {
        dc.queue.Forget(dKey)
        return true
    }

    utilruntime.HandleError(fmt.Errorf("Error syncing PodDisruptionBudget %v, requeuing: %v", dKey.(string), err))
    dc.queue.AddRateLimited(dKey)

    return true
}

func (dc *DisruptionController) recheckWorker() {
    for dc.processNextRecheckWorkItem() {
    }
}

func (dc *DisruptionController) processNextRecheckWorkItem() bool {
    dKey, quit := dc.recheckQueue.Get()
    if quit {
        return false
    }
    defer dc.recheckQueue.Done(dKey)
    dc.queue.AddRateLimited(dKey)
    return true
}

func (dc *DisruptionController) sync(key string) error {
    startTime := time.Now()
    defer func() {
        glog.V(4).Infof("Finished syncing PodDisruptionBudget %q (%v)", key, time.Now().Sub(startTime))
    }()

    namespace, name, err := cache.SplitMetaNamespaceKey(key)
    if err != nil {
        return err
    }
    pdb, err := dc.pdbLister.PodDisruptionBudgets(namespace).Get(name)
    if errors.IsNotFound(err) {
        glog.V(4).Infof("PodDisruptionBudget %q has been deleted", key)
        return nil
    }
    if err != nil {
        return err
    }

    if err := dc.trySync(pdb); err != nil {
        glog.Errorf("Failed to sync pdb %s/%s: %v", pdb.Namespace, pdb.Name, err)
        return dc.failSafe(pdb)
    }

    return nil
}

func (dc *DisruptionController) trySync(pdb *policy.PodDisruptionBudget) error {
    pods, err := dc.getPodsForPdb(pdb)
    if err != nil {
        dc.recorder.Eventf(pdb, v1.EventTypeWarning, "NoPods", "Failed to get pods: %v", err)
        return err
    }
    if len(pods) == 0 {
        dc.recorder.Eventf(pdb, v1.EventTypeNormal, "NoPods", "No matching pods found")
    }

    expectedCount, desiredHealthy, err := dc.getExpectedPodCount(pdb, pods)
    if err != nil {
        dc.recorder.Eventf(pdb, v1.EventTypeWarning, "CalculateExpectedPodCountFailed", "Failed to calculate the number of expected pods: %v", err)
        return err
    }

    currentTime := time.Now()
    disruptedPods, recheckTime := dc.buildDisruptedPodMap(pods, pdb, currentTime)
    currentHealthy := countHealthyPods(pods, disruptedPods, currentTime)
    err = dc.updatePdbStatus(pdb, currentHealthy, desiredHealthy, expectedCount, disruptedPods)

    if err == nil && recheckTime != nil {
        // There is always at most one PDB waiting with a particular name in the queue,
        // and each PDB in the queue is associated with the lowest timestamp
        // that was supplied when a PDB with that name was added.
        dc.enqueuePdbForRecheck(pdb, recheckTime.Sub(currentTime))
    }
    return err
}

func (dc *DisruptionController) getExpectedPodCount(pdb *policy.PodDisruptionBudget, pods []*v1.Pod) (expectedCount, desiredHealthy int32, err error) {
    err = nil
    // TODO(davidopp): consider making the way expectedCount is calculated and the rules about
    // permitted controller configurations (specifically, considering it an error
    // if a pod covered by a PDB has 0 controllers or > 1 controller) be
    // handled the same way for integer and percentage minAvailable

    if pdb.Spec.MaxUnavailable != nil {
        expectedCount, err = dc.getExpectedScale(pdb, pods)
        if err != nil {
            return
        }
        var maxUnavailable int
        maxUnavailable, err = intstr.GetValueFromIntOrPercent(pdb.Spec.MaxUnavailable, int(expectedCount), true)
        if err != nil {
            return
        }
        desiredHealthy = expectedCount - int32(maxUnavailable)
        if desiredHealthy < 0 {
            desiredHealthy = 0
        }
    } else if pdb.Spec.MinAvailable != nil {
        if pdb.Spec.MinAvailable.Type == intstr.Int {
            desiredHealthy = pdb.Spec.MinAvailable.IntVal
            expectedCount = int32(len(pods))
        } else if pdb.Spec.MinAvailable.Type == intstr.String {
            expectedCount, err = dc.getExpectedScale(pdb, pods)
            if err != nil {
                return
            }

            var minAvailable int
            minAvailable, err = intstr.GetValueFromIntOrPercent(pdb.Spec.MinAvailable, int(expectedCount), true)
            if err != nil {
                return
            }
            desiredHealthy = int32(minAvailable)
        }
    }
    return
}

func (dc *DisruptionController) getExpectedScale(pdb *policy.PodDisruptionBudget, pods []*v1.Pod) (expectedCount int32, err error) {
    // When the user specifies a fraction of pods that must be available, we
    // use as the fraction's denominator
    // SUM_{all c in C} scale(c)
    // where C is the union of C_p1, C_p2, ..., C_pN
    // and each C_pi is the set of controllers controlling the pod pi

    // k8s only defines what will happen when 0 or 1 controllers control a
    // given pod. We explicitly exclude the 0 controllers case here, and we
    // report an error if we find a pod with more than 1 controller. Thus in
    // practice each C_pi is a set of exactly 1 controller.

    // A mapping from controllers to their scale.
    controllerScale := map[types.UID]int32{}

    // 1. Find the controller for each pod. If any pod has 0 controllers,
    // that's an error. With ControllerRef, a pod can only have 1 controller.
    for _, pod := range pods {
        foundController := false
        for _, finder := range dc.finders() {
            var controllerNScale *controllerAndScale
            controllerNScale, err = finder(pod)
            if err != nil {
                return
            }
            if controllerNScale != nil {
                controllerScale[controllerNScale.UID] = controllerNScale.scale
                foundController = true
                break
            }
        }
        if !foundController {
            err = fmt.Errorf("found no controllers for pod %q", pod.Name)
            dc.recorder.Event(pdb, v1.EventTypeWarning, "NoControllers", err.Error())
            return
        }
    }

    // 2. Add up all the controllers.
    expectedCount = 0
    for _, count := range controllerScale {
        expectedCount += count
    }

    return
}

func countHealthyPods(pods []*v1.Pod, disruptedPods map[string]metav1.Time, currentTime time.Time) (currentHealthy int32) {
Pod:
    for _, pod := range pods {
        // Pod is being deleted.
        if pod.DeletionTimestamp != nil {
            continue
        }
        // Pod is expected to be deleted soon.
        if disruptionTime, found := disruptedPods[pod.Name]; found && disruptionTime.Time.Add(DeletionTimeout).After(currentTime) {
            continue
        }
        if podutil.IsPodReady(pod) {
            currentHealthy++
            continue Pod
        }
    }

    return
}

// buildDisruptedPodMap builds a new DisruptedPods map, dropping entries that refer to pods which no
// longer exist, have already been deleted, or were never deleted at all. It also returns the time
// when this check should be repeated.
func (dc *DisruptionController) buildDisruptedPodMap(pods []*v1.Pod, pdb *policy.PodDisruptionBudget, currentTime time.Time) (map[string]metav1.Time, *time.Time) {
    disruptedPods := pdb.Status.DisruptedPods
    result := make(map[string]metav1.Time)
    var recheckTime *time.Time

    if disruptedPods == nil || len(disruptedPods) == 0 {
        return result, recheckTime
    }
    for _, pod := range pods {
        if pod.DeletionTimestamp != nil {
            // Already being deleted.
            continue
        }
        disruptionTime, found := disruptedPods[pod.Name]
        if !found {
            // Pod not on the list.
            continue
        }
        expectedDeletion := disruptionTime.Time.Add(DeletionTimeout)
        if expectedDeletion.Before(currentTime) {
            glog.V(1).Infof("Pod %s/%s was expected to be deleted at %s but it wasn't, updating pdb %s/%s",
                pod.Namespace, pod.Name, disruptionTime.String(), pdb.Namespace, pdb.Name)
            dc.recorder.Eventf(pod, v1.EventTypeWarning, "NotDeleted", "Pod was expected by PDB %s/%s to be deleted but it wasn't",
                pdb.Namespace, pdb.Name)
        } else {
            if recheckTime == nil || expectedDeletion.Before(*recheckTime) {
                recheckTime = &expectedDeletion
            }
            result[pod.Name] = disruptionTime
        }
    }
    return result, recheckTime
}

// failSafe is an attempt to at least update the PodDisruptionsAllowed field to
// 0 if everything else has failed. This is one place we
// implement the "fail open" part of the design since if we manage to update
// this field correctly, we will prevent the /evict handler from approving an
// eviction when it may be unsafe to do so.
func (dc *DisruptionController) failSafe(pdb *policy.PodDisruptionBudget) error {
    newPdb := pdb.DeepCopy()
    newPdb.Status.PodDisruptionsAllowed = 0
    return dc.getUpdater()(newPdb)
}

func (dc *DisruptionController) updatePdbStatus(pdb *policy.PodDisruptionBudget, currentHealthy, desiredHealthy, expectedCount int32,
    disruptedPods map[string]metav1.Time) error {

    // We require expectedCount to be > 0 so that PDBs which currently match no
    // pods are in a safe state when their first pods appear but this controller
    // has not updated their status yet. This isn't the only race, but it's a
    // common one that's easy to detect.
    disruptionsAllowed := currentHealthy - desiredHealthy
    if expectedCount <= 0 || disruptionsAllowed <= 0 {
        disruptionsAllowed = 0
    }

    if pdb.Status.CurrentHealthy == currentHealthy &&
        pdb.Status.DesiredHealthy == desiredHealthy &&
        pdb.Status.ExpectedPods == expectedCount &&
        pdb.Status.PodDisruptionsAllowed == disruptionsAllowed &&
        reflect.DeepEqual(pdb.Status.DisruptedPods, disruptedPods) &&
        pdb.Status.ObservedGeneration == pdb.Generation {
        return nil
    }

    newPdb := pdb.DeepCopy()
    newPdb.Status = policy.PodDisruptionBudgetStatus{
        CurrentHealthy:        currentHealthy,
        DesiredHealthy:        desiredHealthy,
        ExpectedPods:          expectedCount,
        PodDisruptionsAllowed: disruptionsAllowed,
        DisruptedPods:         disruptedPods,
        ObservedGeneration:    pdb.Generation,
    }

    return dc.getUpdater()(newPdb)
}

// refresh tries to re-GET the given PDB. If there are any errors, it just
// returns the old PDB. Intended to be used in a retry loop where it runs a
// bounded number of times.
func refresh(pdbClient policyclientset.PodDisruptionBudgetInterface, pdb *policy.PodDisruptionBudget) *policy.PodDisruptionBudget {
    newPdb, err := pdbClient.Get(pdb.Name, metav1.GetOptions{})
    if err == nil {
        return newPdb
    } else {
        return pdb
    }
}

func (dc *DisruptionController) writePdbStatus(pdb *policy.PodDisruptionBudget) error {
    pdbClient := dc.kubeClient.PolicyV1beta1().PodDisruptionBudgets(pdb.Namespace)
    st := pdb.Status

    var err error
    for i, pdb := 0, pdb; i < statusUpdateRetries; i, pdb = i+1, refresh(pdbClient, pdb) {
        pdb.Status = st
        if _, err = pdbClient.UpdateStatus(pdb); err == nil {
            break
        }
    }

    return err
}
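
The constructor and Run method above are normally fed by a single shared informer factory. The following is a minimal, hedged wiring sketch, not part of the vendored tree: the kubeconfig path and resync period are assumptions, and the informer accessor chains assume this snapshot of client-go (they match the ones used in disruption_test.go below).

// Package main is a hypothetical wiring sketch for the disruption controller.
package main

import (
    "time"

    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/kubernetes/pkg/controller/disruption"
)

func main() {
    // Build a client from a kubeconfig file; the path here is an assumption.
    config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(config)

    // One shared informer factory supplies all six informers the constructor expects.
    factory := informers.NewSharedInformerFactory(client, 30*time.Second)
    dc := disruption.NewDisruptionController(
        factory.Core().V1().Pods(),
        factory.Policy().V1beta1().PodDisruptionBudgets(),
        factory.Core().V1().ReplicationControllers(),
        factory.Extensions().V1beta1().ReplicaSets(),
        factory.Extensions().V1beta1().Deployments(),
        factory.Apps().V1beta1().StatefulSets(),
        client,
    )

    stopCh := make(chan struct{})
    defer close(stopCh)

    // Start the informers, then run the controller until stopCh is closed.
    factory.Start(stopCh)
    dc.Run(stopCh)
}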
vendor/k8s.io/kubernetes/pkg/controller/disruption/disruption_test.go (generated, vendored, new file, +740)
@@ -0,0 +1,740 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package disruption

import (
    "fmt"
    "reflect"
    "runtime/debug"
    "testing"
    "time"

    apps "k8s.io/api/apps/v1beta1"
    "k8s.io/api/core/v1"
    extensions "k8s.io/api/extensions/v1beta1"
    policy "k8s.io/api/policy/v1beta1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/intstr"
    "k8s.io/apimachinery/pkg/util/uuid"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/util/workqueue"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    _ "k8s.io/kubernetes/pkg/apis/core/install"
    "k8s.io/kubernetes/pkg/controller"

    "github.com/Azure/go-autorest/autorest/to"
)

type pdbStates map[string]policy.PodDisruptionBudget

var alwaysReady = func() bool { return true }

func (ps *pdbStates) Set(pdb *policy.PodDisruptionBudget) error {
    key, err := controller.KeyFunc(pdb)
    if err != nil {
        return err
    }
    (*ps)[key] = *pdb.DeepCopy()
    return nil
}

func (ps *pdbStates) Get(key string) policy.PodDisruptionBudget {
    return (*ps)[key]
}

func (ps *pdbStates) VerifyPdbStatus(t *testing.T, key string, disruptionsAllowed, currentHealthy, desiredHealthy, expectedPods int32,
    disruptedPodMap map[string]metav1.Time) {
    actualPDB := ps.Get(key)
    expectedStatus := policy.PodDisruptionBudgetStatus{
        PodDisruptionsAllowed: disruptionsAllowed,
        CurrentHealthy:        currentHealthy,
        DesiredHealthy:        desiredHealthy,
        ExpectedPods:          expectedPods,
        DisruptedPods:         disruptedPodMap,
        ObservedGeneration:    actualPDB.Generation,
    }
    actualStatus := actualPDB.Status
    if !reflect.DeepEqual(actualStatus, expectedStatus) {
        debug.PrintStack()
        t.Fatalf("PDB %q status mismatch. Expected %+v but got %+v.", key, expectedStatus, actualStatus)
    }
}

func (ps *pdbStates) VerifyDisruptionAllowed(t *testing.T, key string, disruptionsAllowed int32) {
    pdb := ps.Get(key)
    if pdb.Status.PodDisruptionsAllowed != disruptionsAllowed {
        debug.PrintStack()
        t.Fatalf("PodDisruptionAllowed mismatch for PDB %q. Expected %v but got %v.", key, disruptionsAllowed, pdb.Status.PodDisruptionsAllowed)
    }
}

type disruptionController struct {
    *DisruptionController

    podStore cache.Store
    pdbStore cache.Store
    rcStore  cache.Store
    rsStore  cache.Store
    dStore   cache.Store
    ssStore  cache.Store
}

func newFakeDisruptionController() (*disruptionController, *pdbStates) {
    ps := &pdbStates{}

    informerFactory := informers.NewSharedInformerFactory(nil, controller.NoResyncPeriodFunc())

    dc := NewDisruptionController(
        informerFactory.Core().V1().Pods(),
        informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
        informerFactory.Core().V1().ReplicationControllers(),
        informerFactory.Extensions().V1beta1().ReplicaSets(),
        informerFactory.Extensions().V1beta1().Deployments(),
        informerFactory.Apps().V1beta1().StatefulSets(),
        nil,
    )
    dc.getUpdater = func() updater { return ps.Set }
    dc.podListerSynced = alwaysReady
    dc.pdbListerSynced = alwaysReady
    dc.rcListerSynced = alwaysReady
    dc.rsListerSynced = alwaysReady
    dc.dListerSynced = alwaysReady
    dc.ssListerSynced = alwaysReady

    return &disruptionController{
        dc,
        informerFactory.Core().V1().Pods().Informer().GetStore(),
        informerFactory.Policy().V1beta1().PodDisruptionBudgets().Informer().GetStore(),
        informerFactory.Core().V1().ReplicationControllers().Informer().GetStore(),
        informerFactory.Extensions().V1beta1().ReplicaSets().Informer().GetStore(),
        informerFactory.Extensions().V1beta1().Deployments().Informer().GetStore(),
        informerFactory.Apps().V1beta1().StatefulSets().Informer().GetStore(),
    }, ps
}

func fooBar() map[string]string {
    return map[string]string{"foo": "bar"}
}

func newSel(labels map[string]string) *metav1.LabelSelector {
    return &metav1.LabelSelector{MatchLabels: labels}
}

func newSelFooBar() *metav1.LabelSelector {
    return newSel(map[string]string{"foo": "bar"})
}

func newMinAvailablePodDisruptionBudget(t *testing.T, minAvailable intstr.IntOrString) (*policy.PodDisruptionBudget, string) {

    pdb := &policy.PodDisruptionBudget{
        TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
        ObjectMeta: metav1.ObjectMeta{
            UID:             uuid.NewUUID(),
            Name:            "foobar",
            Namespace:       metav1.NamespaceDefault,
            ResourceVersion: "18",
        },
        Spec: policy.PodDisruptionBudgetSpec{
            MinAvailable: &minAvailable,
            Selector:     newSelFooBar(),
        },
    }

    pdbName, err := controller.KeyFunc(pdb)
    if err != nil {
        t.Fatalf("Unexpected error naming pdb %q: %v", pdb.Name, err)
    }

    return pdb, pdbName
}

func newMaxUnavailablePodDisruptionBudget(t *testing.T, maxUnavailable intstr.IntOrString) (*policy.PodDisruptionBudget, string) {
    pdb := &policy.PodDisruptionBudget{
        TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
        ObjectMeta: metav1.ObjectMeta{
            UID:             uuid.NewUUID(),
            Name:            "foobar",
            Namespace:       metav1.NamespaceDefault,
            ResourceVersion: "18",
        },
        Spec: policy.PodDisruptionBudgetSpec{
            MaxUnavailable: &maxUnavailable,
            Selector:       newSelFooBar(),
        },
    }

    pdbName, err := controller.KeyFunc(pdb)
    if err != nil {
        t.Fatalf("Unexpected error naming pdb %q: %v", pdb.Name, err)
    }

    return pdb, pdbName
}

func updatePodOwnerToRc(t *testing.T, pod *v1.Pod, rc *v1.ReplicationController) {
    var controllerReference metav1.OwnerReference
    var trueVar = true
    controllerReference = metav1.OwnerReference{UID: rc.UID, APIVersion: controllerKindRC.GroupVersion().String(), Kind: controllerKindRC.Kind, Name: rc.Name, Controller: &trueVar}
    pod.OwnerReferences = append(pod.OwnerReferences, controllerReference)
}

func updatePodOwnerToRs(t *testing.T, pod *v1.Pod, rs *extensions.ReplicaSet) {
    var controllerReference metav1.OwnerReference
    var trueVar = true
    controllerReference = metav1.OwnerReference{UID: rs.UID, APIVersion: controllerKindRS.GroupVersion().String(), Kind: controllerKindRS.Kind, Name: rs.Name, Controller: &trueVar}
    pod.OwnerReferences = append(pod.OwnerReferences, controllerReference)
}

// pod, podName := newPod(t, name)
func updatePodOwnerToSs(t *testing.T, pod *v1.Pod, ss *apps.StatefulSet) {
    var controllerReference metav1.OwnerReference
    var trueVar = true
    controllerReference = metav1.OwnerReference{UID: ss.UID, APIVersion: controllerKindSS.GroupVersion().String(), Kind: controllerKindSS.Kind, Name: ss.Name, Controller: &trueVar}
    pod.OwnerReferences = append(pod.OwnerReferences, controllerReference)
}

func newPod(t *testing.T, name string) (*v1.Pod, string) {
    pod := &v1.Pod{
        TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
        ObjectMeta: metav1.ObjectMeta{
            UID:             uuid.NewUUID(),
            Annotations:     make(map[string]string),
            Name:            name,
            Namespace:       metav1.NamespaceDefault,
            ResourceVersion: "18",
            Labels:          fooBar(),
        },
        Spec: v1.PodSpec{},
        Status: v1.PodStatus{
            Conditions: []v1.PodCondition{
                {Type: v1.PodReady, Status: v1.ConditionTrue},
            },
        },
    }

    podName, err := controller.KeyFunc(pod)
    if err != nil {
        t.Fatalf("Unexpected error naming pod %q: %v", pod.Name, err)
    }

    return pod, podName
}

func newReplicationController(t *testing.T, size int32) (*v1.ReplicationController, string) {
    rc := &v1.ReplicationController{
        TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
        ObjectMeta: metav1.ObjectMeta{
            UID:             uuid.NewUUID(),
            Name:            "foobar",
            Namespace:       metav1.NamespaceDefault,
            ResourceVersion: "18",
            Labels:          fooBar(),
        },
        Spec: v1.ReplicationControllerSpec{
            Replicas: &size,
            Selector: fooBar(),
        },
    }

    rcName, err := controller.KeyFunc(rc)
    if err != nil {
        t.Fatalf("Unexpected error naming RC %q", rc.Name)
    }

    return rc, rcName
}

func newDeployment(t *testing.T, size int32) (*extensions.Deployment, string) {
    d := &extensions.Deployment{
        TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
        ObjectMeta: metav1.ObjectMeta{
            UID:             uuid.NewUUID(),
            Name:            "foobar",
            Namespace:       metav1.NamespaceDefault,
            ResourceVersion: "18",
            Labels:          fooBar(),
        },
        Spec: extensions.DeploymentSpec{
            Replicas: &size,
            Selector: newSelFooBar(),
        },
    }

    dName, err := controller.KeyFunc(d)
    if err != nil {
        t.Fatalf("Unexpected error naming Deployment %q: %v", d.Name, err)
    }

    return d, dName
}

func newReplicaSet(t *testing.T, size int32) (*extensions.ReplicaSet, string) {
    rs := &extensions.ReplicaSet{
        TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
        ObjectMeta: metav1.ObjectMeta{
            UID:             uuid.NewUUID(),
            Name:            "foobar",
            Namespace:       metav1.NamespaceDefault,
            ResourceVersion: "18",
            Labels:          fooBar(),
        },
        Spec: extensions.ReplicaSetSpec{
            Replicas: &size,
            Selector: newSelFooBar(),
        },
    }

    rsName, err := controller.KeyFunc(rs)
    if err != nil {
        t.Fatalf("Unexpected error naming ReplicaSet %q: %v", rs.Name, err)
    }

    return rs, rsName
}

func newStatefulSet(t *testing.T, size int32) (*apps.StatefulSet, string) {
    ss := &apps.StatefulSet{
        TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
        ObjectMeta: metav1.ObjectMeta{
            UID:             uuid.NewUUID(),
            Name:            "foobar",
            Namespace:       metav1.NamespaceDefault,
            ResourceVersion: "18",
            Labels:          fooBar(),
        },
        Spec: apps.StatefulSetSpec{
            Replicas: &size,
            Selector: newSelFooBar(),
        },
    }

    ssName, err := controller.KeyFunc(ss)
    if err != nil {
        t.Fatalf("Unexpected error naming StatefulSet %q: %v", ss.Name, err)
    }

    return ss, ssName
}

func update(t *testing.T, store cache.Store, obj interface{}) {
    if err := store.Update(obj); err != nil {
        t.Fatalf("Could not add %+v to %+v: %v", obj, store, err)
    }
}

func add(t *testing.T, store cache.Store, obj interface{}) {
    if err := store.Add(obj); err != nil {
        t.Fatalf("Could not add %+v to %+v: %v", obj, store, err)
    }
}

// Create one with no selector. Verify it matches 0 pods.
func TestNoSelector(t *testing.T) {
    dc, ps := newFakeDisruptionController()

    pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(3))
    pdb.Spec.Selector = &metav1.LabelSelector{}
    pod, _ := newPod(t, "yo-yo-yo")

    add(t, dc.pdbStore, pdb)
    dc.sync(pdbName)
    ps.VerifyPdbStatus(t, pdbName, 0, 0, 3, 0, map[string]metav1.Time{})

    add(t, dc.podStore, pod)
    dc.sync(pdbName)
    ps.VerifyPdbStatus(t, pdbName, 0, 0, 3, 0, map[string]metav1.Time{})
}

// Verify that available/expected counts go up as we add pods, then verify that
// available count goes down when we make a pod unavailable.
func TestUnavailable(t *testing.T) {
    dc, ps := newFakeDisruptionController()

    pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(3))
    add(t, dc.pdbStore, pdb)
    dc.sync(pdbName)

    // Add four pods, verifying that the counts go up at each step.
    pods := []*v1.Pod{}
    for i := int32(0); i < 4; i++ {
        ps.VerifyPdbStatus(t, pdbName, 0, i, 3, i, map[string]metav1.Time{})
        pod, _ := newPod(t, fmt.Sprintf("yo-yo-yo %d", i))
        pods = append(pods, pod)
        add(t, dc.podStore, pod)
        dc.sync(pdbName)
    }
    ps.VerifyPdbStatus(t, pdbName, 1, 4, 3, 4, map[string]metav1.Time{})

    // Now set one pod as unavailable
    pods[0].Status.Conditions = []v1.PodCondition{}
    update(t, dc.podStore, pods[0])
    dc.sync(pdbName)

    // Verify expected update
    ps.VerifyPdbStatus(t, pdbName, 0, 3, 3, 4, map[string]metav1.Time{})
}

// Verify that an integer MaxUnavailable won't
// allow a disruption for pods with no controller.
func TestIntegerMaxUnavailable(t *testing.T) {
    dc, ps := newFakeDisruptionController()

    pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(1))
    add(t, dc.pdbStore, pdb)
    dc.sync(pdbName)
    // This verifies that when a PDB has 0 pods, disruptions are not allowed.
    ps.VerifyDisruptionAllowed(t, pdbName, 0)

    pod, _ := newPod(t, "naked")
    add(t, dc.podStore, pod)
    dc.sync(pdbName)

    ps.VerifyDisruptionAllowed(t, pdbName, 0)
}

// Verify that an integer MaxUnavailable will recompute allowed disruptions when the scale of
// the selected pod's controller is modified.
func TestIntegerMaxUnavailableWithScaling(t *testing.T) {
    dc, ps := newFakeDisruptionController()

    pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(2))
    add(t, dc.pdbStore, pdb)

    rs, _ := newReplicaSet(t, 7)
    add(t, dc.rsStore, rs)

    pod, _ := newPod(t, "pod")
    updatePodOwnerToRs(t, pod, rs)
    add(t, dc.podStore, pod)
    dc.sync(pdbName)
    ps.VerifyPdbStatus(t, pdbName, 0, 1, 5, 7, map[string]metav1.Time{})

    // Update scale of ReplicaSet and check PDB
    rs.Spec.Replicas = to.Int32Ptr(5)
    update(t, dc.rsStore, rs)

    dc.sync(pdbName)
    ps.VerifyPdbStatus(t, pdbName, 0, 1, 3, 5, map[string]metav1.Time{})
}

// Create a pod with no controller, and verify that a PDB with a percentage
// specified won't allow a disruption.
func TestNakedPod(t *testing.T) {
    dc, ps := newFakeDisruptionController()

    pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("28%"))
    add(t, dc.pdbStore, pdb)
    dc.sync(pdbName)
    // This verifies that when a PDB has 0 pods, disruptions are not allowed.
    ps.VerifyDisruptionAllowed(t, pdbName, 0)

    pod, _ := newPod(t, "naked")
    add(t, dc.podStore, pod)
    dc.sync(pdbName)

    ps.VerifyDisruptionAllowed(t, pdbName, 0)
}

// Verify that we count the scale of a ReplicaSet even when it has no Deployment.
func TestReplicaSet(t *testing.T) {
    dc, ps := newFakeDisruptionController()

    pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("20%"))
    add(t, dc.pdbStore, pdb)

    rs, _ := newReplicaSet(t, 10)
    add(t, dc.rsStore, rs)

    pod, _ := newPod(t, "pod")
    updatePodOwnerToRs(t, pod, rs)
    add(t, dc.podStore, pod)
    dc.sync(pdbName)
    ps.VerifyPdbStatus(t, pdbName, 0, 1, 2, 10, map[string]metav1.Time{})
}

// Verify that multiple controllers don't allow the PDB to be set true.
func TestMultipleControllers(t *testing.T) {
    const rcCount = 2
    const podCount = 2

    dc, ps := newFakeDisruptionController()

    pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("1%"))
    add(t, dc.pdbStore, pdb)

    pods := []*v1.Pod{}
    for i := 0; i < podCount; i++ {
        pod, _ := newPod(t, fmt.Sprintf("pod %d", i))
        pods = append(pods, pod)
        add(t, dc.podStore, pod)
    }

    dc.sync(pdbName)

    // No controllers yet => no disruption allowed
    ps.VerifyDisruptionAllowed(t, pdbName, 0)

    rc, _ := newReplicationController(t, 1)
    rc.Name = "rc 1"
    for i := 0; i < podCount; i++ {
        updatePodOwnerToRc(t, pods[i], rc)
    }
    add(t, dc.rcStore, rc)
    dc.sync(pdbName)
    // One RC and 200%>1% healthy => disruption allowed
    ps.VerifyDisruptionAllowed(t, pdbName, 1)

    rc, _ = newReplicationController(t, 1)
    rc.Name = "rc 2"
    for i := 0; i < podCount; i++ {
        updatePodOwnerToRc(t, pods[i], rc)
    }
    add(t, dc.rcStore, rc)
    dc.sync(pdbName)

    // 100%>1% healthy BUT two RCs => no disruption allowed
    // TODO: Find out if this assert is still needed
    //ps.VerifyDisruptionAllowed(t, pdbName, 0)
}

func TestReplicationController(t *testing.T) {
    // The budget in this test matches foo=bar, but the RC and its pods match
    // {foo=bar, baz=quux}. Later, when we add a rogue pod with only a foo=bar
    // label, it will match the budget but have no controllers, which should
    // trigger the controller to set PodDisruptionAllowed to false.
    labels := map[string]string{
        "foo": "bar",
        "baz": "quux",
    }

    dc, ps := newFakeDisruptionController()

    // 34% should round up to 2
    pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("34%"))
    add(t, dc.pdbStore, pdb)
    rc, _ := newReplicationController(t, 3)
    rc.Spec.Selector = labels
    add(t, dc.rcStore, rc)
    dc.sync(pdbName)

    // It starts out at 0 expected because, with no pods, the PDB doesn't know
    // about the RC. This is a known bug. TODO(mml): file issue
    ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]metav1.Time{})

    pods := []*v1.Pod{}

    for i := int32(0); i < 3; i++ {
        pod, _ := newPod(t, fmt.Sprintf("foobar %d", i))
        updatePodOwnerToRc(t, pod, rc)
        pods = append(pods, pod)
        pod.Labels = labels
        add(t, dc.podStore, pod)
        dc.sync(pdbName)
        if i < 2 {
            ps.VerifyPdbStatus(t, pdbName, 0, i+1, 2, 3, map[string]metav1.Time{})
        } else {
            ps.VerifyPdbStatus(t, pdbName, 1, 3, 2, 3, map[string]metav1.Time{})
        }
    }

    rogue, _ := newPod(t, "rogue")
    add(t, dc.podStore, rogue)
    dc.sync(pdbName)
    ps.VerifyDisruptionAllowed(t, pdbName, 0)
}

func TestStatefulSetController(t *testing.T) {
    labels := map[string]string{
        "foo": "bar",
        "baz": "quux",
    }

    dc, ps := newFakeDisruptionController()

    // 34% should round up to 2
    pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("34%"))
    add(t, dc.pdbStore, pdb)
    ss, _ := newStatefulSet(t, 3)
    add(t, dc.ssStore, ss)
    dc.sync(pdbName)

    // It starts out at 0 expected because, with no pods, the PDB doesn't know
    // about the SS. This is a known bug. TODO(mml): file issue
    ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]metav1.Time{})

    pods := []*v1.Pod{}

    for i := int32(0); i < 3; i++ {
        pod, _ := newPod(t, fmt.Sprintf("foobar %d", i))
        updatePodOwnerToSs(t, pod, ss)
        pods = append(pods, pod)
        pod.Labels = labels
        add(t, dc.podStore, pod)
        dc.sync(pdbName)
        if i < 2 {
            ps.VerifyPdbStatus(t, pdbName, 0, i+1, 2, 3, map[string]metav1.Time{})
        } else {
            ps.VerifyPdbStatus(t, pdbName, 1, 3, 2, 3, map[string]metav1.Time{})
        }
    }
}

func TestTwoControllers(t *testing.T) {
    // Most of this test is in verifying intermediate cases as we define the
    // three controllers and create the pods.
    rcLabels := map[string]string{
        "foo": "bar",
        "baz": "quux",
    }
    dLabels := map[string]string{
        "foo": "bar",
        "baz": "quuux",
    }
    dc, ps := newFakeDisruptionController()

    // These constants are related, but I avoid calculating the correct values in
    // code. If you update a parameter here, recalculate the correct values for
    // all of them. Further down in the test, we use these to control loops, and
    // that level of logic is enough complexity for me.
    const collectionSize int32 = 11   // How big each collection is
    const minAvailable string = "28%" // minAvailable we'll specify
    const minimumOne int32 = 4        // integer minimum with one controller
    const minimumTwo int32 = 7        // integer minimum with two controllers

    pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromString("28%"))
    add(t, dc.pdbStore, pdb)
    rc, _ := newReplicationController(t, collectionSize)
    rc.Spec.Selector = rcLabels
    add(t, dc.rcStore, rc)
    dc.sync(pdbName)

    ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]metav1.Time{})

    pods := []*v1.Pod{}

    unavailablePods := collectionSize - minimumOne - 1
    for i := int32(1); i <= collectionSize; i++ {
        pod, _ := newPod(t, fmt.Sprintf("quux %d", i))
        updatePodOwnerToRc(t, pod, rc)
        pods = append(pods, pod)
        pod.Labels = rcLabels
        if i <= unavailablePods {
            pod.Status.Conditions = []v1.PodCondition{}
        }
        add(t, dc.podStore, pod)
        dc.sync(pdbName)
        if i <= unavailablePods {
            ps.VerifyPdbStatus(t, pdbName, 0, 0, minimumOne, collectionSize, map[string]metav1.Time{})
        } else if i-unavailablePods <= minimumOne {
            ps.VerifyPdbStatus(t, pdbName, 0, i-unavailablePods, minimumOne, collectionSize, map[string]metav1.Time{})
        } else {
            ps.VerifyPdbStatus(t, pdbName, 1, i-unavailablePods, minimumOne, collectionSize, map[string]metav1.Time{})
        }
    }

    d, _ := newDeployment(t, collectionSize)
    d.Spec.Selector = newSel(dLabels)
    add(t, dc.dStore, d)
    dc.sync(pdbName)
    ps.VerifyPdbStatus(t, pdbName, 1, minimumOne+1, minimumOne, collectionSize, map[string]metav1.Time{})

    rs, _ := newReplicaSet(t, collectionSize)
    rs.Spec.Selector = newSel(dLabels)
    rs.Labels = dLabels
    add(t, dc.rsStore, rs)
    dc.sync(pdbName)
    ps.VerifyPdbStatus(t, pdbName, 1, minimumOne+1, minimumOne, collectionSize, map[string]metav1.Time{})

    // By the end of this loop, the number of ready pods should be N+2 (hence minimumTwo+2).
    unavailablePods = 2*collectionSize - (minimumTwo + 2) - unavailablePods
    for i := int32(1); i <= collectionSize; i++ {
        pod, _ := newPod(t, fmt.Sprintf("quuux %d", i))
        updatePodOwnerToRs(t, pod, rs)
        pods = append(pods, pod)
        pod.Labels = dLabels
        if i <= unavailablePods {
            pod.Status.Conditions = []v1.PodCondition{}
        }
        add(t, dc.podStore, pod)
        dc.sync(pdbName)
        if i <= unavailablePods {
            ps.VerifyPdbStatus(t, pdbName, 0, minimumOne+1, minimumTwo, 2*collectionSize, map[string]metav1.Time{})
        } else if i-unavailablePods <= minimumTwo-(minimumOne+1) {
            ps.VerifyPdbStatus(t, pdbName, 0, (minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize, map[string]metav1.Time{})
        } else {
            ps.VerifyPdbStatus(t, pdbName, i-unavailablePods-(minimumTwo-(minimumOne+1)),
                (minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize, map[string]metav1.Time{})
        }
    }

    // Now we verify we can bring down 1 pod and a disruption is still permitted,
    // but if we bring down two, it's not. Then we make the pod ready again and
    // verify that a disruption is permitted again.
    ps.VerifyPdbStatus(t, pdbName, 2, 2+minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})
    pods[collectionSize-1].Status.Conditions = []v1.PodCondition{}
    update(t, dc.podStore, pods[collectionSize-1])
    dc.sync(pdbName)
    ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})

    pods[collectionSize-2].Status.Conditions = []v1.PodCondition{}
    update(t, dc.podStore, pods[collectionSize-2])
    dc.sync(pdbName)
    ps.VerifyPdbStatus(t, pdbName, 0, minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})

    pods[collectionSize-1].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
    update(t, dc.podStore, pods[collectionSize-1])
    dc.sync(pdbName)
    ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize, map[string]metav1.Time{})
}

// Test pdb doesn't exist
func TestPDBNotExist(t *testing.T) {
    dc, _ := newFakeDisruptionController()
    pdb, _ := newMinAvailablePodDisruptionBudget(t, intstr.FromString("67%"))
    add(t, dc.pdbStore, pdb)
    if err := dc.sync("notExist"); err != nil {
        t.Errorf("Unexpected error: %v, expect nil", err)
    }
}

func TestUpdateDisruptedPods(t *testing.T) {
    dc, ps := newFakeDisruptionController()
    dc.recheckQueue = workqueue.NewNamedDelayingQueue("pdb-queue")
    pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(1))
    currentTime := time.Now()
    pdb.Status.DisruptedPods = map[string]metav1.Time{
        "p1":       {Time: currentTime},                       // Should be removed, pod deletion started.
        "p2":       {Time: currentTime.Add(-5 * time.Minute)}, // Should be removed, expired.
        "p3":       {Time: currentTime},                       // Should remain, pod untouched.
        "notthere": {Time: currentTime},                       // Should be removed, pod deleted.
    }
    add(t, dc.pdbStore, pdb)

    pod1, _ := newPod(t, "p1")
    pod1.DeletionTimestamp = &metav1.Time{Time: time.Now()}
    pod2, _ := newPod(t, "p2")
    pod3, _ := newPod(t, "p3")

    add(t, dc.podStore, pod1)
    add(t, dc.podStore, pod2)
    add(t, dc.podStore, pod3)

    dc.sync(pdbName)

    ps.VerifyPdbStatus(t, pdbName, 0, 1, 1, 3, map[string]metav1.Time{"p3": {Time: currentTime}})
}
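
The status arithmetic these tests exercise reduces to desiredHealthy from GetValueFromIntOrPercent and disruptionsAllowed = currentHealthy - desiredHealthy, floored at zero. Below is a small standalone sketch of that calculation; it is not part of the vendored code, and the numbers are chosen to match TestTwoControllers' 28% / 11-replica setup.

// Package main is a hypothetical arithmetic sketch of the PDB status numbers.
package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
    minAvailable := intstr.FromString("28%")
    expectedCount := 11 // e.g. the scale of the single controller backing the pods
    currentHealthy := 7

    // true => round up, matching getExpectedPodCount above.
    desiredHealthy, err := intstr.GetValueFromIntOrPercent(&minAvailable, expectedCount, true)
    if err != nil {
        panic(err)
    }

    disruptionsAllowed := currentHealthy - desiredHealthy
    if disruptionsAllowed < 0 {
        disruptionsAllowed = 0
    }
    fmt.Printf("desiredHealthy=%d disruptionsAllowed=%d\n", desiredHealthy, disruptionsAllowed)
    // 28% of 11 rounded up is 4, so with 7 healthy pods, 3 disruptions are allowed.
}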