Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 18:53:35 +00:00

Commit: Fresh dep ensure
vendor/k8s.io/kubernetes/pkg/controller/daemon/BUILD (generated, vendored): 95 changes
@@ -16,7 +16,6 @@ go_library(
    importpath = "k8s.io/kubernetes/pkg/controller/daemon",
    deps = [
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/apis/core/v1/helper:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/controller/daemon/util:go_default_library",
        "//pkg/features:go_default_library",
@@ -26,34 +25,35 @@ go_library(
        "//pkg/scheduler/cache:go_default_library",
        "//pkg/util/labels:go_default_library",
        "//pkg/util/metrics:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/apps/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/informers/apps/v1:go_default_library",
        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/listers/apps/v1:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
        "//vendor/k8s.io/client-go/util/integer:go_default_library",
        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
        "//staging/src/k8s.io/api/apps/v1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/client-go/informers/apps/v1:go_default_library",
        "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
        "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
        "//staging/src/k8s.io/client-go/util/integer:go_default_library",
        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)

@@ -61,6 +61,7 @@ go_test(
    name = "go_default_test",
    srcs = [
        "daemon_controller_test.go",
        "main_test.go",
        "update_test.go",
    ],
    embed = [":go_default_library"],
@@ -70,26 +71,28 @@ go_test(
        "//pkg/apis/core:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/kubelet/types:go_default_library",
        "//pkg/scheduler/algorithm:go_default_library",
        "//pkg/scheduler/api:go_default_library",
        "//pkg/securitycontext:go_default_library",
        "//pkg/util/labels:go_default_library",
        "//vendor/k8s.io/api/apps/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/testing:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
        "//staging/src/k8s.io/api/apps/v1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//staging/src/k8s.io/client-go/testing:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
        "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
    ],
)
vendor/k8s.io/kubernetes/pkg/controller/daemon/OWNERS (generated, vendored): 3 changes
@@ -1,8 +1,11 @@
approvers:
- mikedanese
- janetkuo
reviewers:
- janetkuo
- lukaszo
- mikedanese
- tnozicka
- k82cn
labels:
- sig/apps
vendor/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller.go (generated, vendored): 234 changes
@@ -23,10 +23,11 @@ import (
    "sync"
    "time"

    "github.com/golang/glog"
    "k8s.io/klog"

    apps "k8s.io/api/apps/v1"
    "k8s.io/api/core/v1"
    apiequality "k8s.io/apimachinery/pkg/api/equality"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
@@ -46,10 +47,10 @@ import (
    corelisters "k8s.io/client-go/listers/core/v1"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/record"
    "k8s.io/client-go/util/flowcontrol"
    "k8s.io/client-go/util/integer"
    "k8s.io/client-go/util/workqueue"
    podutil "k8s.io/kubernetes/pkg/api/v1/pod"
    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
    "k8s.io/kubernetes/pkg/controller"
    "k8s.io/kubernetes/pkg/controller/daemon/util"
    "k8s.io/kubernetes/pkg/features"
@@ -67,6 +68,9 @@ const (

    // StatusUpdateRetries limits the number of retries if sending a status update to API server fails.
    StatusUpdateRetries = 1

    // BackoffGCInterval is the time that has to pass before next iteration of backoff GC is run
    BackoffGCInterval = 1 * time.Minute
)

// Reasons for DaemonSet events
@@ -131,12 +135,21 @@ type DaemonSetsController struct {
    // is DaemonSet set that want to run pods but can't schedule in latest syncup cycle.
    suspendedDaemonPodsMutex sync.Mutex
    suspendedDaemonPods      map[string]sets.String

    failedPodsBackoff *flowcontrol.Backoff
}

// NewDaemonSetsController creates a new DaemonSetsController
func NewDaemonSetsController(daemonSetInformer appsinformers.DaemonSetInformer, historyInformer appsinformers.ControllerRevisionInformer, podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, kubeClient clientset.Interface) (*DaemonSetsController, error) {
func NewDaemonSetsController(
    daemonSetInformer appsinformers.DaemonSetInformer,
    historyInformer appsinformers.ControllerRevisionInformer,
    podInformer coreinformers.PodInformer,
    nodeInformer coreinformers.NodeInformer,
    kubeClient clientset.Interface,
    failedPodsBackoff *flowcontrol.Backoff,
) (*DaemonSetsController, error) {
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartLogging(glog.Infof)
    eventBroadcaster.StartLogging(klog.Infof)
    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})

    if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
@@ -163,13 +176,13 @@ func NewDaemonSetsController(daemonSetInformer appsinformers.DaemonSetInformer,
    daemonSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: func(obj interface{}) {
            ds := obj.(*apps.DaemonSet)
            glog.V(4).Infof("Adding daemon set %s", ds.Name)
            klog.V(4).Infof("Adding daemon set %s", ds.Name)
            dsc.enqueueDaemonSet(ds)
        },
        UpdateFunc: func(old, cur interface{}) {
            oldDS := old.(*apps.DaemonSet)
            curDS := cur.(*apps.DaemonSet)
            glog.V(4).Infof("Updating daemon set %s", oldDS.Name)
            klog.V(4).Infof("Updating daemon set %s", oldDS.Name)
            dsc.enqueueDaemonSet(curDS)
        },
        DeleteFunc: dsc.deleteDaemonset,
@@ -212,6 +225,9 @@ func NewDaemonSetsController(daemonSetInformer appsinformers.DaemonSetInformer,
    dsc.syncHandler = dsc.syncDaemonSet
    dsc.enqueueDaemonSet = dsc.enqueue
    dsc.enqueueDaemonSetRateLimited = dsc.enqueueRateLimited

    dsc.failedPodsBackoff = failedPodsBackoff

    return dsc, nil
}
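Side note on the new constructor: the failed-pod backoff policy is now injected by the caller rather than hard-coded in the controller. A minimal wiring sketch against the signature above; the fake client, zero resync interval, and backoff bounds are illustrative choices, not values taken from this commit:

package main

import (
    "time"

    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes/fake"
    "k8s.io/client-go/util/flowcontrol"
    "k8s.io/kubernetes/pkg/controller/daemon"
)

func main() {
    client := fake.NewSimpleClientset()
    factory := informers.NewSharedInformerFactory(client, 0)

    // Failed daemon pods are retried after 1s, doubling up to 15min.
    backoff := flowcontrol.NewBackOff(1*time.Second, 15*time.Minute)

    dsc, err := daemon.NewDaemonSetsController(
        factory.Apps().V1().DaemonSets(),
        factory.Apps().V1().ControllerRevisions(),
        factory.Core().V1().Pods(),
        factory.Core().V1().Nodes(),
        client,
        backoff,
    )
    if err != nil {
        panic(err)
    }

    stopCh := make(chan struct{})
    defer close(stopCh)
    factory.Start(stopCh)
    dsc.Run(2, stopCh) // blocks until stopCh closes; Run also drives the periodic backoff GC (BackoffGCInterval)
}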
@@ -241,7 +257,7 @@ func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) {
            return
        }
    }
    glog.V(4).Infof("Deleting daemon set %s", ds.Name)
    klog.V(4).Infof("Deleting daemon set %s", ds.Name)
    dsc.enqueueDaemonSet(ds)
}

@@ -250,8 +266,8 @@ func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) {
    defer utilruntime.HandleCrash()
    defer dsc.queue.ShutDown()

    glog.Infof("Starting daemon sets controller")
    defer glog.Infof("Shutting down daemon sets controller")
    klog.Infof("Starting daemon sets controller")
    defer klog.Infof("Shutting down daemon sets controller")

    if !controller.WaitForCacheSync("daemon sets", stopCh, dsc.podStoreSynced, dsc.nodeStoreSynced, dsc.historyStoreSynced, dsc.dsStoreSynced) {
        return
@@ -261,6 +277,8 @@ func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) {
        go wait.Until(dsc.runWorker, time.Second, stopCh)
    }

    go wait.Until(dsc.failedPodsBackoff.GC, BackoffGCInterval, stopCh)

    <-stopCh
}

@@ -345,7 +363,7 @@ func (dsc *DaemonSetsController) getDaemonSetsForHistory(history *apps.Controlle
    if len(daemonSets) > 1 {
        // ControllerRef will ensure we don't do anything crazy, but more than one
        // item in this list nevertheless constitutes user error.
        glog.V(4).Infof("User error! more than one DaemonSets is selecting ControllerRevision %s/%s with labels: %#v",
        klog.V(4).Infof("User error! more than one DaemonSets is selecting ControllerRevision %s/%s with labels: %#v",
            history.Namespace, history.Name, history.Labels)
    }
    return daemonSets
@@ -368,7 +386,7 @@ func (dsc *DaemonSetsController) addHistory(obj interface{}) {
        if ds == nil {
            return
        }
        glog.V(4).Infof("ControllerRevision %s added.", history.Name)
        klog.V(4).Infof("ControllerRevision %s added.", history.Name)
        return
    }

@@ -378,7 +396,7 @@ func (dsc *DaemonSetsController) addHistory(obj interface{}) {
    if len(daemonSets) == 0 {
        return
    }
    glog.V(4).Infof("Orphan ControllerRevision %s added.", history.Name)
    klog.V(4).Infof("Orphan ControllerRevision %s added.", history.Name)
    for _, ds := range daemonSets {
        dsc.enqueueDaemonSet(ds)
    }
@@ -411,7 +429,7 @@ func (dsc *DaemonSetsController) updateHistory(old, cur interface{}) {
        if ds == nil {
            return
        }
        glog.V(4).Infof("ControllerRevision %s updated.", curHistory.Name)
        klog.V(4).Infof("ControllerRevision %s updated.", curHistory.Name)
        dsc.enqueueDaemonSet(ds)
        return
    }
@@ -424,7 +442,7 @@ func (dsc *DaemonSetsController) updateHistory(old, cur interface{}) {
    if len(daemonSets) == 0 {
        return
    }
    glog.V(4).Infof("Orphan ControllerRevision %s updated.", curHistory.Name)
    klog.V(4).Infof("Orphan ControllerRevision %s updated.", curHistory.Name)
    for _, ds := range daemonSets {
        dsc.enqueueDaemonSet(ds)
    }
@@ -463,7 +481,7 @@ func (dsc *DaemonSetsController) deleteHistory(obj interface{}) {
    if ds == nil {
        return
    }
    glog.V(4).Infof("ControllerRevision %s deleted.", history.Name)
    klog.V(4).Infof("ControllerRevision %s deleted.", history.Name)
    dsc.enqueueDaemonSet(ds)
}

@@ -487,7 +505,7 @@ func (dsc *DaemonSetsController) addPod(obj interface{}) {
        if err != nil {
            return
        }
        glog.V(4).Infof("Pod %s added.", pod.Name)
        klog.V(4).Infof("Pod %s added.", pod.Name)
        dsc.expectations.CreationObserved(dsKey)
        dsc.enqueueDaemonSet(ds)
        return
@@ -501,7 +519,7 @@ func (dsc *DaemonSetsController) addPod(obj interface{}) {
    if len(dss) == 0 {
        return
    }
    glog.V(4).Infof("Orphan Pod %s added.", pod.Name)
    klog.V(4).Infof("Orphan Pod %s added.", pod.Name)
    for _, ds := range dss {
        dsc.enqueueDaemonSet(ds)
    }
@@ -535,7 +553,7 @@ func (dsc *DaemonSetsController) updatePod(old, cur interface{}) {
        if ds == nil {
            return
        }
        glog.V(4).Infof("Pod %s updated.", curPod.Name)
        klog.V(4).Infof("Pod %s updated.", curPod.Name)
        dsc.enqueueDaemonSet(ds)
        changedToReady := !podutil.IsPodReady(oldPod) && podutil.IsPodReady(curPod)
        // See https://github.com/kubernetes/kubernetes/pull/38076 for more details
@@ -553,7 +571,7 @@ func (dsc *DaemonSetsController) updatePod(old, cur interface{}) {
    if len(dss) == 0 {
        return
    }
    glog.V(4).Infof("Orphan Pod %s updated.", curPod.Name)
    klog.V(4).Infof("Orphan Pod %s updated.", curPod.Name)
    labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels)
    if labelChanged || controllerRefChanged {
        for _, ds := range dss {
@@ -584,10 +602,10 @@ func (dsc *DaemonSetsController) requeueSuspendedDaemonPods(node string) {
    dss := dsc.listSuspendedDaemonPods(node)
    for _, dsKey := range dss {
        if ns, name, err := cache.SplitMetaNamespaceKey(dsKey); err != nil {
            glog.Errorf("Failed to get DaemonSet's namespace and name from %s: %v", dsKey, err)
            klog.Errorf("Failed to get DaemonSet's namespace and name from %s: %v", dsKey, err)
            continue
        } else if ds, err := dsc.dsLister.DaemonSets(ns).Get(name); err != nil {
            glog.Errorf("Failed to get DaemonSet %s/%s: %v", ns, name, err)
            klog.Errorf("Failed to get DaemonSet %s/%s: %v", ns, name, err)
            continue
        } else {
            dsc.enqueueDaemonSetRateLimited(ds)
@@ -664,7 +682,7 @@ func (dsc *DaemonSetsController) deletePod(obj interface{}) {
        if err != nil {
            return
        }
        glog.V(4).Infof("Pod %s deleted.", pod.Name)
        klog.V(4).Infof("Pod %s deleted.", pod.Name)
        dsc.expectations.DeletionObserved(dsKey)
        dsc.enqueueDaemonSet(ds)
    }
@@ -673,7 +691,7 @@ func (dsc *DaemonSetsController) addNode(obj interface{}) {
    // TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too).
    dsList, err := dsc.dsLister.List(labels.Everything())
    if err != nil {
        glog.V(4).Infof("Error enqueueing daemon sets: %v", err)
        klog.V(4).Infof("Error enqueueing daemon sets: %v", err)
        return
    }
    node := obj.(*v1.Node)
@@ -717,20 +735,25 @@ func nodeInSameCondition(old []v1.NodeCondition, cur []v1.NodeCondition) bool {
    return len(c1map) == 0
}

func shouldIgnoreNodeUpdate(oldNode, curNode v1.Node) bool {
    if !nodeInSameCondition(oldNode.Status.Conditions, curNode.Status.Conditions) {
        return false
    }
    oldNode.ResourceVersion = curNode.ResourceVersion
    oldNode.Status.Conditions = curNode.Status.Conditions
    return apiequality.Semantic.DeepEqual(oldNode, curNode)
}

func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
    oldNode := old.(*v1.Node)
    curNode := cur.(*v1.Node)

    if reflect.DeepEqual(oldNode.Labels, curNode.Labels) &&
        reflect.DeepEqual(oldNode.Spec.Taints, curNode.Spec.Taints) &&
        nodeInSameCondition(oldNode.Status.Conditions, curNode.Status.Conditions) {
        // If node labels, taints and condition didn't change, we can ignore this update.
    if shouldIgnoreNodeUpdate(*oldNode, *curNode) {
        return
    }

    dsList, err := dsc.dsLister.List(labels.Everything())
    if err != nil {
        glog.V(4).Infof("Error listing daemon sets: %v", err)
        klog.V(4).Infof("Error listing daemon sets: %v", err)
        return
    }
    // TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too).
@@ -797,7 +820,7 @@ func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *apps.DaemonSet) (map[s
    for _, pod := range claimedPods {
        nodeName, err := util.GetTargetNodeName(pod)
        if err != nil {
            glog.Warningf("Failed to get target node name of Pod %v/%v in DaemonSet %v/%v",
            klog.Warningf("Failed to get target node name of Pod %v/%v in DaemonSet %v/%v",
                pod.Namespace, pod.Name, ds.Namespace, ds.Name)
            continue
        }
@@ -847,6 +870,7 @@ func (dsc *DaemonSetsController) podsShouldBeOnNode(

    daemonPods, exists := nodeToDaemonPods[node.Name]
    dsKey, _ := cache.MetaNamespaceKeyFunc(ds)

    dsc.removeSuspendedDaemonPods(node.Name, dsKey)

    switch {
@@ -865,12 +889,29 @@ func (dsc *DaemonSetsController) podsShouldBeOnNode(
                continue
            }
            if pod.Status.Phase == v1.PodFailed {
                failedPodsObserved++

                // This is a critical place where DS is often fighting with kubelet that rejects pods.
                // We need to avoid hot looping and backoff.
                backoffKey := failedPodsBackoffKey(ds, node.Name)

                now := dsc.failedPodsBackoff.Clock.Now()
                inBackoff := dsc.failedPodsBackoff.IsInBackOffSinceUpdate(backoffKey, now)
                if inBackoff {
                    delay := dsc.failedPodsBackoff.Get(backoffKey)
                    klog.V(4).Infof("Deleting failed pod %s/%s on node %s has been limited by backoff - %v remaining",
                        pod.Namespace, pod.Name, node.Name, delay)
                    dsc.enqueueDaemonSetAfter(ds, delay)
                    continue
                }

                dsc.failedPodsBackoff.Next(backoffKey, now)

                msg := fmt.Sprintf("Found failed daemon pod %s/%s on node %s, will try to kill it", pod.Namespace, pod.Name, node.Name)
                glog.V(2).Infof(msg)
                klog.V(2).Infof(msg)
                // Emit an event so that it's discoverable to users.
                dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, FailedDaemonPodReason, msg)
                podsToDelete = append(podsToDelete, pod.Name)
                failedPodsObserved++
            } else {
                daemonPodsRunning = append(daemonPodsRunning, pod)
            }
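The hot-loop guard above is the standard flowcontrol.Backoff pattern: check IsInBackOffSinceUpdate before acting, call Next after acting, and requeue with Get's remaining delay otherwise. A self-contained sketch of the same gate outside the controller; the key string and delays are illustrative:

package main

import (
    "fmt"
    "time"

    "k8s.io/client-go/util/flowcontrol"
)

func main() {
    // Initial delay 1s, doubling per recorded failure, capped at 8s.
    backoff := flowcontrol.NewBackOff(1*time.Second, 8*time.Second)
    key := "some-ds-uid/3/node-1" // shaped like failedPodsBackoffKey's output

    for i := 0; i < 4; i++ {
        now := backoff.Clock.Now()
        if backoff.IsInBackOffSinceUpdate(key, now) {
            // Still inside the window: skip the delete and requeue instead.
            fmt.Printf("in backoff, requeue after %v\n", backoff.Get(key))
        } else {
            backoff.Next(key, now) // record this attempt, widening the next window
            fmt.Println("deleting failed daemon pod")
        }
        time.Sleep(1500 * time.Millisecond)
    }
}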
@@ -962,7 +1003,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
    // error channel to communicate back failures. make the buffer big enough to avoid any blocking
    errCh := make(chan error, createDiff+deleteDiff)

    glog.V(4).Infof("Nodes needing daemon pods for daemon set %s: %+v, creating %d", ds.Name, nodesNeedingDaemonPods, createDiff)
    klog.V(4).Infof("Nodes needing daemon pods for daemon set %s: %+v, creating %d", ds.Name, nodesNeedingDaemonPods, createDiff)
    createWait := sync.WaitGroup{}
    // If the returned error is not nil we have a parse error.
    // The controller handles this via the hash.
@@ -970,7 +1011,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
    if err != nil {
        generation = nil
    }
    template := util.CreatePodTemplate(ds.Spec.Template, generation, hash)
    template := util.CreatePodTemplate(ds.Namespace, ds.Spec.Template, generation, hash)
    // Batch the pod creates. Batch sizes start at SlowStartInitialBatchSize
    // and double with each successful iteration in a kind of "slow start".
    // This handles attempts to start large numbers of pods that would
@@ -997,7 +1038,6 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
                // should be no conflicting node affinity with the target node.
                podTemplate.Spec.Affinity = util.ReplaceDaemonSetPodNodeNameNodeAffinity(
                    podTemplate.Spec.Affinity, nodesNeedingDaemonPods[ix])
                podTemplate.Spec.Tolerations = util.AppendNoScheduleTolerationIfNotExist(podTemplate.Spec.Tolerations)

                err = dsc.podControl.CreatePodsWithControllerRef(ds.Namespace, podTemplate,
                    ds, metav1.NewControllerRef(ds, controllerKind))
@@ -1017,7 +1057,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
                    return
                }
                if err != nil {
                    glog.V(2).Infof("Failed creation, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
                    klog.V(2).Infof("Failed creation, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
                    dsc.expectations.CreationObserved(dsKey)
                    errCh <- err
                    utilruntime.HandleError(err)
@@ -1028,7 +1068,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
        // any skipped pods that we never attempted to start shouldn't be expected.
        skippedPods := createDiff - batchSize
        if errorCount < len(errCh) && skippedPods > 0 {
            glog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for set %q/%q", skippedPods, ds.Namespace, ds.Name)
            klog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for set %q/%q", skippedPods, ds.Namespace, ds.Name)
            for i := 0; i < skippedPods; i++ {
                dsc.expectations.CreationObserved(dsKey)
            }
@@ -1038,14 +1078,14 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
        }
    }

    glog.V(4).Infof("Pods to delete for daemon set %s: %+v, deleting %d", ds.Name, podsToDelete, deleteDiff)
    klog.V(4).Infof("Pods to delete for daemon set %s: %+v, deleting %d", ds.Name, podsToDelete, deleteDiff)
    deleteWait := sync.WaitGroup{}
    deleteWait.Add(deleteDiff)
    for i := 0; i < deleteDiff; i++ {
        go func(ix int) {
            defer deleteWait.Done()
            if err := dsc.podControl.DeletePod(ds.Namespace, podsToDelete[ix], ds); err != nil {
                glog.V(2).Infof("Failed deletion, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
                klog.V(2).Infof("Failed deletion, decrementing expectations for set %q/%q", ds.Namespace, ds.Name)
                dsc.expectations.DeletionObserved(dsKey)
                errCh <- err
                utilruntime.HandleError(err)
@@ -1063,7 +1103,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
    return utilerrors.NewAggregate(errors)
}

func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable int) error {
func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable int, updateObservedGen bool) error {
    if int(ds.Status.DesiredNumberScheduled) == desiredNumberScheduled &&
        int(ds.Status.CurrentNumberScheduled) == currentNumberScheduled &&
        int(ds.Status.NumberMisscheduled) == numberMisscheduled &&
@@ -1079,7 +1119,9 @@ func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps.

    var updateErr, getErr error
    for i := 0; i < StatusUpdateRetries; i++ {
        toUpdate.Status.ObservedGeneration = ds.Generation
        if updateObservedGen {
            toUpdate.Status.ObservedGeneration = ds.Generation
        }
        toUpdate.Status.DesiredNumberScheduled = int32(desiredNumberScheduled)
        toUpdate.Status.CurrentNumberScheduled = int32(currentNumberScheduled)
        toUpdate.Status.NumberMisscheduled = int32(numberMisscheduled)
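The updateObservedGen flag exists because status.observedGeneration is the contract by which clients detect that a spec was processed: it should only advance when the controller has actually acted on that generation. A hedged sketch of the client-side staleness check this protects (not code from this commit):

package example

import apps "k8s.io/api/apps/v1"

// statusCurrent reports whether the DaemonSet's status was computed for its
// latest spec; after the change above, a status-only sync with unsatisfied
// expectations no longer makes a stale status look current.
func statusCurrent(ds *apps.DaemonSet) bool {
    return ds.Status.ObservedGeneration >= ds.Generation
}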
@@ -1102,8 +1144,8 @@ func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps.
    return updateErr
}

func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash string) error {
    glog.V(4).Infof("Updating daemon set status")
func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash string, updateObservedGen bool) error {
    klog.V(4).Infof("Updating daemon set status")
    nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
    if err != nil {
        return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
@@ -1155,7 +1197,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash
    }
    numberUnavailable := desiredNumberScheduled - numberAvailable

    err = storeDaemonSetStatus(dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable)
    err = storeDaemonSetStatus(dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable, updateObservedGen)
    if err != nil {
        return fmt.Errorf("error storing status for daemon set %#v: %v", ds, err)
    }
@@ -1166,7 +1208,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash
func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
    startTime := time.Now()
    defer func() {
        glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Since(startTime))
        klog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Since(startTime))
    }()

    namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -1175,7 +1217,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
    }
    ds, err := dsc.dsLister.DaemonSets(namespace).Get(name)
    if errors.IsNotFound(err) {
        glog.V(3).Infof("daemon set has been deleted %v", key)
        klog.V(3).Infof("daemon set has been deleted %v", key)
        dsc.expectations.DeleteExpectations(key)
        return nil
    }
@@ -1217,8 +1259,8 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
    hash := cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]

    if !dsc.expectations.SatisfiedExpectations(dsKey) {
        // Only update status.
        return dsc.updateDaemonSetStatus(ds, hash)
        // Only update status. Don't raise observedGeneration since controller didn't process object of that generation.
        return dsc.updateDaemonSetStatus(ds, hash, false)
    }

    err = dsc.manage(ds, hash)
@@ -1243,55 +1285,10 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
        return fmt.Errorf("failed to clean up revisions of DaemonSet: %v", err)
    }

    return dsc.updateDaemonSetStatus(ds, hash)
    return dsc.updateDaemonSetStatus(ds, hash, true)
}

func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *apps.DaemonSet) ([]algorithm.PredicateFailureReason, *schedulercache.NodeInfo, error) {
    // DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
    // Add infinite toleration for taint notReady:NoExecute here
    // to survive taint-based eviction enforced by NodeController
    // when node turns not ready.
    v1helper.AddOrUpdateTolerationInPod(newPod, &v1.Toleration{
        Key:      algorithm.TaintNodeNotReady,
        Operator: v1.TolerationOpExists,
        Effect:   v1.TaintEffectNoExecute,
    })

    // DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
    // Add infinite toleration for taint unreachable:NoExecute here
    // to survive taint-based eviction enforced by NodeController
    // when node turns unreachable.
    v1helper.AddOrUpdateTolerationInPod(newPod, &v1.Toleration{
        Key:      algorithm.TaintNodeUnreachable,
        Operator: v1.TolerationOpExists,
        Effect:   v1.TaintEffectNoExecute,
    })

    // According to TaintNodesByCondition, all DaemonSet pods should tolerate
    // MemoryPressure and DisPressure taints, and the critical pods should tolerate
    // OutOfDisk taint additional.
    v1helper.AddOrUpdateTolerationInPod(newPod, &v1.Toleration{
        Key:      algorithm.TaintNodeDiskPressure,
        Operator: v1.TolerationOpExists,
        Effect:   v1.TaintEffectNoSchedule,
    })

    v1helper.AddOrUpdateTolerationInPod(newPod, &v1.Toleration{
        Key:      algorithm.TaintNodeMemoryPressure,
        Operator: v1.TolerationOpExists,
        Effect:   v1.TaintEffectNoSchedule,
    })

    // TODO(#48843) OutOfDisk taints will be removed in 1.10
    if utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) &&
        kubelettypes.IsCriticalPod(newPod) {
        v1helper.AddOrUpdateTolerationInPod(newPod, &v1.Toleration{
            Key:      algorithm.TaintNodeOutOfDisk,
            Operator: v1.TolerationOpExists,
            Effect:   v1.TaintEffectNoSchedule,
        })
    }

    objects, err := dsc.podNodeIndex.ByIndex("nodeName", node.Name)
    if err != nil {
        return nil, nil, err
@@ -1343,7 +1340,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps.

    reasons, nodeInfo, err := dsc.simulate(newPod, node, ds)
    if err != nil {
        glog.Warningf("DaemonSet Predicates failed on node %s for ds '%s/%s' due to unexpected error: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, err)
        klog.Warningf("DaemonSet Predicates failed on node %s for ds '%s/%s' due to unexpected error: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, err)
        return false, false, false, err
    }

@@ -1352,7 +1349,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps.
    // into one result, e.g. selectedNode.
    var insufficientResourceErr error
    for _, r := range reasons {
        glog.V(4).Infof("DaemonSet Predicates failed on node %s for ds '%s/%s' for reason: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, r.GetReason())
        klog.V(4).Infof("DaemonSet Predicates failed on node %s for ds '%s/%s' for reason: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, r.GetReason())
        switch reason := r.(type) {
        case *predicates.InsufficientResourceError:
            insufficientResourceErr = reason
@@ -1395,10 +1392,10 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps.
        case
            predicates.ErrPodAffinityNotMatch,
            predicates.ErrServiceAffinityViolated:
            glog.Warningf("unexpected predicate failure reason: %s", reason.GetReason())
            klog.Warningf("unexpected predicate failure reason: %s", reason.GetReason())
            return false, false, false, fmt.Errorf("unexpected reason: DaemonSet Predicates should not return reason %s", reason.GetReason())
        default:
            glog.V(4).Infof("unknown predicate failure reason: %s", reason.GetReason())
            klog.V(4).Infof("unknown predicate failure reason: %s", reason.GetReason())
            wantToRun, shouldSchedule, shouldContinueRunning = false, false, false
            emitEvent = true
        }
@@ -1421,14 +1418,19 @@ func NewPod(ds *apps.DaemonSet, nodeName string) *v1.Pod {
    newPod := &v1.Pod{Spec: ds.Spec.Template.Spec, ObjectMeta: ds.Spec.Template.ObjectMeta}
    newPod.Namespace = ds.Namespace
    newPod.Spec.NodeName = nodeName

    // Added default tolerations for DaemonSet pods.
    util.AddOrUpdateDaemonPodTolerations(&newPod.Spec, kubelettypes.IsCriticalPod(newPod))

    return newPod
}

// nodeSelectionPredicates runs a set of predicates that select candidate nodes for the DaemonSet;
// checkNodeFitness runs a set of predicates that select candidate nodes for the DaemonSet;
// the predicates include:
//   - PodFitsHost: checks pod's NodeName against node
//   - PodMatchNodeSelector: checks pod's NodeSelector and NodeAffinity against node
func nodeSelectionPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
//   - PodToleratesNodeTaints: exclude tainted node unless pod has specific toleration
func checkNodeFitness(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
    var predicateFails []algorithm.PredicateFailureReason
    fit, reasons, err := predicates.PodFitsHost(pod, meta, nodeInfo)
    if err != nil {
@@ -1445,6 +1447,14 @@ func nodeSelectionPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, node
    if !fit {
        predicateFails = append(predicateFails, reasons...)
    }

    fit, reasons, err = predicates.PodToleratesNodeTaints(pod, nil, nodeInfo)
    if err != nil {
        return false, predicateFails, err
    }
    if !fit {
        predicateFails = append(predicateFails, reasons...)
    }
    return len(predicateFails) == 0, predicateFails, nil
}

@@ -1453,9 +1463,9 @@ func nodeSelectionPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, node
func Predicates(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
    var predicateFails []algorithm.PredicateFailureReason

    // If ScheduleDaemonSetPods is enabled, only check nodeSelector and nodeAffinity.
    // If ScheduleDaemonSetPods is enabled, only check nodeSelector, nodeAffinity and toleration/taint match.
    if utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) {
        fit, reasons, err := nodeSelectionPredicates(pod, nil, nodeInfo)
        fit, reasons, err := checkNodeFitness(pod, nil, nodeInfo)
        if err != nil {
            return false, predicateFails, err
        }
@@ -1466,8 +1476,7 @@ func Predicates(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo) (bool, []algorit
        return len(predicateFails) == 0, predicateFails, nil
    }

    critical := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) &&
        kubelettypes.IsCriticalPod(pod)
    critical := kubelettypes.IsCriticalPod(pod)

    fit, reasons, err := predicates.PodToleratesNodeTaints(pod, nil, nodeInfo)
    if err != nil {
@@ -1493,19 +1502,6 @@ func Predicates(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo) (bool, []algorit
    return len(predicateFails) == 0, predicateFails, nil
}

// byCreationTimestamp sorts a list by creation timestamp, using their names as a tie breaker.
type byCreationTimestamp []*apps.DaemonSet

func (o byCreationTimestamp) Len() int      { return len(o) }
func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

func (o byCreationTimestamp) Less(i, j int) bool {
    if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
        return o[i].Name < o[j].Name
    }
    return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
}

type podByCreationTimestampAndPhase []*v1.Pod

func (o podByCreationTimestampAndPhase) Len() int { return len(o) }
@@ -1535,3 +1531,7 @@ func isControlledByDaemonSet(p *v1.Pod, uuid types.UID) bool {
    }
    return false
}

func failedPodsBackoffKey(ds *apps.DaemonSet, nodeName string) string {
    return fmt.Sprintf("%s/%d/%s", ds.UID, ds.Status.ObservedGeneration, nodeName)
}
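Note the key shape in failedPodsBackoffKey: the UID scopes the backoff to one DaemonSet instance, and including status.observedGeneration means each observed spec generation starts from a clean backoff slate per node. Illustrative check (mirrors the helper above, shown only to make the generation scoping visible):

package example

import (
    "fmt"

    apps "k8s.io/api/apps/v1"
)

// backoffKey mirrors failedPodsBackoffKey in the vendored controller.
func backoffKey(ds *apps.DaemonSet, nodeName string) string {
    return fmt.Sprintf("%s/%d/%s", ds.UID, ds.Status.ObservedGeneration, nodeName)
}

func demo() {
    ds := &apps.DaemonSet{}
    ds.UID = "2d5c8a0e"
    ds.Status.ObservedGeneration = 3
    fmt.Println(backoffKey(ds, "node-1")) // "2d5c8a0e/3/node-1"
    ds.Status.ObservedGeneration = 4      // a rollout was observed
    fmt.Println(backoffKey(ds, "node-1")) // new key, fresh backoff entry
}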
vendor/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller_test.go (generated, vendored): 2881 changes
File diff suppressed because it is too large
vendor/k8s.io/kubernetes/pkg/controller/daemon/main_test.go (generated, vendored, new file): 29 changes
@@ -0,0 +1,29 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package daemon

import (
    "testing"

    utilfeature "k8s.io/apiserver/pkg/util/feature"
    utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
    _ "k8s.io/kubernetes/pkg/features"
)

func TestMain(m *testing.M) {
    utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run)
}
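VerifyFeatureGatesUnchanged wraps m.Run and fails the test binary if any test mutates DefaultFeatureGate without restoring it. A hedged usage sketch of how a test in this package would scope a gate flip (the chosen gate is just an example, and the helper's exact signature may vary by release):

package daemon

import (
    "testing"

    utilfeature "k8s.io/apiserver/pkg/util/feature"
    utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
    "k8s.io/kubernetes/pkg/features"
)

func TestSomethingWithGate(t *testing.T) {
    // Enable the gate for this test only; the returned func restores the
    // previous value, keeping TestMain's verification happy.
    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, true)()
    // ... exercise code that consults the gate ...
}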
vendor/k8s.io/kubernetes/pkg/controller/daemon/update.go (generated, vendored): 40 changes
@@ -19,9 +19,10 @@ package daemon
import (
    "bytes"
    "fmt"
    "reflect"
    "sort"

    "github.com/golang/glog"
    "k8s.io/klog"

    apps "k8s.io/api/apps/v1"
    "k8s.io/api/core/v1"
@@ -31,7 +32,6 @@ import (
    "k8s.io/apimachinery/pkg/runtime"
    intstrutil "k8s.io/apimachinery/pkg/util/intstr"
    "k8s.io/apimachinery/pkg/util/json"
    "k8s.io/apimachinery/pkg/util/rand"
    podutil "k8s.io/kubernetes/pkg/api/v1/pod"
    "k8s.io/kubernetes/pkg/controller"
    "k8s.io/kubernetes/pkg/controller/daemon/util"
@@ -55,23 +55,23 @@ func (dsc *DaemonSetsController) rollingUpdate(ds *apps.DaemonSet, hash string)

    // for oldPods delete all not running pods
    var oldPodsToDelete []string
    glog.V(4).Infof("Marking all unavailable old pods for deletion")
    klog.V(4).Infof("Marking all unavailable old pods for deletion")
    for _, pod := range oldUnavailablePods {
        // Skip terminating pods. We won't delete them again
        if pod.DeletionTimestamp != nil {
            continue
        }
        glog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
        klog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
        oldPodsToDelete = append(oldPodsToDelete, pod.Name)
    }

    glog.V(4).Infof("Marking old pods for deletion")
    klog.V(4).Infof("Marking old pods for deletion")
    for _, pod := range oldAvailablePods {
        if numUnavailable >= maxUnavailable {
            glog.V(4).Infof("Number of unavailable DaemonSet pods: %d, is equal to or exceeds allowed maximum: %d", numUnavailable, maxUnavailable)
            klog.V(4).Infof("Number of unavailable DaemonSet pods: %d, is equal to or exceeds allowed maximum: %d", numUnavailable, maxUnavailable)
            break
        }
        glog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
        klog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name)
        oldPodsToDelete = append(oldPodsToDelete, pod.Name)
        numUnavailable++
    }
@@ -316,8 +316,8 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*
    if err != nil {
        return nil, err
    }
    hash := fmt.Sprint(controller.ComputeHash(&ds.Spec.Template, ds.Status.CollisionCount))
    name := ds.Name + "-" + rand.SafeEncodeString(hash)
    hash := controller.ComputeHash(&ds.Spec.Template, ds.Status.CollisionCount)
    name := ds.Name + "-" + hash
    history := &apps.ControllerRevision{
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
@@ -331,27 +331,31 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*
    }

    history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Create(history)
    if errors.IsAlreadyExists(err) {
    if outerErr := err; errors.IsAlreadyExists(outerErr) {
        // TODO: Is it okay to get from historyLister?
        existedHistory, getErr := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Get(name, metav1.GetOptions{})
        if getErr != nil {
            return nil, getErr
        }
        // Check if we already created it
        done, err := Match(ds, existedHistory)
        if err != nil {
            return nil, err
        done, matchErr := Match(ds, existedHistory)
        if matchErr != nil {
            return nil, matchErr
        }
        if done {
            return existedHistory, nil
        }

        // Handle name collisions between different history
        // TODO: Is it okay to get from dsLister?
        // Get the latest DaemonSet from the API server to make sure collision count is only increased when necessary
        currDS, getErr := dsc.kubeClient.ExtensionsV1beta1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
        if getErr != nil {
            return nil, getErr
        }
        // If the collision count used to compute hash was in fact stale, there's no need to bump collision count; retry again
        if !reflect.DeepEqual(currDS.Status.CollisionCount, ds.Status.CollisionCount) {
            return nil, fmt.Errorf("found a stale collision count (%d, expected %d) of DaemonSet %q while processing; will retry until it is updated", ds.Status.CollisionCount, currDS.Status.CollisionCount, ds.Name)
        }
        if currDS.Status.CollisionCount == nil {
            currDS.Status.CollisionCount = new(int32)
        }
@@ -360,8 +364,8 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*
        if updateErr != nil {
            return nil, updateErr
        }
        glog.V(2).Infof("Found a hash collision for DaemonSet %q - bumping collisionCount to %d to resolve it", ds.Name, *currDS.Status.CollisionCount)
        return nil, err
        klog.V(2).Infof("Found a hash collision for DaemonSet %q - bumping collisionCount to %d to resolve it", ds.Name, *currDS.Status.CollisionCount)
        return nil, outerErr
    }
    return history, err
}
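Two things happen in this hunk. First, the call site drops fmt.Sprint and rand.SafeEncodeString because the hash helper now hands back a string usable directly in the revision name. Second, the Create error is pinned in outerErr so the inner Get/Match error variables cannot shadow it. A self-contained sketch of the collision-salted hashing idea (illustrative only, not the vendored controller.ComputeHash):

package main

import (
    "fmt"
    "hash/fnv"
)

// templateHash salts an FNV-1a hash with an optional collision count; a
// bumped count yields a different name for the same template, which is how a
// name collision with stale ControllerRevision history is escaped.
func templateHash(spec string, collisionCount *int32) string {
    h := fnv.New32a()
    fmt.Fprint(h, spec)
    if collisionCount != nil {
        fmt.Fprintf(h, "%d", *collisionCount)
    }
    return fmt.Sprint(h.Sum32())
}

func main() {
    count := int32(1)
    fmt.Println(templateHash("pod-template", nil))    // some stable value
    fmt.Println(templateHash("pod-template", &count)) // differs once count is bumped
}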
@@ -389,7 +393,7 @@ func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *apps.DaemonSet, nodeToD
}

func (dsc *DaemonSetsController) getUnavailableNumbers(ds *apps.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) (int, int, error) {
    glog.V(4).Infof("Getting unavailable numbers")
    klog.V(4).Infof("Getting unavailable numbers")
    // TODO: get nodeList once in syncDaemonSet and pass it to other functions
    nodeList, err := dsc.nodeLister.List(labels.Everything())
    if err != nil {
@@ -428,7 +432,7 @@ func (dsc *DaemonSetsController) getUnavailableNumbers(ds *apps.DaemonSet, nodeT
    if err != nil {
        return -1, -1, fmt.Errorf("Invalid value for MaxUnavailable: %v", err)
    }
    glog.V(4).Infof(" DaemonSet %s/%s, maxUnavailable: %d, numUnavailable: %d", ds.Namespace, ds.Name, maxUnavailable, numUnavailable)
    klog.V(4).Infof(" DaemonSet %s/%s, maxUnavailable: %d, numUnavailable: %d", ds.Namespace, ds.Name, maxUnavailable, numUnavailable)
    return maxUnavailable, numUnavailable, nil
}
vendor/k8s.io/kubernetes/pkg/controller/daemon/util/BUILD (generated, vendored): 29 changes
@@ -15,13 +15,12 @@ go_library(
        "//pkg/apis/core/v1/helper:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubelet/types:go_default_library",
        "//pkg/scheduler/algorithm:go_default_library",
        "//vendor/k8s.io/api/apps/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//pkg/scheduler/api:go_default_library",
        "//staging/src/k8s.io/api/apps/v1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
    ],
)

@@ -40,16 +39,20 @@ filegroup(

go_test(
    name = "go_default_test",
    srcs = ["daemonset_util_test.go"],
    srcs = [
        "daemonset_util_test.go",
        "main_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//pkg/api/testapi:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/scheduler/algorithm:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//pkg/scheduler/api:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
    ],
)
vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util.go (generated, vendored): 100 changes
@@ -23,14 +23,13 @@ import (
    apps "k8s.io/api/apps/v1"
    "k8s.io/api/core/v1"
    extensions "k8s.io/api/extensions/v1beta1"
    apiequality "k8s.io/apimachinery/pkg/api/equality"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    podutil "k8s.io/kubernetes/pkg/api/v1/pod"
    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
    "k8s.io/kubernetes/pkg/features"
    kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
    "k8s.io/kubernetes/pkg/scheduler/algorithm"
    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
)

// GetTemplateGeneration gets the template generation associated with a v1.DaemonSet by extracting it from the
@@ -49,17 +48,14 @@ func GetTemplateGeneration(ds *apps.DaemonSet) (*int64, error) {
    return &generation, nil
}

// CreatePodTemplate returns copy of provided template with additional
// label which contains templateGeneration (for backward compatibility),
// hash of provided template and sets default daemon tolerations.
func CreatePodTemplate(template v1.PodTemplateSpec, generation *int64, hash string) v1.PodTemplateSpec {
    newTemplate := *template.DeepCopy()
// AddOrUpdateDaemonPodTolerations apply necessary tolerations to DeamonSet Pods, e.g. node.kubernetes.io/not-ready:NoExecute.
func AddOrUpdateDaemonPodTolerations(spec *v1.PodSpec, isCritical bool) {
    // DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
    // Add infinite toleration for taint notReady:NoExecute here
    // to survive taint-based eviction enforced by NodeController
    // when node turns not ready.
    v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
        Key:      algorithm.TaintNodeNotReady,
    v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
        Key:      schedulerapi.TaintNodeNotReady,
        Operator: v1.TolerationOpExists,
        Effect:   v1.TaintEffectNoExecute,
    })
@@ -68,36 +64,67 @@ func CreatePodTemplate(template v1.PodTemplateSpec, generation *int64, hash stri
    // Add infinite toleration for taint unreachable:NoExecute here
    // to survive taint-based eviction enforced by NodeController
    // when node turns unreachable.
    v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
        Key:      algorithm.TaintNodeUnreachable,
    v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
        Key:      schedulerapi.TaintNodeUnreachable,
        Operator: v1.TolerationOpExists,
        Effect:   v1.TaintEffectNoExecute,
    })

    // According to TaintNodesByCondition feature, all DaemonSet pods should tolerate
    // MemoryPressure and DisPressure taints, and the critical pods should tolerate
    // OutOfDisk taint.
    v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
        Key:      algorithm.TaintNodeDiskPressure,
    // MemoryPressure, DisPressure, Unschedulable and NetworkUnavailable taints,
    // and the critical pods should tolerate OutOfDisk taint.
    v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
        Key:      schedulerapi.TaintNodeDiskPressure,
        Operator: v1.TolerationOpExists,
        Effect:   v1.TaintEffectNoSchedule,
    })

    v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
        Key:      algorithm.TaintNodeMemoryPressure,
    v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
        Key:      schedulerapi.TaintNodeMemoryPressure,
        Operator: v1.TolerationOpExists,
        Effect:   v1.TaintEffectNoSchedule,
    })

    v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
        Key:      schedulerapi.TaintNodeUnschedulable,
        Operator: v1.TolerationOpExists,
        Effect:   v1.TaintEffectNoSchedule,
    })

    if spec.HostNetwork {
        v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
            Key:      schedulerapi.TaintNodeNetworkUnavailable,
            Operator: v1.TolerationOpExists,
            Effect:   v1.TaintEffectNoSchedule,
        })
    }

    // TODO(#48843) OutOfDisk taints will be removed in 1.10
    if utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) &&
        kubelettypes.IsCritical(newTemplate.Namespace, newTemplate.Annotations) {
        v1helper.AddOrUpdateTolerationInPodSpec(&newTemplate.Spec, &v1.Toleration{
            Key:      algorithm.TaintNodeOutOfDisk,
    if isCritical {
        v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
            Key:      schedulerapi.TaintNodeOutOfDisk,
            Operator: v1.TolerationOpExists,
            Effect:   v1.TaintEffectNoExecute,
        })
        v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
            Key:      schedulerapi.TaintNodeOutOfDisk,
            Operator: v1.TolerationOpExists,
            Effect:   v1.TaintEffectNoSchedule,
        })
    }
}

// CreatePodTemplate returns copy of provided template with additional
// label which contains templateGeneration (for backward compatibility),
// hash of provided template and sets default daemon tolerations.
func CreatePodTemplate(ns string, template v1.PodTemplateSpec, generation *int64, hash string) v1.PodTemplateSpec {
    newTemplate := *template.DeepCopy()

    // TODO(k82cn): when removing CritialPod feature, also remove 'ns' parameter.
    isCritical := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) &&
        kubelettypes.IsCritical(ns, newTemplate.Annotations)

    AddOrUpdateDaemonPodTolerations(&newTemplate.Spec, isCritical)

    if newTemplate.ObjectMeta.Labels == nil {
        newTemplate.ObjectMeta.Labels = make(map[string]string)
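CreatePodTemplate grew a namespace parameter because the critical-pod check (kubelettypes.IsCritical) is namespace-scoped, while the toleration logic itself moved into the reusable AddOrUpdateDaemonPodTolerations. A usage sketch against the new signature; the namespace, generation, and hash values are placeholders:

package example

import (
    "fmt"

    v1 "k8s.io/api/core/v1"

    "k8s.io/kubernetes/pkg/controller/daemon/util"
)

func demo() {
    tmpl := v1.PodTemplateSpec{}
    gen := int64(3)

    // The namespace is consulted only for the critical-pod special case; for
    // an ordinary namespace it has no effect on the resulting tolerations.
    out := util.CreatePodTemplate("kube-system", tmpl, &gen, "6b8c7f9d")
    fmt.Println(out.Labels, len(out.Spec.Tolerations))
}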
@ -140,7 +167,7 @@ func SplitByAvailablePods(minReadySeconds int32, pods []*v1.Pod) ([]*v1.Pod, []*
|
||||
// Note that this function assumes that no NodeAffinity conflicts with the selected nodeName.
|
||||
func ReplaceDaemonSetPodNodeNameNodeAffinity(affinity *v1.Affinity, nodename string) *v1.Affinity {
|
||||
nodeSelReq := v1.NodeSelectorRequirement{
|
||||
Key: algorithm.NodeFieldSelectorKeyNodeName,
|
||||
Key: schedulerapi.NodeFieldSelectorKeyNodeName,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{nodename},
|
||||
}
@@ -185,31 +212,6 @@ func ReplaceDaemonSetPodNodeNameNodeAffinity(affinity *v1.Affinity, nodename str
	return affinity
}
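A sketch of the effect (assumes NodeFieldSelectorKeyNodeName resolves to the "metadata.name" field selector key):

	// Pin a DaemonSet pod to one node via a matchFields node affinity term.
	aff := ReplaceDaemonSetPodNodeNameNodeAffinity(nil, "node-1")
	// aff's RequiredDuringSchedulingIgnoredDuringExecution now contains a single term:
	//   matchFields: Key "metadata.name", Operator In, Values ["node-1"]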

// AppendNoScheduleTolerationIfNotExist appends the unschedulable toleration to `.spec`
// if it does not already exist; otherwise, it leaves `.spec.tolerations` unchanged.
func AppendNoScheduleTolerationIfNotExist(tolerations []v1.Toleration) []v1.Toleration {
	unschedulableToleration := v1.Toleration{
		Key:      algorithm.TaintNodeUnschedulable,
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoSchedule,
	}

	unschedulableTaintExist := false

	for _, t := range tolerations {
		if apiequality.Semantic.DeepEqual(t, unschedulableToleration) {
			unschedulableTaintExist = true
			break
		}
	}

	if !unschedulableTaintExist {
		tolerations = append(tolerations, unschedulableToleration)
	}

	return tolerations
}
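The helper above is deleted by this change because AddOrUpdateDaemonPodTolerations now installs the unschedulable toleration itself. Its append-if-absent pattern, shown in isolation (the taint key below is hypothetical):

	want := v1.Toleration{Key: "example.com/some-taint", Operator: v1.TolerationOpExists, Effect: v1.TaintEffectNoSchedule} // hypothetical key
	exists := false
	for _, t := range tolerations {
		if apiequality.Semantic.DeepEqual(t, want) { // semantic equality, not ==, so empty/nil fields compare sanely
			exists = true
			break
		}
	}
	if !exists {
		tolerations = append(tolerations, want)
	}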

// GetTargetNodeName gets the target node name of DaemonSet pods. If `.spec.NodeName` is not empty,
// it returns `.spec.NodeName`; otherwise, it retrieves the node name of pending pods from NodeAffinity.
// It returns an error if the node name cannot be retrieved from either `.spec.NodeName` or NodeAffinity.
@@ -234,11 +236,11 @@ func GetTargetNodeName(pod *v1.Pod) (string, error) {

	for _, term := range terms {
		for _, exp := range term.MatchFields {
			if exp.Key == algorithm.NodeFieldSelectorKeyNodeName &&
			if exp.Key == schedulerapi.NodeFieldSelectorKeyNodeName &&
				exp.Operator == v1.NodeSelectorOpIn {
				if len(exp.Values) != 1 {
					return "", fmt.Errorf("the matchFields value of '%s' is not unique for pod %s/%s",
						algorithm.NodeFieldSelectorKeyNodeName, pod.Namespace, pod.Name)
						schedulerapi.NodeFieldSelectorKeyNodeName, pod.Namespace, pod.Name)
				}

				return exp.Values[0], nil
30
vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util_test.go
generated
vendored
@@ -28,7 +28,7 @@ import (
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/pkg/features"
	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
	"k8s.io/kubernetes/pkg/scheduler/algorithm"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
)
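The import swap above means the tests now pull node-related constants from the scheduler API package rather than the algorithm package; e.g. (sketch):

	key := schedulerapi.NodeFieldSelectorKeyNodeName // still the "metadata.name" field selector key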

func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
@@ -154,7 +154,7 @@ func TestCreatePodTemplate(t *testing.T) {
	}
	for _, test := range tests {
		podTemplateSpec := v1.PodTemplateSpec{}
		newPodTemplate := CreatePodTemplate(podTemplateSpec, test.templateGeneration, test.hash)
		newPodTemplate := CreatePodTemplate("", podTemplateSpec, test.templateGeneration, test.hash)
		val, exists := newPodTemplate.ObjectMeta.Labels[extensions.DaemonSetTemplateGenerationKey]
		if !exists || val != fmt.Sprint(*test.templateGeneration) {
			t.Errorf("Expected podTemplateSpec to have generation label value: %d, got: %s", *test.templateGeneration, val)
@@ -190,7 +190,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
						{
							MatchFields: []v1.NodeSelectorRequirement{
								{
									Key:      algorithm.NodeFieldSelectorKeyNodeName,
									Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{"host_1"},
								},
@@ -227,7 +227,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
						{
							MatchFields: []v1.NodeSelectorRequirement{
								{
									Key:      algorithm.NodeFieldSelectorKeyNodeName,
									Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{"host_1"},
								},
@@ -277,7 +277,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
						{
							MatchFields: []v1.NodeSelectorRequirement{
								{
									Key:      algorithm.NodeFieldSelectorKeyNodeName,
									Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{"host_1"},
								},
@@ -296,7 +296,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
						{
							MatchFields: []v1.NodeSelectorRequirement{
								{
									Key:      algorithm.NodeFieldSelectorKeyNodeName,
									Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{"host_1", "host_2"},
								},
@@ -314,7 +314,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
						{
							MatchFields: []v1.NodeSelectorRequirement{
								{
									Key:      algorithm.NodeFieldSelectorKeyNodeName,
									Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{"host_1"},
								},
@@ -335,7 +335,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
						{
							MatchFields: []v1.NodeSelectorRequirement{
								{
									Key:      algorithm.NodeFieldSelectorKeyNodeName,
									Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{"host_1"},
								},
@@ -363,7 +363,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
						{
							MatchFields: []v1.NodeSelectorRequirement{
								{
									Key:      algorithm.NodeFieldSelectorKeyNodeName,
									Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{"host_2"},
								},
@@ -381,7 +381,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
						{
							MatchFields: []v1.NodeSelectorRequirement{
								{
									Key:      algorithm.NodeFieldSelectorKeyNodeName,
									Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{"host_1"},
								},
@@ -400,7 +400,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
						{
							MatchFields: []v1.NodeSelectorRequirement{
								{
									Key:      algorithm.NodeFieldSelectorKeyNodeName,
									Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
									Operator: v1.NodeSelectorOpNotIn,
									Values:   []string{"host_2"},
								},
@@ -418,7 +418,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
						{
							MatchFields: []v1.NodeSelectorRequirement{
								{
									Key:      algorithm.NodeFieldSelectorKeyNodeName,
									Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{"host_1"},
								},
@@ -458,7 +458,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
						{
							MatchFields: []v1.NodeSelectorRequirement{
								{
									Key:      algorithm.NodeFieldSelectorKeyNodeName,
									Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{"host_1"},
								},
@@ -529,7 +529,7 @@ func TestGetTargetNodeName(t *testing.T) {
						{
							MatchFields: []v1.NodeSelectorRequirement{
								{
									Key:      algorithm.NodeFieldSelectorKeyNodeName,
									Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{"node-1"},
								},
@@ -557,7 +557,7 @@ func TestGetTargetNodeName(t *testing.T) {
						{
							MatchFields: []v1.NodeSelectorRequirement{
								{
									Key:      algorithm.NodeFieldSelectorKeyNodeName,
									Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{"node-1", "node-2"},
								},
29
vendor/k8s.io/kubernetes/pkg/controller/daemon/util/main_test.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
	_ "k8s.io/kubernetes/pkg/features"
)

func TestMain(m *testing.M) {
	utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run)
}
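With TestMain verifying that gates are left unchanged, individual tests in this package can flip a gate through the companion helper in the same testing package; a sketch, assuming the 1.12-era SetFeatureGateDuringTest signature that returns a restore func:

	func TestSomethingWithGate(t *testing.T) { // hypothetical test
		defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate,
			"ExperimentalCriticalPodAnnotation", true)()
		// test body runs with the gate enabled; the deferred restore keeps
		// VerifyFeatureGatesUnchanged in TestMain satisfied.
	}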