vendor update for CSI 0.3.0

vendor/k8s.io/kubernetes/pkg/controller/daemon/BUILD (generated, vendored): 5 changed lines
@@ -23,7 +23,7 @@ go_library(
"//pkg/kubelet/types:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/schedulercache:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/util/labels:go_default_library",
"//pkg/util/metrics:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
@@ -33,6 +33,7 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library",
@@ -68,6 +69,8 @@ go_test(
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/securitycontext:go_default_library",
vendor/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller.go (generated, vendored): 300 changed lines
@@ -23,11 +23,14 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
@@ -53,10 +56,8 @@ import (
|
||||
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
|
||||
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
"k8s.io/kubernetes/pkg/util/metrics"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -112,6 +113,8 @@ type DaemonSetsController struct {
|
||||
historyStoreSynced cache.InformerSynced
|
||||
// podLister can list/get pods from the shared informer's store
|
||||
podLister corelisters.PodLister
|
||||
// podNodeIndex indexes pods by their nodeName
|
||||
podNodeIndex cache.Indexer
|
||||
// podStoreSynced returns true if the pod store has been synced at least once.
|
||||
// Added as a member to the struct to allow injection for testing.
|
||||
podStoreSynced cache.InformerSynced
|
||||
@@ -134,8 +137,7 @@ type DaemonSetsController struct {
|
||||
func NewDaemonSetsController(daemonSetInformer appsinformers.DaemonSetInformer, historyInformer appsinformers.ControllerRevisionInformer, podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, kubeClient clientset.Interface) (*DaemonSetsController, error) {
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
// TODO: remove the wrapper when every clients have moved to use the clientset.
|
||||
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
|
||||
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
|
||||
|
||||
if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
|
||||
if err := metrics.RegisterMetricAndTrackRateLimiterUsage("daemon_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()); err != nil {
|
||||
@@ -191,6 +193,12 @@ func NewDaemonSetsController(daemonSetInformer appsinformers.DaemonSetInformer,
|
||||
DeleteFunc: dsc.deletePod,
|
||||
})
|
||||
dsc.podLister = podInformer.Lister()
|
||||
|
||||
// This custom indexer will index pods based on their NodeName, which will decrease the number of pods we need to get in each simulate() call.
|
||||
podInformer.Informer().GetIndexer().AddIndexers(cache.Indexers{
|
||||
"nodeName": indexByPodNodeName,
|
||||
})
|
||||
dsc.podNodeIndex = podInformer.Informer().GetIndexer()
|
||||
dsc.podStoreSynced = podInformer.Informer().HasSynced
|
||||
|
||||
nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
@@ -207,6 +215,18 @@ func NewDaemonSetsController(daemonSetInformer appsinformers.DaemonSetInformer,
|
||||
return dsc, nil
|
||||
}
|
||||
|
||||
func indexByPodNodeName(obj interface{}) ([]string, error) {
|
||||
pod, ok := obj.(*v1.Pod)
|
||||
if !ok {
|
||||
return []string{}, nil
|
||||
}
|
||||
// We are only interested in active pods with nodeName set
|
||||
if len(pod.Spec.NodeName) == 0 || pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
|
||||
return []string{}, nil
|
||||
}
|
||||
return []string{pod.Spec.NodeName}, nil
|
||||
}
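The two hunks above register a custom "nodeName" index on the shared pod informer (indexByPodNodeName) and keep a handle to it as dsc.podNodeIndex, so simulate() can later fetch only the pods scheduled to a single node instead of listing every pod in the cluster. A minimal, self-contained sketch of the same client-go indexer pattern, with a standalone indexer rather than the informer-backed one and hypothetical pod and node names (p1, p2, node-a, node-b), might look like:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

// indexByNodeName mirrors indexByPodNodeName above: only pods with a node name
// get an index entry; everything else is left out of the index.
func indexByNodeName(obj interface{}) ([]string, error) {
	pod, ok := obj.(*v1.Pod)
	if !ok || len(pod.Spec.NodeName) == 0 {
		return []string{}, nil
	}
	return []string{pod.Spec.NodeName}, nil
}

func main() {
	// Register the index up front; an informer's GetIndexer().AddIndexers takes the same map.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		"nodeName": indexByNodeName,
	})

	// Hypothetical pods, for illustration only.
	indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"}, Spec: v1.PodSpec{NodeName: "node-a"}})
	indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p2", Namespace: "default"}, Spec: v1.PodSpec{NodeName: "node-b"}})

	// Query only the pods on node-a, the same call simulate() now makes.
	objs, err := indexer.ByIndex("nodeName", "node-a")
	fmt.Println(len(objs), err) // 1 <nil>
}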
|
||||
|
||||
func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) {
|
||||
ds, ok := obj.(*apps.DaemonSet)
|
||||
if !ok {
|
||||
@@ -763,7 +783,7 @@ func (dsc *DaemonSetsController) getDaemonPods(ds *apps.DaemonSet) ([]*v1.Pod, e
|
||||
return cm.ClaimPods(pods)
|
||||
}
|
||||
|
||||
// getNodesToDaemonPods returns a map from nodes to daemon pods (corresponding to ds) running on the nodes.
|
||||
// getNodesToDaemonPods returns a map from nodes to daemon pods (corresponding to ds) created for the nodes.
|
||||
// This also reconciles ControllerRef by adopting/orphaning.
|
||||
// Note that returned Pods are pointers to objects in the cache.
|
||||
// If you want to modify one, you need to deep-copy it first.
|
||||
@@ -775,9 +795,16 @@ func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *apps.DaemonSet) (map[s
|
||||
// Group Pods by Node name.
|
||||
nodeToDaemonPods := make(map[string][]*v1.Pod)
|
||||
for _, pod := range claimedPods {
|
||||
nodeName := pod.Spec.NodeName
|
||||
nodeName, err := util.GetTargetNodeName(pod)
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to get target node name of Pod %v/%v in DaemonSet %v/%v",
|
||||
pod.Namespace, pod.Name, ds.Namespace, ds.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
nodeToDaemonPods[nodeName] = append(nodeToDaemonPods[nodeName], pod)
|
||||
}
|
||||
|
||||
return nodeToDaemonPods, nil
|
||||
}
|
||||
|
||||
@@ -802,12 +829,76 @@ func (dsc *DaemonSetsController) resolveControllerRef(namespace string, controll
|
||||
return ds
|
||||
}
|
||||
|
||||
// podsShouldBeOnNode figures out the DaemonSet pods to be created and deleted on the given node:
//   - nodesNeedingDaemonPods: the pods that need to start on the node
//   - podsToDelete: the pods that need to be deleted from the node
//   - failedPodsObserved: the number of failed pods on the node
//   - err: unexpected error
|
||||
func (dsc *DaemonSetsController) podsShouldBeOnNode(
|
||||
node *v1.Node,
|
||||
nodeToDaemonPods map[string][]*v1.Pod,
|
||||
ds *apps.DaemonSet,
|
||||
) (nodesNeedingDaemonPods, podsToDelete []string, failedPodsObserved int, err error) {
|
||||
|
||||
wantToRun, shouldSchedule, shouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(node, ds)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
daemonPods, exists := nodeToDaemonPods[node.Name]
|
||||
dsKey, _ := cache.MetaNamespaceKeyFunc(ds)
|
||||
dsc.removeSuspendedDaemonPods(node.Name, dsKey)
|
||||
|
||||
switch {
|
||||
case wantToRun && !shouldSchedule:
|
||||
// If daemon pod is supposed to run, but can not be scheduled, add to suspended list.
|
||||
dsc.addSuspendedDaemonPods(node.Name, dsKey)
|
||||
case shouldSchedule && !exists:
|
||||
// If daemon pod is supposed to be running on node, but isn't, create daemon pod.
|
||||
nodesNeedingDaemonPods = append(nodesNeedingDaemonPods, node.Name)
|
||||
case shouldContinueRunning:
|
||||
// If a daemon pod failed, delete it
|
||||
// If there are non-daemon pods left on this node, we will create it in the next sync loop
|
||||
var daemonPodsRunning []*v1.Pod
|
||||
for _, pod := range daemonPods {
|
||||
if pod.DeletionTimestamp != nil {
|
||||
continue
|
||||
}
|
||||
if pod.Status.Phase == v1.PodFailed {
|
||||
msg := fmt.Sprintf("Found failed daemon pod %s/%s on node %s, will try to kill it", pod.Namespace, pod.Name, node.Name)
|
||||
glog.V(2).Infof(msg)
|
||||
// Emit an event so that it's discoverable to users.
|
||||
dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, FailedDaemonPodReason, msg)
|
||||
podsToDelete = append(podsToDelete, pod.Name)
|
||||
failedPodsObserved++
|
||||
} else {
|
||||
daemonPodsRunning = append(daemonPodsRunning, pod)
|
||||
}
|
||||
}
|
||||
// If daemon pod is supposed to be running on node, but more than 1 daemon pod is running, delete the excess daemon pods.
|
||||
// Sort the daemon pods by creation time, so the oldest is preserved.
|
||||
if len(daemonPodsRunning) > 1 {
|
||||
sort.Sort(podByCreationTimestampAndPhase(daemonPodsRunning))
|
||||
for i := 1; i < len(daemonPodsRunning); i++ {
|
||||
podsToDelete = append(podsToDelete, daemonPodsRunning[i].Name)
|
||||
}
|
||||
}
|
||||
case !shouldContinueRunning && exists:
|
||||
// If daemon pod isn't supposed to run on node, but it is, delete all daemon pods on node.
|
||||
for _, pod := range daemonPods {
|
||||
podsToDelete = append(podsToDelete, pod.Name)
|
||||
}
|
||||
}
|
||||
|
||||
return nodesNeedingDaemonPods, podsToDelete, failedPodsObserved, nil
|
||||
}
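The switch above is a small decision table over the three booleans returned by nodeShouldRunDaemonPod plus whether any daemon pods already exist on the node. A dependency-free sketch that condenses the same cases into one hypothetical helper (daemonPodAction, illustration only) reads:

package main

import "fmt"

// daemonPodAction summarizes the per-node decision made in podsShouldBeOnNode.
func daemonPodAction(wantToRun, shouldSchedule, shouldContinueRunning, hasPods bool) string {
	switch {
	case wantToRun && !shouldSchedule:
		return "suspend" // wanted but currently unschedulable: park on the suspended list
	case shouldSchedule && !hasPods:
		return "create" // should run and nothing is there yet: create a daemon pod
	case shouldContinueRunning:
		return "reconcile" // keep one healthy pod: delete failed pods and excess duplicates
	case !shouldContinueRunning && hasPods:
		return "delete" // no longer supposed to run here: delete every daemon pod on the node
	default:
		return "noop"
	}
}

func main() {
	fmt.Println(daemonPodAction(true, false, true, false)) // suspend
	fmt.Println(daemonPodAction(true, true, true, false))  // create
}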
|
||||
|
||||
// manage manages the scheduling and running of Pods of ds on nodes.
|
||||
// After figuring out which nodes should run a Pod of ds but not yet running one and
|
||||
// which nodes should not run a Pod of ds but currently running one, it calls function
|
||||
// syncNodes with a list of pods to remove and a list of nodes to run a Pod of ds.
|
||||
func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, hash string) error {
|
||||
// Find out which nodes are running the daemon pods controlled by ds.
|
||||
// Find out the pods which are created for the nodes by DaemonSet.
|
||||
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
|
||||
@@ -822,55 +913,16 @@ func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, hash string) error {
|
||||
var nodesNeedingDaemonPods, podsToDelete []string
|
||||
var failedPodsObserved int
|
||||
for _, node := range nodeList {
|
||||
wantToRun, shouldSchedule, shouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(node, ds)
|
||||
nodesNeedingDaemonPodsOnNode, podsToDeleteOnNode, failedPodsObservedOnNode, err := dsc.podsShouldBeOnNode(
|
||||
node, nodeToDaemonPods, ds)
|
||||
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
daemonPods, exists := nodeToDaemonPods[node.Name]
|
||||
dsKey, _ := cache.MetaNamespaceKeyFunc(ds)
|
||||
dsc.removeSuspendedDaemonPods(node.Name, dsKey)
|
||||
|
||||
switch {
|
||||
case wantToRun && !shouldSchedule:
|
||||
// If daemon pod is supposed to run, but can not be scheduled, add to suspended list.
|
||||
dsc.addSuspendedDaemonPods(node.Name, dsKey)
|
||||
case shouldSchedule && !exists:
|
||||
// If daemon pod is supposed to be running on node, but isn't, create daemon pod.
|
||||
nodesNeedingDaemonPods = append(nodesNeedingDaemonPods, node.Name)
|
||||
case shouldContinueRunning:
|
||||
// If a daemon pod failed, delete it
|
||||
// If there's non-daemon pods left on this node, we will create it in the next sync loop
|
||||
var daemonPodsRunning []*v1.Pod
|
||||
for _, pod := range daemonPods {
|
||||
if pod.DeletionTimestamp != nil {
|
||||
continue
|
||||
}
|
||||
if pod.Status.Phase == v1.PodFailed {
|
||||
msg := fmt.Sprintf("Found failed daemon pod %s/%s on node %s, will try to kill it", pod.Namespace, pod.Name, node.Name)
|
||||
glog.V(2).Infof(msg)
|
||||
// Emit an event so that it's discoverable to users.
|
||||
dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, FailedDaemonPodReason, msg)
|
||||
podsToDelete = append(podsToDelete, pod.Name)
|
||||
failedPodsObserved++
|
||||
} else {
|
||||
daemonPodsRunning = append(daemonPodsRunning, pod)
|
||||
}
|
||||
}
|
||||
// If daemon pod is supposed to be running on node, but more than 1 daemon pod is running, delete the excess daemon pods.
|
||||
// Sort the daemon pods by creation time, so the oldest is preserved.
|
||||
if len(daemonPodsRunning) > 1 {
|
||||
sort.Sort(podByCreationTimestamp(daemonPodsRunning))
|
||||
for i := 1; i < len(daemonPodsRunning); i++ {
|
||||
podsToDelete = append(podsToDelete, daemonPodsRunning[i].Name)
|
||||
}
|
||||
}
|
||||
case !shouldContinueRunning && exists:
|
||||
// If daemon pod isn't supposed to run on node, but it is, delete all daemon pods on node.
|
||||
for _, pod := range daemonPods {
|
||||
podsToDelete = append(podsToDelete, pod.Name)
|
||||
}
|
||||
}
|
||||
nodesNeedingDaemonPods = append(nodesNeedingDaemonPods, nodesNeedingDaemonPodsOnNode...)
|
||||
podsToDelete = append(podsToDelete, podsToDeleteOnNode...)
|
||||
failedPodsObserved += failedPodsObservedOnNode
|
||||
}
|
||||
|
||||
// Label new pods using the hash label value of the current history when creating them
|
||||
@@ -934,7 +986,26 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod
|
||||
for i := pos; i < pos+batchSize; i++ {
|
||||
go func(ix int) {
|
||||
defer createWait.Done()
|
||||
err := dsc.podControl.CreatePodsOnNode(nodesNeedingDaemonPods[ix], ds.Namespace, &template, ds, metav1.NewControllerRef(ds, controllerKind))
|
||||
var err error
|
||||
|
||||
podTemplate := &template
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) {
|
||||
podTemplate = template.DeepCopy()
|
||||
// The pod's NodeAffinity will be updated to make sure the Pod is bound
|
||||
// to the target node by default scheduler. It is safe to do so because there
|
||||
// should be no conflicting node affinity with the target node.
|
||||
podTemplate.Spec.Affinity = util.ReplaceDaemonSetPodNodeNameNodeAffinity(
|
||||
podTemplate.Spec.Affinity, nodesNeedingDaemonPods[ix])
|
||||
podTemplate.Spec.Tolerations = util.AppendNoScheduleTolerationIfNotExist(podTemplate.Spec.Tolerations)
|
||||
|
||||
err = dsc.podControl.CreatePodsWithControllerRef(ds.Namespace, podTemplate,
|
||||
ds, metav1.NewControllerRef(ds, controllerKind))
|
||||
} else {
|
||||
err = dsc.podControl.CreatePodsOnNode(nodesNeedingDaemonPods[ix], ds.Namespace, podTemplate,
|
||||
ds, metav1.NewControllerRef(ds, controllerKind))
|
||||
}
|
||||
|
||||
if err != nil && errors.IsTimeout(err) {
|
||||
// Pod is created but its initialization has timed out.
|
||||
// If the initialization is successful eventually, the
|
||||
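When the ScheduleDaemonSetPods gate is enabled, the hunk above deep-copies the pod template before mutating it, pins the copy to one node through node affinity, and hands the pod to the default scheduler via CreatePodsWithControllerRef instead of binding it directly with CreatePodsOnNode. The deep copy is the important detail: the shared template must not be mutated per node. A minimal sketch of that copy-then-pin step, using only the core API types, a hypothetical node name (node-a), and the literal "metadata.name" field key that the controller reaches through algorithm.NodeFieldSelectorKeyNodeName, is:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// pinTemplateToNode returns a per-node copy of the shared template; the
// original is left untouched so concurrent goroutines can keep using it.
func pinTemplateToNode(shared *v1.PodTemplateSpec, nodeName string) *v1.PodTemplateSpec {
	copied := shared.DeepCopy()
	copied.Spec.Affinity = &v1.Affinity{
		NodeAffinity: &v1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
				NodeSelectorTerms: []v1.NodeSelectorTerm{{
					// The controller builds the same term via ReplaceDaemonSetPodNodeNameNodeAffinity; see below.
					MatchFields: []v1.NodeSelectorRequirement{{
						Key:      "metadata.name",
						Operator: v1.NodeSelectorOpIn,
						Values:   []string{nodeName},
					}},
				}},
			},
		},
	}
	return copied
}

func main() {
	shared := &v1.PodTemplateSpec{}
	pinned := pinTemplateToNode(shared, "node-a")
	fmt.Println(shared.Spec.Affinity == nil, pinned.Spec.Affinity != nil) // true true
}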
@@ -1058,7 +1129,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash
|
||||
currentNumberScheduled++
|
||||
// Sort the daemon pods by creation time, so that the oldest is first.
|
||||
daemonPods, _ := nodeToDaemonPods[node.Name]
|
||||
sort.Sort(podByCreationTimestamp(daemonPods))
|
||||
sort.Sort(podByCreationTimestampAndPhase(daemonPods))
|
||||
pod := daemonPods[0]
|
||||
if podutil.IsPodReady(pod) {
|
||||
numberReady++
|
||||
@@ -1095,7 +1166,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash
|
||||
func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
|
||||
startTime := time.Now()
|
||||
defer func() {
|
||||
glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Now().Sub(startTime))
|
||||
glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Since(startTime))
|
||||
}()
|
||||
|
||||
namespace, name, err := cache.SplitMetaNamespaceKey(key)
|
||||
@@ -1126,6 +1197,18 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
|
||||
return fmt.Errorf("couldn't get key for object %#v: %v", ds, err)
|
||||
}
|
||||
|
||||
// If the DaemonSet is being deleted (either by foreground deletion or
|
||||
// orphan deletion), we cannot be sure if the DaemonSet history objects
|
||||
// it owned still exist -- those history objects can either be deleted
|
||||
// or orphaned. Garbage collector doesn't guarantee that it will delete
|
||||
// DaemonSet pods before deleting DaemonSet history objects, because
|
||||
// DaemonSet history doesn't own DaemonSet pods. We cannot reliably
|
||||
// calculate the status of a DaemonSet being deleted. Therefore, return
|
||||
// here without updating status for the DaemonSet being deleted.
|
||||
if ds.DeletionTimestamp != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Construct histories of the DaemonSet, and get the hash of current history
|
||||
cur, old, err := dsc.constructHistory(ds)
|
||||
if err != nil {
|
||||
@@ -1133,7 +1216,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
|
||||
}
|
||||
hash := cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
|
||||
|
||||
if ds.DeletionTimestamp != nil || !dsc.expectations.SatisfiedExpectations(dsKey) {
|
||||
if !dsc.expectations.SatisfiedExpectations(dsKey) {
|
||||
// Only update status.
|
||||
return dsc.updateDaemonSetStatus(ds, hash)
|
||||
}
|
||||
@@ -1209,30 +1292,27 @@ func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *app
|
||||
})
|
||||
}
|
||||
|
||||
pods := []*v1.Pod{}
|
||||
|
||||
podList, err := dsc.podLister.List(labels.Everything())
|
||||
objects, err := dsc.podNodeIndex.ByIndex("nodeName", node.Name)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
for _, pod := range podList {
|
||||
if pod.Spec.NodeName != node.Name {
|
||||
continue
|
||||
}
|
||||
if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
|
||||
continue
|
||||
}
|
||||
// ignore pods that belong to the daemonset when taking into account whether
|
||||
// a daemonset should bind to a node.
|
||||
if metav1.IsControlledBy(pod, ds) {
|
||||
continue
|
||||
}
|
||||
pods = append(pods, pod)
|
||||
}
|
||||
|
||||
nodeInfo := schedulercache.NewNodeInfo(pods...)
|
||||
nodeInfo := schedulercache.NewNodeInfo()
|
||||
nodeInfo.SetNode(node)
|
||||
|
||||
for _, obj := range objects {
|
||||
// Ignore pods that belong to the daemonset when taking into account whether a daemonset should bind to a node.
|
||||
// TODO: replace this with metav1.IsControlledBy() in 1.12
|
||||
pod, ok := obj.(*v1.Pod)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if isControlledByDaemonSet(pod, ds.GetUID()) {
|
||||
continue
|
||||
}
|
||||
nodeInfo.AddPod(pod)
|
||||
}
|
||||
|
||||
_, reasons, err := Predicates(newPod, nodeInfo)
|
||||
return reasons, nodeInfo, err
|
||||
}
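With the index in place, simulate() no longer lists every pod through the pod lister; it asks the indexer for the pods on one node and feeds the ones not owned by the DaemonSet into a scheduler NodeInfo. A stripped-down sketch of that query-and-filter step, assuming the vendored scheduler cache package shown in the hunk and an indexer wired as in the earlier example, could be:

package sketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/cache"
	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

// nodeInfoForDaemonSet builds the NodeInfo that the DaemonSet predicates run
// against: all pods indexed on the node except the ones the DaemonSet owns.
func nodeInfoForDaemonSet(indexer cache.Indexer, node *v1.Node, dsUID types.UID) (*schedulercache.NodeInfo, error) {
	objects, err := indexer.ByIndex("nodeName", node.Name)
	if err != nil {
		return nil, err
	}
	nodeInfo := schedulercache.NewNodeInfo()
	nodeInfo.SetNode(node)
	for _, obj := range objects {
		pod, ok := obj.(*v1.Pod)
		if !ok {
			continue
		}
		// Skip pods the DaemonSet controls, mirroring isControlledByDaemonSet below.
		skip := false
		for _, ref := range pod.OwnerReferences {
			if ref.Controller != nil && *ref.Controller && ref.UID == dsUID {
				skip = true
				break
			}
		}
		if !skip {
			nodeInfo.AddPod(pod)
		}
	}
	return nodeInfo, nil
}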
|
||||
@@ -1267,6 +1347,9 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps.
|
||||
return false, false, false, err
|
||||
}
|
||||
|
||||
// TODO(k82cn): When 'ScheduleDaemonSetPods' upgrade to beta or GA, remove unnecessary check on failure reason,
|
||||
// e.g. InsufficientResourceError; and simplify "wantToRun, shouldSchedule, shouldContinueRunning"
|
||||
// into one result, e.g. selectedNode.
|
||||
var insufficientResourceErr error
|
||||
for _, r := range reasons {
|
||||
glog.V(4).Infof("DaemonSet Predicates failed on node %s for ds '%s/%s' for reason: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, r.GetReason())
|
||||
@@ -1341,11 +1424,50 @@ func NewPod(ds *apps.DaemonSet, nodeName string) *v1.Pod {
|
||||
return newPod
|
||||
}
|
||||
|
||||
// nodeSelectionPredicates runs a set of predicates that select candidate nodes for the DaemonSet;
|
||||
// the predicates include:
|
||||
// - PodFitsHost: checks pod's NodeName against node
|
||||
// - PodMatchNodeSelector: checks pod's NodeSelector and NodeAffinity against node
|
||||
func nodeSelectionPredicates(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
|
||||
var predicateFails []algorithm.PredicateFailureReason
|
||||
fit, reasons, err := predicates.PodFitsHost(pod, meta, nodeInfo)
|
||||
if err != nil {
|
||||
return false, predicateFails, err
|
||||
}
|
||||
if !fit {
|
||||
predicateFails = append(predicateFails, reasons...)
|
||||
}
|
||||
|
||||
fit, reasons, err = predicates.PodMatchNodeSelector(pod, meta, nodeInfo)
|
||||
if err != nil {
|
||||
return false, predicateFails, err
|
||||
}
|
||||
if !fit {
|
||||
predicateFails = append(predicateFails, reasons...)
|
||||
}
|
||||
return len(predicateFails) == 0, predicateFails, nil
|
||||
}
|
||||
|
||||
// Predicates checks if a DaemonSet's pod can be scheduled on a node using GeneralPredicates
|
||||
// and PodToleratesNodeTaints predicate
|
||||
func Predicates(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
|
||||
var predicateFails []algorithm.PredicateFailureReason
|
||||
critical := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) && kubelettypes.IsCriticalPod(pod)
|
||||
|
||||
// If ScheduleDaemonSetPods is enabled, only check nodeSelector and nodeAffinity.
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) {
|
||||
fit, reasons, err := nodeSelectionPredicates(pod, nil, nodeInfo)
|
||||
if err != nil {
|
||||
return false, predicateFails, err
|
||||
}
|
||||
if !fit {
|
||||
predicateFails = append(predicateFails, reasons...)
|
||||
}
|
||||
|
||||
return len(predicateFails) == 0, predicateFails, nil
|
||||
}
|
||||
|
||||
critical := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) &&
|
||||
kubelettypes.IsCriticalPod(pod)
|
||||
|
||||
fit, reasons, err := predicates.PodToleratesNodeTaints(pod, nil, nodeInfo)
|
||||
if err != nil {
|
||||
@@ -1384,14 +1506,32 @@ func (o byCreationTimestamp) Less(i, j int) bool {
|
||||
return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
|
||||
}
|
||||
|
||||
type podByCreationTimestamp []*v1.Pod
|
||||
type podByCreationTimestampAndPhase []*v1.Pod
|
||||
|
||||
func (o podByCreationTimestamp) Len() int { return len(o) }
|
||||
func (o podByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
|
||||
func (o podByCreationTimestampAndPhase) Len() int { return len(o) }
|
||||
func (o podByCreationTimestampAndPhase) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
|
||||
|
||||
func (o podByCreationTimestampAndPhase) Less(i, j int) bool {
|
||||
// Scheduled Pod first
|
||||
if len(o[i].Spec.NodeName) != 0 && len(o[j].Spec.NodeName) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
if len(o[i].Spec.NodeName) == 0 && len(o[j].Spec.NodeName) != 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
func (o podByCreationTimestamp) Less(i, j int) bool {
|
||||
if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
|
||||
return o[i].Name < o[j].Name
|
||||
}
|
||||
return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
|
||||
}
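The replacement comparator (podByCreationTimestampAndPhase) sorts pods that already have a node name ahead of unscheduled ones and only then falls back to the old oldest-first order with a name tie-break, so when duplicates are trimmed the unscheduled copy is the one deleted. A self-contained sketch of that ordering with plain sort.Sort and hypothetical pod names:

package main

import (
	"fmt"
	"sort"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type byScheduledThenAge []*v1.Pod

func (o byScheduledThenAge) Len() int      { return len(o) }
func (o byScheduledThenAge) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

func (o byScheduledThenAge) Less(i, j int) bool {
	// Scheduled pods sort before unscheduled ones.
	if len(o[i].Spec.NodeName) != 0 && len(o[j].Spec.NodeName) == 0 {
		return true
	}
	if len(o[i].Spec.NodeName) == 0 && len(o[j].Spec.NodeName) != 0 {
		return false
	}
	// Otherwise oldest first, with the name as a deterministic tie-break.
	if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
		return o[i].Name < o[j].Name
	}
	return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
}

func main() {
	now := metav1.Now()
	pods := []*v1.Pod{
		{ObjectMeta: metav1.ObjectMeta{Name: "unscheduled", CreationTimestamp: now}},
		{ObjectMeta: metav1.ObjectMeta{Name: "scheduled", CreationTimestamp: now}, Spec: v1.PodSpec{NodeName: "node-a"}},
	}
	sort.Sort(byScheduledThenAge(pods))
	fmt.Println(pods[0].Name) // scheduled
}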
|
||||
|
||||
func isControlledByDaemonSet(p *v1.Pod, uuid types.UID) bool {
|
||||
for _, ref := range p.OwnerReferences {
|
||||
if ref.Controller != nil && *ref.Controller && ref.UID == uuid {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
vendor/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller_test.go (generated, vendored): 143 changed lines
@@ -43,6 +43,8 @@ import (
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
"k8s.io/kubernetes/pkg/securitycontext"
|
||||
@@ -145,7 +147,7 @@ func updateStrategies() []*apps.DaemonSetUpdateStrategy {
|
||||
|
||||
func newNode(name string, label map[string]string) *v1.Node {
|
||||
return &v1.Node{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: label,
|
||||
@@ -195,7 +197,7 @@ func newPod(podName string, nodeName string, label map[string]string, ds *apps.D
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: podName,
|
||||
Labels: newLabels,
|
||||
@@ -267,6 +269,31 @@ func (f *fakePodControl) CreatePodsOnNode(nodeName, namespace string, template *
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakePodControl) CreatePodsWithControllerRef(namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
if err := f.FakePodControl.CreatePodsWithControllerRef(namespace, template, object, controllerRef); err != nil {
|
||||
return fmt.Errorf("failed to create pod for DaemonSet")
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: template.Labels,
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
|
||||
pod.Name = names.SimpleNameGenerator.GenerateName(fmt.Sprintf("%p-", pod))
|
||||
|
||||
if err := legacyscheme.Scheme.Convert(&template.Spec, &pod.Spec, nil); err != nil {
|
||||
return fmt.Errorf("unable to convert pod template: %v", err)
|
||||
}
|
||||
|
||||
f.podStore.Update(pod)
|
||||
f.podIDMap[pod.Name] = pod
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakePodControl) DeletePod(namespace string, podID string, object runtime.Object) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
@@ -423,6 +450,97 @@ func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// When ScheduleDaemonSetPods is enabled, DaemonSets without node selectors should
|
||||
// launch pods on every node by NodeAffinity.
|
||||
func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) {
|
||||
t.Skip("disabled for 1.10")
|
||||
|
||||
enabled := utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods)
|
||||
// Rollback feature gate.
|
||||
defer func() {
|
||||
if !enabled {
|
||||
utilfeature.DefaultFeatureGate.Set("ScheduleDaemonSetPods=false")
|
||||
}
|
||||
}()
|
||||
|
||||
utilfeature.DefaultFeatureGate.Set("ScheduleDaemonSetPods=true")
|
||||
|
||||
nodeNum := 5
|
||||
|
||||
for _, strategy := range updateStrategies() {
|
||||
ds := newDaemonSet("foo")
|
||||
ds.Spec.UpdateStrategy = *strategy
|
||||
manager, podControl, _, err := newTestController(ds)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating DaemonSets controller: %v", err)
|
||||
}
|
||||
addNodes(manager.nodeStore, 0, nodeNum, nil)
|
||||
manager.dsStore.Add(ds)
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, nodeNum, 0, 0)
|
||||
|
||||
// Check for ScheduleDaemonSetPods feature
|
||||
if len(podControl.podIDMap) != nodeNum {
|
||||
t.Fatalf("failed to create pods for DaemonSet when enabled ScheduleDaemonSetPods.")
|
||||
}
|
||||
|
||||
nodeMap := make(map[string]*v1.Node)
|
||||
for _, node := range manager.nodeStore.List() {
|
||||
n := node.(*v1.Node)
|
||||
nodeMap[n.Name] = n
|
||||
}
|
||||
|
||||
if len(nodeMap) != nodeNum {
|
||||
t.Fatalf("not enough nodes in the store, expected: %v, got: %v",
|
||||
nodeNum, len(nodeMap))
|
||||
}
|
||||
|
||||
for _, pod := range podControl.podIDMap {
|
||||
if len(pod.Spec.NodeName) != 0 {
|
||||
t.Fatalf("the hostname of pod %v should be empty, but got %s",
|
||||
pod.Name, pod.Spec.NodeName)
|
||||
}
|
||||
if pod.Spec.Affinity == nil {
|
||||
t.Fatalf("the Affinity of pod %s is nil.", pod.Name)
|
||||
}
|
||||
if pod.Spec.Affinity.NodeAffinity == nil {
|
||||
t.Fatalf("the NodeAffinity of pod %s is nil.", pod.Name)
|
||||
}
|
||||
|
||||
nodeSelector := pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
|
||||
if nodeSelector == nil {
|
||||
t.Fatalf("the node selector of pod %s is nil.", pod.Name)
|
||||
}
|
||||
if len(nodeSelector.NodeSelectorTerms) != 1 {
|
||||
t.Fatalf("incorrect node selector terms number of pod %s, expected: 1, got: %d.",
|
||||
pod.Name, len(nodeSelector.NodeSelectorTerms))
|
||||
}
|
||||
|
||||
if len(nodeSelector.NodeSelectorTerms[0].MatchExpressions) != 1 {
|
||||
t.Fatalf("incorrect expression number of pod %s node selector term, expected: 1, got: %d.",
|
||||
pod.Name, len(nodeSelector.NodeSelectorTerms[0].MatchExpressions))
|
||||
}
|
||||
|
||||
exp := nodeSelector.NodeSelectorTerms[0].MatchExpressions[0]
|
||||
if exp.Key == kubeletapis.LabelHostname {
|
||||
if exp.Operator != v1.NodeSelectorOpIn {
|
||||
t.Fatalf("the operation of hostname NodeAffinity is not %v", v1.NodeSelectorOpIn)
|
||||
}
|
||||
|
||||
if len(exp.Values) != 1 {
|
||||
t.Fatalf("incorrect hostname in node affinity: expected 1, got %v", len(exp.Values))
|
||||
}
|
||||
|
||||
delete(nodeMap, exp.Values[0])
|
||||
}
|
||||
}
|
||||
|
||||
if len(nodeMap) != 0 {
|
||||
t.Fatalf("did not foud pods on nodes %+v", nodeMap)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Simulate a cluster with 100 nodes, but simulate a limit (like a quota limit)
|
||||
// of 10 pods, and verify that the ds doesn't make 100 create calls per sync pass
|
||||
func TestSimpleDaemonSetPodCreateErrors(t *testing.T) {
|
||||
@@ -1545,6 +1663,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
|
||||
predicateName string
|
||||
podsOnNode []*v1.Pod
|
||||
nodeCondition []v1.NodeCondition
|
||||
nodeUnschedulable bool
|
||||
ds *apps.DaemonSet
|
||||
wantToRun, shouldSchedule, shouldContinueRunning bool
|
||||
err error
|
||||
@@ -1800,6 +1919,24 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
|
||||
shouldSchedule: true,
|
||||
shouldContinueRunning: true,
|
||||
},
|
||||
{
|
||||
predicateName: "ShouldRunDaemonPodOnUnscheduableNode",
|
||||
ds: &apps.DaemonSet{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: simpleDaemonSetLabel,
|
||||
},
|
||||
Spec: resourcePodSpec("", "50M", "0.5"),
|
||||
},
|
||||
},
|
||||
},
|
||||
nodeUnschedulable: true,
|
||||
wantToRun: true,
|
||||
shouldSchedule: true,
|
||||
shouldContinueRunning: true,
|
||||
},
|
||||
}
|
||||
|
||||
for i, c := range cases {
|
||||
@@ -1807,6 +1944,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
|
||||
node := newNode("test-node", simpleDaemonSetLabel)
|
||||
node.Status.Conditions = append(node.Status.Conditions, c.nodeCondition...)
|
||||
node.Status.Allocatable = allocatableResources("100M", "1")
|
||||
node.Spec.Unschedulable = c.nodeUnschedulable
|
||||
manager, _, _, err := newTestController()
|
||||
if err != nil {
|
||||
t.Fatalf("error creating DaemonSets controller: %v", err)
|
||||
@@ -1815,6 +1953,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
|
||||
for _, p := range c.podsOnNode {
|
||||
manager.podStore.Add(p)
|
||||
p.Spec.NodeName = "test-node"
|
||||
manager.podNodeIndex.Add(p)
|
||||
}
|
||||
c.ds.Spec.UpdateStrategy = *strategy
|
||||
wantToRun, shouldSchedule, shouldContinueRunning, err := manager.nodeShouldRunDaemonPod(node, c.ds)
|
||||
|
vendor/k8s.io/kubernetes/pkg/controller/daemon/util/BUILD (generated, vendored): 5 changed lines
@@ -19,6 +19,7 @@ go_library(
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
],
@@ -43,8 +44,12 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/api/testapi:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
],
)
vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util.go (generated, vendored): 115 changed lines
@@ -23,6 +23,7 @@ import (
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
apiequality "k8s.io/apimachinery/pkg/api/equality"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
@@ -133,3 +134,117 @@ func SplitByAvailablePods(minReadySeconds int32, pods []*v1.Pod) ([]*v1.Pod, []*
|
||||
}
|
||||
return availablePods, unavailablePods
|
||||
}
|
||||
|
||||
// ReplaceDaemonSetPodNodeNameNodeAffinity replaces the RequiredDuringSchedulingIgnoredDuringExecution
|
||||
// NodeAffinity of the given affinity with a new NodeAffinity that selects the given nodeName.
|
||||
// Note that this function assumes that no NodeAffinity conflicts with the selected nodeName.
|
||||
func ReplaceDaemonSetPodNodeNameNodeAffinity(affinity *v1.Affinity, nodename string) *v1.Affinity {
|
||||
nodeSelReq := v1.NodeSelectorRequirement{
|
||||
Key: algorithm.NodeFieldSelectorKeyNodeName,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{nodename},
|
||||
}
|
||||
|
||||
nodeSelector := &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchFields: []v1.NodeSelectorRequirement{nodeSelReq},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if affinity == nil {
|
||||
return &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: nodeSelector,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if affinity.NodeAffinity == nil {
|
||||
affinity.NodeAffinity = &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: nodeSelector,
|
||||
}
|
||||
return affinity
|
||||
}
|
||||
|
||||
nodeAffinity := affinity.NodeAffinity
|
||||
|
||||
if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
|
||||
nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = nodeSelector
|
||||
return affinity
|
||||
}
|
||||
|
||||
// Replace node selector with the new one.
|
||||
nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchFields: []v1.NodeSelectorRequirement{nodeSelReq},
|
||||
},
|
||||
}
|
||||
|
||||
return affinity
|
||||
}
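A quick usage sketch of the helper above, reusing the vendored package path shown in this diff: passing a nil affinity yields a required node affinity with a single metadata.name MatchFields requirement selecting the given node (the node name is hypothetical):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/controller/daemon/util"
)

func main() {
	affinity := util.ReplaceDaemonSetPodNodeNameNodeAffinity(nil, "node-a")
	term := affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0]
	// Prints the single metadata.name requirement selecting "node-a".
	fmt.Printf("%+v\n", term.MatchFields[0])
}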
|
||||
|
||||
// AppendNoScheduleTolerationIfNotExist appends the unschedulable NoSchedule toleration to the given
// tolerations if it is not already present; otherwise it returns the tolerations unchanged.
|
||||
func AppendNoScheduleTolerationIfNotExist(tolerations []v1.Toleration) []v1.Toleration {
|
||||
unschedulableToleration := v1.Toleration{
|
||||
Key: algorithm.TaintNodeUnschedulable,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
}
|
||||
|
||||
unschedulableTaintExist := false
|
||||
|
||||
for _, t := range tolerations {
|
||||
if apiequality.Semantic.DeepEqual(t, unschedulableToleration) {
|
||||
unschedulableTaintExist = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !unschedulableTaintExist {
|
||||
tolerations = append(tolerations, unschedulableToleration)
|
||||
}
|
||||
|
||||
return tolerations
|
||||
}
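The toleration helper is idempotent, so calling it on a slice that already carries the unschedulable NoSchedule toleration leaves the slice unchanged. A minimal check (a sketch, again assuming the vendored package path):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/controller/daemon/util"
)

func main() {
	tolerations := util.AppendNoScheduleTolerationIfNotExist([]v1.Toleration{})
	tolerations = util.AppendNoScheduleTolerationIfNotExist(tolerations) // second call is a no-op
	fmt.Println(len(tolerations))                                        // 1
}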
|
||||
|
||||
// GetTargetNodeName gets the target node name of a DaemonSet pod. If `.spec.NodeName` is not empty,
// it is returned directly; otherwise the node name of a pending pod is retrieved from its NodeAffinity.
// An error is returned if the node name cannot be determined from either `.spec.NodeName` or NodeAffinity.
|
||||
func GetTargetNodeName(pod *v1.Pod) (string, error) {
|
||||
if len(pod.Spec.NodeName) != 0 {
|
||||
return pod.Spec.NodeName, nil
|
||||
}
|
||||
|
||||
// If ScheduleDaemonSetPods was enabled before, retrieve node name of unscheduled pods from NodeAffinity
|
||||
if pod.Spec.Affinity == nil ||
|
||||
pod.Spec.Affinity.NodeAffinity == nil ||
|
||||
pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
|
||||
return "", fmt.Errorf("no spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution for pod %s/%s",
|
||||
pod.Namespace, pod.Name)
|
||||
}
|
||||
|
||||
terms := pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
|
||||
if len(terms) < 1 {
|
||||
return "", fmt.Errorf("no nodeSelectorTerms in requiredDuringSchedulingIgnoredDuringExecution of pod %s/%s",
|
||||
pod.Namespace, pod.Name)
|
||||
}
|
||||
|
||||
for _, term := range terms {
|
||||
for _, exp := range term.MatchFields {
|
||||
if exp.Key == algorithm.NodeFieldSelectorKeyNodeName &&
|
||||
exp.Operator == v1.NodeSelectorOpIn {
|
||||
if len(exp.Values) != 1 {
|
||||
return "", fmt.Errorf("the matchFields value of '%s' is not unique for pod %s/%s",
|
||||
algorithm.NodeFieldSelectorKeyNodeName, pod.Namespace, pod.Name)
|
||||
}
|
||||
|
||||
return exp.Values[0], nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("no node name found for pod %s/%s", pod.Namespace, pod.Name)
|
||||
}
|
||||
|
vendor/k8s.io/kubernetes/pkg/controller/daemon/util/daemonset_util_test.go (generated, vendored): 429 changed lines
@@ -18,12 +18,17 @@ package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
)
|
||||
|
||||
func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
|
||||
@@ -168,3 +173,427 @@ func int64Ptr(i int) *int64 {
|
||||
li := int64(i)
|
||||
return &li
|
||||
}
|
||||
|
||||
func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
|
||||
tests := []struct {
|
||||
affinity *v1.Affinity
|
||||
hostname string
|
||||
expected *v1.Affinity
|
||||
}{
|
||||
{
|
||||
affinity: nil,
|
||||
hostname: "host_1",
|
||||
expected: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchFields: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: algorithm.NodeFieldSelectorKeyNodeName,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"host_1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: kubeletapis.LabelHostname,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"host_1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
hostname: "host_1",
|
||||
expected: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchFields: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: algorithm.NodeFieldSelectorKeyNodeName,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"host_1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
|
||||
{
|
||||
Preference: v1.NodeSelectorTerm{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: kubeletapis.LabelHostname,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"host_1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
hostname: "host_1",
|
||||
expected: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
|
||||
{
|
||||
Preference: v1.NodeSelectorTerm{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: kubeletapis.LabelHostname,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"host_1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchFields: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: algorithm.NodeFieldSelectorKeyNodeName,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"host_1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchFields: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: algorithm.NodeFieldSelectorKeyNodeName,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"host_1", "host_2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
hostname: "host_1",
|
||||
expected: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchFields: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: algorithm.NodeFieldSelectorKeyNodeName,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"host_1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
affinity: nil,
|
||||
hostname: "host_1",
|
||||
expected: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchFields: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: algorithm.NodeFieldSelectorKeyNodeName,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"host_1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "hostname",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"host_1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
MatchFields: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: algorithm.NodeFieldSelectorKeyNodeName,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"host_2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
hostname: "host_1",
|
||||
expected: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchFields: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: algorithm.NodeFieldSelectorKeyNodeName,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"host_1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchFields: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: algorithm.NodeFieldSelectorKeyNodeName,
|
||||
Operator: v1.NodeSelectorOpNotIn,
|
||||
Values: []string{"host_2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
hostname: "host_1",
|
||||
expected: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchFields: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: algorithm.NodeFieldSelectorKeyNodeName,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"host_1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchFields: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
// NOTE: Only `metadata.name` is a valid key in `MatchFields` in 1.11;
// this case was added for compatibility: the feature keeps working as normal
// when new keys are introduced.
|
||||
Key: "metadata.foo",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"bar"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
hostname: "host_1",
|
||||
expected: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchFields: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: algorithm.NodeFieldSelectorKeyNodeName,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"host_1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
got := ReplaceDaemonSetPodNodeNameNodeAffinity(test.affinity, test.hostname)
|
||||
if !reflect.DeepEqual(test.expected, got) {
|
||||
t.Errorf("Failed to append NodeAffinity in case %d, got: %v, expected: %v",
|
||||
i, got, test.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func forEachFeatureGate(t *testing.T, tf func(t *testing.T), gates ...utilfeature.Feature) {
|
||||
for _, fg := range gates {
|
||||
func() {
|
||||
enabled := utilfeature.DefaultFeatureGate.Enabled(fg)
|
||||
defer func() {
|
||||
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, enabled))
|
||||
}()
|
||||
|
||||
for _, f := range []bool{true, false} {
|
||||
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%v=%t", fg, f))
|
||||
t.Run(fmt.Sprintf("%v (%t)", fg, f), tf)
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetTargetNodeName(t *testing.T) {
|
||||
testFun := func(t *testing.T) {
|
||||
tests := []struct {
|
||||
pod *v1.Pod
|
||||
nodeName string
|
||||
expectedErr bool
|
||||
}{
|
||||
{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod1",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
NodeName: "node-1",
|
||||
},
|
||||
},
|
||||
nodeName: "node-1",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod2",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchFields: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: algorithm.NodeFieldSelectorKeyNodeName,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"node-1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
nodeName: "node-1",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod3",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchFields: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: algorithm.NodeFieldSelectorKeyNodeName,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"node-1", "node-2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedErr: true,
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod4",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.PodSpec{},
|
||||
},
|
||||
expectedErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
got, err := GetTargetNodeName(test.pod)
|
||||
if test.expectedErr != (err != nil) {
|
||||
t.Errorf("Unexpected error, expectedErr: %v, err: %v", test.expectedErr, err)
|
||||
} else if !test.expectedErr {
|
||||
if test.nodeName != got {
|
||||
t.Errorf("Failed to get target node name, got: %v, expected: %v", got, test.nodeName)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
forEachFeatureGate(t, testFun, features.ScheduleDaemonSetPods)
|
||||
}
|
||||
|