vendor update for CSI 0.3.0
vendor/k8s.io/kubernetes/pkg/controller/nodelifecycle/node_lifecycle_controller.go (70 lines changed, generated, vendored)
@@ -22,6 +22,13 @@ limitations under the License.
 package nodelifecycle
 
 import (
+    "context"
+    "fmt"
+    "sync"
+    "time"
+
+    "github.com/golang/glog"
+
     "k8s.io/api/core/v1"
     apiequality "k8s.io/apimachinery/pkg/api/equality"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -50,12 +57,6 @@ import (
     "k8s.io/kubernetes/pkg/util/system"
     taintutils "k8s.io/kubernetes/pkg/util/taints"
     utilversion "k8s.io/kubernetes/pkg/util/version"
-
-    "fmt"
-    "sync"
-    "time"
-
-    "github.com/golang/glog"
 )
 
 func init() {
@@ -107,8 +108,6 @@ const (
 )
 
 const (
-    // The amount of time the nodecontroller polls on the list nodes endpoint.
-    apiserverStartupGracePeriod = 10 * time.Minute
     // The amount of time the nodecontroller should sleep between retrying NodeStatus updates
     retrySleepTime = 20 * time.Millisecond
 )
@@ -153,9 +152,10 @@ type Controller struct {
     daemonSetStore          extensionslisters.DaemonSetLister
     daemonSetInformerSynced cache.InformerSynced
 
-    nodeLister                corelisters.NodeLister
-    nodeInformerSynced        cache.InformerSynced
-    nodeExistsInCloudProvider func(types.NodeName) (bool, error)
+    nodeLister                  corelisters.NodeLister
+    nodeInformerSynced          cache.InformerSynced
+    nodeExistsInCloudProvider   func(types.NodeName) (bool, error)
+    nodeShutdownInCloudProvider func(context.Context, *v1.Node) (bool, error)
 
     recorder record.EventRecorder
 
@@ -241,6 +241,9 @@ func NewNodeLifecycleController(podInformer coreinformers.PodInformer,
         nodeExistsInCloudProvider: func(nodeName types.NodeName) (bool, error) {
             return nodeutil.ExistsInCloudProvider(cloud, nodeName)
         },
+        nodeShutdownInCloudProvider: func(ctx context.Context, node *v1.Node) (bool, error) {
+            return nodeutil.ShutdownInCloudProvider(ctx, cloud, node)
+        },
         recorder:               recorder,
         nodeMonitorPeriod:      nodeMonitorPeriod,
         nodeStartupGracePeriod: nodeStartupGracePeriod,
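Worth noting for readers of this diff: the cloud checks are wired in as function fields (nodeExistsInCloudProvider, nodeShutdownInCloudProvider) rather than as direct method calls on the cloud provider, which is what lets the tests further down stub them out. A minimal sketch of that injection pattern, with hypothetical names that are not part of the vendored code:

package main

import (
    "context"
    "fmt"
)

// controller consults the cloud through an injectable function field,
// so tests can substitute a stub for a real cloud client.
type controller struct {
    shutdownInCloud func(ctx context.Context, nodeName string) (bool, error)
}

func (c *controller) sync(ctx context.Context, nodeName string) {
    down, err := c.shutdownInCloud(ctx, nodeName)
    if err != nil {
        fmt.Println("cloud check failed:", err)
        return
    }
    if down {
        fmt.Println("would taint rather than delete:", nodeName)
    }
}

func main() {
    c := &controller{
        // Stub standing in for a real cloud-provider call.
        shutdownInCloud: func(_ context.Context, nodeName string) (bool, error) {
            return nodeName == "node0", nil
        },
    }
    c.sync(context.Background(), "node0")
}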
@@ -438,7 +441,21 @@ func (nc *Controller) doNoScheduleTaintingPass(node *v1.Node) error {
             }
         }
     }
+    if node.Spec.Unschedulable {
+        // If unschedulable, append related taint.
+        taints = append(taints, v1.Taint{
+            Key:    algorithm.TaintNodeUnschedulable,
+            Effect: v1.TaintEffectNoSchedule,
+        })
+    }
+
+    // Get exist taints of node.
     nodeTaints := taintutils.TaintSetFilter(node.Spec.Taints, func(t *v1.Taint) bool {
+        // Find unschedulable taint of node.
+        if t.Key == algorithm.TaintNodeUnschedulable {
+            return true
+        }
+        // Find node condition taints of node.
         _, found := taintKeyToNodeConditionMap[t.Key]
         return found
     })
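taintutils.TaintSetFilter above keeps only the taints this controller owns (the unschedulable taint plus condition-derived taints) before reconciling them against the desired set. The same predicate-filter shape in isolation; the `taint` type here is a stand-in, not the Kubernetes API type:

package main

import "fmt"

type taint struct{ Key string }

// filterTaints keeps only the taints for which keep returns true,
// mirroring the shape of taintutils.TaintSetFilter above.
func filterTaints(taints []taint, keep func(taint) bool) []taint {
    var out []taint
    for _, t := range taints {
        if keep(t) {
            out = append(out, t)
        }
    }
    return out
}

func main() {
    owned := map[string]bool{"node.kubernetes.io/unschedulable": true}
    all := []taint{{Key: "node.kubernetes.io/unschedulable"}, {Key: "example.com/custom"}}
    // Only the controller-owned taint survives the filter.
    fmt.Println(filterTaints(all, func(t taint) bool { return owned[t.Key] }))
}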
@@ -655,6 +672,11 @@ func (nc *Controller) monitorNodeStatus() error {
                 glog.V(2).Infof("Node %s is ready again, cancelled pod eviction", node.Name)
             }
         }
+        // remove shutdown taint this is needed always depending do we use taintbased or not
+        err := nc.markNodeAsNotShutdown(node)
+        if err != nil {
+            glog.Errorf("Failed to remove taints from node %v. Will retry in next iteration.", node.Name)
+        }
     }
 
     // Report node event.
@@ -668,6 +690,19 @@ func (nc *Controller) monitorNodeStatus() error {
         // Check with the cloud provider to see if the node still exists. If it
         // doesn't, delete the node immediately.
         if currentReadyCondition.Status != v1.ConditionTrue && nc.cloud != nil {
+            // check is node shutdowned, if yes do not deleted it. Instead add taint
+            shutdown, err := nc.nodeShutdownInCloudProvider(context.TODO(), node)
+            if err != nil {
+                glog.Errorf("Error determining if node %v shutdown in cloud: %v", node.Name, err)
+            }
+            // node shutdown
+            if shutdown && err == nil {
+                err = controller.AddOrUpdateTaintOnNode(nc.kubeClient, node.Name, controller.ShutdownTaint)
+                if err != nil {
+                    glog.Errorf("Error patching node taints: %v", err)
+                }
+                continue
+            }
             exists, err := nc.nodeExistsInCloudProvider(types.NodeName(node.Name))
             if err != nil {
                 glog.Errorf("Error determining if node %v exists in cloud: %v", node.Name, err)
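The control flow added above: a NotReady node is first checked for cloud shutdown — if shut down it is tainted and kept — and only a node that no longer exists in the cloud is deleted. A compressed sketch of that decision, assuming only these two cloud signals matter; types and names are illustrative only:

package main

import "fmt"

type taint struct{ Key, Effect string }

// reconcile mirrors the flow above: a shut-down node is tainted and kept;
// only a node that no longer exists in the cloud is deleted.
func reconcile(shutdown, exists bool, taints []taint) (keepNode bool, out []taint) {
    out = taints
    if shutdown {
        // Same taint key the test below asserts on.
        return true, append(out, taint{Key: "node.cloudprovider.kubernetes.io/shutdown", Effect: "NoSchedule"})
    }
    if !exists {
        return false, nil // node object would be deleted
    }
    return true, out
}

func main() {
    keep, taints := reconcile(true, true, nil)
    fmt.Println(keep, taints)
}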
@@ -1042,6 +1077,8 @@ func (nc *Controller) ReducedQPSFunc(nodeNum int) float32 {
 
 // addPodEvictorForNewZone checks if new zone appeared, and if so add new evictor.
 func (nc *Controller) addPodEvictorForNewZone(node *v1.Node) {
+    nc.evictorLock.Lock()
+    defer nc.evictorLock.Unlock()
     zone := utilnode.GetZoneKey(node)
     if _, found := nc.zoneStates[zone]; !found {
         nc.zoneStates[zone] = stateInitial
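The two lines added above close a race: zoneStates is a map shared across controller goroutines, so every reader and writer must hold evictorLock. A self-contained sketch of the same guard (hypothetical types, not the vendored code):

package main

import (
    "fmt"
    "sync"
)

// zones mirrors nc.zoneStates: a map shared across goroutines,
// guarded by a mutex the way addPodEvictorForNewZone now does.
type zones struct {
    mu     sync.Mutex
    states map[string]string
}

func (z *zones) ensure(zone string) {
    z.mu.Lock()
    defer z.mu.Unlock()
    if _, ok := z.states[zone]; !ok {
        z.states[zone] = "initial"
    }
}

func main() {
    z := &zones{states: map[string]string{}}
    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func() { defer wg.Done(); z.ensure("zone-a") }()
    }
    wg.Wait()
    fmt.Println(z.states)
}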
@@ -1104,6 +1141,17 @@ func (nc *Controller) markNodeAsReachable(node *v1.Node) (bool, error) {
     return nc.zoneNoExecuteTainter[utilnode.GetZoneKey(node)].Remove(node.Name), nil
 }
 
+func (nc *Controller) markNodeAsNotShutdown(node *v1.Node) error {
+    nc.evictorLock.Lock()
+    defer nc.evictorLock.Unlock()
+    err := controller.RemoveTaintOffNode(nc.kubeClient, node.Name, node, controller.ShutdownTaint)
+    if err != nil {
+        glog.Errorf("Failed to remove taint from node %v: %v", node.Name, err)
+        return err
+    }
+    return nil
+}
+
 // ComputeZoneState returns a slice of NodeReadyConditions for all Nodes in a given zone.
 // The zone is considered:
 // - fullyDisrupted if there're no Ready Nodes,
vendor/k8s.io/kubernetes/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go (137 lines changed, generated, vendored)
@@ -17,6 +17,7 @@ limitations under the License.
 package nodelifecycle
 
 import (
+    "context"
     "strings"
     "testing"
     "time"
@@ -1360,6 +1361,118 @@ func TestMonitorNodeStatusEvictPodsWithDisruption(t *testing.T) {
     }
 }
 
+func TestCloudProviderNodeShutdown(t *testing.T) {
+
+    testCases := []struct {
+        testName string
+        node     *v1.Node
+        shutdown bool
+    }{
+        {
+            testName: "node shutdowned add taint",
+            shutdown: true,
+            node: &v1.Node{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:              "node0",
+                    CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+                },
+                Spec: v1.NodeSpec{
+                    ProviderID: "node0",
+                },
+                Status: v1.NodeStatus{
+                    Conditions: []v1.NodeCondition{
+                        {
+                            Type:               v1.NodeReady,
+                            Status:             v1.ConditionUnknown,
+                            LastHeartbeatTime:  metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+                            LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+                        },
+                    },
+                },
+            },
+        },
+        {
+            testName: "node started after shutdown remove taint",
+            shutdown: false,
+            node: &v1.Node{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:              "node0",
+                    CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+                },
+                Spec: v1.NodeSpec{
+                    ProviderID: "node0",
+                    Taints: []v1.Taint{
+                        {
+                            Key:    algorithm.TaintNodeShutdown,
+                            Effect: v1.TaintEffectNoSchedule,
+                        },
+                    },
+                },
+                Status: v1.NodeStatus{
+                    Conditions: []v1.NodeCondition{
+                        {
+                            Type:               v1.NodeReady,
+                            Status:             v1.ConditionTrue,
+                            LastHeartbeatTime:  metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+                            LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
+                        },
+                    },
+                },
+            },
+        },
+    }
+    for _, tc := range testCases {
+        t.Run(tc.testName, func(t *testing.T) {
+            fnh := &testutil.FakeNodeHandler{
+                Existing:  []*v1.Node{tc.node},
+                Clientset: fake.NewSimpleClientset(),
+            }
+            nodeController, _ := newNodeLifecycleControllerFromClient(
+                nil,
+                fnh,
+                10*time.Minute,
+                testRateLimiterQPS,
+                testRateLimiterQPS,
+                testLargeClusterThreshold,
+                testUnhealthyThreshold,
+                testNodeMonitorGracePeriod,
+                testNodeStartupGracePeriod,
+                testNodeMonitorPeriod,
+                false)
+            nodeController.cloud = &fakecloud.FakeCloud{}
+            nodeController.now = func() metav1.Time { return metav1.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC) }
+            nodeController.recorder = testutil.NewFakeRecorder()
+            nodeController.nodeShutdownInCloudProvider = func(ctx context.Context, node *v1.Node) (bool, error) {
+                return tc.shutdown, nil
+            }
+
+            if err := nodeController.syncNodeStore(fnh); err != nil {
+                t.Errorf("unexpected error: %v", err)
+            }
+            if err := nodeController.monitorNodeStatus(); err != nil {
+                t.Errorf("unexpected error: %v", err)
+            }
+
+            if len(fnh.UpdatedNodes) != 1 {
+                t.Errorf("Node was not updated")
+            }
+            if tc.shutdown {
+                if len(fnh.UpdatedNodes[0].Spec.Taints) != 1 {
+                    t.Errorf("Node Taint was not added")
+                }
+                if fnh.UpdatedNodes[0].Spec.Taints[0].Key != "node.cloudprovider.kubernetes.io/shutdown" {
+                    t.Errorf("Node Taint key is not correct")
+                }
+            } else {
+                if len(fnh.UpdatedNodes[0].Spec.Taints) != 0 {
+                    t.Errorf("Node Taint was not removed after node is back in ready state")
+                }
+            }
+        })
+    }
+
+}
+
 // TestCloudProviderNoRateLimit tests that monitorNodes() immediately deletes
 // pods and the node when kubelet has not reported, and the cloudprovider says
 // the node is gone.
@@ -1404,6 +1517,9 @@ func TestCloudProviderNoRateLimit(t *testing.T) {
     nodeController.nodeExistsInCloudProvider = func(nodeName types.NodeName) (bool, error) {
         return false, nil
     }
+    nodeController.nodeShutdownInCloudProvider = func(ctx context.Context, node *v1.Node) (bool, error) {
+        return false, nil
+    }
     // monitorNodeStatus should allow this node to be immediately deleted
     if err := nodeController.syncNodeStore(fnh); err != nil {
         t.Errorf("unexpected error: %v", err)
@@ -1543,9 +1659,6 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
                         v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
                     },
                 },
-                Spec: v1.NodeSpec{
-                    ExternalID: "node0",
-                },
             },
         },
         Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
@@ -1620,9 +1733,6 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
                         v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
                     },
                 },
-                Spec: v1.NodeSpec{
-                    ExternalID: "node0",
-                },
             },
         },
     },
@@ -1651,9 +1761,6 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
                         v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
                     },
                 },
-                Spec: v1.NodeSpec{
-                    ExternalID: "node0",
-                },
             },
         },
         Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
@@ -1755,9 +1862,6 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
                         v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
                     },
                 },
-                Spec: v1.NodeSpec{
-                    ExternalID: "node0",
-                },
             },
         },
         Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
@@ -1789,9 +1893,6 @@ func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
                         v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
                     },
                 },
-                Spec: v1.NodeSpec{
-                    ExternalID: "node0",
-                },
             },
         },
         Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
@@ -2208,9 +2309,6 @@ func TestNodeEventGeneration(t *testing.T) {
                 UID:               "1234567890",
                 CreationTimestamp: metav1.Date(2015, 8, 10, 0, 0, 0, 0, time.UTC),
             },
-            Spec: v1.NodeSpec{
-                ExternalID: "node0",
-            },
             Status: v1.NodeStatus{
                 Conditions: []v1.NodeCondition{
                     {
@@ -2242,6 +2340,9 @@ func TestNodeEventGeneration(t *testing.T) {
     nodeController.nodeExistsInCloudProvider = func(nodeName types.NodeName) (bool, error) {
         return false, nil
     }
+    nodeController.nodeShutdownInCloudProvider = func(ctx context.Context, node *v1.Node) (bool, error) {
+        return false, nil
+    }
     nodeController.now = func() metav1.Time { return fakeNow }
     fakeRecorder := testutil.NewFakeRecorder()
     nodeController.recorder = fakeRecorder
vendor/k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler/taint_manager.go (93 lines changed, generated, vendored)
@@ -18,22 +18,23 @@ package scheduler
 
 import (
     "fmt"
-    "k8s.io/api/core/v1"
-    clientset "k8s.io/client-go/kubernetes"
-    "k8s.io/kubernetes/pkg/apis/core/helper"
-    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
+    "hash/fnv"
+    "io"
+    "sync"
+    "time"
 
+    "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/fields"
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/types"
-    "sync"
-    "time"
-
+    clientset "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/kubernetes/scheme"
     v1core "k8s.io/client-go/kubernetes/typed/core/v1"
     "k8s.io/client-go/tools/record"
     "k8s.io/client-go/util/workqueue"
+    "k8s.io/kubernetes/pkg/apis/core/helper"
+    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 
     "github.com/golang/glog"
 )
@@ -59,6 +60,32 @@ type podUpdateItem struct {
     newTolerations []v1.Toleration
 }
 
+func (n *nodeUpdateItem) name() string {
+    if n.newNode != nil {
+        return n.newNode.ObjectMeta.Name
+    }
+    if n.oldNode != nil {
+        return n.oldNode.ObjectMeta.Name
+    }
+    return ""
+}
+
+func (p *podUpdateItem) nodeName() string {
+    if p.newPod != nil {
+        return p.newPod.Spec.NodeName
+    }
+    if p.oldPod != nil {
+        return p.oldPod.Spec.NodeName
+    }
+    return ""
+}
+
+func hash(val string, max int) int {
+    hasher := fnv.New32a()
+    io.WriteString(hasher, val)
+    return int(hasher.Sum32() % uint32(max))
+}
+
 // NoExecuteTaintManager listens to Taint/Toleration changes and is responsible for removing Pods
 // from Nodes tainted with NoExecute Taints.
 type NoExecuteTaintManager struct {
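The hash helper added above is what spreads updates across workers: FNV-1a over the node name, modulo the worker count, so all updates for a given node always land on the same worker and stay ordered relative to each other. A runnable demonstration of that stable-assignment property:

package main

import (
    "fmt"
    "hash/fnv"
    "io"
)

// hash maps a key onto one of max workers; identical keys always
// produce the same worker index.
func hash(val string, max int) int {
    hasher := fnv.New32a()
    io.WriteString(hasher, val)
    return int(hasher.Sum32() % uint32(max))
}

func main() {
    for _, name := range []string{"node0", "node1", "node0"} {
        fmt.Printf("%s -> worker %d\n", name, hash(name, 8))
    }
}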
@@ -70,8 +97,8 @@ type NoExecuteTaintManager struct {
     taintedNodesLock sync.Mutex
     taintedNodes     map[string][]v1.Taint
 
-    nodeUpdateChannel chan *nodeUpdateItem
-    podUpdateChannel  chan *podUpdateItem
+    nodeUpdateChannels []chan *nodeUpdateItem
+    podUpdateChannels  []chan *podUpdateItem
 
     nodeUpdateQueue workqueue.Interface
     podUpdateQueue  workqueue.Interface
@@ -155,17 +182,15 @@ func NewNoExecuteTaintManager(c clientset.Interface) *NoExecuteTaintManager {
     eventBroadcaster.StartLogging(glog.Infof)
     if c != nil {
         glog.V(0).Infof("Sending events to api server.")
-        eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(c.CoreV1().RESTClient()).Events("")})
+        eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.CoreV1().Events("")})
     } else {
         glog.Fatalf("kubeClient is nil when starting NodeController")
     }
 
     tm := &NoExecuteTaintManager{
-        client:            c,
-        recorder:          recorder,
-        taintedNodes:      make(map[string][]v1.Taint),
-        nodeUpdateChannel: make(chan *nodeUpdateItem, nodeUpdateChannelSize),
-        podUpdateChannel:  make(chan *podUpdateItem, podUpdateChannelSize),
+        client:       c,
+        recorder:     recorder,
+        taintedNodes: make(map[string][]v1.Taint),
 
         nodeUpdateQueue: workqueue.New(),
         podUpdateQueue:  workqueue.New(),
@@ -178,6 +203,15 @@ func NewNoExecuteTaintManager(c clientset.Interface) *NoExecuteTaintManager {
 // Run starts NoExecuteTaintManager which will run in loop until `stopCh` is closed.
 func (tc *NoExecuteTaintManager) Run(stopCh <-chan struct{}) {
     glog.V(0).Infof("Starting NoExecuteTaintManager")
+
+    // TODO: Figure out a reasonable number of workers and propagate the
+    // number of workers up making it a paramater of Run() function.
+    workers := 8
+    for i := 0; i < workers; i++ {
+        tc.nodeUpdateChannels = append(tc.nodeUpdateChannels, make(chan *nodeUpdateItem, nodeUpdateChannelSize))
+        tc.podUpdateChannels = append(tc.podUpdateChannels, make(chan *podUpdateItem, podUpdateChannelSize))
+    }
+
     // Functions that are responsible for taking work items out of the workqueues and putting them
     // into channels.
     go func(stopCh <-chan struct{}) {
@@ -187,11 +221,14 @@ func (tc *NoExecuteTaintManager) Run(stopCh <-chan struct{}) {
                 break
             }
             nodeUpdate := item.(*nodeUpdateItem)
+            hash := hash(nodeUpdate.name(), workers)
             select {
             case <-stopCh:
+                tc.nodeUpdateQueue.Done(item)
                 break
-            case tc.nodeUpdateChannel <- nodeUpdate:
+            case tc.nodeUpdateChannels[hash] <- nodeUpdate:
             }
             tc.nodeUpdateQueue.Done(item)
         }
     }(stopCh)
+
@@ -202,14 +239,28 @@ func (tc *NoExecuteTaintManager) Run(stopCh <-chan struct{}) {
                 break
             }
             podUpdate := item.(*podUpdateItem)
+            hash := hash(podUpdate.nodeName(), workers)
             select {
             case <-stopCh:
+                tc.podUpdateQueue.Done(item)
                 break
-            case tc.podUpdateChannel <- podUpdate:
+            case tc.podUpdateChannels[hash] <- podUpdate:
             }
             tc.podUpdateQueue.Done(item)
         }
     }(stopCh)
+
+    wg := sync.WaitGroup{}
+    wg.Add(workers)
+    for i := 0; i < workers; i++ {
+        go tc.worker(i, wg.Done, stopCh)
+    }
+    wg.Wait()
+}
+
+func (tc *NoExecuteTaintManager) worker(worker int, done func(), stopCh <-chan struct{}) {
+    defer done()
+
     // When processing events we want to prioritize Node updates over Pod updates,
     // as NodeUpdates that interest NoExecuteTaintManager should be handled as soon as possible -
     // we don't want user (or system) to wait until PodUpdate queue is drained before it can
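Run now fans a single workqueue out into per-worker channels, one goroutine per shard, instead of one shared channel. A minimal standalone sketch of that fan-out; the names here are illustrative, not the vendored API:

package main

import (
    "fmt"
    "hash/fnv"
    "io"
    "sync"
)

func shard(key string, n int) int {
    h := fnv.New32a()
    io.WriteString(h, key)
    return int(h.Sum32() % uint32(n))
}

func main() {
    const workers = 4
    chans := make([]chan string, workers)
    var wg sync.WaitGroup
    for i := range chans {
        chans[i] = make(chan string, 8)
        wg.Add(1)
        go func(i int) { // one goroutine per shard, like tc.worker above
            defer wg.Done()
            for item := range chans[i] {
                fmt.Printf("worker %d handled %s\n", i, item)
            }
        }(i)
    }
    for _, item := range []string{"node0", "node1", "node2", "node0"} {
        chans[shard(item, workers)] <- item // same key -> same worker
    }
    for _, c := range chans {
        close(c)
    }
    wg.Wait()
}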
@@ -217,15 +268,15 @@ func (tc *NoExecuteTaintManager) Run(stopCh <-chan struct{}) {
     for {
         select {
         case <-stopCh:
-            break
-        case nodeUpdate := <-tc.nodeUpdateChannel:
+            return
+        case nodeUpdate := <-tc.nodeUpdateChannels[worker]:
             tc.handleNodeUpdate(nodeUpdate)
-        case podUpdate := <-tc.podUpdateChannel:
+        case podUpdate := <-tc.podUpdateChannels[worker]:
             // If we found a Pod update we need to empty Node queue first.
         priority:
             for {
                 select {
-                case nodeUpdate := <-tc.nodeUpdateChannel:
+                case nodeUpdate := <-tc.nodeUpdateChannels[worker]:
                     tc.handleNodeUpdate(nodeUpdate)
                 default:
                     break priority
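The labeled break in the worker above implements the stated priority: before handling a pod update, the worker drains every pending node update, and `break priority` exits the drain loop once the node channel is empty. The same pattern in isolation:

package main

import "fmt"

func main() {
    high := make(chan string, 4) // node updates
    low := make(chan string, 4)  // pod updates
    high <- "node-a"
    high <- "node-b"
    low <- "pod-x"

    item := <-low
    // Before handling a low-priority item, drain everything
    // high-priority, using the same labeled-break pattern as above.
priority:
    for {
        select {
        case n := <-high:
            fmt.Println("handled first:", n)
        default:
            break priority
        }
    }
    fmt.Println("then handled:", item)
}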
vendor/k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler/timed_workers_test.go (generated, vendored)

@@ -61,7 +61,7 @@ func TestExecuteDelayed(t *testing.T) {
         return nil
     })
     now := time.Now()
-    then := now.Add(3 * time.Second)
+    then := now.Add(10 * time.Second)
     queue.AddWork(NewWorkArgs("1", "1"), now, then)
     queue.AddWork(NewWorkArgs("2", "2"), now, then)
     queue.AddWork(NewWorkArgs("3", "3"), now, then)
@@ -89,7 +89,7 @@ func TestCancel(t *testing.T) {
         return nil
     })
     now := time.Now()
-    then := now.Add(3 * time.Second)
+    then := now.Add(10 * time.Second)
     queue.AddWork(NewWorkArgs("1", "1"), now, then)
     queue.AddWork(NewWorkArgs("2", "2"), now, then)
     queue.AddWork(NewWorkArgs("3", "3"), now, then)
@@ -119,7 +119,7 @@ func TestCancelAndReadd(t *testing.T) {
         return nil
     })
     now := time.Now()
-    then := now.Add(3 * time.Second)
+    then := now.Add(10 * time.Second)
     queue.AddWork(NewWorkArgs("1", "1"), now, then)
     queue.AddWork(NewWorkArgs("2", "2"), now, then)
     queue.AddWork(NewWorkArgs("3", "3"), now, then)