/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workqueue

import (
	"sync"
	"time"

	"k8s.io/utils/clock"
)

type Interface interface {
	Add(item interface{})
	Len() int
	Get() (item interface{}, shutdown bool)
	Done(item interface{})
	ShutDown()
	ShutDownWithDrain()
	ShuttingDown() bool
}
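
// exampleWorkerLoop is an illustrative sketch, not part of the upstream file:
// it shows the intended life cycle for consumers of Interface: Get an item,
// process it, and always call Done so that items re-Added in the meantime are
// requeued. The processItem callback is a hypothetical placeholder.
func exampleWorkerLoop(q Interface, processItem func(interface{})) {
	for {
		item, shutdown := q.Get()
		if shutdown {
			// The queue has been shut down; stop the worker.
			return
		}
		processItem(item)
		// Done must be called even if processing failed, otherwise the item
		// can never be handed out again.
		q.Done(item)
	}
}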

// QueueConfig specifies optional configurations to customize an Interface.
type QueueConfig struct {
	// Name for the queue. If unnamed, the metrics will not be registered.
	Name string

	// MetricsProvider optionally allows specifying a metrics provider to use for the queue
	// instead of the global provider.
	MetricsProvider MetricsProvider

	// Clock optionally allows injecting a real or fake clock for testing purposes.
	Clock clock.WithTicker
}

// New constructs a new work queue (see the package comment).
func New() *Type {
	return NewWithConfig(QueueConfig{
		Name: "",
	})
}

// NewWithConfig constructs a new workqueue with the ability to
// customize different properties.
func NewWithConfig(config QueueConfig) *Type {
	return newQueueWithConfig(config, defaultUnfinishedWorkUpdatePeriod)
}

// NewNamed creates a new named queue.
// Deprecated: Use NewWithConfig instead.
func NewNamed(name string) *Type {
	return NewWithConfig(QueueConfig{
		Name: name,
	})
}
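
// exampleNamedQueue is an illustrative sketch, not part of the upstream file:
// it shows how NewWithConfig can build a named queue with an explicit clock,
// which is useful for wiring in a fake clock in tests. The name "example" is
// hypothetical.
func exampleNamedQueue() *Type {
	return NewWithConfig(QueueConfig{
		Name:  "example",
		Clock: clock.RealClock{},
	})
}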

// newQueueWithConfig constructs a new named workqueue
// with the ability to customize different properties for testing purposes
func newQueueWithConfig(config QueueConfig, updatePeriod time.Duration) *Type {
	var metricsFactory *queueMetricsFactory
	if config.MetricsProvider != nil {
		metricsFactory = &queueMetricsFactory{
			metricsProvider: config.MetricsProvider,
		}
	} else {
		metricsFactory = &globalMetricsFactory
	}

	if config.Clock == nil {
		config.Clock = clock.RealClock{}
	}

	return newQueue(
		config.Clock,
		metricsFactory.newQueueMetrics(config.Name, config.Clock),
		updatePeriod,
	)
}

func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Duration) *Type {
	t := &Type{
		clock:                      c,
		dirty:                      set{},
		processing:                 set{},
		cond:                       sync.NewCond(&sync.Mutex{}),
		metrics:                    metrics,
		unfinishedWorkUpdatePeriod: updatePeriod,
	}

	// Don't start the goroutine for a type of noMetrics so we don't consume
	// resources unnecessarily
	if _, ok := metrics.(noMetrics); !ok {
		go t.updateUnfinishedWorkLoop()
	}

	return t
}

const defaultUnfinishedWorkUpdatePeriod = 500 * time.Millisecond

// Type is a work queue (see the package comment).
type Type struct {
	// queue defines the order in which we will work on items. Every
	// element of queue should be in the dirty set and not in the
	// processing set.
	queue []t

	// dirty defines all of the items that need to be processed.
	dirty set

	// Things that are currently being processed are in the processing set.
	// These things may be simultaneously in the dirty set. When we finish
	// processing something and remove it from this set, we'll check if
	// it's in the dirty set, and if so, add it to the queue.
	processing set

	cond *sync.Cond

	shuttingDown bool
	drain        bool

	metrics queueMetrics

	unfinishedWorkUpdatePeriod time.Duration
	clock                      clock.WithTicker
}

type empty struct{}
type t interface{}
type set map[t]empty

func (s set) has(item t) bool {
	_, exists := s[item]
	return exists
}

func (s set) insert(item t) {
	s[item] = empty{}
}

func (s set) delete(item t) {
	delete(s, item)
}

func (s set) len() int {
	return len(s)
}

// Add marks item as needing processing.
func (q *Type) Add(item interface{}) {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	if q.shuttingDown {
		return
	}
	if q.dirty.has(item) {
		return
	}

	q.metrics.add(item)

	q.dirty.insert(item)
	if q.processing.has(item) {
		return
	}

	q.queue = append(q.queue, item)
	q.cond.Signal()
}
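
// exampleDeduplicate is an illustrative sketch, not part of the upstream file:
// because Add consults the dirty set first, adding the same item twice before
// a worker picks it up results in a single queue entry. The key "pod-a" is
// hypothetical.
func exampleDeduplicate(q *Type) int {
	q.Add("pod-a")
	q.Add("pod-a") // no-op: "pod-a" is already in the dirty set
	return q.Len() // 1 on a freshly created queue
}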

// Len returns the current queue length, for informational purposes only. You
// shouldn't e.g. gate a call to Add() or Get() on Len() being a particular
// value; that can't be synchronized properly.
func (q *Type) Len() int {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	return len(q.queue)
}

// Get blocks until it can return an item to be processed. If shutdown = true,
// the caller should end their goroutine. You must call Done with item when you
// have finished processing it.
func (q *Type) Get() (item interface{}, shutdown bool) {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	for len(q.queue) == 0 && !q.shuttingDown {
		q.cond.Wait()
	}
	if len(q.queue) == 0 {
		// We must be shutting down.
		return nil, true
	}

	item = q.queue[0]
	// The underlying array still exists and references this object, so the object will not be garbage collected.
	q.queue[0] = nil
	q.queue = q.queue[1:]

	q.metrics.get(item)

	q.processing.insert(item)
	q.dirty.delete(item)

	return item, false
}

// Done marks item as done processing, and if it has been marked as dirty again
// while it was being processed, it will be re-added to the queue for
// re-processing.
func (q *Type) Done(item interface{}) {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()

	q.metrics.done(item)

	q.processing.delete(item)
	if q.dirty.has(item) {
		q.queue = append(q.queue, item)
		q.cond.Signal()
	} else if q.processing.len() == 0 {
		q.cond.Signal()
	}
}
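
// exampleRequeue is an illustrative sketch, not part of the upstream file: an
// item that is Added again while it is being processed only enters the dirty
// set, and it is Done that moves it back onto the queue for another round.
func exampleRequeue(q *Type, item interface{}) {
	q.Add(item)       // queued
	got, _ := q.Get() // with an otherwise empty queue, got == item; now processing
	q.Add(got)        // re-added while processing: goes to the dirty set only
	q.Done(got)       // dirty and done: requeued for another Get
}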

// ShutDown will cause q to ignore all new items added to it and
// immediately instruct the worker goroutines to exit.
func (q *Type) ShutDown() {
	q.setDrain(false)
	q.shutdown()
}

// ShutDownWithDrain will cause q to ignore all new items added to it. As soon
// as the worker goroutines have "drained", i.e. finished processing and called
// Done on all existing items in the queue, they will be instructed to exit and
// ShutDownWithDrain will return. Hence, a strict requirement for using this is
// that your workers must ensure that Done is called on all items in the queue
// once the shut down has been initiated; if that is not the case, this will
// block indefinitely. It is, however, safe to call ShutDown after having called
// ShutDownWithDrain, as to force the queue shut down to terminate immediately
// without waiting for the drainage.
func (q *Type) ShutDownWithDrain() {
	q.setDrain(true)
	q.shutdown()
	for q.isProcessing() && q.shouldDrain() {
		q.waitForProcessing()
	}
}
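
// exampleDrainingShutdown is an illustrative sketch, not part of the upstream
// file: ShutDownWithDrain only returns once every in-flight item has been
// marked Done, so the worker below keeps draining while the shutdown waits.
// The processItem callback is a hypothetical placeholder.
func exampleDrainingShutdown(q *Type, processItem func(interface{})) {
	go func() {
		for {
			item, shutdown := q.Get()
			if shutdown {
				return
			}
			processItem(item)
			q.Done(item) // without this call the drain below would block forever
		}
	}()
	q.ShutDownWithDrain() // returns once the processing set is empty
}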

// isProcessing indicates if there are still items on the work queue being
// processed. It's used to drain the work queue on an eventual shutdown.
func (q *Type) isProcessing() bool {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	return q.processing.len() != 0
}

// waitForProcessing waits for the worker goroutines to finish processing items
// and call Done on them.
func (q *Type) waitForProcessing() {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	// Ensure that we do not wait on a queue which is already empty, as that
	// could result in waiting for Done to be called on items in an empty queue
	// which has already been shut down, which will result in waiting
	// indefinitely.
	if q.processing.len() == 0 {
		return
	}
	q.cond.Wait()
}

func (q *Type) setDrain(shouldDrain bool) {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	q.drain = shouldDrain
}

func (q *Type) shouldDrain() bool {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	return q.drain
}

func (q *Type) shutdown() {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	q.shuttingDown = true
	q.cond.Broadcast()
}

func (q *Type) ShuttingDown() bool {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()

	return q.shuttingDown
}

func (q *Type) updateUnfinishedWorkLoop() {
	t := q.clock.NewTicker(q.unfinishedWorkUpdatePeriod)
	defer t.Stop()
	for range t.C() {
		if !func() bool {
			q.cond.L.Lock()
			defer q.cond.L.Unlock()
			if !q.shuttingDown {
				q.metrics.updateUnfinishedWork()
				return true
			}
			return false

		}() {
			return
		}
	}
}