mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 02:33:34 +00:00)
rebase: update K8s packages to v0.32.1
Update K8s packages in go.mod to v0.32.1

Signed-off-by: Praveen M <m.praveen@ibm.com>
vendor/k8s.io/client-go/util/apply/apply.go (generated, vendored, new file, 49 lines)
@@ -0,0 +1,49 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apply
+
+import (
+	"fmt"
+
+	cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/json"
+	"k8s.io/client-go/features"
+	"k8s.io/client-go/rest"
+)
+
+// NewRequest builds a new server-side apply request. The provided apply configuration object will
+// be marshalled to the request's body using the default encoding, and the Content-Type header will
+// be set to application/apply-patch with the appropriate structured syntax name suffix (today,
+// either +yaml or +cbor, see
+// https://www.iana.org/assignments/media-type-structured-suffix/media-type-structured-suffix.xhtml).
+func NewRequest(client rest.Interface, applyConfiguration interface{}) (*rest.Request, error) {
+	pt := types.ApplyYAMLPatchType
+	marshal := json.Marshal
+
+	if features.FeatureGates().Enabled(features.ClientsAllowCBOR) && features.FeatureGates().Enabled(features.ClientsPreferCBOR) {
+		pt = types.ApplyCBORPatchType
+		marshal = cbor.Marshal
+	}
+
+	body, err := marshal(applyConfiguration)
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal apply configuration: %w", err)
+	}
+
+	return client.Patch(pt).Body(body), nil
+}
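For context, a minimal sketch of how a caller might drive this new helper (not part of the commit; the ConfigMap apply configuration, namespace, and field manager name are illustrative assumptions):

package example

import (
	"context"

	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/util/apply"
)

// applyConfigMap server-side-applies a ConfigMap via apply.NewRequest, which
// marshals the apply configuration (YAML by default, CBOR when both CBOR
// feature gates are enabled) and sets the matching Content-Type.
func applyConfigMap(ctx context.Context, cfg *rest.Config) error {
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}

	cm := applycorev1.ConfigMap("demo", "default").
		WithData(map[string]string{"key": "value"})

	req, err := apply.NewRequest(cs.CoreV1().RESTClient(), cm)
	if err != nil {
		return err
	}

	// NewRequest only sets the verb, body, and Content-Type; the caller still
	// names the resource and the field manager before issuing the request.
	return req.
		Resource("configmaps").
		Namespace("default").
		Name("demo").
		Param("fieldManager", "example-manager").
		Do(ctx).
		Error()
}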
vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go (generated, vendored, 8 lines changed)
@@ -32,6 +32,12 @@ func init() {
 	dataConsistencyDetectionForListFromCacheEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR"))
 }
 
+// IsDataConsistencyDetectionForListEnabled returns true when
+// the KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR environment variable was set during a binary startup.
+func IsDataConsistencyDetectionForListEnabled() bool {
+	return dataConsistencyDetectionForListFromCacheEnabled
+}
+
 // CheckListFromCacheDataConsistencyIfRequested performs a data consistency check only when
 // the KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR environment variable was set during a binary startup
 // for requests that have a high chance of being served from the watch-cache.
@@ -50,7 +56,7 @@ func init() {
 // the cache (even though this might not be true for some requests)
 // and issue the second call to get data from etcd for comparison.
 func CheckListFromCacheDataConsistencyIfRequested[T runtime.Object](ctx context.Context, identity string, listItemsFn ListFunc[T], optionsUsedToReceiveList metav1.ListOptions, receivedList runtime.Object) {
-	if !dataConsistencyDetectionForListFromCacheEnabled {
+	if !IsDataConsistencyDetectionForListEnabled() {
 		return
 	}
 	checkListFromCacheDataConsistencyIfRequestedInternal(ctx, identity, listItemsFn, optionsUsedToReceiveList, receivedList)
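As a usage note (not part of the commit), the detector parses the environment variable once at package init, so it has to be set before the process starts; the newly exported helper lets callers observe the effective state, for example:

package example

import (
	"fmt"

	"k8s.io/client-go/util/consistencydetector"
)

// reportDetectorState prints whether list-from-cache consistency checking is
// active. KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR is read in an init()
// function, so it must already be set in the environment when the binary
// starts, e.g. KUBE_LIST_FROM_CACHE_INCONSISTENCY_DETECTOR=true ./controller
func reportDetectorState() {
	if consistencydetector.IsDataConsistencyDetectionForListEnabled() {
		fmt.Println("list consistency checks enabled: eligible LIST responses are re-read from etcd and compared")
	} else {
		fmt.Println("list consistency checks disabled")
	}
}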
vendor/k8s.io/client-go/util/flowcontrol/backoff.go (generated, vendored, 28 lines changed)
@@ -32,7 +32,12 @@ type backoffEntry struct {
 
 type Backoff struct {
 	sync.RWMutex
-	Clock           clock.Clock
+	Clock clock.Clock
+	// HasExpiredFunc controls the logic that determines whether the backoff
+	// counter should be reset, and when to GC old backoff entries. If nil, the
+	// default hasExpired function will restart the backoff factor to the
+	// beginning after observing time has passed at least equal to 2*maxDuration
+	HasExpiredFunc func(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool
 	defaultDuration time.Duration
 	maxDuration     time.Duration
 	perItemBackoff  map[string]*backoffEntry
@@ -93,7 +98,7 @@ func (p *Backoff) Next(id string, eventTime time.Time) {
 	p.Lock()
 	defer p.Unlock()
 	entry, ok := p.perItemBackoff[id]
-	if !ok || hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
+	if !ok || p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
 		entry = p.initEntryUnsafe(id)
 		entry.backoff += p.jitter(entry.backoff)
 	} else {
@@ -119,7 +124,7 @@ func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool {
 	if !ok {
 		return false
 	}
-	if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
+	if p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
 		return false
 	}
 	return p.Clock.Since(eventTime) < entry.backoff
@@ -133,21 +138,21 @@ func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool {
 	if !ok {
 		return false
 	}
-	if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
+	if p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
 		return false
 	}
 	return eventTime.Sub(entry.lastUpdate) < entry.backoff
 }
 
-// Garbage collect records that have aged past maxDuration. Backoff users are expected
-// to invoke this periodically.
+// Garbage collect records that have aged past their expiration, which defaults
+// to 2*maxDuration (see hasExpired godoc). Backoff users are expected to invoke
+// this periodically.
 func (p *Backoff) GC() {
 	p.Lock()
 	defer p.Unlock()
 	now := p.Clock.Now()
 	for id, entry := range p.perItemBackoff {
-		if now.Sub(entry.lastUpdate) > p.maxDuration*2 {
-			// GC when entry has not been updated for 2*maxDuration
+		if p.hasExpired(now, entry.lastUpdate, p.maxDuration) {
 			delete(p.perItemBackoff, id)
 		}
 	}
@@ -174,7 +179,10 @@ func (p *Backoff) jitter(delay time.Duration) time.Duration {
 	return time.Duration(p.rand.Float64() * p.maxJitterFactor * float64(delay))
 }
 
-// After 2*maxDuration we restart the backoff factor to the beginning
-func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool {
+// Unless an alternate function is provided, after 2*maxDuration we restart the backoff factor to the beginning
+func (p *Backoff) hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool {
+	if p.HasExpiredFunc != nil {
+		return p.HasExpiredFunc(eventTime, lastUpdate, maxDuration)
+	}
 	return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration
 }
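A small sketch of the new hook (illustrative only; the one-second base, ten-second cap, and the custom expiry rule are assumptions, not values from this change):

package example

import (
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

// newAggressiveBackoff returns a Backoff whose per-item counters reset after
// the item has been idle for maxDuration, instead of the default 2*maxDuration
// implemented by the unexported hasExpired method.
func newAggressiveBackoff() *flowcontrol.Backoff {
	b := flowcontrol.NewBackOff(1*time.Second, 10*time.Second)
	b.HasExpiredFunc = func(eventTime, lastUpdate time.Time, maxDuration time.Duration) bool {
		return eventTime.Sub(lastUpdate) > maxDuration
	}
	return b
}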
vendor/k8s.io/client-go/util/workqueue/delaying_queue.go (generated, vendored, 63 lines changed)
@@ -64,26 +64,33 @@ type TypedDelayingQueueConfig[T comparable] struct {
 // NewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use
 // NewDelayingQueueWithConfig instead and specify a name.
 //
-// Deprecated: use TypedNewDelayingQueue instead.
+// Deprecated: use NewTypedDelayingQueue instead.
 func NewDelayingQueue() DelayingInterface {
 	return NewDelayingQueueWithConfig(DelayingQueueConfig{})
 }
 
-// TypedNewDelayingQueue constructs a new workqueue with delayed queuing ability.
-// TypedNewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use
-// TypedNewDelayingQueueWithConfig instead and specify a name.
-func TypedNewDelayingQueue[T comparable]() TypedDelayingInterface[T] {
+// NewTypedDelayingQueue constructs a new workqueue with delayed queuing ability.
+// NewTypedDelayingQueue does not emit metrics. For use with a MetricsProvider, please use
+// NewTypedDelayingQueueWithConfig instead and specify a name.
+func NewTypedDelayingQueue[T comparable]() TypedDelayingInterface[T] {
 	return NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{})
 }
 
 // NewDelayingQueueWithConfig constructs a new workqueue with options to
 // customize different properties.
 //
-// Deprecated: use TypedNewDelayingQueueWithConfig instead.
+// Deprecated: use NewTypedDelayingQueueWithConfig instead.
 func NewDelayingQueueWithConfig(config DelayingQueueConfig) DelayingInterface {
 	return NewTypedDelayingQueueWithConfig[any](config)
 }
 
+// TypedNewDelayingQueue exists for backwards compatibility only.
+//
+// Deprecated: use NewTypedDelayingQueueWithConfig instead.
+func TypedNewDelayingQueue[T comparable]() TypedDelayingInterface[T] {
+	return NewTypedDelayingQueue[T]()
+}
+
 // NewTypedDelayingQueueWithConfig constructs a new workqueue with options to
 // customize different properties.
 func NewTypedDelayingQueueWithConfig[T comparable](config TypedDelayingQueueConfig[T]) TypedDelayingInterface[T] {
@@ -134,7 +141,7 @@ func newDelayingQueue[T comparable](clock clock.WithTicker, q TypedInterface[T],
 		clock:           clock,
 		heartbeat:       clock.NewTicker(maxWait),
 		stopCh:          make(chan struct{}),
-		waitingForAddCh: make(chan *waitFor, 1000),
+		waitingForAddCh: make(chan *waitFor[T], 1000),
 		metrics:         newRetryMetrics(name, provider),
 	}
 
@@ -158,15 +165,15 @@ type delayingType[T comparable] struct {
 	heartbeat clock.Ticker
 
 	// waitingForAddCh is a buffered channel that feeds waitingForAdd
-	waitingForAddCh chan *waitFor
+	waitingForAddCh chan *waitFor[T]
 
 	// metrics counts the number of retries
 	metrics retryMetrics
 }
 
 // waitFor holds the data to add and the time it should be added
-type waitFor struct {
-	data    t
+type waitFor[T any] struct {
+	data    T
 	readyAt time.Time
 	// index in the priority queue (heap)
 	index int
@@ -180,15 +187,15 @@ type waitFor struct {
 // it has been removed from the queue and placed at index Len()-1 by
 // container/heap. Push adds an item at index Len(), and container/heap
 // percolates it into the correct location.
-type waitForPriorityQueue []*waitFor
+type waitForPriorityQueue[T any] []*waitFor[T]
 
-func (pq waitForPriorityQueue) Len() int {
+func (pq waitForPriorityQueue[T]) Len() int {
 	return len(pq)
 }
-func (pq waitForPriorityQueue) Less(i, j int) bool {
+func (pq waitForPriorityQueue[T]) Less(i, j int) bool {
 	return pq[i].readyAt.Before(pq[j].readyAt)
 }
-func (pq waitForPriorityQueue) Swap(i, j int) {
+func (pq waitForPriorityQueue[T]) Swap(i, j int) {
 	pq[i], pq[j] = pq[j], pq[i]
 	pq[i].index = i
 	pq[j].index = j
@@ -196,16 +203,16 @@ func (pq waitForPriorityQueue) Swap(i, j int) {
 
 // Push adds an item to the queue. Push should not be called directly; instead,
 // use `heap.Push`.
-func (pq *waitForPriorityQueue) Push(x interface{}) {
+func (pq *waitForPriorityQueue[T]) Push(x interface{}) {
 	n := len(*pq)
-	item := x.(*waitFor)
+	item := x.(*waitFor[T])
 	item.index = n
 	*pq = append(*pq, item)
 }
 
 // Pop removes an item from the queue. Pop should not be called directly;
 // instead, use `heap.Pop`.
-func (pq *waitForPriorityQueue) Pop() interface{} {
+func (pq *waitForPriorityQueue[T]) Pop() interface{} {
 	n := len(*pq)
 	item := (*pq)[n-1]
 	item.index = -1
@@ -215,7 +222,7 @@ func (pq *waitForPriorityQueue) Pop() interface{} {
 
 // Peek returns the item at the beginning of the queue, without removing the
 // item or otherwise mutating the queue. It is safe to call directly.
-func (pq waitForPriorityQueue) Peek() interface{} {
+func (pq waitForPriorityQueue[T]) Peek() interface{} {
 	return pq[0]
 }
 
@@ -247,7 +254,7 @@ func (q *delayingType[T]) AddAfter(item T, duration time.Duration) {
 	select {
 	case <-q.stopCh:
 		// unblock if ShutDown() is called
-	case q.waitingForAddCh <- &waitFor{data: item, readyAt: q.clock.Now().Add(duration)}:
+	case q.waitingForAddCh <- &waitFor[T]{data: item, readyAt: q.clock.Now().Add(duration)}:
 	}
 }
 
@@ -266,10 +273,10 @@ func (q *delayingType[T]) waitingLoop() {
 	// Make a timer that expires when the item at the head of the waiting queue is ready
 	var nextReadyAtTimer clock.Timer
 
-	waitingForQueue := &waitForPriorityQueue{}
+	waitingForQueue := &waitForPriorityQueue[T]{}
 	heap.Init(waitingForQueue)
 
-	waitingEntryByData := map[t]*waitFor{}
+	waitingEntryByData := map[T]*waitFor[T]{}
 
 	for {
 		if q.TypedInterface.ShuttingDown() {
@@ -280,13 +287,13 @@ func (q *delayingType[T]) waitingLoop() {
 
 		// Add ready entries
 		for waitingForQueue.Len() > 0 {
-			entry := waitingForQueue.Peek().(*waitFor)
+			entry := waitingForQueue.Peek().(*waitFor[T])
 			if entry.readyAt.After(now) {
 				break
 			}
 
-			entry = heap.Pop(waitingForQueue).(*waitFor)
-			q.Add(entry.data.(T))
+			entry = heap.Pop(waitingForQueue).(*waitFor[T])
+			q.Add(entry.data)
 			delete(waitingEntryByData, entry.data)
 		}
 
@@ -296,7 +303,7 @@ func (q *delayingType[T]) waitingLoop() {
 			if nextReadyAtTimer != nil {
 				nextReadyAtTimer.Stop()
 			}
-			entry := waitingForQueue.Peek().(*waitFor)
+			entry := waitingForQueue.Peek().(*waitFor[T])
 			nextReadyAtTimer = q.clock.NewTimer(entry.readyAt.Sub(now))
 			nextReadyAt = nextReadyAtTimer.C()
 		}
@@ -315,7 +322,7 @@ func (q *delayingType[T]) waitingLoop() {
 			if waitEntry.readyAt.After(q.clock.Now()) {
 				insert(waitingForQueue, waitingEntryByData, waitEntry)
 			} else {
-				q.Add(waitEntry.data.(T))
+				q.Add(waitEntry.data)
 			}
 
 			drained := false
@@ -325,7 +332,7 @@ func (q *delayingType[T]) waitingLoop() {
 				if waitEntry.readyAt.After(q.clock.Now()) {
 					insert(waitingForQueue, waitingEntryByData, waitEntry)
 				} else {
-					q.Add(waitEntry.data.(T))
+					q.Add(waitEntry.data)
 				}
 			default:
 				drained = true
@@ -336,7 +343,7 @@ func (q *delayingType[T]) waitingLoop() {
 }
 
 // insert adds the entry to the priority queue, or updates the readyAt if it already exists in the queue
-func insert(q *waitForPriorityQueue, knownEntries map[t]*waitFor, entry *waitFor) {
+func insert[T comparable](q *waitForPriorityQueue[T], knownEntries map[T]*waitFor[T], entry *waitFor[T]) {
 	// if the entry already exists, update the time only if it would cause the item to be queued sooner
 	existing, exists := knownEntries[entry.data]
 	if exists {
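For reference, a minimal sketch using the constructor name this change settles on (the key string and delay are illustrative):

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// NewTypedDelayingQueue is the supported name; TypedNewDelayingQueue is
	// kept only as a deprecated compatibility alias.
	q := workqueue.NewTypedDelayingQueue[string]()
	defer q.ShutDown()

	// The item becomes visible to Get only after the delay has elapsed.
	q.AddAfter("default/my-app", 50*time.Millisecond)

	item, shutdown := q.Get()
	if !shutdown {
		fmt.Println("got:", item)
		q.Done(item)
	}
}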
vendor/k8s.io/client-go/util/workqueue/metrics.go (generated, vendored, 67 lines changed)
@@ -26,10 +26,10 @@ import (
 // This file provides abstractions for setting the provider (e.g., prometheus)
 // of metrics.
 
-type queueMetrics interface {
-	add(item t)
-	get(item t)
-	done(item t)
+type queueMetrics[T comparable] interface {
+	add(item T)
+	get(item T)
+	done(item T)
 	updateUnfinishedWork()
 }
 
@@ -70,7 +70,7 @@ func (noopMetric) Set(float64) {}
 func (noopMetric) Observe(float64) {}
 
 // defaultQueueMetrics expects the caller to lock before setting any metrics.
-type defaultQueueMetrics struct {
+type defaultQueueMetrics[T comparable] struct {
 	clock clock.Clock
 
 	// current depth of a workqueue
@@ -81,15 +81,15 @@ type defaultQueueMetrics struct {
 	latency HistogramMetric
 	// how long processing an item from a workqueue takes
 	workDuration HistogramMetric
-	addTimes             map[t]time.Time
-	processingStartTimes map[t]time.Time
+	addTimes             map[T]time.Time
+	processingStartTimes map[T]time.Time
 
 	// how long have current threads been working?
 	unfinishedWorkSeconds   SettableGaugeMetric
 	longestRunningProcessor SettableGaugeMetric
 }
 
-func (m *defaultQueueMetrics) add(item t) {
+func (m *defaultQueueMetrics[T]) add(item T) {
 	if m == nil {
 		return
 	}
@@ -101,7 +101,7 @@ func (m *defaultQueueMetrics) add(item t) {
 	}
 }
 
-func (m *defaultQueueMetrics) get(item t) {
+func (m *defaultQueueMetrics[T]) get(item T) {
 	if m == nil {
 		return
 	}
@@ -114,7 +114,7 @@ func (m *defaultQueueMetrics) get(item t) {
 	}
 }
 
-func (m *defaultQueueMetrics) done(item t) {
+func (m *defaultQueueMetrics[T]) done(item T) {
 	if m == nil {
 		return
 	}
@@ -125,7 +125,7 @@ func (m *defaultQueueMetrics) done(item t) {
 	}
 }
 
-func (m *defaultQueueMetrics) updateUnfinishedWork() {
+func (m *defaultQueueMetrics[T]) updateUnfinishedWork() {
 	// Note that a summary metric would be better for this, but prometheus
 	// doesn't seem to have non-hacky ways to reset the summary metrics.
 	var total float64
@@ -141,15 +141,15 @@ func (m *defaultQueueMetrics) updateUnfinishedWork() {
 	m.longestRunningProcessor.Set(oldest)
 }
 
-type noMetrics struct{}
+type noMetrics[T any] struct{}
 
-func (noMetrics) add(item t)            {}
-func (noMetrics) get(item t)            {}
-func (noMetrics) done(item t)           {}
-func (noMetrics) updateUnfinishedWork() {}
+func (noMetrics[T]) add(item T)            {}
+func (noMetrics[T]) get(item T)            {}
+func (noMetrics[T]) done(item T)           {}
+func (noMetrics[T]) updateUnfinishedWork() {}
 
 // Gets the time since the specified start in seconds.
-func (m *defaultQueueMetrics) sinceInSeconds(start time.Time) float64 {
+func (m *defaultQueueMetrics[T]) sinceInSeconds(start time.Time) float64 {
 	return m.clock.Since(start).Seconds()
 }
 
@@ -210,28 +210,15 @@ func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric {
 	return noopMetric{}
 }
 
-var globalMetricsFactory = queueMetricsFactory{
-	metricsProvider: noopMetricsProvider{},
-}
+var globalMetricsProvider MetricsProvider = noopMetricsProvider{}
 
-type queueMetricsFactory struct {
-	metricsProvider MetricsProvider
+var setGlobalMetricsProviderOnce sync.Once
 
-	onlyOnce sync.Once
-}
-
-func (f *queueMetricsFactory) setProvider(mp MetricsProvider) {
-	f.onlyOnce.Do(func() {
-		f.metricsProvider = mp
-	})
-}
-
-func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) queueMetrics {
-	mp := f.metricsProvider
+func newQueueMetrics[T comparable](mp MetricsProvider, name string, clock clock.Clock) queueMetrics[T] {
 	if len(name) == 0 || mp == (noopMetricsProvider{}) {
-		return noMetrics{}
+		return noMetrics[T]{}
 	}
-	return &defaultQueueMetrics{
+	return &defaultQueueMetrics[T]{
 		clock: clock,
 		depth: mp.NewDepthMetric(name),
 		adds:  mp.NewAddsMetric(name),
@@ -239,8 +226,8 @@ func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) qu
 		workDuration:            mp.NewWorkDurationMetric(name),
 		unfinishedWorkSeconds:   mp.NewUnfinishedWorkSecondsMetric(name),
 		longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name),
-		addTimes:                map[t]time.Time{},
-		processingStartTimes:    map[t]time.Time{},
+		addTimes:                map[T]time.Time{},
+		processingStartTimes:    map[T]time.Time{},
 	}
 }
 
@@ -251,7 +238,7 @@ func newRetryMetrics(name string, provider MetricsProvider) retryMetrics {
 	}
 
 	if provider == nil {
-		provider = globalMetricsFactory.metricsProvider
+		provider = globalMetricsProvider
 	}
 
 	return &defaultRetryMetrics{
@@ -262,5 +249,7 @@ func newRetryMetrics(name string, provider MetricsProvider) retryMetrics {
 // SetProvider sets the metrics provider for all subsequently created work
 // queues. Only the first call has an effect.
 func SetProvider(metricsProvider MetricsProvider) {
-	globalMetricsFactory.setProvider(metricsProvider)
+	setGlobalMetricsProviderOnce.Do(func() {
+		globalMetricsProvider = metricsProvider
+	})
 }
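A sketch of how a provider is registered under the new global-variable-plus-sync.Once scheme (the nopMetric and provider types below are illustrative stand-ins, not part of client-go; a real provider would return Prometheus-backed metrics):

package example

import (
	"k8s.io/client-go/util/workqueue"
)

// nopMetric satisfies the workqueue metric interfaces without recording anything.
type nopMetric struct{}

func (nopMetric) Inc()            {}
func (nopMetric) Dec()            {}
func (nopMetric) Set(float64)     {}
func (nopMetric) Observe(float64) {}

// provider is a hypothetical MetricsProvider built from nopMetric.
type provider struct{}

func (provider) NewDepthMetric(name string) workqueue.GaugeMetric            { return nopMetric{} }
func (provider) NewAddsMetric(name string) workqueue.CounterMetric           { return nopMetric{} }
func (provider) NewLatencyMetric(name string) workqueue.HistogramMetric      { return nopMetric{} }
func (provider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { return nopMetric{} }
func (provider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
	return nopMetric{}
}
func (provider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric {
	return nopMetric{}
}
func (provider) NewRetriesMetric(name string) workqueue.CounterMetric { return nopMetric{} }

// init registers the provider before any named queue is constructed. After this
// change the provider lives in a package-level variable guarded by a sync.Once,
// so only the first SetProvider call in the process takes effect.
func init() {
	workqueue.SetProvider(provider{})
}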
vendor/k8s.io/client-go/util/workqueue/queue.go (generated, vendored, 17 lines changed)
@@ -138,13 +138,9 @@ func NewNamed(name string) *Type {
 // newQueueWithConfig constructs a new named workqueue
 // with the ability to customize different properties for testing purposes
 func newQueueWithConfig[T comparable](config TypedQueueConfig[T], updatePeriod time.Duration) *Typed[T] {
-	var metricsFactory *queueMetricsFactory
+	metricsProvider := globalMetricsProvider
 	if config.MetricsProvider != nil {
-		metricsFactory = &queueMetricsFactory{
-			metricsProvider: config.MetricsProvider,
-		}
-	} else {
-		metricsFactory = &globalMetricsFactory
+		metricsProvider = config.MetricsProvider
 	}
 
 	if config.Clock == nil {
@@ -158,12 +154,12 @@ func newQueueWithConfig[T comparable](config TypedQueueConfig[T], updatePeriod t
 	return newQueue(
 		config.Clock,
 		config.Queue,
-		metricsFactory.newQueueMetrics(config.Name, config.Clock),
+		newQueueMetrics[T](metricsProvider, config.Name, config.Clock),
 		updatePeriod,
 	)
 }
 
-func newQueue[T comparable](c clock.WithTicker, queue Queue[T], metrics queueMetrics, updatePeriod time.Duration) *Typed[T] {
+func newQueue[T comparable](c clock.WithTicker, queue Queue[T], metrics queueMetrics[T], updatePeriod time.Duration) *Typed[T] {
 	t := &Typed[T]{
 		clock: c,
 		queue: queue,
@@ -176,7 +172,7 @@ func newQueue[T comparable](c clock.WithTicker, queue Queue[T], metrics queueMet
 
 	// Don't start the goroutine for a type of noMetrics so we don't consume
 	// resources unnecessarily
-	if _, ok := metrics.(noMetrics); !ok {
+	if _, ok := metrics.(noMetrics[T]); !ok {
 		go t.updateUnfinishedWorkLoop()
 	}
 
@@ -209,14 +205,13 @@ type Typed[t comparable] struct {
 	shuttingDown bool
 	drain        bool
 
-	metrics queueMetrics
+	metrics queueMetrics[t]
 
 	unfinishedWorkUpdatePeriod time.Duration
 	clock                      clock.WithTicker
 }
 
 type empty struct{}
-type t interface{}
 type set[t comparable] map[t]empty
 
 func (s set[t]) has(item t) bool {