rebase: bump sigs.k8s.io/controller-runtime

Bumps the k8s-dependencies group with 1 update: [sigs.k8s.io/controller-runtime](https://github.com/kubernetes-sigs/controller-runtime).


Updates `sigs.k8s.io/controller-runtime` from 0.20.4 to 0.21.0
- [Release notes](https://github.com/kubernetes-sigs/controller-runtime/releases)
- [Changelog](https://github.com/kubernetes-sigs/controller-runtime/blob/main/RELEASE.md)
- [Commits](https://github.com/kubernetes-sigs/controller-runtime/compare/v0.20.4...v0.21.0)

---
updated-dependencies:
- dependency-name: sigs.k8s.io/controller-runtime
  dependency-version: 0.21.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: k8s-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>

Author:    dependabot[bot]
Committed: mergify[bot], 2025-05-26 20:24:19 +00:00
Commit:    eb13efc9df (parent d05ebd3456)
27 changed files with 399 additions and 232 deletions


@@ -26,6 +26,7 @@ import (
"k8s.io/klog/v2"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/config"
"sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue"
"sigs.k8s.io/controller-runtime/pkg/internal/controller"
"sigs.k8s.io/controller-runtime/pkg/manager"
@@ -80,13 +81,53 @@ type TypedOptions[request comparable] struct {
// Only use a custom NewQueue if you know what you are doing.
NewQueue func(controllerName string, rateLimiter workqueue.TypedRateLimiter[request]) workqueue.TypedRateLimitingInterface[request]
// Logger will be used to build a default LogConstructor if unset.
Logger logr.Logger
// LogConstructor is used to construct a logger used for this controller and passed
// to each reconciliation via the context field.
LogConstructor func(request *request) logr.Logger
// UsePriorityQueue configures the controllers queue to use the controller-runtime provided
// priority queue.
//
// Note: This flag is disabled by default until a future version. It's currently in beta.
UsePriorityQueue *bool
}
// Controller implements a Kubernetes API. A Controller manages a work queue fed reconcile.Requests
// from source.Sources. Work is performed through the reconcile.Reconciler for each enqueued item.
// DefaultFromConfig defaults the config from a config.Controller
func (options *TypedOptions[request]) DefaultFromConfig(config config.Controller) {
if options.Logger.GetSink() == nil {
options.Logger = config.Logger
}
if options.SkipNameValidation == nil {
options.SkipNameValidation = config.SkipNameValidation
}
if options.MaxConcurrentReconciles <= 0 && config.MaxConcurrentReconciles > 0 {
options.MaxConcurrentReconciles = config.MaxConcurrentReconciles
}
if options.CacheSyncTimeout == 0 && config.CacheSyncTimeout > 0 {
options.CacheSyncTimeout = config.CacheSyncTimeout
}
if options.UsePriorityQueue == nil {
options.UsePriorityQueue = config.UsePriorityQueue
}
if options.RecoverPanic == nil {
options.RecoverPanic = config.RecoverPanic
}
if options.NeedLeaderElection == nil {
options.NeedLeaderElection = config.NeedLeaderElection
}
}
// Controller implements an API. A Controller manages a work queue fed reconcile.Requests
// from source.Sources. Work is performed through the reconcile.Reconciler for each enqueued item.
// Work typically is reads and writes Kubernetes objects to make the system state match the state specified
// in the object Spec.
type Controller = TypedController[reconcile.Request]
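
The new Logger and UsePriorityQueue fields pair with the DefaultFromConfig method above: per-controller defaulting now happens on the options themselves instead of inside the constructor. A minimal sketch of that flow (not part of this diff; the `defaults` value stands in for whatever mgr.GetControllerOptions() would return): explicitly set fields win, unset ones are filled in from the config.Controller.

```go
package main

import (
	"time"

	"k8s.io/utils/ptr"
	"sigs.k8s.io/controller-runtime/pkg/config"
	"sigs.k8s.io/controller-runtime/pkg/controller"
)

func main() {
	opts := controller.Options{
		MaxConcurrentReconciles: 4,            // explicitly set: preserved by DefaultFromConfig
		UsePriorityQueue:        ptr.To(true), // opt into the beta priority queue
	}

	defaults := config.Controller{ // illustrative stand-in for manager-level defaults
		MaxConcurrentReconciles: 10,
		CacheSyncTimeout:        5 * time.Minute,
		RecoverPanic:            ptr.To(true),
	}

	opts.DefaultFromConfig(defaults)
	// opts.MaxConcurrentReconciles is still 4; CacheSyncTimeout and RecoverPanic
	// were unset, so they are taken from the config.
}
```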
@@ -119,7 +160,8 @@ func New(name string, mgr manager.Manager, options Options) (Controller, error)
//
// The name must be unique as it is used to identify the controller in metrics and logs.
func NewTyped[request comparable](name string, mgr manager.Manager, options TypedOptions[request]) (TypedController[request], error) {
c, err := NewTypedUnmanaged(name, mgr, options)
options.DefaultFromConfig(mgr.GetControllerOptions())
c, err := NewTypedUnmanaged(name, options)
if err != nil {
return nil, err
}
@@ -132,14 +174,14 @@ func NewTyped[request comparable](name string, mgr manager.Manager, options Type
// caller is responsible for starting the returned controller.
//
// The name must be unique as it is used to identify the controller in metrics and logs.
func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller, error) {
return NewTypedUnmanaged(name, mgr, options)
func NewUnmanaged(name string, options Options) (Controller, error) {
return NewTypedUnmanaged(name, options)
}
// NewTypedUnmanaged returns a new typed controller without adding it to the manager.
//
// The name must be unique as it is used to identify the controller in metrics and logs.
func NewTypedUnmanaged[request comparable](name string, mgr manager.Manager, options TypedOptions[request]) (TypedController[request], error) {
func NewTypedUnmanaged[request comparable](name string, options TypedOptions[request]) (TypedController[request], error) {
if options.Reconciler == nil {
return nil, fmt.Errorf("must specify Reconciler")
}
@@ -148,10 +190,6 @@ func NewTypedUnmanaged[request comparable](name string, mgr manager.Manager, opt
return nil, fmt.Errorf("must specify Name for Controller")
}
if options.SkipNameValidation == nil {
options.SkipNameValidation = mgr.GetControllerOptions().SkipNameValidation
}
if options.SkipNameValidation == nil || !*options.SkipNameValidation {
if err := checkName(name); err != nil {
return nil, err
@@ -159,7 +197,7 @@ func NewTypedUnmanaged[request comparable](name string, mgr manager.Manager, opt
}
if options.LogConstructor == nil {
log := mgr.GetLogger().WithValues(
log := options.Logger.WithValues(
"controller", name,
)
options.LogConstructor = func(in *request) logr.Logger {
@@ -175,23 +213,15 @@ func NewTypedUnmanaged[request comparable](name string, mgr manager.Manager, opt
}
if options.MaxConcurrentReconciles <= 0 {
if mgr.GetControllerOptions().MaxConcurrentReconciles > 0 {
options.MaxConcurrentReconciles = mgr.GetControllerOptions().MaxConcurrentReconciles
} else {
options.MaxConcurrentReconciles = 1
}
options.MaxConcurrentReconciles = 1
}
if options.CacheSyncTimeout == 0 {
if mgr.GetControllerOptions().CacheSyncTimeout != 0 {
options.CacheSyncTimeout = mgr.GetControllerOptions().CacheSyncTimeout
} else {
options.CacheSyncTimeout = 2 * time.Minute
}
options.CacheSyncTimeout = 2 * time.Minute
}
if options.RateLimiter == nil {
if ptr.Deref(mgr.GetControllerOptions().UsePriorityQueue, false) {
if ptr.Deref(options.UsePriorityQueue, false) {
options.RateLimiter = workqueue.NewTypedItemExponentialFailureRateLimiter[request](5*time.Millisecond, 1000*time.Second)
} else {
options.RateLimiter = workqueue.DefaultTypedControllerRateLimiter[request]()
@@ -200,9 +230,9 @@ func NewTypedUnmanaged[request comparable](name string, mgr manager.Manager, opt
if options.NewQueue == nil {
options.NewQueue = func(controllerName string, rateLimiter workqueue.TypedRateLimiter[request]) workqueue.TypedRateLimitingInterface[request] {
if ptr.Deref(mgr.GetControllerOptions().UsePriorityQueue, false) {
if ptr.Deref(options.UsePriorityQueue, false) {
return priorityqueue.New(controllerName, func(o *priorityqueue.Opts[request]) {
o.Log = mgr.GetLogger().WithValues("controller", controllerName)
o.Log = options.Logger.WithValues("controller", controllerName)
o.RateLimiter = rateLimiter
})
}
@@ -212,14 +242,6 @@ func NewTypedUnmanaged[request comparable](name string, mgr manager.Manager, opt
}
}
if options.RecoverPanic == nil {
options.RecoverPanic = mgr.GetControllerOptions().RecoverPanic
}
if options.NeedLeaderElection == nil {
options.NeedLeaderElection = mgr.GetControllerOptions().NeedLeaderElection
}
// Create controller with dependencies set
return &controller.Controller[request]{
Do: options.Reconciler,
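
Because NewUnmanaged and NewTypedUnmanaged no longer receive the manager, callers of the unmanaged constructors must apply manager-level defaults themselves; New and NewTyped now do this via DefaultFromConfig, as shown above. A migration sketch (the function name and the "example" controller name are illustrative):

```go
package example

import (
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func newUnmanaged(mgr manager.Manager, r reconcile.Reconciler) (controller.Controller, error) {
	opts := controller.Options{Reconciler: r}

	// v0.20.4: the manager was passed in and defaults were read from it internally.
	//   c, err := controller.NewUnmanaged("example", mgr, opts)

	// v0.21.0: pull the manager's defaults into the options first; New/NewTyped do
	// this step for you, NewUnmanaged/NewTypedUnmanaged no longer do.
	opts.DefaultFromConfig(mgr.GetControllerOptions())
	return controller.NewUnmanaged("example", opts)
}
```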


@@ -6,6 +6,7 @@ import (
"k8s.io/client-go/util/workqueue"
"k8s.io/utils/clock"
"sigs.k8s.io/controller-runtime/pkg/internal/metrics"
)
// This file is mostly a copy of unexported code from
@@ -14,8 +15,9 @@ import (
// The only two differences are the addition of mapLock in defaultQueueMetrics and converging retryMetrics into queueMetrics.
type queueMetrics[T comparable] interface {
add(item T)
get(item T)
add(item T, priority int)
get(item T, priority int)
updateDepthWithPriorityMetric(oldPriority, newPriority int)
done(item T)
updateUnfinishedWork()
retry()
@@ -25,9 +27,9 @@ func newQueueMetrics[T comparable](mp workqueue.MetricsProvider, name string, cl
if len(name) == 0 {
return noMetrics[T]{}
}
return &defaultQueueMetrics[T]{
dqm := &defaultQueueMetrics[T]{
clock: clock,
depth: mp.NewDepthMetric(name),
adds: mp.NewAddsMetric(name),
latency: mp.NewLatencyMetric(name),
workDuration: mp.NewWorkDurationMetric(name),
@@ -37,6 +39,13 @@ func newQueueMetrics[T comparable](mp workqueue.MetricsProvider, name string, cl
processingStartTimes: map[T]time.Time{},
retries: mp.NewRetriesMetric(name),
}
if mpp, ok := mp.(metrics.MetricsProviderWithPriority); ok {
dqm.depthWithPriority = mpp.NewDepthMetricWithPriority(name)
} else {
dqm.depth = mp.NewDepthMetric(name)
}
return dqm
}
// defaultQueueMetrics expects the caller to lock before setting any metrics.
@@ -44,7 +53,8 @@ type defaultQueueMetrics[T comparable] struct {
clock clock.Clock
// current depth of a workqueue
depth workqueue.GaugeMetric
depth workqueue.GaugeMetric
depthWithPriority metrics.DepthMetricWithPriority
// total number of adds handled by a workqueue
adds workqueue.CounterMetric
// how long an item stays in a workqueue
@@ -64,13 +74,17 @@ type defaultQueueMetrics[T comparable] struct {
}
// add is called for ready items only
func (m *defaultQueueMetrics[T]) add(item T) {
func (m *defaultQueueMetrics[T]) add(item T, priority int) {
if m == nil {
return
}
m.adds.Inc()
m.depth.Inc()
if m.depthWithPriority != nil {
m.depthWithPriority.Inc(priority)
} else {
m.depth.Inc()
}
m.mapLock.Lock()
defer m.mapLock.Unlock()
@@ -80,12 +94,16 @@ func (m *defaultQueueMetrics[T]) add(item T) {
}
}
func (m *defaultQueueMetrics[T]) get(item T) {
func (m *defaultQueueMetrics[T]) get(item T, priority int) {
if m == nil {
return
}
m.depth.Dec()
if m.depthWithPriority != nil {
m.depthWithPriority.Dec(priority)
} else {
m.depth.Dec()
}
m.mapLock.Lock()
defer m.mapLock.Unlock()
@@ -97,6 +115,13 @@ func (m *defaultQueueMetrics[T]) get(item T) {
}
}
func (m *defaultQueueMetrics[T]) updateDepthWithPriorityMetric(oldPriority, newPriority int) {
if m.depthWithPriority != nil {
m.depthWithPriority.Dec(oldPriority)
m.depthWithPriority.Inc(newPriority)
}
}
func (m *defaultQueueMetrics[T]) done(item T) {
if m == nil {
return
@@ -139,8 +164,9 @@ func (m *defaultQueueMetrics[T]) retry() {
type noMetrics[T any] struct{}
func (noMetrics[T]) add(item T) {}
func (noMetrics[T]) get(item T) {}
func (noMetrics[T]) done(item T) {}
func (noMetrics[T]) updateUnfinishedWork() {}
func (noMetrics[T]) retry() {}
func (noMetrics[T]) add(item T, priority int) {}
func (noMetrics[T]) get(item T, priority int) {}
func (noMetrics[T]) updateDepthWithPriorityMetric(oldPriority, newPriority int) {}
func (noMetrics[T]) done(item T) {}
func (noMetrics[T]) updateUnfinishedWork() {}
func (noMetrics[T]) retry() {}
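
The queue metrics now carry the item's priority on add/get and can move depth between priorities via updateDepthWithPriorityMetric; the priority-aware depth metric comes from the internal MetricsProviderWithPriority, so it cannot be implemented directly from outside the module. Purely as an illustration of the per-priority Inc/Dec bookkeeping (not controller-runtime code), a toy gauge might look like this:

```go
package main

import (
	"fmt"
	"sync"
)

type priorityDepth struct {
	mu    sync.Mutex
	depth map[int]int // current queue depth per priority
}

// Inc is called when an item becomes ready at the given priority.
func (p *priorityDepth) Inc(priority int) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.depth[priority]++
}

// Dec is called when an item is handed to a worker at the given priority.
func (p *priorityDepth) Dec(priority int) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.depth[priority]--
}

func main() {
	d := &priorityDepth{depth: map[int]int{}}
	d.Inc(0)  // add(item, 0)
	d.Inc(10) // add(item, 10)
	d.Dec(0)  // get(item, 0)
	// A priority bump on an already-queued item maps to Dec(old) + Inc(new),
	// which is what updateDepthWithPriorityMetric does above.
	fmt.Println(d.depth) // map[0:0 10:1]
}
```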


@@ -156,7 +156,7 @@ func (w *priorityqueue[T]) AddWithOpts(o AddOpts, items ...T) {
w.items[key] = item
w.queue.ReplaceOrInsert(item)
if item.ReadyAt == nil {
w.metrics.add(key)
w.metrics.add(key, item.Priority)
}
w.addedCounter++
continue
@@ -166,12 +166,16 @@ func (w *priorityqueue[T]) AddWithOpts(o AddOpts, items ...T) {
// will affect the order - Just delete and re-add.
item, _ := w.queue.Delete(w.items[key])
if o.Priority > item.Priority {
// Update depth metric only if the item in the queue was already added to the depth metric.
if item.ReadyAt == nil || w.becameReady.Has(key) {
w.metrics.updateDepthWithPriorityMetric(item.Priority, o.Priority)
}
item.Priority = o.Priority
}
if item.ReadyAt != nil && (readyAt == nil || readyAt.Before(*item.ReadyAt)) {
if readyAt == nil && !w.becameReady.Has(key) {
w.metrics.add(key)
w.metrics.add(key, item.Priority)
}
item.ReadyAt = readyAt
}
@@ -223,7 +227,7 @@ func (w *priorityqueue[T]) spin() {
return false
}
if !w.becameReady.Has(item.Key) {
w.metrics.add(item.Key)
w.metrics.add(item.Key, item.Priority)
w.becameReady.Insert(item.Key)
}
}
@@ -239,7 +243,7 @@ func (w *priorityqueue[T]) spin() {
return true
}
w.metrics.get(item.Key)
w.metrics.get(item.Key, item.Priority)
w.locked.Insert(item.Key)
w.waiters.Add(-1)
delete(w.items, item.Key)
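
With the plumbing above, items enqueued through the public priorityqueue package are tracked per priority in the depth metric, and a priority bump on an already-queued item moves its depth count instead of double-counting it. A small usage sketch against the public package, assuming AddOpts.Priority and GetWithPriority behave as in this release (the "background"/"urgent" request names are made up):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func main() {
	q := priorityqueue.New[reconcile.Request]("example")
	defer q.ShutDown()

	low := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "default", Name: "background"}}
	high := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "default", Name: "urgent"}}

	// Items added with a higher Priority are handed to workers first, and their
	// depth is recorded under that priority.
	q.AddWithOpts(priorityqueue.AddOpts{}, low)
	q.AddWithOpts(priorityqueue.AddOpts{Priority: 10}, high)

	item, priority, _ := q.GetWithPriority()
	fmt.Println(item.Name, priority) // expected: urgent 10
}
```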