rebase: bump sigs.k8s.io/controller-runtime

Bumps the k8s-dependencies group with 1 update: [sigs.k8s.io/controller-runtime](https://github.com/kubernetes-sigs/controller-runtime).


Updates `sigs.k8s.io/controller-runtime` from 0.20.4 to 0.21.0
- [Release notes](https://github.com/kubernetes-sigs/controller-runtime/releases)
- [Changelog](https://github.com/kubernetes-sigs/controller-runtime/blob/main/RELEASE.md)
- [Commits](https://github.com/kubernetes-sigs/controller-runtime/compare/v0.20.4...v0.21.0)

---
updated-dependencies:
- dependency-name: sigs.k8s.io/controller-runtime
  dependency-version: 0.21.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: k8s-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Author: dependabot[bot]
Date: 2025-05-26 20:24:19 +00:00
Committed by: mergify[bot]
Parent: d05ebd3456
Commit: eb13efc9df
27 changed files with 399 additions and 232 deletions

View File

@@ -174,65 +174,12 @@ func (c *Controller[request]) Start(ctx context.Context) error {
defer c.mu.Unlock()
// TODO(pwittrock): Reconsider HandleCrash
defer utilruntime.HandleCrash()
defer utilruntime.HandleCrashWithLogger(c.LogConstructor(nil))
// NB(directxman12): launch the sources *before* trying to wait for the
// caches to sync so that they have a chance to register their intended
// caches.
errGroup := &errgroup.Group{}
for _, watch := range c.startWatches {
log := c.LogConstructor(nil)
_, ok := watch.(interface {
String() string
})
if !ok {
log = log.WithValues("source", fmt.Sprintf("%T", watch))
} else {
log = log.WithValues("source", fmt.Sprintf("%s", watch))
}
didStartSyncingSource := &atomic.Bool{}
errGroup.Go(func() error {
// Use a timeout for starting and syncing the source to avoid silently
// blocking startup indefinitely if it doesn't come up.
sourceStartCtx, cancel := context.WithTimeout(ctx, c.CacheSyncTimeout)
defer cancel()
sourceStartErrChan := make(chan error, 1) // Buffer chan to not leak goroutine if we time out
go func() {
defer close(sourceStartErrChan)
log.Info("Starting EventSource")
if err := watch.Start(ctx, c.Queue); err != nil {
sourceStartErrChan <- err
return
}
syncingSource, ok := watch.(source.TypedSyncingSource[request])
if !ok {
return
}
didStartSyncingSource.Store(true)
if err := syncingSource.WaitForSync(sourceStartCtx); err != nil {
err := fmt.Errorf("failed to wait for %s caches to sync %v: %w", c.Name, syncingSource, err)
log.Error(err, "Could not wait for Cache to sync")
sourceStartErrChan <- err
}
}()
select {
case err := <-sourceStartErrChan:
return err
case <-sourceStartCtx.Done():
if didStartSyncingSource.Load() { // We are racing with WaitForSync, wait for it to let it tell us what happened
return <-sourceStartErrChan
}
if ctx.Err() != nil { // Don't return an error if the root context got cancelled
return nil
}
return fmt.Errorf("timed out waiting for source %s to Start. Please ensure that its Start() method is non-blocking", watch)
}
})
}
if err := errGroup.Wait(); err != nil {
if err := c.startEventSources(ctx); err != nil {
return err
}
@@ -271,6 +218,65 @@ func (c *Controller[request]) Start(ctx context.Context) error {
return nil
}
// startEventSources launches all the sources registered with this controller and waits
// for them to sync. It returns an error if any of the sources fail to start or sync.
func (c *Controller[request]) startEventSources(ctx context.Context) error {
errGroup := &errgroup.Group{}
for _, watch := range c.startWatches {
log := c.LogConstructor(nil)
_, ok := watch.(interface {
String() string
})
if !ok {
log = log.WithValues("source", fmt.Sprintf("%T", watch))
} else {
log = log.WithValues("source", fmt.Sprintf("%s", watch))
}
didStartSyncingSource := &atomic.Bool{}
errGroup.Go(func() error {
// Use a timeout for starting and syncing the source to avoid silently
// blocking startup indefinitely if it doesn't come up.
sourceStartCtx, cancel := context.WithTimeout(ctx, c.CacheSyncTimeout)
defer cancel()
sourceStartErrChan := make(chan error, 1) // Buffer chan to not leak goroutine if we time out
go func() {
defer close(sourceStartErrChan)
log.Info("Starting EventSource")
if err := watch.Start(ctx, c.Queue); err != nil {
sourceStartErrChan <- err
return
}
syncingSource, ok := watch.(source.TypedSyncingSource[request])
if !ok {
return
}
didStartSyncingSource.Store(true)
if err := syncingSource.WaitForSync(sourceStartCtx); err != nil {
err := fmt.Errorf("failed to wait for %s caches to sync %v: %w", c.Name, syncingSource, err)
log.Error(err, "Could not wait for Cache to sync")
sourceStartErrChan <- err
}
}()
select {
case err := <-sourceStartErrChan:
return err
case <-sourceStartCtx.Done():
if didStartSyncingSource.Load() { // We are racing with WaitForSync, wait for it to let it tell us what happened
return <-sourceStartErrChan
}
if ctx.Err() != nil { // Don't return an error if the root context got cancelled
return nil
}
return fmt.Errorf("timed out waiting for source %s to Start. Please ensure that its Start() method is non-blocking", watch)
}
})
}
return errGroup.Wait()
}
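For orientation, the CacheSyncTimeout consulted by startEventSources is the per-watch bound configured on the controller. A minimal sketch of where it is wired up via controller.Options (the controller name, helper name, and timeout value are illustrative, not from this commit):

package example

import (
	"context"
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// newExampleController shows where the CacheSyncTimeout consumed by
// startEventSources comes from: controller.Options. If a source has not
// started and synced within that window, controller startup fails.
func newExampleController(mgr ctrl.Manager) (controller.Controller, error) {
	return controller.New("example-controller", mgr, controller.Options{
		Reconciler: reconcile.Func(func(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
			return reconcile.Result{}, nil
		}),
		// Illustrative value; bounds source start plus cache sync for each watch.
		CacheSyncTimeout: 2 * time.Minute,
	})
}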
// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the reconcileHandler.
func (c *Controller[request]) processNextWorkItem(ctx context.Context) bool {
@@ -354,7 +360,7 @@ func (c *Controller[request]) reconcileHandler(ctx context.Context, req request,
c.Queue.Forget(req)
c.Queue.AddWithOpts(priorityqueue.AddOpts{After: result.RequeueAfter, Priority: priority}, req)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Inc()
case result.Requeue:
case result.Requeue: //nolint: staticcheck // We have to handle it until it is removed
log.V(5).Info("Reconcile done, requeueing")
c.Queue.AddWithOpts(priorityqueue.AddOpts{RateLimited: true, Priority: priority}, req)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeue).Inc()
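For context, the deprecated Requeue flag is still honored alongside RequeueAfter, as the nolint comment above notes. A minimal reconciler sketch showing the two requeue styles (the type and helper names are hypothetical):

package example

import (
	"context"
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
)

// exampleReconciler illustrates the two requeue styles the switch above
// distinguishes: RequeueAfter (re-added after a delay) and the deprecated
// Requeue flag (re-added through the rate limiter).
type exampleReconciler struct{}

func (r *exampleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// Preferred: ask for a delayed retry.
	if waitingOnExternalResource(req) {
		return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
	}
	// Deprecated, but still handled (rate-limited re-add) until it is removed upstream.
	if retryImmediately(req) {
		return ctrl.Result{Requeue: true}, nil //nolint:staticcheck
	}
	return ctrl.Result{}, nil
}

// Placeholder checks for illustration only.
func waitingOnExternalResource(ctrl.Request) bool { return false }
func retryImmediately(ctrl.Request) bool          { return false }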

View File

@@ -17,6 +17,8 @@ limitations under the License.
package metrics
import (
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"sigs.k8s.io/controller-runtime/pkg/metrics"
@@ -60,6 +62,9 @@ var (
Help: "Length of time per reconciliation per controller",
Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50, 60},
NativeHistogramBucketFactor: 1.1,
NativeHistogramMaxBucketNumber: 100,
NativeHistogramMinResetDuration: 1 * time.Hour,
}, []string{"controller"})
// WorkerCount is a prometheus metric which holds the number of
@@ -88,7 +93,7 @@ func init() {
ActiveWorkers,
// expose process metrics like CPU, Memory, file descriptor usage etc.
collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
// expose Go runtime metrics like GC stats, memory stats etc.
collectors.NewGoCollector(),
// expose all Go runtime metrics like GC stats, memory stats etc.
collectors.NewGoCollector(collectors.WithGoCollectorRuntimeMetrics(collectors.MetricsAll)),
)
}
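For reference, the new NativeHistogram* fields opt the histogram into Prometheus native histograms while keeping the classic buckets, and MetricsAll widens the Go collector to the full runtime/metrics set. A standalone sketch of the same client_golang options (the metric name and registration helper are illustrative):

package example

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

// exampleSeconds mirrors the options above: classic buckets stay in place,
// while the NativeHistogram* fields let a protobuf-negotiating scrape receive
// a native (sparse) histogram for the same observations.
var exampleSeconds = prometheus.NewHistogramVec(prometheus.HistogramOpts{
	Name:                            "example_reconcile_time_seconds",
	Help:                            "Length of time per reconciliation",
	Buckets:                         prometheus.DefBuckets,
	NativeHistogramBucketFactor:     1.1,
	NativeHistogramMaxBucketNumber:  100,
	NativeHistogramMinResetDuration: 1 * time.Hour,
}, []string{"controller"})

func registerExample(reg *prometheus.Registry) {
	reg.MustRegister(
		exampleSeconds,
		// MetricsAll switches the Go collector from the default curated set
		// to every metric published by runtime/metrics.
		collectors.NewGoCollector(collectors.WithGoCollectorRuntimeMetrics(collectors.MetricsAll)),
	)
}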

View File

@@ -17,6 +17,9 @@ limitations under the License.
package metrics
import (
"strconv"
"time"
"github.com/prometheus/client_golang/prometheus"
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/metrics"
@@ -42,8 +45,8 @@ var (
depth = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Subsystem: WorkQueueSubsystem,
Name: DepthKey,
Help: "Current depth of workqueue",
}, []string{"name", "controller"})
Help: "Current depth of workqueue by workqueue and priority",
}, []string{"name", "controller", "priority"})
adds = prometheus.NewCounterVec(prometheus.CounterOpts{
Subsystem: WorkQueueSubsystem,
@@ -52,17 +55,23 @@ var (
}, []string{"name", "controller"})
latency = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Subsystem: WorkQueueSubsystem,
Name: QueueLatencyKey,
Help: "How long in seconds an item stays in workqueue before being requested",
Buckets: prometheus.ExponentialBuckets(10e-9, 10, 12),
Subsystem: WorkQueueSubsystem,
Name: QueueLatencyKey,
Help: "How long in seconds an item stays in workqueue before being requested",
Buckets: prometheus.ExponentialBuckets(10e-9, 10, 12),
NativeHistogramBucketFactor: 1.1,
NativeHistogramMaxBucketNumber: 100,
NativeHistogramMinResetDuration: 1 * time.Hour,
}, []string{"name", "controller"})
workDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Subsystem: WorkQueueSubsystem,
Name: WorkDurationKey,
Help: "How long in seconds processing an item from workqueue takes.",
Buckets: prometheus.ExponentialBuckets(10e-9, 10, 12),
Subsystem: WorkQueueSubsystem,
Name: WorkDurationKey,
Help: "How long in seconds processing an item from workqueue takes.",
Buckets: prometheus.ExponentialBuckets(10e-9, 10, 12),
NativeHistogramBucketFactor: 1.1,
NativeHistogramMaxBucketNumber: 100,
NativeHistogramMinResetDuration: 1 * time.Hour,
}, []string{"name", "controller"})
unfinished = prometheus.NewGaugeVec(prometheus.GaugeOpts{
@@ -103,7 +112,7 @@ func init() {
type WorkqueueMetricsProvider struct{}
func (WorkqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric {
return depth.WithLabelValues(name, name)
return depth.WithLabelValues(name, name, "") // no priority
}
func (WorkqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric {
@@ -129,3 +138,33 @@ func (WorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name str
func (WorkqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric {
return retries.WithLabelValues(name, name)
}
type MetricsProviderWithPriority interface {
workqueue.MetricsProvider
NewDepthMetricWithPriority(name string) DepthMetricWithPriority
}
// DepthMetricWithPriority represents a depth metric with priority.
type DepthMetricWithPriority interface {
Inc(priority int)
Dec(priority int)
}
var _ MetricsProviderWithPriority = WorkqueueMetricsProvider{}
func (WorkqueueMetricsProvider) NewDepthMetricWithPriority(name string) DepthMetricWithPriority {
return &depthWithPriorityMetric{lvs: []string{name, name}}
}
type depthWithPriorityMetric struct {
lvs []string
}
func (g *depthWithPriorityMetric) Inc(priority int) {
depth.WithLabelValues(append(g.lvs, strconv.Itoa(priority))...).Inc()
}
func (g *depthWithPriorityMetric) Dec(priority int) {
depth.WithLabelValues(append(g.lvs, strconv.Itoa(priority))...).Dec()
}
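A short usage sketch of the new priority-aware depth metric, written as if it sat next to the provider above (the queue name is hypothetical):

// exampleDepthPerPriority: a priority-aware workqueue handed a
// MetricsProviderWithPriority can report depth per priority, which surfaces
// as the extra "priority" label, e.g.
// workqueue_depth{name="example-queue", controller="example-queue", priority="10"}.
func exampleDepthPerPriority(provider MetricsProviderWithPriority) {
	depthMetric := provider.NewDepthMetricWithPriority("example-queue")

	depthMetric.Inc(10) // an item with priority 10 was enqueued
	depthMetric.Dec(10) // the item was handed to a worker
}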

View File

@@ -32,6 +32,8 @@ import (
var log = logf.RuntimeLog.WithName("source").WithName("EventHandler")
var _ cache.ResourceEventHandler = &EventHandler[client.Object, any]{}
// NewEventHandler creates a new EventHandler.
func NewEventHandler[object client.Object, request comparable](
ctx context.Context,
@@ -57,19 +59,11 @@ type EventHandler[object client.Object, request comparable] struct {
predicates []predicate.TypedPredicate[object]
}
// HandlerFuncs converts EventHandler to a ResourceEventHandlerFuncs
// TODO: switch to ResourceEventHandlerDetailedFuncs with client-go 1.27
func (e *EventHandler[object, request]) HandlerFuncs() cache.ResourceEventHandlerFuncs {
return cache.ResourceEventHandlerFuncs{
AddFunc: e.OnAdd,
UpdateFunc: e.OnUpdate,
DeleteFunc: e.OnDelete,
}
}
// OnAdd creates CreateEvent and calls Create on EventHandler.
func (e *EventHandler[object, request]) OnAdd(obj interface{}) {
c := event.TypedCreateEvent[object]{}
func (e *EventHandler[object, request]) OnAdd(obj interface{}, isInInitialList bool) {
c := event.TypedCreateEvent[object]{
IsInInitialList: isInInitialList,
}
// Pull Object out of the object
if o, ok := obj.(object); ok {

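Since create events now carry IsInInitialList, callers can distinguish objects replayed from the informer's initial list from genuinely new ones. A minimal predicate sketch (the Pod type choice and variable name are illustrative):

package example

import (
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// skipInitialList drops Create events that merely replay objects which
// already existed when the informer's initial list completed.
var skipInitialList = predicate.TypedFuncs[*corev1.Pod]{
	CreateFunc: func(e event.TypedCreateEvent[*corev1.Pod]) bool {
		return !e.IsInInitialList
	},
}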
View File

@@ -10,7 +10,9 @@ import (
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
toolscache "k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -18,6 +20,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/predicate"
)
var logKind = logf.RuntimeLog.WithName("source").WithName("Kind")
// Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create).
type Kind[object client.Object, request comparable] struct {
// Type is the type of object to watch. e.g. &v1.Pod{}
@@ -68,12 +72,12 @@ func (ks *Kind[object, request]) Start(ctx context.Context, queue workqueue.Type
kindMatchErr := &meta.NoKindMatchError{}
switch {
case errors.As(lastErr, &kindMatchErr):
log.Error(lastErr, "if kind is a CRD, it should be installed before calling Start",
logKind.Error(lastErr, "if kind is a CRD, it should be installed before calling Start",
"kind", kindMatchErr.GroupKind)
case runtime.IsNotRegisteredError(lastErr):
log.Error(lastErr, "kind must be registered to the Scheme")
logKind.Error(lastErr, "kind must be registered to the Scheme")
default:
log.Error(lastErr, "failed to get informer from cache")
logKind.Error(lastErr, "failed to get informer from cache")
}
return false, nil // Retry.
}
@@ -87,7 +91,9 @@ func (ks *Kind[object, request]) Start(ctx context.Context, queue workqueue.Type
return
}
_, err := i.AddEventHandler(NewEventHandler(ctx, queue, ks.Handler, ks.Predicates).HandlerFuncs())
_, err := i.AddEventHandlerWithOptions(NewEventHandler(ctx, queue, ks.Handler, ks.Predicates), toolscache.HandlerOptions{
Logger: &logKind,
})
if err != nil {
ks.startedErr <- err
return