rebase: update controller-runtime package to v0.9.2
This commit updates controller-runtime to v0.9.2 and updates persistentvolume.go to pass a context through the affected functions and function calls instead of using context.TODO(). Signed-off-by: Rakshith R <rar@redhat.com>
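As a rough illustration of the persistentvolume.go change (the type and field names below are hypothetical, not the actual ceph-csi code), the reconcile context is threaded through API calls instead of creating throwaway contexts:

package controllers

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// ReconcilePersistentVolume is a hypothetical reconciler sketch; the real
// ceph-csi type and fields may differ.
type ReconcilePersistentVolume struct {
	client client.Client
}

// Reconcile receives a context from controller-runtime v0.9 and passes it on,
// where the older code used context.TODO() for each API call.
func (r *ReconcilePersistentVolume) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	pv := &corev1.PersistentVolume{}
	// was: r.client.Get(context.TODO(), req.NamespacedName, pv)
	if err := r.client.Get(ctx, req.NamespacedName, pv); err != nil {
		return reconcile.Result{}, err
	}
	return reconcile.Result{}, nil
}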
260 vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go (generated, vendored)
@@ -17,32 +17,27 @@ limitations under the License.
package controller

import (
"context"
"errors"
"fmt"
"sync"
"time"

"k8s.io/apimachinery/pkg/runtime"
"github.com/go-logr/logr"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics"
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
"sigs.k8s.io/controller-runtime/pkg/source"
)

var log = logf.RuntimeLog.WithName("controller")

var _ inject.Injector = &Controller{}

// Controller implements controller.Controller
// Controller implements controller.Controller.
type Controller struct {
// Name is used to uniquely identify a Controller in tracing, logging and monitoring. Name is required.
Name string
@@ -55,19 +50,6 @@ type Controller struct {
// Defaults to the DefaultReconcileFunc.
Do reconcile.Reconciler

// Client is a lazily initialized Client. The controllerManager will initialize this when Start is called.
Client client.Client

// Scheme is injected by the controllerManager when controllerManager.Start is called
Scheme *runtime.Scheme

// informers are injected by the controllerManager when controllerManager.Start is called
Cache cache.Cache

// Config is the rest.Config used to talk to the apiserver. Defaults to one of in-cluster, environment variable
// specified, or the ~/.kube/Config.
Config *rest.Config

// MakeQueue constructs the queue for this controller once the controller is ready to start.
// This exists because the standard Kubernetes workqueues start themselves immediately, which
// leads to goroutine leaks if something calls controller.New repeatedly.
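The MakeQueue field in the hunk above exists so the workqueue is only constructed when the controller actually starts. A minimal sketch of such a deferred constructor using the client-go workqueue package (the queue name is illustrative):

package example

import "k8s.io/client-go/util/workqueue"

// newQueueFunc returns a constructor instead of a live queue, so no queue
// goroutines are spawned until the controller's Start runs.
func newQueueFunc(name string) func() workqueue.RateLimitingInterface {
	return func() workqueue.RateLimitingInterface {
		return workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name)
	}
}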
@@ -78,29 +60,31 @@ type Controller struct {
Queue workqueue.RateLimitingInterface

// SetFields is used to inject dependencies into other objects such as Sources, EventHandlers and Predicates
// Deprecated: the caller should handle injected fields itself.
SetFields func(i interface{}) error

// mu is used to synchronize Controller setup
mu sync.Mutex

// JitterPeriod allows tests to reduce the JitterPeriod so they complete faster
JitterPeriod time.Duration

// WaitForCacheSync allows tests to mock out the WaitForCacheSync function to return an error
// defaults to Cache.WaitForCacheSync
WaitForCacheSync func(stopCh <-chan struct{}) bool

// Started is true if the Controller has been Started
Started bool

// Recorder is an event recorder for recording Event resources to the
// Kubernetes API.
Recorder record.EventRecorder
// ctx is the context that was passed to Start() and used when starting watches.
//
// According to the docs, contexts should not be stored in a struct: https://golang.org/pkg/context,
// while we usually always strive to follow best practices, we consider this a legacy case and it should
// undergo a major refactoring and redesign to allow for context to not be stored in a struct.
ctx context.Context

// TODO(community): Consider initializing a logger with the Controller Name as the tag
// CacheSyncTimeout refers to the time limit set on waiting for cache to sync
// Defaults to 2 minutes if not set.
CacheSyncTimeout time.Duration

// watches maintains a list of sources, handlers, and predicates to start when the controller is started.
watches []watchDescription
// startWatches maintains a list of sources, handlers, and predicates to start when the controller is started.
startWatches []watchDescription

// Log is used to log messages to users during reconciliation, or for example when a watch is started.
Log logr.Logger
}

// watchDescription contains all the information necessary to start a watch.
@@ -110,12 +94,14 @@ type watchDescription struct {
predicates []predicate.Predicate
}

// Reconcile implements reconcile.Reconciler
func (c *Controller) Reconcile(r reconcile.Request) (reconcile.Result, error) {
return c.Do.Reconcile(r)
// Reconcile implements reconcile.Reconciler.
func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
log := c.Log.WithValues("name", req.Name, "namespace", req.Namespace)
ctx = logf.IntoContext(ctx, log)
return c.Do.Reconcile(ctx, req)
}

// Watch implements controller.Controller
// Watch implements controller.Controller.
func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prct ...predicate.Predicate) error {
c.mu.Lock()
defer c.mu.Unlock()
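The Reconcile signature change in the hunk above is what downstream reconcilers (including ceph-csi's) have to adopt when moving to v0.9. A minimal sketch of a reconciler written against the new interface:

package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// noopReconciler satisfies reconcile.Reconciler with the v0.9 signature,
// which now takes a context.Context as its first argument.
type noopReconciler struct{}

func (noopReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	// ctx is derived from the controller's Start context and already carries
	// the request-scoped logger (see logf.IntoContext above).
	return reconcile.Result{}, nil
}

var _ reconcile.Reconciler = noopReconciler{}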
@@ -133,24 +119,39 @@ func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prc
}
}

c.watches = append(c.watches, watchDescription{src: src, handler: evthdler, predicates: prct})
if c.Started {
log.Info("Starting EventSource", "controller", c.Name, "source", src)
return src.Start(evthdler, c.Queue, prct...)
// Controller hasn't started yet, store the watches locally and return.
//
// These watches are going to be held on the controller struct until the manager or user calls Start(...).
if !c.Started {
c.startWatches = append(c.startWatches, watchDescription{src: src, handler: evthdler, predicates: prct})
return nil
}

return nil
c.Log.Info("Starting EventSource", "source", src)
return src.Start(c.ctx, evthdler, c.Queue, prct...)
}

// Start implements controller.Controller
func (c *Controller) Start(stop <-chan struct{}) error {
// Start implements controller.Controller.
func (c *Controller) Start(ctx context.Context) error {
// use an IIFE to get proper lock handling
// but lock outside to get proper handling of the queue shutdown
c.mu.Lock()
if c.Started {
return errors.New("controller was started more than once. This is likely to be caused by being added to a manager multiple times")
}

c.initMetrics()

// Set the internal context.
c.ctx = ctx

c.Queue = c.MakeQueue()
defer c.Queue.ShutDown() // needs to be outside the iife so that we shutdown after the stop channel is closed
go func() {
<-ctx.Done()
c.Queue.ShutDown()
}()

wg := &sync.WaitGroup{}
err := func() error {
defer c.mu.Unlock()
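Since Start now takes a context instead of a stop channel, callers shut the controller down by cancelling that context. A sketch of the usual v0.9 wiring at the manager level (assuming the standard signal-handler helper):

package example

import (
	ctrl "sigs.k8s.io/controller-runtime"
)

// run starts a manager (and every controller registered with it) until the
// signal-handler context is cancelled; with v0.9 the stop channel is gone.
func run(mgr ctrl.Manager) error {
	ctx := ctrl.SetupSignalHandler() // returns a context.Context in v0.9
	return mgr.Start(ctx)            // was: mgr.Start(stopCh)
}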
@@ -160,37 +161,59 @@ func (c *Controller) Start(stop <-chan struct{}) error {
// NB(directxman12): launch the sources *before* trying to wait for the
// caches to sync so that they have a chance to register their intendeded
// caches.
for _, watch := range c.watches {
log.Info("Starting EventSource", "controller", c.Name, "source", watch.src)
if err := watch.src.Start(watch.handler, c.Queue, watch.predicates...); err != nil {
for _, watch := range c.startWatches {
c.Log.Info("Starting EventSource", "source", watch.src)

if err := watch.src.Start(ctx, watch.handler, c.Queue, watch.predicates...); err != nil {
return err
}
}

// Start the SharedIndexInformer factories to begin populating the SharedIndexInformer caches
log.Info("Starting Controller", "controller", c.Name)
c.Log.Info("Starting Controller")

// Wait for the caches to be synced before starting workers
if c.WaitForCacheSync == nil {
c.WaitForCacheSync = c.Cache.WaitForCacheSync
}
if ok := c.WaitForCacheSync(stop); !ok {
// This code is unreachable right now since WaitForCacheSync will never return an error
// Leaving it here because that could happen in the future
err := fmt.Errorf("failed to wait for %s caches to sync", c.Name)
log.Error(err, "Could not wait for Cache to sync", "controller", c.Name)
return err
for _, watch := range c.startWatches {
syncingSource, ok := watch.src.(source.SyncingSource)
if !ok {
continue
}

if err := func() error {
// use a context with timeout for launching sources and syncing caches.
sourceStartCtx, cancel := context.WithTimeout(ctx, c.CacheSyncTimeout)
defer cancel()

// WaitForSync waits for a definitive timeout, and returns if there
// is an error or a timeout
if err := syncingSource.WaitForSync(sourceStartCtx); err != nil {
err := fmt.Errorf("failed to wait for %s caches to sync: %w", c.Name, err)
c.Log.Error(err, "Could not wait for Cache to sync")
return err
}

return nil
}(); err != nil {
return err
}
}

if c.JitterPeriod == 0 {
c.JitterPeriod = 1 * time.Second
}
// All the watches have been started, we can reset the local slice.
//
// We should never hold watches more than necessary, each watch source can hold a backing cache,
// which won't be garbage collected if we hold a reference to it.
c.startWatches = nil

// Launch workers to process resources
log.Info("Starting workers", "controller", c.Name, "worker count", c.MaxConcurrentReconciles)
c.Log.Info("Starting workers", "worker count", c.MaxConcurrentReconciles)
wg.Add(c.MaxConcurrentReconciles)
for i := 0; i < c.MaxConcurrentReconciles; i++ {
// Process work items
go wait.Until(c.worker, c.JitterPeriod, stop)
go func() {
defer wg.Done()
// Run a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the reconcileHandler is never invoked concurrently with the same object.
for c.processNextWorkItem(ctx) {
}
}()
}

c.Started = true
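The per-source cache sync above bounds the wait with context.WithTimeout instead of blocking indefinitely. The same pattern in isolation, as plain Go rather than controller-runtime API:

package example

import (
	"context"
	"fmt"
	"time"
)

// waitWithTimeout mirrors the pattern used for syncingSource.WaitForSync:
// derive a child context with a deadline and treat expiry as a sync failure.
func waitWithTimeout(ctx context.Context, timeout time.Duration, wait func(context.Context) error) error {
	syncCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	if err := wait(syncCtx); err != nil {
		return fmt.Errorf("failed to wait for caches to sync: %w", err)
	}
	return nil
}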
@@ -200,21 +223,16 @@ func (c *Controller) Start(stop <-chan struct{}) error {
return err
}

<-stop
log.Info("Stopping workers", "controller", c.Name)
<-ctx.Done()
c.Log.Info("Shutdown signal received, waiting for all workers to finish")
wg.Wait()
c.Log.Info("All workers finished")
return nil
}

// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the reconcileHandler is never invoked concurrently with the same object.
func (c *Controller) worker() {
for c.processNextWorkItem() {
}
}

// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the reconcileHandler.
func (c *Controller) processNextWorkItem() bool {
func (c *Controller) processNextWorkItem(ctx context.Context) bool {
obj, shutdown := c.Queue.Get()
if shutdown {
// Stop working
@@ -229,70 +247,92 @@ func (c *Controller) processNextWorkItem() bool {
// period.
defer c.Queue.Done(obj)

return c.reconcileHandler(obj)
ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Add(1)
defer ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Add(-1)

c.reconcileHandler(ctx, obj)
return true
}

func (c *Controller) reconcileHandler(obj interface{}) bool {
const (
labelError = "error"
labelRequeueAfter = "requeue_after"
labelRequeue = "requeue"
labelSuccess = "success"
)

func (c *Controller) initMetrics() {
ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Set(0)
ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Add(0)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Add(0)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Add(0)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeue).Add(0)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelSuccess).Add(0)
ctrlmetrics.WorkerCount.WithLabelValues(c.Name).Set(float64(c.MaxConcurrentReconciles))
}

func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) {
// Update metrics after processing each item
reconcileStartTS := time.Now()
defer func() {
c.updateMetrics(time.Since(reconcileStartTS))
}()

var req reconcile.Request
var ok bool
if req, ok = obj.(reconcile.Request); !ok {
// Make sure that the the object is a valid request.
req, ok := obj.(reconcile.Request)
if !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.Queue.Forget(obj)
log.Error(nil, "Queue item was not a Request",
"controller", c.Name, "type", fmt.Sprintf("%T", obj), "value", obj)
c.Log.Error(nil, "Queue item was not a Request", "type", fmt.Sprintf("%T", obj), "value", obj)
// Return true, don't take a break
return true
return
}
// RunInformersAndControllers the syncHandler, passing it the namespace/Name string of the

log := c.Log.WithValues("name", req.Name, "namespace", req.Namespace)
ctx = logf.IntoContext(ctx, log)

// RunInformersAndControllers the syncHandler, passing it the Namespace/Name string of the
// resource to be synced.
if result, err := c.Do.Reconcile(req); err != nil {
result, err := c.Do.Reconcile(ctx, req)
switch {
case err != nil:
c.Queue.AddRateLimited(req)
log.Error(err, "Reconciler error", "controller", c.Name, "request", req)
ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Inc()
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, "error").Inc()
return false
} else if result.RequeueAfter > 0 {
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Inc()
log.Error(err, "Reconciler error")
case result.RequeueAfter > 0:
// The result.RequeueAfter request will be lost, if it is returned
// along with a non-nil error. But this is intended as
// We need to drive to stable reconcile loops before queuing due
// to result.RequestAfter
c.Queue.Forget(obj)
c.Queue.AddAfter(req, result.RequeueAfter)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, "requeue_after").Inc()
return true
} else if result.Requeue {
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Inc()
case result.Requeue:
c.Queue.AddRateLimited(req)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, "requeue").Inc()
return true
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeue).Inc()
default:
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.Queue.Forget(obj)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelSuccess).Inc()
}

// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.Queue.Forget(obj)

// TODO(directxman12): What does 1 mean? Do we want level constants? Do we want levels at all?
log.V(1).Info("Successfully Reconciled", "controller", c.Name, "request", req)

ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, "success").Inc()
// Return true, don't take a break
return true
}

// InjectFunc implement SetFields.Injector
// GetLogger returns this controller's logger.
func (c *Controller) GetLogger() logr.Logger {
return c.Log
}

// InjectFunc implement SetFields.Injector.
func (c *Controller) InjectFunc(f inject.Func) error {
c.SetFields = f
return nil
}

// updateMetrics updates prometheus metrics within the controller
// updateMetrics updates prometheus metrics within the controller.
func (c *Controller) updateMetrics(reconcileTime time.Duration) {
ctrlmetrics.ReconcileTime.WithLabelValues(c.Name).Observe(reconcileTime.Seconds())
}
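The reworked reconcileHandler above maps each reconcile outcome onto a workqueue call and a labelled counter. A small sketch of how a reconciler's return values feed that switch:

package example

import (
	"time"

	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// How reconcileHandler (above) treats the returned values:
//   non-nil error            -> AddRateLimited, reconcile_total{result="error"}
//   Result.RequeueAfter > 0  -> Forget + AddAfter, result="requeue_after"
//   Result.Requeue           -> AddRateLimited,    result="requeue"
//   zero Result, nil error   -> Forget,            result="success"
func requeueLater() (reconcile.Result, error) {
	return reconcile.Result{RequeueAfter: 5 * time.Minute}, nil
}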
29 vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go (generated, vendored)
@@ -18,6 +18,7 @@ package metrics

import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"sigs.k8s.io/controller-runtime/pkg/metrics"
)

@@ -25,24 +26,40 @@ var (
// ReconcileTotal is a prometheus counter metrics which holds the total
// number of reconciliations per controller. It has two labels. controller label refers
// to the controller name and result label refers to the reconcile result i.e
// success, error, requeue, requeue_after
// success, error, requeue, requeue_after.
ReconcileTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "controller_runtime_reconcile_total",
Help: "Total number of reconciliations per controller",
}, []string{"controller", "result"})

// ReconcileErrors is a prometheus counter metrics which holds the total
// number of errors from the Reconciler
// number of errors from the Reconciler.
ReconcileErrors = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "controller_runtime_reconcile_errors_total",
Help: "Total number of reconciliation errors per controller",
}, []string{"controller"})

// ReconcileTime is a prometheus metric which keeps track of the duration
// of reconciliations
// of reconciliations.
ReconcileTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Name: "controller_runtime_reconcile_time_seconds",
Help: "Length of time per reconciliation per controller",
Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50, 60},
}, []string{"controller"})

// WorkerCount is a prometheus metric which holds the number of
// concurrent reconciles per controller.
WorkerCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "controller_runtime_max_concurrent_reconciles",
Help: "Maximum number of concurrent reconciles per controller",
}, []string{"controller"})

// ActiveWorkers is a prometheus metric which holds the number
// of active workers per controller.
ActiveWorkers = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "controller_runtime_active_workers",
Help: "Number of currently used workers per controller",
}, []string{"controller"})
)

@@ -51,9 +68,11 @@ func init() {
ReconcileTotal,
ReconcileErrors,
ReconcileTime,
WorkerCount,
ActiveWorkers,
// expose process metrics like CPU, Memory, file descriptor usage etc.
prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}),
collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
// expose Go runtime metrics like GC stats, memory stats etc.
prometheus.NewGoCollector(),
collectors.NewGoCollector(),
)
}
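The swap from prometheus.NewGoCollector / prometheus.NewProcessCollector to the collectors sub-package matches the constructors introduced in newer client_golang releases. A standalone sketch registering the same process and Go runtime collectors against a custom registry:

package example

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

// newRegistry builds a registry with the same process and Go runtime
// collectors the vendored metrics package now registers.
func newRegistry() *prometheus.Registry {
	reg := prometheus.NewRegistry()
	reg.MustRegister(
		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
		collectors.NewGoCollector(),
	)
	return reg
}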
3 vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go (generated, vendored)
@@ -14,9 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Package log contains utilities for fetching a new logger
// when one is not already available.
// Deprecated: use pkg/log
package log

import (
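With internal/log deprecated in favor of pkg/log, reconcilers typically retrieve the request-scoped logger that logf.IntoContext (seen in controller.go above) attached to the context. A small sketch, assuming the standard pkg/log helpers:

package example

import (
	"context"

	logf "sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// logRequest pulls the logger the controller stored with logf.IntoContext
// and logs the request being handled.
func logRequest(ctx context.Context, req reconcile.Request) {
	log := logf.FromContext(ctx)
	log.Info("reconciling", "name", req.Name, "namespace", req.Namespace)
}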
78 vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go (generated, vendored, new file)
@@ -0,0 +1,78 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package objectutil

import (
"errors"
"fmt"

apimeta "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

// FilterWithLabels returns a copy of the items in objs matching labelSel.
func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtime.Object, error) {
outItems := make([]runtime.Object, 0, len(objs))
for _, obj := range objs {
meta, err := apimeta.Accessor(obj)
if err != nil {
return nil, err
}
if labelSel != nil {
lbls := labels.Set(meta.GetLabels())
if !labelSel.Matches(lbls) {
continue
}
}
outItems = append(outItems, obj.DeepCopyObject())
}
return outItems, nil
}

// IsAPINamespaced returns true if the object is namespace scoped.
// For unstructured objects the gvk is found from the object itself.
func IsAPINamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) {
gvk, err := apiutil.GVKForObject(obj, scheme)
if err != nil {
return false, err
}

return IsAPINamespacedWithGVK(gvk, scheme, restmapper)
}

// IsAPINamespacedWithGVK returns true if the object having the provided
// GVK is namespace scoped.
func IsAPINamespacedWithGVK(gk schema.GroupVersionKind, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) {
restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gk.Group, Kind: gk.Kind})
if err != nil {
return false, fmt.Errorf("failed to get restmapping: %w", err)
}

scope := restmapping.Scope.Name()

if scope == "" {
return false, errors.New("scope cannot be identified, empty scope returned")
}

if scope != apimeta.RESTScopeNameRoot {
return true, nil
}
return false, nil
}
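The scope check above comes down to asking the RESTMapper whether a GroupKind maps to a root-scoped or namespaced resource. Outside this internal package, roughly the same check looks like this (a sketch mirroring IsAPINamespacedWithGVK, not the vendored API itself):

package example

import (
	apimeta "k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// isNamespaced reports whether gk is namespace scoped, using the same
// RESTMapping scope comparison as the vendored objectutil package.
func isNamespaced(gk schema.GroupKind, mapper apimeta.RESTMapper) (bool, error) {
	mapping, err := mapper.RESTMapping(gk)
	if err != nil {
		return false, err
	}
	return mapping.Scope.Name() != apimeta.RESTScopeNameRoot, nil
}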
147 vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go (generated, vendored)
@@ -17,7 +17,9 @@ limitations under the License.
package recorder

import (
"context"
"fmt"
"sync"

"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
@@ -26,35 +28,150 @@ import (
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/recorder"
)

type provider struct {
// EventBroadcasterProducer makes an event broadcaster, returning
// whether or not the broadcaster should be stopped with the Provider,
// or not (e.g. if it's shared, it shouldn't be stopped with the Provider).
type EventBroadcasterProducer func() (caster record.EventBroadcaster, stopWithProvider bool)

// Provider is a recorder.Provider that records events to the k8s API server
// and to a logr Logger.
type Provider struct {
lock sync.RWMutex
stopped bool

// scheme to specify when creating a recorder
scheme *runtime.Scheme
// eventBroadcaster to create new recorder instance
eventBroadcaster record.EventBroadcaster
// logger is the logger to use when logging diagnostic event info
logger logr.Logger
logger logr.Logger
evtClient typedcorev1.EventInterface
makeBroadcaster EventBroadcasterProducer

broadcasterOnce sync.Once
broadcaster record.EventBroadcaster
stopBroadcaster bool
}

// NB(directxman12): this manually implements Stop instead of Being a runnable because we need to
// stop it *after* everything else shuts down, otherwise we'll cause panics as the leader election
// code finishes up and tries to continue emitting events.

// Stop attempts to stop this provider, stopping the underlying broadcaster
// if the broadcaster asked to be stopped. It kinda tries to honor the given
// context, but the underlying broadcaster has an indefinite wait that doesn't
// return until all queued events are flushed, so this may end up just returning
// before the underlying wait has finished instead of cancelling the wait.
// This is Very Frustrating™.
func (p *Provider) Stop(shutdownCtx context.Context) {
doneCh := make(chan struct{})

go func() {
// technically, this could start the broadcaster, but practically, it's
// almost certainly already been started (e.g. by leader election). We
// need to invoke this to ensure that we don't inadvertently race with
// an invocation of getBroadcaster.
broadcaster := p.getBroadcaster()
if p.stopBroadcaster {
p.lock.Lock()
broadcaster.Shutdown()
p.stopped = true
p.lock.Unlock()
}
close(doneCh)
}()

select {
case <-shutdownCtx.Done():
case <-doneCh:
}
}

// getBroadcaster ensures that a broadcaster is started for this
// provider, and returns it. It's threadsafe.
func (p *Provider) getBroadcaster() record.EventBroadcaster {
// NB(directxman12): this can technically still leak if something calls
// "getBroadcaster" (i.e. Emits an Event) but never calls Start, but if we
// create the broadcaster in start, we could race with other things that
// are started at the same time & want to emit events. The alternative is
// silently swallowing events and more locking, but that seems suboptimal.

p.broadcasterOnce.Do(func() {
broadcaster, stop := p.makeBroadcaster()
broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: p.evtClient})
broadcaster.StartEventWatcher(
func(e *corev1.Event) {
p.logger.V(1).Info(e.Type, "object", e.InvolvedObject, "reason", e.Reason, "message", e.Message)
})
p.broadcaster = broadcaster
p.stopBroadcaster = stop
})

return p.broadcaster
}

// NewProvider create a new Provider instance.
func NewProvider(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, broadcaster record.EventBroadcaster) (recorder.Provider, error) {
func NewProvider(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster EventBroadcasterProducer) (*Provider, error) {
clientSet, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to init clientSet: %w", err)
}

p := &provider{scheme: scheme, logger: logger, eventBroadcaster: broadcaster}
p.eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: clientSet.CoreV1().Events("")})
p.eventBroadcaster.StartEventWatcher(
func(e *corev1.Event) {
p.logger.V(1).Info(e.Type, "object", e.InvolvedObject, "reason", e.Reason, "message", e.Message)
})

p := &Provider{scheme: scheme, logger: logger, makeBroadcaster: makeBroadcaster, evtClient: clientSet.CoreV1().Events("")}
return p, nil
}

func (p *provider) GetEventRecorderFor(name string) record.EventRecorder {
return p.eventBroadcaster.NewRecorder(p.scheme, corev1.EventSource{Component: name})
// GetEventRecorderFor returns an event recorder that broadcasts to this provider's
// broadcaster. All events will be associated with a component of the given name.
func (p *Provider) GetEventRecorderFor(name string) record.EventRecorder {
return &lazyRecorder{
prov: p,
name: name,
}
}

// lazyRecorder is a recorder that doesn't actually instantiate any underlying
// recorder until the first event is emitted.
type lazyRecorder struct {
prov *Provider
name string

recOnce sync.Once
rec record.EventRecorder
}

// ensureRecording ensures that a concrete recorder is populated for this recorder.
func (l *lazyRecorder) ensureRecording() {
l.recOnce.Do(func() {
broadcaster := l.prov.getBroadcaster()
l.rec = broadcaster.NewRecorder(l.prov.scheme, corev1.EventSource{Component: l.name})
})
}

func (l *lazyRecorder) Event(object runtime.Object, eventtype, reason, message string) {
l.ensureRecording()

l.prov.lock.RLock()
if !l.prov.stopped {
l.rec.Event(object, eventtype, reason, message)
}
l.prov.lock.RUnlock()
}
func (l *lazyRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
l.ensureRecording()

l.prov.lock.RLock()
if !l.prov.stopped {
l.rec.Eventf(object, eventtype, reason, messageFmt, args...)
}
l.prov.lock.RUnlock()
}
func (l *lazyRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) {
l.ensureRecording()

l.prov.lock.RLock()
if !l.prov.stopped {
l.rec.AnnotatedEventf(object, annotations, eventtype, reason, messageFmt, args...)
}
l.prov.lock.RUnlock()
}
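The new EventBroadcasterProducer lets the caller decide whether the broadcaster's lifetime is tied to the provider. A sketch of a producer backed by client-go's record package (the provider constructor itself is internal to controller-runtime, so this only illustrates the producer side):

package example

import (
	"k8s.io/client-go/tools/record"
)

// ownBroadcaster returns a fresh broadcaster and asks the provider to stop
// it on shutdown; a shared broadcaster would return false instead.
func ownBroadcaster() (record.EventBroadcaster, bool) {
	return record.NewBroadcaster(), true
}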