Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 02:33:34 +00:00

rebase: update controller-runtime package to v0.9.2

This commit updates controller-runtime to v0.9.2 and changes persistentvolume.go to pass a real context through the affected functions and function calls instead of context.TODO().

Signed-off-by: Rakshith R <rar@redhat.com>
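The persistentvolume.go changes themselves are not part of this vendored excerpt, but the shape of the caller-side migration follows from the vendored API below: Manager.Start and SetupSignalHandler now work with a context.Context instead of a stop channel. A minimal sketch of that wiring under controller-runtime v0.9 (the function names and manager options here are illustrative, not taken from ceph-csi):

    package main

    import (
        "context"
        "fmt"

        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/manager/signals"
    )

    // doWork stands in for a helper that previously called context.TODO();
    // it now receives the context from its caller.
    func doWork(ctx context.Context) error {
        select {
        case <-ctx.Done():
            return ctx.Err()
        default:
            return nil
        }
    }

    func run() error {
        // SetupSignalHandler now returns a context.Context that is cancelled
        // on SIGINT/SIGTERM; Manager.Start consumes it instead of a stop channel.
        ctx := signals.SetupSignalHandler()

        mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
        if err != nil {
            return fmt.Errorf("failed to create manager: %w", err)
        }

        if err := doWork(ctx); err != nil {
            return err
        }

        return mgr.Start(ctx) // blocks until ctx is cancelled or a runnable fails
    }

    func main() {
        if err := run(); err != nil {
            panic(err)
        }
    }

Any helper that used context.TODO() can instead accept the ctx handed out by SetupSignalHandler (or by a Reconcile call), which is what this commit does for persistentvolume.go.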
This commit is contained in:

554 vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go (generated, vendored)
@@ -18,15 +18,18 @@ package manager

import (
    "context"
    "errors"
    "fmt"
    "net"
    "net/http"
    "sync"
    "time"

    "github.com/go-logr/logr"
    "github.com/prometheus/client_golang/prometheus/promhttp"
    "k8s.io/apimachinery/pkg/api/meta"
    "k8s.io/apimachinery/pkg/runtime"
    kerrors "k8s.io/apimachinery/pkg/util/errors"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/leaderelection"
    "k8s.io/client-go/tools/leaderelection/resourcelock"
@@ -34,64 +37,51 @@ import (

    "sigs.k8s.io/controller-runtime/pkg/cache"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/cluster"
    "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
    "sigs.k8s.io/controller-runtime/pkg/healthz"
    logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
    intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder"
    "sigs.k8s.io/controller-runtime/pkg/metrics"
    "sigs.k8s.io/controller-runtime/pkg/recorder"
    "sigs.k8s.io/controller-runtime/pkg/runtime/inject"
    "sigs.k8s.io/controller-runtime/pkg/webhook"
)

const (
    // Values taken from: https://github.com/kubernetes/apiserver/blob/master/pkg/apis/config/v1alpha1/defaults.go
    defaultLeaseDuration = 15 * time.Second
    defaultRenewDeadline = 10 * time.Second
    defaultRetryPeriod   = 2 * time.Second
    defaultLeaseDuration          = 15 * time.Second
    defaultRenewDeadline          = 10 * time.Second
    defaultRetryPeriod            = 2 * time.Second
    defaultGracefulShutdownPeriod = 30 * time.Second

    defaultReadinessEndpoint = "/readyz"
    defaultLivenessEndpoint  = "/healthz"
    defaultMetricsEndpoint   = "/metrics"
)

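The timing defaults above (and the new 30-second graceful-shutdown period) map directly onto exported fields of manager.Options later in this diff, so a consumer can override them at construction time. A hedged sketch; cfg and the lock name are placeholders:

    package example

    import (
        "time"

        "k8s.io/client-go/rest"
        "sigs.k8s.io/controller-runtime/pkg/manager"
    )

    func newManager(cfg *rest.Config) (manager.Manager, error) {
        lease := 15 * time.Second    // cf. defaultLeaseDuration
        renew := 10 * time.Second    // cf. defaultRenewDeadline
        retry := 2 * time.Second     // cf. defaultRetryPeriod
        shutdown := 30 * time.Second // cf. defaultGracefulShutdownPeriod

        return manager.New(cfg, manager.Options{
            LeaderElection:          true,
            LeaderElectionID:        "example-lock", // placeholder resource name
            LeaseDuration:           &lease,
            RenewDeadline:           &renew,
            RetryPeriod:             &retry,
            GracefulShutdownTimeout: &shutdown,
        })
    }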
var log = logf.RuntimeLog.WithName("manager")
|
||||
var _ Runnable = &controllerManager{}
|
||||
|
||||
type controllerManager struct {
|
||||
// config is the rest.config used to talk to the apiserver. Required.
|
||||
config *rest.Config
|
||||
|
||||
// scheme is the scheme injected into Controllers, EventHandlers, Sources and Predicates. Defaults
|
||||
// to scheme.scheme.
|
||||
scheme *runtime.Scheme
|
||||
// cluster holds a variety of methods to interact with a cluster. Required.
|
||||
cluster cluster.Cluster
|
||||
|
||||
// leaderElectionRunnables is the set of Controllers that the controllerManager injects deps into and Starts.
|
||||
// These Runnables are managed by lead election.
|
||||
leaderElectionRunnables []Runnable
|
||||
|
||||
// nonLeaderElectionRunnables is the set of webhook servers that the controllerManager injects deps into and Starts.
|
||||
// These Runnables will not be blocked by lead election.
|
||||
nonLeaderElectionRunnables []Runnable
|
||||
|
||||
cache cache.Cache
|
||||
|
||||
// TODO(directxman12): Provide an escape hatch to get individual indexers
|
||||
// client is the client injected into Controllers (and EventHandlers, Sources and Predicates).
|
||||
client client.Client
|
||||
|
||||
// apiReader is the reader that will make requests to the api server and not the cache.
|
||||
apiReader client.Reader
|
||||
|
||||
// fieldIndexes knows how to add field indexes over the Cache used by this controller,
|
||||
// which can later be consumed via field selectors from the injected client.
|
||||
fieldIndexes client.FieldIndexer
|
||||
|
||||
// recorderProvider is used to generate event recorders that will be injected into Controllers
|
||||
// (and EventHandlers, Sources and Predicates).
|
||||
recorderProvider recorder.Provider
|
||||
recorderProvider *intrec.Provider
|
||||
|
||||
// resourceLock forms the basis for leader election
|
||||
resourceLock resourcelock.Interface
|
||||
|
||||
// mapper is used to map resources to kind, and map kind and version.
|
||||
mapper meta.RESTMapper
|
||||
// leaderElectionReleaseOnCancel defines if the manager should step back from the leader lease
|
||||
// on shutdown
|
||||
leaderElectionReleaseOnCancel bool
|
||||
|
||||
// metricsListener is used to serve prometheus metrics
|
||||
metricsListener net.Listener
|
||||
@ -118,28 +108,33 @@ type controllerManager struct {
|
||||
started bool
|
||||
startedLeader bool
|
||||
healthzStarted bool
|
||||
errChan chan error
|
||||
|
||||
// NB(directxman12): we don't just use an error channel here to avoid the situation where the
|
||||
// error channel is too small and we end up blocking some goroutines waiting to report their errors.
|
||||
// errSignal lets us track when we should stop because an error occurred
|
||||
errSignal *errSignaler
|
||||
// controllerOptions are the global controller options.
|
||||
controllerOptions v1alpha1.ControllerConfigurationSpec
|
||||
|
||||
// internalStop is the stop channel *actually* used by everything involved
|
||||
// with the manager as a stop channel, so that we can pass a stop channel
|
||||
// to things that need it off the bat (like the Channel source). It can
|
||||
// be closed via `internalStopper` (by being the same underlying channel).
|
||||
internalStop <-chan struct{}
|
||||
// Logger is the logger that should be used by this manager.
|
||||
// If none is set, it defaults to log.Log global logger.
|
||||
logger logr.Logger
|
||||
|
||||
// internalStopper is the write side of the internal stop channel, allowing us to close it.
|
||||
// It and `internalStop` should point to the same channel.
|
||||
internalStopper chan<- struct{}
|
||||
// leaderElectionCancel is used to cancel the leader election. It is distinct from internalStopper,
|
||||
// because for safety reasons we need to os.Exit() when we lose the leader election, meaning that
|
||||
// it must be deferred until after gracefulShutdown is done.
|
||||
leaderElectionCancel context.CancelFunc
|
||||
|
||||
// leaderElectionStopped is an internal channel used to signal the stopping procedure that the
|
||||
// LeaderElection.Run(...) function has returned and the shutdown can proceed.
|
||||
leaderElectionStopped chan struct{}
|
||||
|
||||
// stop procedure engaged. In other words, we should not add anything else to the manager
|
||||
stopProcedureEngaged bool
|
||||
|
||||
// elected is closed when this manager becomes the leader of a group of
|
||||
// managers, either because it won a leader election or because no leader
|
||||
// election was configured.
|
||||
elected chan struct{}
|
||||
|
||||
startCache func(stop <-chan struct{}) error
|
||||
caches []hasCache
|
||||
|
||||
// port is the port that the webhook server serves at.
|
||||
port int
|
||||
@ -151,67 +146,57 @@ type controllerManager struct {
|
||||
certDir string
|
||||
|
||||
webhookServer *webhook.Server
|
||||
// webhookServerOnce will be called in GetWebhookServer() to optionally initialize
|
||||
// webhookServer if unset, and Add() it to controllerManager.
|
||||
webhookServerOnce sync.Once
|
||||
|
||||
// leaseDuration is the duration that non-leader candidates will
|
||||
// wait to force acquire leadership.
|
||||
leaseDuration time.Duration
|
||||
// renewDeadline is the duration that the acting master will retry
|
||||
// renewDeadline is the duration that the acting controlplane will retry
|
||||
// refreshing leadership before giving up.
|
||||
renewDeadline time.Duration
|
||||
// retryPeriod is the duration the LeaderElector clients should wait
|
||||
// between tries of actions.
|
||||
retryPeriod time.Duration
|
||||
|
||||
// waitForRunnable is holding the number of runnables currently running so that
|
||||
// we can wait for them to exit before quitting the manager
|
||||
waitForRunnable sync.WaitGroup
|
||||
|
||||
// gracefulShutdownTimeout is the duration given to runnable to stop
|
||||
// before the manager actually returns on stop.
|
||||
gracefulShutdownTimeout time.Duration
|
||||
|
||||
// onStoppedLeading is callled when the leader election lease is lost.
|
||||
// It can be overridden for tests.
|
||||
onStoppedLeading func()
|
||||
|
||||
// shutdownCtx is the context that can be used during shutdown. It will be cancelled
|
||||
// after the gracefulShutdownTimeout ended. It must not be accessed before internalStop
|
||||
// is closed because it will be nil.
|
||||
shutdownCtx context.Context
|
||||
|
||||
internalCtx context.Context
|
||||
internalCancel context.CancelFunc
|
||||
|
||||
// internalProceduresStop channel is used internally to the manager when coordinating
|
||||
// the proper shutdown of servers. This channel is also used for dependency injection.
|
||||
internalProceduresStop chan struct{}
|
||||
}
|
||||
|
||||
type errSignaler struct {
|
||||
// errSignal indicates that an error occurred, when closed. It shouldn't
|
||||
// be written to.
|
||||
errSignal chan struct{}
|
||||
|
||||
// err is the received error
|
||||
err error
|
||||
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func (r *errSignaler) SignalError(err error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if err == nil {
|
||||
// non-error, ignore
|
||||
log.Error(nil, "SignalError called without an (with a nil) error, which should never happen, ignoring")
|
||||
return
|
||||
}
|
||||
|
||||
if r.err != nil {
|
||||
// we already have an error, don't try again
|
||||
return
|
||||
}
|
||||
|
||||
// save the error and report it
|
||||
r.err = err
|
||||
close(r.errSignal)
|
||||
}
|
||||
|
||||
func (r *errSignaler) Error() error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
return r.err
|
||||
}
|
||||
|
||||
func (r *errSignaler) GotError() chan struct{} {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
return r.errSignal
|
||||
type hasCache interface {
|
||||
Runnable
|
||||
GetCache() cache.Cache
|
||||
}
|
||||
|
||||
// Add sets dependencies on i, and adds it to the list of Runnables to start.
|
||||
func (cm *controllerManager) Add(r Runnable) error {
|
||||
cm.mu.Lock()
|
||||
defer cm.mu.Unlock()
|
||||
if cm.stopProcedureEngaged {
|
||||
return errors.New("can't accept new runnable as stop procedure is already engaged")
|
||||
}
|
||||
|
||||
// Set dependencies on the object
|
||||
if err := cm.SetFields(r); err != nil {
|
||||
@ -224,6 +209,8 @@ func (cm *controllerManager) Add(r Runnable) error {
|
||||
if leRunnable, ok := r.(LeaderElectionRunnable); ok && !leRunnable.NeedLeaderElection() {
|
||||
shouldStart = cm.started
|
||||
cm.nonLeaderElectionRunnables = append(cm.nonLeaderElectionRunnables, r)
|
||||
} else if hasCache, ok := r.(hasCache); ok {
|
||||
cm.caches = append(cm.caches, hasCache)
|
||||
} else {
|
||||
shouldStart = cm.startedLeader
|
||||
cm.leaderElectionRunnables = append(cm.leaderElectionRunnables, r)
|
||||
@ -231,41 +218,27 @@ func (cm *controllerManager) Add(r Runnable) error {
|
||||
|
||||
if shouldStart {
|
||||
// If already started, start the controller
|
||||
go func() {
|
||||
if err := r.Start(cm.internalStop); err != nil {
|
||||
cm.errSignal.SignalError(err)
|
||||
}
|
||||
}()
|
||||
cm.startRunnable(r)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deprecated: use the equivalent Options field to set a field. This method will be removed in v0.10.
|
||||
func (cm *controllerManager) SetFields(i interface{}) error {
|
||||
if _, err := inject.ConfigInto(cm.config, i); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := inject.ClientInto(cm.client, i); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := inject.APIReaderInto(cm.apiReader, i); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := inject.SchemeInto(cm.scheme, i); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := inject.CacheInto(cm.cache, i); err != nil {
|
||||
if err := cm.cluster.SetFields(i); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := inject.InjectorInto(cm.SetFields, i); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := inject.StopChannelInto(cm.internalStop, i); err != nil {
|
||||
if _, err := inject.StopChannelInto(cm.internalProceduresStop, i); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := inject.MapperInto(cm.mapper, i); err != nil {
|
||||
if _, err := inject.LoggerInto(cm.logger, i); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -278,21 +251,24 @@ func (cm *controllerManager) AddMetricsExtraHandler(path string, handler http.Ha
|
||||
cm.mu.Lock()
|
||||
defer cm.mu.Unlock()
|
||||
|
||||
_, found := cm.metricsExtraHandlers[path]
|
||||
if found {
|
||||
if _, found := cm.metricsExtraHandlers[path]; found {
|
||||
return fmt.Errorf("can't register extra handler by duplicate path %q on metrics http server", path)
|
||||
}
|
||||
|
||||
cm.metricsExtraHandlers[path] = handler
|
||||
log.V(2).Info("Registering metrics http server extra handler", "path", path)
|
||||
cm.logger.V(2).Info("Registering metrics http server extra handler", "path", path)
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddHealthzCheck allows you to add Healthz checker
|
||||
// AddHealthzCheck allows you to add Healthz checker.
|
||||
func (cm *controllerManager) AddHealthzCheck(name string, check healthz.Checker) error {
|
||||
cm.mu.Lock()
|
||||
defer cm.mu.Unlock()
|
||||
|
||||
if cm.stopProcedureEngaged {
|
||||
return errors.New("can't accept new healthCheck as stop procedure is already engaged")
|
||||
}
|
||||
|
||||
if cm.healthzStarted {
|
||||
return fmt.Errorf("unable to add new checker because healthz endpoint has already been created")
|
||||
}
|
||||
@ -305,11 +281,15 @@ func (cm *controllerManager) AddHealthzCheck(name string, check healthz.Checker)
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddReadyzCheck allows you to add Readyz checker
|
||||
// AddReadyzCheck allows you to add Readyz checker.
|
||||
func (cm *controllerManager) AddReadyzCheck(name string, check healthz.Checker) error {
|
||||
cm.mu.Lock()
|
||||
defer cm.mu.Unlock()
|
||||
|
||||
if cm.stopProcedureEngaged {
|
||||
return errors.New("can't accept new ready check as stop procedure is already engaged")
|
||||
}
|
||||
|
||||
if cm.healthzStarted {
|
||||
return fmt.Errorf("unable to add new checker because readyz endpoint has already been created")
|
||||
}
|
||||
@ -323,52 +303,62 @@ func (cm *controllerManager) AddReadyzCheck(name string, check healthz.Checker)
|
||||
}
|
||||
|
||||
func (cm *controllerManager) GetConfig() *rest.Config {
|
||||
return cm.config
|
||||
return cm.cluster.GetConfig()
|
||||
}
|
||||
|
||||
func (cm *controllerManager) GetClient() client.Client {
|
||||
return cm.client
|
||||
return cm.cluster.GetClient()
|
||||
}
|
||||
|
||||
func (cm *controllerManager) GetScheme() *runtime.Scheme {
|
||||
return cm.scheme
|
||||
return cm.cluster.GetScheme()
|
||||
}
|
||||
|
||||
func (cm *controllerManager) GetFieldIndexer() client.FieldIndexer {
|
||||
return cm.fieldIndexes
|
||||
return cm.cluster.GetFieldIndexer()
|
||||
}
|
||||
|
||||
func (cm *controllerManager) GetCache() cache.Cache {
|
||||
return cm.cache
|
||||
return cm.cluster.GetCache()
|
||||
}
|
||||
|
||||
func (cm *controllerManager) GetEventRecorderFor(name string) record.EventRecorder {
|
||||
return cm.recorderProvider.GetEventRecorderFor(name)
|
||||
return cm.cluster.GetEventRecorderFor(name)
|
||||
}
|
||||
|
||||
func (cm *controllerManager) GetRESTMapper() meta.RESTMapper {
|
||||
return cm.mapper
|
||||
return cm.cluster.GetRESTMapper()
|
||||
}
|
||||
|
||||
func (cm *controllerManager) GetAPIReader() client.Reader {
|
||||
return cm.apiReader
|
||||
return cm.cluster.GetAPIReader()
|
||||
}
|
||||
|
||||
func (cm *controllerManager) GetWebhookServer() *webhook.Server {
|
||||
if cm.webhookServer == nil {
|
||||
cm.webhookServer = &webhook.Server{
|
||||
Port: cm.port,
|
||||
Host: cm.host,
|
||||
CertDir: cm.certDir,
|
||||
cm.webhookServerOnce.Do(func() {
|
||||
if cm.webhookServer == nil {
|
||||
cm.webhookServer = &webhook.Server{
|
||||
Port: cm.port,
|
||||
Host: cm.host,
|
||||
CertDir: cm.certDir,
|
||||
}
|
||||
}
|
||||
if err := cm.Add(cm.webhookServer); err != nil {
|
||||
panic("unable to add webhookServer to the controller manager")
|
||||
panic("unable to add webhook server to the controller manager")
|
||||
}
|
||||
}
|
||||
})
|
||||
return cm.webhookServer
|
||||
}
|
||||
|
||||
func (cm *controllerManager) serveMetrics(stop <-chan struct{}) {
|
||||
func (cm *controllerManager) GetLogger() logr.Logger {
|
||||
return cm.logger
|
||||
}
|
||||
|
||||
func (cm *controllerManager) GetControllerOptions() v1alpha1.ControllerConfigurationSpec {
|
||||
return cm.controllerOptions
|
||||
}
|
||||
|
||||
func (cm *controllerManager) serveMetrics() {
|
||||
handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{
|
||||
ErrorHandling: promhttp.HTTPErrorOnError,
|
||||
})
|
||||
@ -389,113 +379,228 @@ func (cm *controllerManager) serveMetrics(stop <-chan struct{}) {
|
||||
Handler: mux,
|
||||
}
|
||||
// Run the server
|
||||
go func() {
|
||||
log.Info("starting metrics server", "path", defaultMetricsEndpoint)
|
||||
cm.startRunnable(RunnableFunc(func(_ context.Context) error {
|
||||
cm.logger.Info("starting metrics server", "path", defaultMetricsEndpoint)
|
||||
if err := server.Serve(cm.metricsListener); err != nil && err != http.ErrServerClosed {
|
||||
cm.errSignal.SignalError(err)
|
||||
return err
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}))
|
||||
|
||||
// Shutdown the server when stop is closed
|
||||
<-stop
|
||||
if err := server.Shutdown(context.Background()); err != nil {
|
||||
cm.errSignal.SignalError(err)
|
||||
<-cm.internalProceduresStop
|
||||
if err := server.Shutdown(cm.shutdownCtx); err != nil {
|
||||
cm.errChan <- err
|
||||
}
|
||||
}
|
||||
|
||||
func (cm *controllerManager) serveHealthProbes(stop <-chan struct{}) {
|
||||
// TODO(hypnoglow): refactor locking to use anonymous func in the similar way
|
||||
// it's done in serveMetrics.
|
||||
cm.mu.Lock()
|
||||
func (cm *controllerManager) serveHealthProbes() {
|
||||
mux := http.NewServeMux()
|
||||
|
||||
if cm.readyzHandler != nil {
|
||||
mux.Handle(cm.readinessEndpointName, http.StripPrefix(cm.readinessEndpointName, cm.readyzHandler))
|
||||
}
|
||||
if cm.healthzHandler != nil {
|
||||
mux.Handle(cm.livenessEndpointName, http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler))
|
||||
}
|
||||
|
||||
server := http.Server{
|
||||
Handler: mux,
|
||||
}
|
||||
// Run server
|
||||
go func() {
|
||||
if err := server.Serve(cm.healthProbeListener); err != nil && err != http.ErrServerClosed {
|
||||
cm.errSignal.SignalError(err)
|
||||
|
||||
func() {
|
||||
cm.mu.Lock()
|
||||
defer cm.mu.Unlock()
|
||||
|
||||
if cm.readyzHandler != nil {
|
||||
mux.Handle(cm.readinessEndpointName, http.StripPrefix(cm.readinessEndpointName, cm.readyzHandler))
|
||||
// Append '/' suffix to handle subpaths
|
||||
mux.Handle(cm.readinessEndpointName+"/", http.StripPrefix(cm.readinessEndpointName, cm.readyzHandler))
|
||||
}
|
||||
if cm.healthzHandler != nil {
|
||||
mux.Handle(cm.livenessEndpointName, http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler))
|
||||
// Append '/' suffix to handle subpaths
|
||||
mux.Handle(cm.livenessEndpointName+"/", http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler))
|
||||
}
|
||||
|
||||
// Run server
|
||||
cm.startRunnable(RunnableFunc(func(_ context.Context) error {
|
||||
if err := server.Serve(cm.healthProbeListener); err != nil && err != http.ErrServerClosed {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}))
|
||||
cm.healthzStarted = true
|
||||
}()
|
||||
cm.healthzStarted = true
|
||||
cm.mu.Unlock()
|
||||
|
||||
// Shutdown the server when stop is closed
|
||||
<-stop
|
||||
if err := server.Shutdown(context.Background()); err != nil {
|
||||
cm.errSignal.SignalError(err)
|
||||
<-cm.internalProceduresStop
|
||||
if err := server.Shutdown(cm.shutdownCtx); err != nil {
|
||||
cm.errChan <- err
|
||||
}
|
||||
}
|
||||
|
||||
func (cm *controllerManager) Start(stop <-chan struct{}) error {
|
||||
// join the passed-in stop channel as an upstream feeding into cm.internalStopper
|
||||
defer close(cm.internalStopper)
|
||||
func (cm *controllerManager) Start(ctx context.Context) (err error) {
|
||||
if err := cm.Add(cm.cluster); err != nil {
|
||||
return fmt.Errorf("failed to add cluster to runnables: %w", err)
|
||||
}
|
||||
cm.internalCtx, cm.internalCancel = context.WithCancel(ctx)
|
||||
|
||||
// This chan indicates that stop is complete, in other words all runnables have returned or timeout on stop request
|
||||
stopComplete := make(chan struct{})
|
||||
defer close(stopComplete)
|
||||
// This must be deferred after closing stopComplete, otherwise we deadlock.
|
||||
defer func() {
|
||||
// https://hips.hearstapps.com/hmg-prod.s3.amazonaws.com/images/gettyimages-459889618-1533579787.jpg
|
||||
stopErr := cm.engageStopProcedure(stopComplete)
|
||||
if stopErr != nil {
|
||||
if err != nil {
|
||||
// Utilerrors.Aggregate allows to use errors.Is for all contained errors
|
||||
// whereas fmt.Errorf allows wrapping at most one error which means the
|
||||
// other one can not be found anymore.
|
||||
err = kerrors.NewAggregate([]error{err, stopErr})
|
||||
} else {
|
||||
err = stopErr
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// initialize this here so that we reset the signal channel state on every start
|
||||
cm.errSignal = &errSignaler{errSignal: make(chan struct{})}
|
||||
// Everything that might write into this channel must be started in a new goroutine,
|
||||
// because otherwise we might block this routine trying to write into the full channel
|
||||
// and will not be able to enter the deferred cm.engageStopProcedure() which drains
|
||||
// it.
|
||||
cm.errChan = make(chan error)
|
||||
|
||||
// Metrics should be served whether the controller is leader or not.
|
||||
// (If we don't serve metrics for non-leaders, prometheus will still scrape
|
||||
// the pod but will get a connection refused)
|
||||
if cm.metricsListener != nil {
|
||||
go cm.serveMetrics(cm.internalStop)
|
||||
go cm.serveMetrics()
|
||||
}
|
||||
|
||||
// Serve health probes
|
||||
if cm.healthProbeListener != nil {
|
||||
go cm.serveHealthProbes(cm.internalStop)
|
||||
go cm.serveHealthProbes()
|
||||
}
|
||||
|
||||
go cm.startNonLeaderElectionRunnables()
|
||||
|
||||
if cm.resourceLock != nil {
|
||||
err := cm.startLeaderElection()
|
||||
if err != nil {
|
||||
return err
|
||||
go func() {
|
||||
if cm.resourceLock != nil {
|
||||
err := cm.startLeaderElection()
|
||||
if err != nil {
|
||||
cm.errChan <- err
|
||||
}
|
||||
} else {
|
||||
// Treat not having leader election enabled the same as being elected.
|
||||
cm.startLeaderElectionRunnables()
|
||||
close(cm.elected)
|
||||
}
|
||||
} else {
|
||||
// Treat not having leader election enabled the same as being elected.
|
||||
close(cm.elected)
|
||||
go cm.startLeaderElectionRunnables()
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-stop:
|
||||
case <-ctx.Done():
|
||||
// We are done
|
||||
return nil
|
||||
case <-cm.errSignal.GotError():
|
||||
// Error starting a controller
|
||||
return cm.errSignal.Error()
|
||||
case err := <-cm.errChan:
|
||||
// Error starting or running a runnable
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// engageStopProcedure signals all runnables to stop, reads potential errors
|
||||
// from the errChan and waits for them to end. It must not be called more than once.
|
||||
func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) error {
|
||||
// Populate the shutdown context.
|
||||
var shutdownCancel context.CancelFunc
|
||||
if cm.gracefulShutdownTimeout > 0 {
|
||||
cm.shutdownCtx, shutdownCancel = context.WithTimeout(context.Background(), cm.gracefulShutdownTimeout)
|
||||
} else {
|
||||
cm.shutdownCtx, shutdownCancel = context.WithCancel(context.Background())
|
||||
}
|
||||
defer shutdownCancel()
|
||||
|
||||
// Cancel the internal stop channel and wait for the procedures to stop and complete.
|
||||
close(cm.internalProceduresStop)
|
||||
cm.internalCancel()
|
||||
|
||||
// Start draining the errors before acquiring the lock to make sure we don't deadlock
|
||||
// if something that has the lock is blocked on trying to write into the unbuffered
|
||||
// channel after something else already wrote into it.
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case err, ok := <-cm.errChan:
|
||||
if ok {
|
||||
cm.logger.Error(err, "error received after stop sequence was engaged")
|
||||
}
|
||||
case <-stopComplete:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
if cm.gracefulShutdownTimeout == 0 {
|
||||
return nil
|
||||
}
|
||||
cm.mu.Lock()
|
||||
defer cm.mu.Unlock()
|
||||
cm.stopProcedureEngaged = true
|
||||
|
||||
// we want to close this after the other runnables stop, because we don't
|
||||
// want things like leader election to try and emit events on a closed
|
||||
// channel
|
||||
defer cm.recorderProvider.Stop(cm.shutdownCtx)
|
||||
return cm.waitForRunnableToEnd(shutdownCancel)
|
||||
}
|
||||
|
||||
// waitForRunnableToEnd blocks until all runnables ended or the
|
||||
// tearDownTimeout was reached. In the latter case, an error is returned.
|
||||
func (cm *controllerManager) waitForRunnableToEnd(shutdownCancel context.CancelFunc) (retErr error) {
|
||||
// Cancel leader election only after we waited. It will os.Exit() the app for safety.
|
||||
defer func() {
|
||||
if retErr == nil && cm.leaderElectionCancel != nil {
|
||||
// After asking the context to be cancelled, make sure
|
||||
// we wait for the leader stopped channel to be closed, otherwise
|
||||
// we might encounter race conditions between this code
|
||||
// and the event recorder, which is used within leader election code.
|
||||
cm.leaderElectionCancel()
|
||||
<-cm.leaderElectionStopped
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
cm.waitForRunnable.Wait()
|
||||
shutdownCancel()
|
||||
}()
|
||||
|
||||
<-cm.shutdownCtx.Done()
|
||||
if err := cm.shutdownCtx.Err(); err != nil && err != context.Canceled {
|
||||
return fmt.Errorf("failed waiting for all runnables to end within grace period of %s: %w", cm.gracefulShutdownTimeout, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cm *controllerManager) startNonLeaderElectionRunnables() {
|
||||
cm.mu.Lock()
|
||||
defer cm.mu.Unlock()
|
||||
|
||||
cm.waitForCache()
|
||||
// First start any webhook servers, which includes conversion, validation, and defaulting
|
||||
// webhooks that are registered.
|
||||
//
|
||||
// WARNING: Webhooks MUST start before any cache is populated, otherwise there is a race condition
|
||||
// between conversion webhooks and the cache sync (usually initial list) which causes the webhooks
|
||||
// to never start because no cache can be populated.
|
||||
for _, c := range cm.nonLeaderElectionRunnables {
|
||||
if _, ok := c.(*webhook.Server); ok {
|
||||
cm.startRunnable(c)
|
||||
}
|
||||
}
|
||||
|
||||
// Start and wait for caches.
|
||||
cm.waitForCache(cm.internalCtx)
|
||||
|
||||
// Start the non-leaderelection Runnables after the cache has synced
|
||||
for _, c := range cm.nonLeaderElectionRunnables {
|
||||
if _, ok := c.(*webhook.Server); ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// Controllers block, but we want to return an error if any have an error starting.
|
||||
// Write any Start errors to a channel so we can return them
|
||||
ctrl := c
|
||||
go func() {
|
||||
if err := ctrl.Start(cm.internalStop); err != nil {
|
||||
cm.errSignal.SignalError(err)
|
||||
}
|
||||
// we use %T here because we don't have a good stand-in for "name",
|
||||
// and the full runnable might not serialize (mutexes, etc)
|
||||
log.V(1).Info("non-leader-election runnable finished", "runnable type", fmt.Sprintf("%T", ctrl))
|
||||
}()
|
||||
cm.startRunnable(c)
|
||||
}
|
||||
}
|
||||
|
||||
@ -503,48 +608,56 @@ func (cm *controllerManager) startLeaderElectionRunnables() {
|
||||
cm.mu.Lock()
|
||||
defer cm.mu.Unlock()
|
||||
|
||||
cm.waitForCache()
|
||||
cm.waitForCache(cm.internalCtx)
|
||||
|
||||
// Start the leader election Runnables after the cache has synced
|
||||
for _, c := range cm.leaderElectionRunnables {
|
||||
// Controllers block, but we want to return an error if any have an error starting.
|
||||
// Write any Start errors to a channel so we can return them
|
||||
ctrl := c
|
||||
go func() {
|
||||
if err := ctrl.Start(cm.internalStop); err != nil {
|
||||
cm.errSignal.SignalError(err)
|
||||
}
|
||||
// we use %T here because we don't have a good stand-in for "name",
|
||||
// and the full runnable might not serialize (mutexes, etc)
|
||||
log.V(1).Info("leader-election runnable finished", "runnable type", fmt.Sprintf("%T", ctrl))
|
||||
}()
|
||||
cm.startRunnable(c)
|
||||
}
|
||||
|
||||
cm.startedLeader = true
|
||||
}
|
||||
|
||||
func (cm *controllerManager) waitForCache() {
|
||||
func (cm *controllerManager) waitForCache(ctx context.Context) {
|
||||
if cm.started {
|
||||
return
|
||||
}
|
||||
|
||||
// Start the Cache. Allow the function to start the cache to be mocked out for testing
|
||||
if cm.startCache == nil {
|
||||
cm.startCache = cm.cache.Start
|
||||
for _, cache := range cm.caches {
|
||||
cm.startRunnable(cache)
|
||||
}
|
||||
go func() {
|
||||
if err := cm.startCache(cm.internalStop); err != nil {
|
||||
cm.errSignal.SignalError(err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Wait for the caches to sync.
|
||||
// TODO(community): Check the return value and write a test
|
||||
cm.cache.WaitForCacheSync(cm.internalStop)
|
||||
for _, cache := range cm.caches {
|
||||
cache.GetCache().WaitForCacheSync(ctx)
|
||||
}
|
||||
// TODO: This should be the return value of cm.cache.WaitForCacheSync but we abuse
|
||||
// cm.started as check if we already started the cache so it must always become true.
|
||||
// Making sure that the cache doesn't get started twice is needed to not get a "close
|
||||
// of closed channel" panic
|
||||
cm.started = true
|
||||
}
|
||||
|
||||
func (cm *controllerManager) startLeaderElection() (err error) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cm.mu.Lock()
|
||||
cm.leaderElectionCancel = cancel
|
||||
cm.mu.Unlock()
|
||||
|
||||
if cm.onStoppedLeading == nil {
|
||||
cm.onStoppedLeading = func() {
|
||||
// Make sure graceful shutdown is skipped if we lost the leader lock without
|
||||
// intending to.
|
||||
cm.gracefulShutdownTimeout = time.Duration(0)
|
||||
// Most implementations of leader election log.Fatal() here.
|
||||
// Since Start is wrapped in log.Fatal when called, we can just return
|
||||
// an error here which will cause the program to exit.
|
||||
cm.errChan <- errors.New("leader election lost")
|
||||
}
|
||||
}
|
||||
l, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
|
||||
Lock: cm.resourceLock,
|
||||
LeaseDuration: cm.leaseDuration,
|
||||
@ -552,35 +665,36 @@ func (cm *controllerManager) startLeaderElection() (err error) {
|
||||
RetryPeriod: cm.retryPeriod,
|
||||
Callbacks: leaderelection.LeaderCallbacks{
|
||||
OnStartedLeading: func(_ context.Context) {
|
||||
close(cm.elected)
|
||||
cm.startLeaderElectionRunnables()
|
||||
close(cm.elected)
|
||||
},
|
||||
OnStoppedLeading: func() {
|
||||
// Most implementations of leader election log.Fatal() here.
|
||||
// Since Start is wrapped in log.Fatal when called, we can just return
|
||||
// an error here which will cause the program to exit.
|
||||
cm.errSignal.SignalError(fmt.Errorf("leader election lost"))
|
||||
},
|
||||
OnStoppedLeading: cm.onStoppedLeading,
|
||||
},
|
||||
ReleaseOnCancel: cm.leaderElectionReleaseOnCancel,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
go func() {
|
||||
select {
|
||||
case <-cm.internalStop:
|
||||
cancel()
|
||||
case <-ctx.Done():
|
||||
}
|
||||
}()
|
||||
|
||||
// Start the leader elector process
|
||||
go l.Run(ctx)
|
||||
go func() {
|
||||
l.Run(ctx)
|
||||
<-ctx.Done()
|
||||
close(cm.leaderElectionStopped)
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cm *controllerManager) Elected() <-chan struct{} {
    return cm.elected
}

func (cm *controllerManager) startRunnable(r Runnable) {
    cm.waitForRunnable.Add(1)
    go func() {
        defer cm.waitForRunnable.Done()
        if err := r.Start(cm.internalCtx); err != nil {
            cm.errChan <- err
        }
    }()
}

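startRunnable above, together with engageStopProcedure earlier in this file, replaces the old errSignaler: every runnable is counted in a WaitGroup, failures are sent on an unbuffered error channel, and shutdown drains that channel while waiting with a timeout. A stand-alone sketch of the same pattern outside the manager (names here are illustrative, not controller-runtime API):

    package example

    import (
        "context"
        "fmt"
        "sync"
        "time"
    )

    // runAll tracks each task in a WaitGroup, collects failures on an
    // unbuffered error channel, and drains that channel during shutdown.
    func runAll(parent context.Context, tasks ...func(context.Context) error) error {
        ctx, cancel := context.WithCancel(parent)
        defer cancel()

        var wg sync.WaitGroup
        errCh := make(chan error) // unbuffered, like cm.errChan

        for _, t := range tasks {
            wg.Add(1)
            go func(t func(context.Context) error) {
                defer wg.Done()
                if err := t(ctx); err != nil {
                    errCh <- err
                }
            }(t)
        }

        // Block until the first failure or until the caller's context ends.
        var firstErr error
        select {
        case firstErr = <-errCh:
        case <-ctx.Done():
        }

        // Stop procedure: cancel the tasks, drain late errors so nothing
        // blocks on the unbuffered channel, and bound the wait, mirroring
        // gracefulShutdownTimeout.
        cancel()
        stopComplete := make(chan struct{})
        defer close(stopComplete)
        go func() {
            for {
                select {
                case err := <-errCh:
                    fmt.Println("error received after stop sequence was engaged:", err)
                case <-stopComplete:
                    return
                }
            }
        }()

        done := make(chan struct{})
        go func() { wg.Wait(); close(done) }()
        select {
        case <-done:
            return firstErr
        case <-time.After(30 * time.Second): // cf. defaultGracefulShutdownPeriod
            return fmt.Errorf("timed out waiting for tasks to stop")
        }
    }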
454 vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go (generated, vendored)
@ -17,33 +17,41 @@ limitations under the License.
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/leaderelection/resourcelock"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/cluster"
|
||||
"sigs.k8s.io/controller-runtime/pkg/config"
|
||||
"sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/healthz"
|
||||
internalrecorder "sigs.k8s.io/controller-runtime/pkg/internal/recorder"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
|
||||
intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder"
|
||||
"sigs.k8s.io/controller-runtime/pkg/leaderelection"
|
||||
"sigs.k8s.io/controller-runtime/pkg/metrics"
|
||||
"sigs.k8s.io/controller-runtime/pkg/recorder"
|
||||
"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
|
||||
"sigs.k8s.io/controller-runtime/pkg/webhook"
|
||||
)
|
||||
|
||||
// Manager initializes shared dependencies such as Caches and Clients, and provides them to Runnables.
|
||||
// A Manager is required to create Controllers.
|
||||
type Manager interface {
|
||||
// Cluster holds a variety of methods to interact with a cluster.
|
||||
cluster.Cluster
|
||||
|
||||
// Add will set requested dependencies on the component, and cause the component to be
|
||||
// started when Start is called. Add will inject any dependencies for which the argument
|
||||
// implements the inject interface - e.g. inject.Client.
|
||||
@ -56,10 +64,6 @@ type Manager interface {
|
||||
// election was configured.
|
||||
Elected() <-chan struct{}
|
||||
|
||||
// SetFields will set any dependencies on an object for which the object has implemented the inject
|
||||
// interface - e.g. inject.Client.
|
||||
SetFields(interface{}) error
|
||||
|
||||
// AddMetricsExtraHandler adds an extra handler served on path to the http server that serves metrics.
|
||||
// Might be useful to register some diagnostic endpoints e.g. pprof. Note that these endpoints meant to be
|
||||
// sensitive and shouldn't be exposed publicly.
|
||||
@ -73,44 +77,25 @@ type Manager interface {
|
||||
// AddReadyzCheck allows you to add Readyz checker
|
||||
AddReadyzCheck(name string, check healthz.Checker) error
|
||||
|
||||
// Start starts all registered Controllers and blocks until the Stop channel is closed.
|
||||
// Start starts all registered Controllers and blocks until the context is cancelled.
|
||||
// Returns an error if there is an error starting any controller.
|
||||
Start(<-chan struct{}) error
|
||||
|
||||
// GetConfig returns an initialized Config
|
||||
GetConfig() *rest.Config
|
||||
|
||||
// GetScheme returns an initialized Scheme
|
||||
GetScheme() *runtime.Scheme
|
||||
|
||||
// GetClient returns a client configured with the Config. This client may
|
||||
// not be a fully "direct" client -- it may read from a cache, for
|
||||
// instance. See Options.NewClient for more information on how the default
|
||||
// implementation works.
|
||||
GetClient() client.Client
|
||||
|
||||
// GetFieldIndexer returns a client.FieldIndexer configured with the client
|
||||
GetFieldIndexer() client.FieldIndexer
|
||||
|
||||
// GetCache returns a cache.Cache
|
||||
GetCache() cache.Cache
|
||||
|
||||
// GetEventRecorderFor returns a new EventRecorder for the provided name
|
||||
GetEventRecorderFor(name string) record.EventRecorder
|
||||
|
||||
// GetRESTMapper returns a RESTMapper
|
||||
GetRESTMapper() meta.RESTMapper
|
||||
|
||||
// GetAPIReader returns a reader that will be configured to use the API server.
|
||||
// This should be used sparingly and only when the client does not fit your
|
||||
// use case.
|
||||
GetAPIReader() client.Reader
|
||||
//
|
||||
// If LeaderElection is used, the binary must be exited immediately after this returns,
|
||||
// otherwise components that need leader election might continue to run after the leader
|
||||
// lock was lost.
|
||||
Start(ctx context.Context) error
|
||||
|
||||
// GetWebhookServer returns a webhook.Server
|
||||
GetWebhookServer() *webhook.Server
|
||||
|
||||
// GetLogger returns this manager's logger.
|
||||
GetLogger() logr.Logger
|
||||
|
||||
// GetControllerOptions returns controller global configuration options.
|
||||
GetControllerOptions() v1alpha1.ControllerConfigurationSpec
|
||||
}
|
||||
|
||||
// Options are the arguments for creating a new Manager
|
||||
// Options are the arguments for creating a new Manager.
|
||||
type Options struct {
|
||||
// Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources
|
||||
// Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better
|
||||
@ -126,25 +111,74 @@ type Options struct {
|
||||
// value only if you know what you are doing. Defaults to 10 hours if unset.
|
||||
// there will a 10 percent jitter between the SyncPeriod of all controllers
|
||||
// so that all controllers will not send list requests simultaneously.
|
||||
//
|
||||
// This applies to all controllers.
|
||||
//
|
||||
// A period sync happens for two reasons:
|
||||
// 1. To insure against a bug in the controller that causes an object to not
|
||||
// be requeued, when it otherwise should be requeued.
|
||||
// 2. To insure against an unknown bug in controller-runtime, or its dependencies,
|
||||
// that causes an object to not be requeued, when it otherwise should be
|
||||
// requeued, or to be removed from the queue, when it otherwise should not
|
||||
// be removed.
|
||||
//
|
||||
// If you want
|
||||
// 1. to insure against missed watch events, or
|
||||
// 2. to poll services that cannot be watched,
|
||||
// then we recommend that, instead of changing the default period, the
|
||||
// controller requeue, with a constant duration `t`, whenever the controller
|
||||
// is "done" with an object, and would otherwise not requeue it, i.e., we
|
||||
// recommend the `Reconcile` function return `reconcile.Result{RequeueAfter: t}`,
|
||||
// instead of `reconcile.Result{}`.
|
||||
SyncPeriod *time.Duration
|
||||
|
||||
// Logger is the logger that should be used by this manager.
|
||||
// If none is set, it defaults to log.Log global logger.
|
||||
Logger logr.Logger
|
||||
|
||||
// LeaderElection determines whether or not to use leader election when
|
||||
// starting the manager.
|
||||
LeaderElection bool
|
||||
|
||||
// LeaderElectionResourceLock determines which resource lock to use for leader election,
|
||||
// defaults to "configmapsleases". Change this value only if you know what you are doing.
|
||||
// Otherwise, users of your controller might end up with multiple running instances that
|
||||
// each acquired leadership through different resource locks during upgrades and thus
|
||||
// act on the same resources concurrently.
|
||||
// If you want to migrate to the "leases" resource lock, you might do so by migrating to the
|
||||
// respective multilock first ("configmapsleases" or "endpointsleases"), which will acquire a
|
||||
// leader lock on both resources. After all your users have migrated to the multilock, you can
|
||||
// go ahead and migrate to "leases". Please also keep in mind, that users might skip versions
|
||||
// of your controller.
|
||||
//
|
||||
// Note: before controller-runtime version v0.7, the resource lock was set to "configmaps".
|
||||
// Please keep this in mind, when planning a proper migration path for your controller.
|
||||
LeaderElectionResourceLock string
|
||||
|
||||
// LeaderElectionNamespace determines the namespace in which the leader
|
||||
// election configmap will be created.
|
||||
// election resource will be created.
|
||||
LeaderElectionNamespace string
|
||||
|
||||
// LeaderElectionID determines the name of the configmap that leader election
|
||||
// LeaderElectionID determines the name of the resource that leader election
|
||||
// will use for holding the leader lock.
|
||||
LeaderElectionID string
|
||||
|
||||
// LeaderElectionConfig can be specified to override the default configuration
|
||||
// that is used to build the leader election client.
|
||||
LeaderElectionConfig *rest.Config
|
||||
|
||||
// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
|
||||
// when the Manager ends. This requires the binary to immediately end when the
|
||||
// Manager is stopped, otherwise this setting is unsafe. Setting this significantly
|
||||
// speeds up voluntary leader transitions as the new leader doesn't have to wait
|
||||
// LeaseDuration time first.
|
||||
LeaderElectionReleaseOnCancel bool
|
||||
|
||||
// LeaseDuration is the duration that non-leader candidates will
|
||||
// wait to force acquire leadership. This is measured against time of
|
||||
// last observed ack. Default is 15 seconds.
|
||||
LeaseDuration *time.Duration
|
||||
// RenewDeadline is the duration that the acting master will retry
|
||||
// RenewDeadline is the duration that the acting controlplane will retry
|
||||
// refreshing leadership before giving up. Default is 10 seconds.
|
||||
RenewDeadline *time.Duration
|
||||
// RetryPeriod is the duration the LeaderElector clients should wait
|
||||
@ -175,27 +209,38 @@ type Options struct {
|
||||
LivenessEndpointName string
|
||||
|
||||
// Port is the port that the webhook server serves at.
|
||||
// It is used to set webhook.Server.Port.
|
||||
// It is used to set webhook.Server.Port if WebhookServer is not set.
|
||||
Port int
|
||||
// Host is the hostname that the webhook server binds to.
|
||||
// It is used to set webhook.Server.Host.
|
||||
// It is used to set webhook.Server.Host if WebhookServer is not set.
|
||||
Host string
|
||||
|
||||
// CertDir is the directory that contains the server key and certificate.
|
||||
// if not set, webhook server would look up the server key and certificate in
|
||||
// If not set, webhook server would look up the server key and certificate in
|
||||
// {TempDir}/k8s-webhook-server/serving-certs. The server key and certificate
|
||||
// must be named tls.key and tls.crt, respectively.
|
||||
// It is used to set webhook.Server.CertDir if WebhookServer is not set.
|
||||
CertDir string
|
||||
|
||||
// WebhookServer is an externally configured webhook.Server. By default,
|
||||
// a Manager will create a default server using Port, Host, and CertDir;
|
||||
// if this is set, the Manager will use this server instead.
|
||||
WebhookServer *webhook.Server
|
||||
|
||||
// Functions to all for a user to customize the values that will be injected.
|
||||
|
||||
// NewCache is the function that will create the cache to be used
|
||||
// by the manager. If not set this will use the default new cache function.
|
||||
NewCache cache.NewCacheFunc
|
||||
|
||||
// NewClient will create the client to be used by the manager.
|
||||
// NewClient is the func that creates the client to be used by the manager.
|
||||
// If not set this will create the default DelegatingClient that will
|
||||
// use the cache for reads and the client for writes.
|
||||
NewClient NewClientFunc
|
||||
NewClient cluster.NewClientFunc
|
||||
|
||||
// ClientDisableCacheFor tells the client that, if any cache is used, to bypass it
|
||||
// for the given objects.
|
||||
ClientDisableCacheFor []client.Object
|
||||
|
||||
// DryRunClient specifies whether the client should be configured to enforce
|
||||
// dryRun mode.
|
||||
@ -203,36 +248,53 @@ type Options struct {
|
||||
|
||||
// EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API
|
||||
// Use this to customize the event correlator and spam filter
|
||||
//
|
||||
// Deprecated: using this may cause goroutine leaks if the lifetime of your manager or controllers
|
||||
// is shorter than the lifetime of your process.
|
||||
EventBroadcaster record.EventBroadcaster
|
||||
|
||||
// GracefulShutdownTimeout is the duration given to runnable to stop before the manager actually returns on stop.
|
||||
// To disable graceful shutdown, set to time.Duration(0)
|
||||
// To use graceful shutdown without timeout, set to a negative duration, e.G. time.Duration(-1)
|
||||
// The graceful shutdown is skipped for safety reasons in case the leader election lease is lost.
|
||||
GracefulShutdownTimeout *time.Duration
|
||||
|
||||
// Controller contains global configuration options for controllers
|
||||
// registered within this manager.
|
||||
// +optional
|
||||
Controller v1alpha1.ControllerConfigurationSpec
|
||||
|
||||
// makeBroadcaster allows deferring the creation of the broadcaster to
|
||||
// avoid leaking goroutines if we never call Start on this manager. It also
|
||||
// returns whether or not this is a "owned" broadcaster, and as such should be
|
||||
// stopped with the manager.
|
||||
makeBroadcaster intrec.EventBroadcasterProducer
|
||||
|
||||
// Dependency injection for testing
|
||||
newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, broadcaster record.EventBroadcaster) (recorder.Provider, error)
|
||||
newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error)
|
||||
newResourceLock func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error)
|
||||
newMetricsListener func(addr string) (net.Listener, error)
|
||||
newHealthProbeListener func(addr string) (net.Listener, error)
|
||||
}
|
||||
|
||||
// NewClientFunc allows a user to define how to create a client
|
||||
type NewClientFunc func(cache cache.Cache, config *rest.Config, options client.Options) (client.Client, error)
|
||||
|
||||
// Runnable allows a component to be started.
// It's very important that Start blocks until
// it's done running.
type Runnable interface {
    // Start starts running the component. The component will stop running
    // when the channel is closed. Start blocks until the channel is closed or
    // when the context is closed. Start blocks until the context is closed or
    // an error occurs.
    Start(<-chan struct{}) error
    Start(context.Context) error
}

// RunnableFunc implements Runnable using a function.
// It's very important that the given function block
// until it's done running.
type RunnableFunc func(<-chan struct{}) error
type RunnableFunc func(context.Context) error

// Start implements Runnable
func (r RunnableFunc) Start(s <-chan struct{}) error {
    return r(s)
// Start implements Runnable.
func (r RunnableFunc) Start(ctx context.Context) error {
    return r(ctx)
}

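With the context-based signature, any func(context.Context) error can be wrapped in RunnableFunc and registered on a manager. A minimal sketch, assuming mgr is a manager.Manager and the runnable body is illustrative:

    package example

    import (
        "context"

        "sigs.k8s.io/controller-runtime/pkg/manager"
    )

    func addWaiter(mgr manager.Manager) error {
        return mgr.Add(manager.RunnableFunc(func(ctx context.Context) error {
            // Block until the manager's context is cancelled, as Runnable requires.
            <-ctx.Done()
            return nil
        }))
    }

Under the previous API the same function would have received a <-chan struct{} and selected on it instead of ctx.Done().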
// LeaderElectionRunnable knows if a Runnable needs to be run in the leader election mode.
|
||||
@ -244,54 +306,43 @@ type LeaderElectionRunnable interface {
|
||||
|
||||
// New returns a new Manager for creating Controllers.
|
||||
func New(config *rest.Config, options Options) (Manager, error) {
|
||||
// Initialize a rest.config if none was specified
|
||||
if config == nil {
|
||||
return nil, fmt.Errorf("must specify Config")
|
||||
}
|
||||
|
||||
// Set default values for options fields
|
||||
options = setOptionsDefaults(options)
|
||||
|
||||
// Create the mapper provider
|
||||
mapper, err := options.MapperProvider(config)
|
||||
if err != nil {
|
||||
log.Error(err, "Failed to get API Group-Resources")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create the cache for the cached read client and registering informers
|
||||
cache, err := options.NewCache(config, cache.Options{Scheme: options.Scheme, Mapper: mapper, Resync: options.SyncPeriod, Namespace: options.Namespace})
|
||||
cluster, err := cluster.New(config, func(clusterOptions *cluster.Options) {
|
||||
clusterOptions.Scheme = options.Scheme
|
||||
clusterOptions.MapperProvider = options.MapperProvider
|
||||
clusterOptions.Logger = options.Logger
|
||||
clusterOptions.SyncPeriod = options.SyncPeriod
|
||||
clusterOptions.Namespace = options.Namespace
|
||||
clusterOptions.NewCache = options.NewCache
|
||||
clusterOptions.NewClient = options.NewClient
|
||||
clusterOptions.ClientDisableCacheFor = options.ClientDisableCacheFor
|
||||
clusterOptions.DryRunClient = options.DryRunClient
|
||||
clusterOptions.EventBroadcaster = options.EventBroadcaster //nolint:staticcheck
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
apiReader, err := client.New(config, client.Options{Scheme: options.Scheme, Mapper: mapper})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
writeObj, err := options.NewClient(cache, config, client.Options{Scheme: options.Scheme, Mapper: mapper})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if options.DryRunClient {
|
||||
writeObj = client.NewDryRunClient(writeObj)
|
||||
}
|
||||
|
||||
// Create the recorder provider to inject event recorders for the components.
|
||||
// TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific
|
||||
// to the particular controller that it's being injected into, rather than a generic one like is here.
|
||||
recorderProvider, err := options.newRecorderProvider(config, options.Scheme, log.WithName("events"), options.EventBroadcaster)
|
||||
recorderProvider, err := options.newRecorderProvider(config, cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create the resource lock to enable leader election)
|
||||
resourceLock, err := options.newResourceLock(config, recorderProvider, leaderelection.Options{
|
||||
LeaderElection: options.LeaderElection,
|
||||
LeaderElectionID: options.LeaderElectionID,
|
||||
LeaderElectionNamespace: options.LeaderElectionNamespace,
|
||||
leaderConfig := options.LeaderElectionConfig
|
||||
if leaderConfig == nil {
|
||||
leaderConfig = rest.CopyConfig(config)
|
||||
}
|
||||
resourceLock, err := options.newResourceLock(leaderConfig, recorderProvider, leaderelection.Options{
|
||||
LeaderElection: options.LeaderElection,
|
||||
LeaderElectionResourceLock: options.LeaderElectionResourceLock,
|
||||
LeaderElectionID: options.LeaderElectionID,
|
||||
LeaderElectionNamespace: options.LeaderElectionNamespace,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -314,54 +365,141 @@ func New(config *rest.Config, options Options) (Manager, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stop := make(chan struct{})
|
||||
|
||||
return &controllerManager{
|
||||
config: config,
|
||||
scheme: options.Scheme,
|
||||
cache: cache,
|
||||
fieldIndexes: cache,
|
||||
client: writeObj,
|
||||
apiReader: apiReader,
|
||||
recorderProvider: recorderProvider,
|
||||
resourceLock: resourceLock,
|
||||
mapper: mapper,
|
||||
metricsListener: metricsListener,
|
||||
metricsExtraHandlers: metricsExtraHandlers,
|
||||
internalStop: stop,
|
||||
internalStopper: stop,
|
||||
elected: make(chan struct{}),
|
||||
port: options.Port,
|
||||
host: options.Host,
|
||||
certDir: options.CertDir,
|
||||
leaseDuration: *options.LeaseDuration,
|
||||
renewDeadline: *options.RenewDeadline,
|
||||
retryPeriod: *options.RetryPeriod,
|
||||
healthProbeListener: healthProbeListener,
|
||||
readinessEndpointName: options.ReadinessEndpointName,
|
||||
livenessEndpointName: options.LivenessEndpointName,
|
||||
cluster: cluster,
|
||||
recorderProvider: recorderProvider,
|
||||
resourceLock: resourceLock,
|
||||
metricsListener: metricsListener,
|
||||
metricsExtraHandlers: metricsExtraHandlers,
|
||||
controllerOptions: options.Controller,
|
||||
logger: options.Logger,
|
||||
elected: make(chan struct{}),
|
||||
port: options.Port,
|
||||
host: options.Host,
|
||||
certDir: options.CertDir,
|
||||
webhookServer: options.WebhookServer,
|
||||
leaseDuration: *options.LeaseDuration,
|
||||
renewDeadline: *options.RenewDeadline,
|
||||
retryPeriod: *options.RetryPeriod,
|
||||
healthProbeListener: healthProbeListener,
|
||||
readinessEndpointName: options.ReadinessEndpointName,
|
||||
livenessEndpointName: options.LivenessEndpointName,
|
||||
gracefulShutdownTimeout: *options.GracefulShutdownTimeout,
|
||||
internalProceduresStop: make(chan struct{}),
|
||||
leaderElectionStopped: make(chan struct{}),
|
||||
leaderElectionReleaseOnCancel: options.LeaderElectionReleaseOnCancel,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// defaultNewClient creates the default caching client
|
||||
func defaultNewClient(cache cache.Cache, config *rest.Config, options client.Options) (client.Client, error) {
|
||||
// Create the Client for Write operations.
|
||||
c, err := client.New(config, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// AndFrom will use a supplied type and convert to Options
|
||||
// any options already set on Options will be ignored, this is used to allow
|
||||
// cli flags to override anything specified in the config file.
|
||||
func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, error) {
|
||||
if inj, wantsScheme := loader.(inject.Scheme); wantsScheme {
|
||||
err := inj.InjectScheme(o.Scheme)
|
||||
if err != nil {
|
||||
return o, err
|
||||
}
|
||||
}
|
||||
|
||||
return &client.DelegatingClient{
|
||||
Reader: &client.DelegatingReader{
|
||||
CacheReader: cache,
|
||||
ClientReader: c,
|
||||
},
|
||||
Writer: c,
|
||||
StatusClient: c,
|
||||
}, nil
|
||||
newObj, err := loader.Complete()
|
||||
if err != nil {
|
||||
return o, err
|
||||
}
|
||||
|
||||
o = o.setLeaderElectionConfig(newObj)
|
||||
|
||||
if o.SyncPeriod == nil && newObj.SyncPeriod != nil {
|
||||
o.SyncPeriod = &newObj.SyncPeriod.Duration
|
||||
}
|
||||
|
||||
if o.Namespace == "" && newObj.CacheNamespace != "" {
|
||||
o.Namespace = newObj.CacheNamespace
|
||||
}
|
||||
|
||||
if o.MetricsBindAddress == "" && newObj.Metrics.BindAddress != "" {
|
||||
o.MetricsBindAddress = newObj.Metrics.BindAddress
|
||||
}
|
||||
|
||||
if o.HealthProbeBindAddress == "" && newObj.Health.HealthProbeBindAddress != "" {
|
||||
o.HealthProbeBindAddress = newObj.Health.HealthProbeBindAddress
|
||||
}
|
||||
|
||||
if o.ReadinessEndpointName == "" && newObj.Health.ReadinessEndpointName != "" {
|
||||
o.ReadinessEndpointName = newObj.Health.ReadinessEndpointName
|
||||
}
|
||||
|
||||
if o.LivenessEndpointName == "" && newObj.Health.LivenessEndpointName != "" {
|
||||
o.LivenessEndpointName = newObj.Health.LivenessEndpointName
|
||||
}
|
||||
|
||||
if o.Port == 0 && newObj.Webhook.Port != nil {
|
||||
o.Port = *newObj.Webhook.Port
|
||||
}
|
||||
|
||||
if o.Host == "" && newObj.Webhook.Host != "" {
|
||||
o.Host = newObj.Webhook.Host
|
||||
}
|
||||
|
||||
if o.CertDir == "" && newObj.Webhook.CertDir != "" {
|
||||
o.CertDir = newObj.Webhook.CertDir
|
||||
}
|
||||
|
||||
if newObj.Controller != nil {
|
||||
if o.Controller.CacheSyncTimeout == nil && newObj.Controller.CacheSyncTimeout != nil {
|
||||
o.Controller.CacheSyncTimeout = newObj.Controller.CacheSyncTimeout
|
||||
}
|
||||
|
||||
if len(o.Controller.GroupKindConcurrency) == 0 && len(newObj.Controller.GroupKindConcurrency) > 0 {
|
||||
o.Controller.GroupKindConcurrency = newObj.Controller.GroupKindConcurrency
|
||||
}
|
||||
}
|
||||
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// defaultHealthProbeListener creates the default health probes listener bound to the given address
|
||||
// AndFromOrDie will use options.AndFrom() and will panic if there are errors.
|
||||
func (o Options) AndFromOrDie(loader config.ControllerManagerConfiguration) Options {
|
||||
o, err := o.AndFrom(loader)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("could not parse config file: %v", err))
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o Options) setLeaderElectionConfig(obj v1alpha1.ControllerManagerConfigurationSpec) Options {
|
||||
if !o.LeaderElection && obj.LeaderElection.LeaderElect != nil {
|
||||
o.LeaderElection = *obj.LeaderElection.LeaderElect
|
||||
}
|
||||
|
||||
if o.LeaderElectionResourceLock == "" && obj.LeaderElection.ResourceLock != "" {
|
||||
o.LeaderElectionResourceLock = obj.LeaderElection.ResourceLock
|
||||
}
|
||||
|
||||
if o.LeaderElectionNamespace == "" && obj.LeaderElection.ResourceNamespace != "" {
|
||||
o.LeaderElectionNamespace = obj.LeaderElection.ResourceNamespace
|
||||
}
|
||||
|
||||
if o.LeaderElectionID == "" && obj.LeaderElection.ResourceName != "" {
|
||||
o.LeaderElectionID = obj.LeaderElection.ResourceName
|
||||
}
|
||||
|
||||
if o.LeaseDuration == nil && !reflect.DeepEqual(obj.LeaderElection.LeaseDuration, metav1.Duration{}) {
|
||||
o.LeaseDuration = &obj.LeaderElection.LeaseDuration.Duration
|
||||
}
|
||||
|
||||
if o.RenewDeadline == nil && !reflect.DeepEqual(obj.LeaderElection.RenewDeadline, metav1.Duration{}) {
|
||||
o.RenewDeadline = &obj.LeaderElection.RenewDeadline.Duration
|
||||
}
|
||||
|
||||
if o.RetryPeriod == nil && !reflect.DeepEqual(obj.LeaderElection.RetryPeriod, metav1.Duration{}) {
|
||||
o.RetryPeriod = &obj.LeaderElection.RetryPeriod.Duration
|
||||
}
|
||||
|
||||
return o
|
||||
}
|
||||
|
||||
// defaultHealthProbeListener creates the default health probes listener bound to the given address.
|
||||
func defaultHealthProbeListener(addr string) (net.Listener, error) {
|
||||
if addr == "" || addr == "0" {
|
||||
return nil, nil
|
||||
@ -374,37 +512,30 @@ func defaultHealthProbeListener(addr string) (net.Listener, error) {
|
||||
return ln, nil
|
||||
}
|
||||
|
||||
// setOptionsDefaults set default values for Options fields
|
||||
// setOptionsDefaults set default values for Options fields.
|
||||
func setOptionsDefaults(options Options) Options {
|
||||
// Use the Kubernetes client-go scheme if none is specified
|
||||
if options.Scheme == nil {
|
||||
options.Scheme = scheme.Scheme
|
||||
}
|
||||
|
||||
if options.MapperProvider == nil {
|
||||
options.MapperProvider = func(c *rest.Config) (meta.RESTMapper, error) {
|
||||
return apiutil.NewDynamicRESTMapper(c)
|
||||
}
|
||||
}
|
||||
|
||||
// Allow newClient to be mocked
|
||||
if options.NewClient == nil {
|
||||
options.NewClient = defaultNewClient
|
||||
}
|
||||
|
||||
// Allow newCache to be mocked
|
||||
if options.NewCache == nil {
|
||||
options.NewCache = cache.New
|
||||
// Allow newResourceLock to be mocked
|
||||
if options.newResourceLock == nil {
|
||||
options.newResourceLock = leaderelection.NewResourceLock
|
||||
}
|
||||
|
||||
// Allow newRecorderProvider to be mocked
|
||||
if options.newRecorderProvider == nil {
|
||||
options.newRecorderProvider = internalrecorder.NewProvider
|
||||
options.newRecorderProvider = intrec.NewProvider
|
||||
}
|
||||
|
||||
// Allow newResourceLock to be mocked
|
||||
if options.newResourceLock == nil {
|
||||
options.newResourceLock = leaderelection.NewResourceLock
|
||||
// This is duplicated with pkg/cluster, we need it here
|
||||
// for the leader election and there to provide the user with
|
||||
// an EventBroadcaster
|
||||
if options.EventBroadcaster == nil {
|
||||
// defer initialization to avoid leaking by default
|
||||
options.makeBroadcaster = func() (record.EventBroadcaster, bool) {
|
||||
return record.NewBroadcaster(), true
|
||||
}
|
||||
} else {
|
||||
options.makeBroadcaster = func() (record.EventBroadcaster, bool) {
|
||||
return options.EventBroadcaster, false
|
||||
}
|
||||
}
|
||||
|
||||
if options.newMetricsListener == nil {
|
||||
@ -423,10 +554,6 @@ func setOptionsDefaults(options Options) Options {
|
||||
options.RetryPeriod = &retryPeriod
|
||||
}
|
||||
|
||||
if options.EventBroadcaster == nil {
|
||||
options.EventBroadcaster = record.NewBroadcaster()
|
||||
}
|
||||
|
||||
if options.ReadinessEndpointName == "" {
|
||||
options.ReadinessEndpointName = defaultReadinessEndpoint
|
||||
}
|
||||
@@ -439,5 +566,14 @@ func setOptionsDefaults(options Options) Options {
        options.newHealthProbeListener = defaultHealthProbeListener
    }

    if options.GracefulShutdownTimeout == nil {
        gracefulShutdownTimeout := defaultGracefulShutdownPeriod
        options.GracefulShutdownTimeout = &gracefulShutdownTimeout
    }

    if options.Logger == nil {
        options.Logger = logf.RuntimeLog.WithName("manager")
    }

    return options
}

10 vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal.go (generated, vendored)
@@ -17,6 +17,7 @@ limitations under the License.
package signals

import (
    "context"
    "os"
    "os/signal"
)
@@ -26,18 +27,19 @@ var onlyOneSignalHandler = make(chan struct{})
// SetupSignalHandler registers for SIGTERM and SIGINT. A stop channel is returned
// which is closed on one of these signals. If a second signal is caught, the program
// is terminated with exit code 1.
func SetupSignalHandler() (stopCh <-chan struct{}) {
func SetupSignalHandler() context.Context {
    close(onlyOneSignalHandler) // panics when called twice

    stop := make(chan struct{})
    ctx, cancel := context.WithCancel(context.Background())

    c := make(chan os.Signal, 2)
    signal.Notify(c, shutdownSignals...)
    go func() {
        <-c
        close(stop)
        cancel()
        <-c
        os.Exit(1) // second signal. Exit directly.
    }()

    return stop
    return ctx
}

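Because SetupSignalHandler now returns a context rather than a channel, code that still feeds a stop channel to an unmigrated API can bridge through ctx.Done(), which closes at the same moment the old stop channel did. A small sketch:

    package main

    import (
        "sigs.k8s.io/controller-runtime/pkg/manager/signals"
    )

    func main() {
        ctx := signals.SetupSignalHandler() // cancelled on SIGINT/SIGTERM

        // Bridge for code paths that still expect a stop channel: the
        // context's Done channel closes when the old stop channel used to.
        stopCh := ctx.Done()

        <-stopCh // e.g. wait for shutdown in a component not yet migrated
    }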