/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package app implements a server that runs a set of active
// components. This includes replication controllers, service endpoints and
// nodes.
//
package app

import (
	"fmt"
	"io/ioutil"
	"math/rand"
	"net"
	"net/http"
	"net/http/pprof"
	"os"
	goruntime "runtime"
	"strconv"
	"time"

	"k8s.io/apimachinery/pkg/runtime/schema"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"

	"k8s.io/apiserver/pkg/server/healthz"

	"k8s.io/api/core/v1"
	"k8s.io/client-go/discovery"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/record"
	certutil "k8s.io/client-go/util/cert"

	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/cloudprovider"
	"k8s.io/kubernetes/pkg/controller"
	serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
	"k8s.io/kubernetes/pkg/serviceaccount"
	"k8s.io/kubernetes/pkg/util/configz"
	"k8s.io/kubernetes/pkg/version"

	"github.com/golang/glog"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
)

const (
	// ControllerStartJitter is the jitter used when starting controller managers.
	ControllerStartJitter = 1.0
)

// NewControllerManagerCommand creates a *cobra.Command object with default parameters
func NewControllerManagerCommand() *cobra.Command {
	s := options.NewCMServer()
	s.AddFlags(pflag.CommandLine, KnownControllers(), ControllersDisabledByDefault.List())
	cmd := &cobra.Command{
		Use: "kube-controller-manager",
		Long: `The Kubernetes controller manager is a daemon that embeds
the core control loops shipped with Kubernetes. In applications of robotics and
automation, a control loop is a non-terminating loop that regulates the state of
the system. In Kubernetes, a controller is a control loop that watches the shared
state of the cluster through the apiserver and makes changes attempting to move the
current state towards the desired state. Examples of controllers that ship with
Kubernetes today are the replication controller, endpoints controller, namespace
controller, and serviceaccounts controller.`,
		Run: func(cmd *cobra.Command, args []string) {
		},
	}

	return cmd
}

// ResyncPeriod returns a function which generates a duration each time it is
// invoked; this is so that multiple controllers don't get into lock-step and all
// hammer the apiserver with list requests simultaneously.
func ResyncPeriod(s *options.CMServer) func() time.Duration {
	return func() time.Duration {
		factor := rand.Float64() + 1
		return time.Duration(float64(s.MinResyncPeriod.Nanoseconds()) * factor)
	}
}

// Run runs the CMServer. This should never exit.
func Run(s *options.CMServer) error {
	// To help debugging, immediately log version
	glog.Infof("Version: %+v", version.Get())
	if err := s.Validate(KnownControllers(), ControllersDisabledByDefault.List()); err != nil {
		return err
	}

	if c, err := configz.New("componentconfig"); err == nil {
		c.Set(s.KubeControllerManagerConfiguration)
	} else {
		glog.Errorf("unable to register configz: %s", err)
	}

	kubeClient, leaderElectionClient, kubeconfig, err := createClients(s)
	if err != nil {
		return err
	}

	go startHTTP(s)

	recorder := createRecorder(kubeClient)

	run := func(stop <-chan struct{}) {
		rootClientBuilder := controller.SimpleControllerClientBuilder{
			ClientConfig: kubeconfig,
		}
		var clientBuilder controller.ControllerClientBuilder
		if s.UseServiceAccountCredentials {
			if len(s.ServiceAccountKeyFile) == 0 {
				// It's possible another controller process is creating the tokens for us.
				// If one isn't, we'll timeout and exit when our client builder is unable to create the tokens.
				glog.Warningf("--use-service-account-credentials was specified without providing a --service-account-private-key-file")
			}
			clientBuilder = controller.SAControllerClientBuilder{
				ClientConfig:         restclient.AnonymousClientConfig(kubeconfig),
				CoreClient:           kubeClient.CoreV1(),
				AuthenticationClient: kubeClient.Authentication(),
				Namespace:            "kube-system",
			}
		} else {
			clientBuilder = rootClientBuilder
		}
		ctx, err := CreateControllerContext(s, rootClientBuilder, clientBuilder, stop)
		if err != nil {
			glog.Fatalf("error building controller context: %v", err)
		}
		saTokenControllerInitFunc := serviceAccountTokenControllerStarter{rootClientBuilder: rootClientBuilder}.startServiceAccountTokenController

		if err := StartControllers(ctx, saTokenControllerInitFunc, NewControllerInitializers()); err != nil {
			glog.Fatalf("error starting controllers: %v", err)
		}

		ctx.InformerFactory.Start(ctx.Stop)
		close(ctx.InformersStarted)
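		// Block forever; the started controllers keep running until the process exits.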
		select {}
	}

	if !s.LeaderElection.LeaderElect {
		run(nil)
		panic("unreachable")
	}

	id, err := os.Hostname()
	if err != nil {
		return err
	}

	rl, err := resourcelock.New(s.LeaderElection.ResourceLock,
		"kube-system",
		"kube-controller-manager",
		leaderElectionClient.CoreV1(),
		resourcelock.ResourceLockConfig{
			Identity:      id,
			EventRecorder: recorder,
		})
	if err != nil {
		glog.Fatalf("error creating lock: %v", err)
	}

	leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
		Lock:          rl,
		LeaseDuration: s.LeaderElection.LeaseDuration.Duration,
		RenewDeadline: s.LeaderElection.RenewDeadline.Duration,
		RetryPeriod:   s.LeaderElection.RetryPeriod.Duration,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: run,
			OnStoppedLeading: func() {
				glog.Fatalf("leaderelection lost")
			},
		},
	})
	panic("unreachable")
}
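
// startHTTP serves healthz checks, optional pprof profiling endpoints, configz, and
// Prometheus metrics on the address and port configured in the CMServer options.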
func startHTTP(s *options.CMServer) {
	mux := http.NewServeMux()
	healthz.InstallHandler(mux)
	if s.EnableProfiling {
		mux.HandleFunc("/debug/pprof/", pprof.Index)
		mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
		mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
		if s.EnableContentionProfiling {
			goruntime.SetBlockProfileRate(1)
		}
	}
	configz.InstallHandler(mux)
	mux.Handle("/metrics", prometheus.Handler())

	server := &http.Server{
		Addr:    net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))),
		Handler: mux,
	}
	glog.Fatal(server.ListenAndServe())
}
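
// createRecorder returns an EventRecorder that both logs events and sends them to
// the apiserver under the "controller-manager" component name.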
func createRecorder(kubeClient *clientset.Clientset) record.EventRecorder {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
	return eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "controller-manager"})
}
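
// createClients builds the controller-manager clientset, a dedicated leader-election
// clientset, and the underlying rest config from the --master and --kubeconfig flags.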
func createClients(s *options.CMServer) (*clientset.Clientset, *clientset.Clientset, *restclient.Config, error) {
	kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
	if err != nil {
		return nil, nil, nil, err
	}

	kubeconfig.ContentConfig.ContentType = s.ContentType
	// Override kubeconfig qps/burst settings from flags
	kubeconfig.QPS = s.KubeAPIQPS
	kubeconfig.Burst = int(s.KubeAPIBurst)
	kubeClient, err := clientset.NewForConfig(restclient.AddUserAgent(kubeconfig, "controller-manager"))
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}
	leaderElectionClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "leader-election"))
	return kubeClient, leaderElectionClient, kubeconfig, nil
}
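
// ControllerContext is the shared state handed to every controller InitFunc.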
type ControllerContext struct {
	// ClientBuilder will provide a client for this controller to use
	ClientBuilder controller.ControllerClientBuilder

	// InformerFactory gives access to informers for the controller.
	InformerFactory informers.SharedInformerFactory

	// Options provides access to init options for a given controller
	Options options.CMServer

	// AvailableResources is a map listing currently available resources
	AvailableResources map[schema.GroupVersionResource]bool

	// Cloud is the cloud provider interface for the controllers to use.
	// It must be initialized and ready to use.
	Cloud cloudprovider.Interface

	// Stop is the stop channel
	Stop <-chan struct{}

	// InformersStarted is closed after all of the controllers have been initialized and are running. After this point it is safe
	// for an individual controller to start the shared informers. Before it is closed, they should not.
	InformersStarted chan struct{}
}
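
// IsControllerEnabled reports whether the named controller is enabled by the
// configured --controllers selection.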
func (c ControllerContext) IsControllerEnabled(name string) bool {
	return IsControllerEnabled(name, ControllersDisabledByDefault, c.Options.Controllers...)
}
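
// IsControllerEnabled evaluates a --controllers style selection: an exact name
// enables a controller, "-name" disables it, and "*" enables everything that is
// not disabled by default.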
func IsControllerEnabled(name string, disabledByDefaultControllers sets.String, controllers ...string) bool {
	hasStar := false
	for _, ctrl := range controllers {
		if ctrl == name {
			return true
		}
		if ctrl == "-"+name {
			return false
		}
		if ctrl == "*" {
			hasStar = true
		}
	}
	// if we get here, there was no explicit choice
	if !hasStar {
		// nothing on by default
		return false
	}
	if disabledByDefaultControllers.Has(name) {
		return false
	}

	return true
}

// InitFunc is used to launch a particular controller. It may run additional "should I activate" checks.
// Any error returned will cause the controller process to `Fatal`.
// The bool indicates whether the controller was enabled.
type InitFunc func(ctx ControllerContext) (bool, error)
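
// KnownControllers returns the sorted names of every controller this binary can start,
// including the specially handled service account token controller.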
func KnownControllers() []string {
	ret := sets.StringKeySet(NewControllerInitializers())

	// add "special" controllers that aren't initialized normally. These controllers cannot be initialized
	// using a normal function. The only known special case is the SA token controller which *must* be started
	// first to ensure that the SA tokens for future controllers will exist. Think very carefully before adding
	// to this list.
	ret.Insert(
		saTokenControllerName,
	)

	return ret.List()
}
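
// ControllersDisabledByDefault lists the controllers that are not started unless
// explicitly enabled via --controllers.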
var ControllersDisabledByDefault = sets.NewString(
	"bootstrapsigner",
	"tokencleaner",
)
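
// saTokenControllerName is the name of the service account token controller, which is
// special-cased because it must start before any other controller.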
const (
	saTokenControllerName = "serviceaccount-token"
)

// NewControllerInitializers is a public map of named controller groups (you can start more than one in an init func)
// paired to their InitFunc. This allows for structured downstream composition and subdivision.
func NewControllerInitializers() map[string]InitFunc {
	controllers := map[string]InitFunc{}
	controllers["endpoint"] = startEndpointController
	controllers["replicationcontroller"] = startReplicationController
	controllers["podgc"] = startPodGCController
	controllers["resourcequota"] = startResourceQuotaController
	controllers["namespace"] = startNamespaceController
	controllers["serviceaccount"] = startServiceAccountController
	controllers["garbagecollector"] = startGarbageCollectorController
	controllers["daemonset"] = startDaemonSetController
	controllers["job"] = startJobController
	controllers["deployment"] = startDeploymentController
	controllers["replicaset"] = startReplicaSetController
	controllers["horizontalpodautoscaling"] = startHPAController
	controllers["disruption"] = startDisruptionController
	controllers["statefulset"] = startStatefulSetController
	controllers["cronjob"] = startCronJobController
	controllers["csrsigning"] = startCSRSigningController
	controllers["csrapproving"] = startCSRApprovingController
	controllers["csrcleaner"] = startCSRCleanerController
	controllers["ttl"] = startTTLController
	controllers["bootstrapsigner"] = startBootstrapSignerController
	controllers["tokencleaner"] = startTokenCleanerController
	controllers["service"] = startServiceController
	controllers["node"] = startNodeController
	controllers["route"] = startRouteController
	controllers["persistentvolume-binder"] = startPersistentVolumeBinderController
	controllers["attachdetach"] = startAttachDetachController
	controllers["persistentvolume-expander"] = startVolumeExpandController
	controllers["clusterrole-aggregation"] = startClusterRoleAggregrationController
	controllers["pvc-protection"] = startPVCProtectionController

	return controllers
}
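
// GetAvailableResources waits (for up to ten seconds) for the apiserver to report
// healthy and then returns the set of resources it advertises.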
// TODO: In general, any controller checking this needs to be dynamic so
// users don't have to restart their controller manager if they change the apiserver.
// Until we get there, the structure here needs to be exposed for the construction of a proper ControllerContext.
func GetAvailableResources(clientBuilder controller.ControllerClientBuilder) (map[schema.GroupVersionResource]bool, error) {
	var discoveryClient discovery.DiscoveryInterface

	var healthzContent string
	// If apiserver is not running we should wait for some time and fail only then. This is particularly
	// important when we start apiserver and controller manager at the same time.
	err := wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
		client, err := clientBuilder.Client("controller-discovery")
		if err != nil {
			glog.Errorf("Failed to get api versions from server: %v", err)
			return false, nil
		}

		healthStatus := 0
		resp := client.Discovery().RESTClient().Get().AbsPath("/healthz").Do().StatusCode(&healthStatus)
		if healthStatus != http.StatusOK {
			glog.Errorf("Server isn't healthy yet. Waiting a little while.")
			return false, nil
		}
		content, _ := resp.Raw()
		healthzContent = string(content)

		discoveryClient = client.Discovery()
		return true, nil
	})
	if err != nil {
		return nil, fmt.Errorf("failed to get api versions from server: %v: %v", healthzContent, err)
	}

	resourceMap, err := discoveryClient.ServerResources()
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("unable to get all supported resources from server: %v", err))
	}
	if len(resourceMap) == 0 {
		return nil, fmt.Errorf("unable to get any supported resources from server")
	}

	allResources := map[schema.GroupVersionResource]bool{}
	for _, apiResourceList := range resourceMap {
		version, err := schema.ParseGroupVersion(apiResourceList.GroupVersion)
		if err != nil {
			return nil, err
		}
		for _, apiResource := range apiResourceList.APIResources {
			allResources[version.WithResource(apiResource.Name)] = true
		}
	}

	return allResources, nil
}

// CreateControllerContext creates a context struct containing references to resources needed by the
// controllers such as the cloud provider and clientBuilder. rootClientBuilder is only used for
// the shared-informers client and token controller.
func CreateControllerContext(s *options.CMServer, rootClientBuilder, clientBuilder controller.ControllerClientBuilder, stop <-chan struct{}) (ControllerContext, error) {
	versionedClient := rootClientBuilder.ClientOrDie("shared-informers")
	sharedInformers := informers.NewSharedInformerFactory(versionedClient, ResyncPeriod(s)())

	availableResources, err := GetAvailableResources(rootClientBuilder)
	if err != nil {
		return ControllerContext{}, err
	}

	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		return ControllerContext{}, fmt.Errorf("cloud provider could not be initialized: %v", err)
	}

	if cloud != nil && !cloud.HasClusterID() {
		if s.AllowUntaggedCloud {
			glog.Warning("detected a cluster without a ClusterID. A ClusterID will be required in the future. Please tag your cluster to avoid any future issues")
		} else {
			return ControllerContext{}, fmt.Errorf("no ClusterID found. A ClusterID is required for the cloud provider to function properly. This check can be bypassed by setting the allow-untagged-cloud option")
		}
	}

	if informerUserCloud, ok := cloud.(cloudprovider.InformerUser); ok {
		informerUserCloud.SetInformers(sharedInformers)
	}

	ctx := ControllerContext{
		ClientBuilder:      clientBuilder,
		InformerFactory:    sharedInformers,
		Options:            *s,
		AvailableResources: availableResources,
		Cloud:              cloud,
		Stop:               stop,
		InformersStarted:   make(chan struct{}),
	}
	return ctx, nil
}
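
// StartControllers starts the service account token controller first (using the root
// client builder), initializes the cloud provider, and then starts every enabled
// controller from the given map, adding jitter between individual starts.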
func StartControllers(ctx ControllerContext, startSATokenController InitFunc, controllers map[string]InitFunc) error {
	// Always start the SA token controller first using a full-power client, since it needs to mint tokens for the rest
	// If this fails, just return here and fail since other controllers won't be able to get credentials.
	if _, err := startSATokenController(ctx); err != nil {
		return err
	}

	// Initialize the cloud provider with a reference to the clientBuilder only after token controller
	// has started in case the cloud provider uses the client builder.
	if ctx.Cloud != nil {
		ctx.Cloud.Initialize(ctx.ClientBuilder)
	}

	for controllerName, initFn := range controllers {
		if !ctx.IsControllerEnabled(controllerName) {
			glog.Warningf("%q is disabled", controllerName)
			continue
		}

		time.Sleep(wait.Jitter(ctx.Options.ControllerStartInterval.Duration, ControllerStartJitter))

		glog.V(1).Infof("Starting %q", controllerName)
		started, err := initFn(ctx)
		if err != nil {
			glog.Errorf("Error starting %q", controllerName)
			return err
		}
		if !started {
			glog.Warningf("Skipping %q", controllerName)
			continue
		}
		glog.Infof("Started %q", controllerName)
	}

	return nil
}

// serviceAccountTokenControllerStarter is special because it must run first to set up permissions for other controllers.
// It cannot use the "normal" client builder, so it tracks its own. It must also avoid being included in the "normal"
// init map so that it can always run first.
type serviceAccountTokenControllerStarter struct {
	rootClientBuilder controller.ControllerClientBuilder
}
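
// startServiceAccountTokenController runs the token controller when it is enabled and
// a service account private key is configured, and then starts the shared informers so
// that the remaining controllers can start.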
func (c serviceAccountTokenControllerStarter) startServiceAccountTokenController(ctx ControllerContext) (bool, error) {
	if !ctx.IsControllerEnabled(saTokenControllerName) {
		glog.Warningf("%q is disabled", saTokenControllerName)
		return false, nil
	}

	if len(ctx.Options.ServiceAccountKeyFile) == 0 {
		glog.Warningf("%q is disabled because there is no private key", saTokenControllerName)
		return false, nil
	}
	privateKey, err := certutil.PrivateKeyFromFile(ctx.Options.ServiceAccountKeyFile)
	if err != nil {
		return true, fmt.Errorf("error reading key for service account token controller: %v", err)
	}

	var rootCA []byte
	if ctx.Options.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(ctx.Options.RootCAFile)
		if err != nil {
			return true, fmt.Errorf("error reading root-ca-file at %s: %v", ctx.Options.RootCAFile, err)
		}
		if _, err := certutil.ParseCertsPEM(rootCA); err != nil {
			return true, fmt.Errorf("error parsing root-ca-file at %s: %v", ctx.Options.RootCAFile, err)
		}
	} else {
		rootCA = c.rootClientBuilder.ConfigOrDie("tokens-controller").CAData
	}

	controller, err := serviceaccountcontroller.NewTokensController(
		ctx.InformerFactory.Core().V1().ServiceAccounts(),
		ctx.InformerFactory.Core().V1().Secrets(),
		c.rootClientBuilder.ClientOrDie("tokens-controller"),
		serviceaccountcontroller.TokensControllerOptions{
			TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
			RootCA:         rootCA,
		},
	)
	if err != nil {
		return true, fmt.Errorf("error creating Tokens controller: %v", err)
	}
	go controller.Run(int(ctx.Options.ConcurrentSATokenSyncs), ctx.Stop)

	// start the first set of informers now so that other controllers can start
	ctx.InformerFactory.Start(ctx.Stop)

	return true, nil
}