mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 18:53:35 +00:00

Commit: vendor updates
vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/controllermanager.go (generated, vendored): 283 changed lines
@@ -24,46 +24,34 @@ import (
 	"fmt"
 	"io/ioutil"
 	"math/rand"
-	"net"
-	"net/http"
-	"net/http/pprof"
 	"os"
-	goruntime "runtime"
-	"strconv"
 	"time"
 
-	"github.com/golang/glog"
-	"github.com/spf13/cobra"
-
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
-
-	"k8s.io/apiserver/pkg/server/healthz"
-
-	"k8s.io/api/core/v1"
-	"k8s.io/client-go/discovery"
-	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
-	restclient "k8s.io/client-go/rest"
-	"k8s.io/client-go/tools/clientcmd"
-	"k8s.io/client-go/tools/record"
-	certutil "k8s.io/client-go/util/cert"
-
 	"k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
+	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/leaderelection"
 	"k8s.io/client-go/tools/leaderelection/resourcelock"
+	certutil "k8s.io/client-go/util/cert"
+	genericcontrollerconfig "k8s.io/kubernetes/cmd/controller-manager/app"
+	"k8s.io/kubernetes/cmd/kube-controller-manager/app/config"
 	"k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
-	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	"k8s.io/kubernetes/pkg/apis/componentconfig"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	"k8s.io/kubernetes/pkg/controller"
 	serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
 	"k8s.io/kubernetes/pkg/serviceaccount"
 	"k8s.io/kubernetes/pkg/util/configz"
 	utilflag "k8s.io/kubernetes/pkg/util/flag"
 	"k8s.io/kubernetes/pkg/version"
 
+	"github.com/golang/glog"
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
 	"k8s.io/kubernetes/pkg/version/verflag"
 )
 
 const (
@@ -71,10 +59,16 @@ const (
 	ControllerStartJitter = 1.0
 )
 
+type ControllerLoopMode int
+
+const (
+	IncludeCloudLoops ControllerLoopMode = iota
+	ExternalLoops
+)
+
 // NewControllerManagerCommand creates a *cobra.Command object with default parameters
 func NewControllerManagerCommand() *cobra.Command {
-	s := options.NewCMServer()
-	s.AddFlags(pflag.CommandLine, KnownControllers(), ControllersDisabledByDefault.List())
+	s := options.NewKubeControllerManagerOptions()
 	cmd := &cobra.Command{
 		Use: "kube-controller-manager",
 		Long: `The Kubernetes controller manager is a daemon that embeds
@@ -86,8 +80,22 @@ current state towards the desired state. Examples of controllers that ship with
 Kubernetes today are the replication controller, endpoints controller, namespace
 controller, and serviceaccounts controller.`,
 		Run: func(cmd *cobra.Command, args []string) {
 			verflag.PrintAndExitIfRequested()
 			utilflag.PrintFlags(cmd.Flags())
+
+			c, err := s.Config(KnownControllers(), ControllersDisabledByDefault.List())
+			if err != nil {
+				fmt.Fprintf(os.Stderr, "%v\n", err)
+				os.Exit(1)
+			}
+
+			if err := Run(c.Complete()); err != nil {
+				fmt.Fprintf(os.Stderr, "%v\n", err)
+				os.Exit(1)
+			}
 		},
 	}
+	s.AddFlags(cmd.Flags(), KnownControllers(), ControllersDisabledByDefault.List())
 
 	return cmd
 }
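Note on the hunk above: the command now parses flags into an options struct, validates it into a config via s.Config, and freezes it with Complete before handing it to Run. A toy, dependency-free sketch of that options/config/completed-config pattern (all types and values here are illustrative, not the real kube-controller-manager ones):

package main

import (
	"errors"
	"fmt"
)

// Options holds raw, user-settable flag values.
type Options struct {
	Port int
}

// Config is the validated form that Run consumes.
type Config struct {
	Port int
}

// CompletedConfig marks a Config whose defaults have been filled in.
type CompletedConfig struct{ *Config }

// Config validates the options and converts them into a Config.
func (o *Options) Config() (*Config, error) {
	if o.Port <= 0 {
		return nil, errors.New("port must be positive")
	}
	return &Config{Port: o.Port}, nil
}

// Complete freezes the config; derived defaults would be computed here.
func (c *Config) Complete() CompletedConfig {
	return CompletedConfig{c}
}

func run(c CompletedConfig) error {
	fmt.Println("serving on port", c.Port)
	return nil
}

func main() {
	o := &Options{Port: 10252}
	c, err := o.Config()
	if err != nil {
		fmt.Println(err)
		return
	}
	if err := run(c.Complete()); err != nil {
		fmt.Println(err)
	}
}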
@@ -95,63 +103,64 @@ controller, and serviceaccounts controller.`,
 
 // ResyncPeriod returns a function which generates a duration each time it is
 // invoked; this is so that multiple controllers don't get into lock-step and all
 // hammer the apiserver with list requests simultaneously.
-func ResyncPeriod(s *options.CMServer) func() time.Duration {
+func ResyncPeriod(c *config.CompletedConfig) func() time.Duration {
 	return func() time.Duration {
 		factor := rand.Float64() + 1
-		return time.Duration(float64(s.MinResyncPeriod.Nanoseconds()) * factor)
+		return time.Duration(float64(c.Generic.ComponentConfig.MinResyncPeriod.Nanoseconds()) * factor)
 	}
 }
 
-// Run runs the CMServer. This should never exit.
-func Run(s *options.CMServer) error {
+// Run runs the KubeControllerManagerOptions. This should never exit.
+func Run(c *config.CompletedConfig) error {
 	// To help debugging, immediately log version
 	glog.Infof("Version: %+v", version.Get())
-	if err := s.Validate(KnownControllers(), ControllersDisabledByDefault.List()); err != nil {
-		return err
-	}
 
-	if c, err := configz.New("componentconfig"); err == nil {
-		c.Set(s.KubeControllerManagerConfiguration)
+	if cfgz, err := configz.New("componentconfig"); err == nil {
+		cfgz.Set(c.Generic.ComponentConfig)
 	} else {
-		glog.Errorf("unable to register configz: %s", err)
+		glog.Errorf("unable to register configz: %c", err)
 	}
 
-	kubeClient, leaderElectionClient, kubeconfig, err := createClients(s)
-	if err != nil {
-		return err
+	// Start the controller manager HTTP server
+	stopCh := make(chan struct{})
+	if c.Generic.SecureServing != nil {
+		if err := genericcontrollerconfig.Serve(&c.Generic, c.Generic.SecureServing.Serve, stopCh); err != nil {
+			return err
+		}
+	}
+	if c.Generic.InsecureServing != nil {
+		if err := genericcontrollerconfig.Serve(&c.Generic, c.Generic.InsecureServing.Serve, stopCh); err != nil {
+			return err
+		}
 	}
 
-	go startHTTP(s)
-
-	recorder := createRecorder(kubeClient)
-
 	run := func(stop <-chan struct{}) {
 		rootClientBuilder := controller.SimpleControllerClientBuilder{
-			ClientConfig: kubeconfig,
+			ClientConfig: c.Generic.Kubeconfig,
 		}
 		var clientBuilder controller.ControllerClientBuilder
-		if s.UseServiceAccountCredentials {
-			if len(s.ServiceAccountKeyFile) == 0 {
-				// It's possible another controller process is creating the tokens for us.
+		if c.Generic.ComponentConfig.UseServiceAccountCredentials {
+			if len(c.Generic.ComponentConfig.ServiceAccountKeyFile) == 0 {
+				// It'c possible another controller process is creating the tokens for us.
 				// If one isn't, we'll timeout and exit when our client builder is unable to create the tokens.
 				glog.Warningf("--use-service-account-credentials was specified without providing a --service-account-private-key-file")
 			}
 			clientBuilder = controller.SAControllerClientBuilder{
-				ClientConfig:         restclient.AnonymousClientConfig(kubeconfig),
-				CoreClient:           kubeClient.CoreV1(),
-				AuthenticationClient: kubeClient.Authentication(),
+				ClientConfig:         restclient.AnonymousClientConfig(c.Generic.Kubeconfig),
+				CoreClient:           c.Generic.Client.CoreV1(),
+				AuthenticationClient: c.Generic.Client.AuthenticationV1(),
 				Namespace:            "kube-system",
 			}
 		} else {
 			clientBuilder = rootClientBuilder
 		}
-		ctx, err := CreateControllerContext(s, rootClientBuilder, clientBuilder, stop)
+		ctx, err := CreateControllerContext(c, rootClientBuilder, clientBuilder, stop)
 		if err != nil {
 			glog.Fatalf("error building controller context: %v", err)
 		}
 		saTokenControllerInitFunc := serviceAccountTokenControllerStarter{rootClientBuilder: rootClientBuilder}.startServiceAccountTokenController
 
-		if err := StartControllers(ctx, saTokenControllerInitFunc, NewControllerInitializers()); err != nil {
+		if err := StartControllers(ctx, saTokenControllerInitFunc, NewControllerInitializers(ctx.LoopMode)); err != nil {
			glog.Fatalf("error starting controllers: %v", err)
 		}
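Note on ResyncPeriod in the hunk above: each call returns the configured minimum period scaled by a random factor in [1.0, 2.0), so controllers relist out of phase instead of hammering the apiserver together. A standard-library-only sketch of the same idea:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitteredResync returns a generator of resync periods scaled by a random
// factor in [1.0, 2.0), so multiple consumers drift out of lock-step.
func jitteredResync(min time.Duration) func() time.Duration {
	return func() time.Duration {
		factor := rand.Float64() + 1 // uniform in [1.0, 2.0)
		return time.Duration(float64(min.Nanoseconds()) * factor)
	}
}

func main() {
	next := jitteredResync(12 * time.Hour)
	for i := 0; i < 3; i++ {
		fmt.Println(next()) // three different durations in [12h, 24h)
	}
}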
@@ -161,8 +170,8 @@ func Run(s *options.CMServer) error {
 		select {}
 	}
 
-	if !s.LeaderElection.LeaderElect {
-		run(nil)
+	if !c.Generic.ComponentConfig.LeaderElection.LeaderElect {
+		run(wait.NeverStop)
 		panic("unreachable")
 	}
@@ -171,13 +180,15 @@ func Run(s *options.CMServer) error {
 		return err
 	}
 
-	rl, err := resourcelock.New(s.LeaderElection.ResourceLock,
+	// add a uniquifier so that two processes on the same host don't accidentally both become active
+	id = id + "_" + string(uuid.NewUUID())
+	rl, err := resourcelock.New(c.Generic.ComponentConfig.LeaderElection.ResourceLock,
 		"kube-system",
 		"kube-controller-manager",
-		leaderElectionClient.CoreV1(),
+		c.Generic.LeaderElectionClient.CoreV1(),
 		resourcelock.ResourceLockConfig{
 			Identity:      id,
-			EventRecorder: recorder,
+			EventRecorder: c.Generic.EventRecorder,
 		})
 	if err != nil {
 		glog.Fatalf("error creating lock: %v", err)
@@ -185,9 +196,9 @@ func Run(s *options.CMServer) error {
 
 	leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
 		Lock:          rl,
-		LeaseDuration: s.LeaderElection.LeaseDuration.Duration,
-		RenewDeadline: s.LeaderElection.RenewDeadline.Duration,
-		RetryPeriod:   s.LeaderElection.RetryPeriod.Duration,
+		LeaseDuration: c.Generic.ComponentConfig.LeaderElection.LeaseDuration.Duration,
+		RenewDeadline: c.Generic.ComponentConfig.LeaderElection.RenewDeadline.Duration,
+		RetryPeriod:   c.Generic.ComponentConfig.LeaderElection.RetryPeriod.Duration,
 		Callbacks: leaderelection.LeaderCallbacks{
 			OnStartedLeading: run,
 			OnStoppedLeading: func() {
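Note on the two hunks above: the leader-election timings now come from the completed component config, and the lock identity gains a random suffix so two manager processes on the same host cannot collide. A standard-library sketch of that identity scheme (crypto/rand stands in for the apimachinery uuid package used in the diff):

package main

import (
	"crypto/rand"
	"fmt"
	"os"
)

// uniqueIdentity mirrors the pattern above: hostname alone collides when two
// candidates run on one host, so a random suffix is appended before the name
// is used as a lock identity.
func uniqueIdentity() (string, error) {
	host, err := os.Hostname()
	if err != nil {
		return "", err
	}
	suffix := make([]byte, 8)
	if _, err := rand.Read(suffix); err != nil {
		return "", err
	}
	return fmt.Sprintf("%s_%x", host, suffix), nil
}

func main() {
	id, err := uniqueIdentity()
	if err != nil {
		panic(err)
	}
	fmt.Println(id) // e.g. "node-1_9f86d081c3a4b2d0"
}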
@@ -198,53 +209,6 @@ func Run(s *options.CMServer) error {
 	panic("unreachable")
 }
 
-func startHTTP(s *options.CMServer) {
-	mux := http.NewServeMux()
-	healthz.InstallHandler(mux)
-	if s.EnableProfiling {
-		mux.HandleFunc("/debug/pprof/", pprof.Index)
-		mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
-		mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
-		mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
-		if s.EnableContentionProfiling {
-			goruntime.SetBlockProfileRate(1)
-		}
-	}
-	configz.InstallHandler(mux)
-	mux.Handle("/metrics", prometheus.Handler())
-
-	server := &http.Server{
-		Addr:    net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))),
-		Handler: mux,
-	}
-	glog.Fatal(server.ListenAndServe())
-}
-
-func createRecorder(kubeClient *clientset.Clientset) record.EventRecorder {
-	eventBroadcaster := record.NewBroadcaster()
-	eventBroadcaster.StartLogging(glog.Infof)
-	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
-	return eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "controller-manager"})
-}
-
-func createClients(s *options.CMServer) (*clientset.Clientset, *clientset.Clientset, *restclient.Config, error) {
-	kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
-	if err != nil {
-		return nil, nil, nil, err
-	}
-
-	kubeconfig.ContentConfig.ContentType = s.ContentType
-	// Override kubeconfig qps/burst settings from flags
-	kubeconfig.QPS = s.KubeAPIQPS
-	kubeconfig.Burst = int(s.KubeAPIBurst)
-	kubeClient, err := clientset.NewForConfig(restclient.AddUserAgent(kubeconfig, "controller-manager"))
-	if err != nil {
-		glog.Fatalf("Invalid API configuration: %v", err)
-	}
-	leaderElectionClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "leader-election"))
-	return kubeClient, leaderElectionClient, kubeconfig, nil
-}
-
 type ControllerContext struct {
 	// ClientBuilder will provide a client for this controller to use
 	ClientBuilder controller.ControllerClientBuilder
@@ -253,7 +217,7 @@ type ControllerContext struct {
 	InformerFactory informers.SharedInformerFactory
 
 	// Options provides access to init options for a given controller
-	Options options.CMServer
+	ComponentConfig componentconfig.KubeControllerManagerConfiguration
 
 	// AvailableResources is a map listing currently available resources
 	AvailableResources map[schema.GroupVersionResource]bool
@@ -262,16 +226,26 @@ type ControllerContext struct {
 	// It must be initialized and ready to use.
 	Cloud cloudprovider.Interface
 
+	// Control for which control loops to be run
+	// IncludeCloudLoops is for a kube-controller-manager running all loops
+	// ExternalLoops is for a kube-controller-manager running with a cloud-controller-manager
+	LoopMode ControllerLoopMode
+
 	// Stop is the stop channel
 	Stop <-chan struct{}
 
 	// InformersStarted is closed after all of the controllers have been initialized and are running. After this point it is safe,
 	// for an individual controller to start the shared informers. Before it is closed, they should not.
 	InformersStarted chan struct{}
+
+	// ResyncPeriod generates a duration each time it is invoked; this is so that
+	// multiple controllers don't get into lock-step and all hammer the apiserver
+	// with list requests simultaneously.
+	ResyncPeriod func() time.Duration
 }
 
 func (c ControllerContext) IsControllerEnabled(name string) bool {
-	return IsControllerEnabled(name, ControllersDisabledByDefault, c.Options.Controllers...)
+	return IsControllerEnabled(name, ControllersDisabledByDefault, c.ComponentConfig.Controllers...)
 }
 
 func IsControllerEnabled(name string, disabledByDefaultControllers sets.String, controllers ...string) bool {
@@ -305,7 +279,7 @@ func IsControllerEnabled(name string, disabledByDefaultControllers sets.String,
 type InitFunc func(ctx ControllerContext) (bool, error)
 
 func KnownControllers() []string {
-	ret := sets.StringKeySet(NewControllerInitializers())
+	ret := sets.StringKeySet(NewControllerInitializers(IncludeCloudLoops))
 
 	// add "special" controllers that aren't initialized normally. These controllers cannot be initialized
 	// using a normal function. The only known special case is the SA token controller which *must* be started
@@ -329,7 +303,7 @@ const (
 
 // NewControllerInitializers is a public map of named controller groups (you can start more than one in an init func)
 // paired to their InitFunc. This allows for structured downstream composition and subdivision.
-func NewControllerInitializers() map[string]InitFunc {
+func NewControllerInitializers(loopMode ControllerLoopMode) map[string]InitFunc {
 	controllers := map[string]InitFunc{}
 	controllers["endpoint"] = startEndpointController
 	controllers["replicationcontroller"] = startReplicationController
@@ -352,14 +326,20 @@ func NewControllerInitializers() map[string]InitFunc {
 	controllers["ttl"] = startTTLController
 	controllers["bootstrapsigner"] = startBootstrapSignerController
 	controllers["tokencleaner"] = startTokenCleanerController
-	controllers["service"] = startServiceController
-	controllers["node"] = startNodeController
-	controllers["route"] = startRouteController
+	if loopMode == IncludeCloudLoops {
+		controllers["service"] = startServiceController
+		controllers["nodeipam"] = startNodeIpamController
+		controllers["route"] = startRouteController
+		// TODO: volume controller into the IncludeCloudLoops only set.
+		// TODO: Separate cluster in cloud check from node lifecycle controller.
+	}
+	controllers["nodelifecycle"] = startNodeLifecycleController
 	controllers["persistentvolume-binder"] = startPersistentVolumeBinderController
 	controllers["attachdetach"] = startAttachDetachController
 	controllers["persistentvolume-expander"] = startVolumeExpandController
 	controllers["clusterrole-aggregation"] = startClusterRoleAggregrationController
 	controllers["pvc-protection"] = startPVCProtectionController
+	controllers["pv-protection"] = startPVProtectionController
 
 	return controllers
 }
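Note on the hunk above: cloud-coupled loops are now registered only when this manager owns the cloud integration. A reduced sketch of that conditional-registry pattern (controller names and init funcs are illustrative, not the real ones):

package main

import "fmt"

type LoopMode int

const (
	IncludeCloudLoops LoopMode = iota
	ExternalLoops
)

type InitFunc func() error

// newInitializers returns the set of control loops to run; cloud-coupled
// loops are added only when this process runs the cloud integration itself.
func newInitializers(mode LoopMode) map[string]InitFunc {
	loops := map[string]InitFunc{
		"endpoint": func() error { return nil }, // always runs
	}
	if mode == IncludeCloudLoops {
		loops["route"] = func() error { return nil } // cloud-only loop
	}
	return loops
}

func main() {
	for name := range newInitializers(ExternalLoops) {
		fmt.Println(name) // "route" is absent when cloud loops run externally
	}
}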
@@ -368,34 +348,8 @@ func NewControllerInitializers() map[string]InitFunc {
 // users don't have to restart their controller manager if they change the apiserver.
 // Until we get there, the structure here needs to be exposed for the construction of a proper ControllerContext.
 func GetAvailableResources(clientBuilder controller.ControllerClientBuilder) (map[schema.GroupVersionResource]bool, error) {
-	var discoveryClient discovery.DiscoveryInterface
-
-	var healthzContent string
-	// If apiserver is not running we should wait for some time and fail only then. This is particularly
-	// important when we start apiserver and controller manager at the same time.
-	err := wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
-		client, err := clientBuilder.Client("controller-discovery")
-		if err != nil {
-			glog.Errorf("Failed to get api versions from server: %v", err)
-			return false, nil
-		}
-
-		healthStatus := 0
-		resp := client.Discovery().RESTClient().Get().AbsPath("/healthz").Do().StatusCode(&healthStatus)
-		if healthStatus != http.StatusOK {
-			glog.Errorf("Server isn't healthy yet. Waiting a little while.")
-			return false, nil
-		}
-		content, _ := resp.Raw()
-		healthzContent = string(content)
-
-		discoveryClient = client.Discovery()
-		return true, nil
-	})
-	if err != nil {
-		return nil, fmt.Errorf("failed to get api versions from server: %v: %v", healthzContent, err)
-	}
-
+	client := clientBuilder.ClientOrDie("controller-discovery")
+	discoveryClient := client.Discovery()
 	resourceMap, err := discoveryClient.ServerResources()
 	if err != nil {
 		utilruntime.HandleError(fmt.Errorf("unable to get all supported resources from server: %v", err))
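Note on the hunk above: the inline /healthz poll is replaced by the shared genericcontrollerconfig.WaitForAPIServer helper (used in the next hunk). A standard-library-only sketch of such a wait loop, with URL and timings as placeholder values:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// waitForHealthz polls a health endpoint until it returns 200 OK or the
// timeout elapses, mirroring the "wait, then fail" behavior above.
func waitForHealthz(url string, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		resp, err := http.Get(url)
		if err == nil {
			code := resp.StatusCode
			resp.Body.Close()
			if code == http.StatusOK {
				return nil // server is healthy
			}
			err = fmt.Errorf("healthz status %d", code)
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("apiserver not healthy after %v: %v", timeout, err)
		}
		time.Sleep(interval)
	}
}

func main() {
	if err := waitForHealthz("http://127.0.0.1:8080/healthz", time.Second, 10*time.Second); err != nil {
		fmt.Println(err)
	}
}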
@@ -421,40 +375,37 @@ func GetAvailableResources(clientBuilder controller.ControllerClientBuilder) (map[schema.GroupVersionResource]bool, error) {
 // CreateControllerContext creates a context struct containing references to resources needed by the
 // controllers such as the cloud provider and clientBuilder. rootClientBuilder is only used for
 // the shared-informers client and token controller.
-func CreateControllerContext(s *options.CMServer, rootClientBuilder, clientBuilder controller.ControllerClientBuilder, stop <-chan struct{}) (ControllerContext, error) {
+func CreateControllerContext(s *config.CompletedConfig, rootClientBuilder, clientBuilder controller.ControllerClientBuilder, stop <-chan struct{}) (ControllerContext, error) {
 	versionedClient := rootClientBuilder.ClientOrDie("shared-informers")
 	sharedInformers := informers.NewSharedInformerFactory(versionedClient, ResyncPeriod(s)())
 
+	// If apiserver is not running we should wait for some time and fail only then. This is particularly
+	// important when we start apiserver and controller manager at the same time.
+	if err := genericcontrollerconfig.WaitForAPIServer(versionedClient, 10*time.Second); err != nil {
+		return ControllerContext{}, fmt.Errorf("failed to wait for apiserver being healthy: %v", err)
+	}
+
 	availableResources, err := GetAvailableResources(rootClientBuilder)
 	if err != nil {
 		return ControllerContext{}, err
 	}
 
-	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
+	cloud, loopMode, err := createCloudProvider(s.Generic.ComponentConfig.CloudProvider, s.Generic.ComponentConfig.ExternalCloudVolumePlugin,
+		s.Generic.ComponentConfig.CloudConfigFile, s.Generic.ComponentConfig.AllowUntaggedCloud, sharedInformers)
 	if err != nil {
-		return ControllerContext{}, fmt.Errorf("cloud provider could not be initialized: %v", err)
-	}
-
-	if cloud != nil && cloud.HasClusterID() == false {
-		if s.AllowUntaggedCloud == true {
-			glog.Warning("detected a cluster without a ClusterID. A ClusterID will be required in the future. Please tag your cluster to avoid any future issues")
-		} else {
-			return ControllerContext{}, fmt.Errorf("no ClusterID Found. A ClusterID is required for the cloud provider to function properly. This check can be bypassed by setting the allow-untagged-cloud option")
-		}
-	}
-
-	if informerUserCloud, ok := cloud.(cloudprovider.InformerUser); ok {
-		informerUserCloud.SetInformers(sharedInformers)
+		return ControllerContext{}, err
 	}
 
 	ctx := ControllerContext{
 		ClientBuilder:      clientBuilder,
 		InformerFactory:    sharedInformers,
-		Options:            *s,
+		ComponentConfig:    s.Generic.ComponentConfig,
 		AvailableResources: availableResources,
 		Cloud:              cloud,
+		LoopMode:           loopMode,
 		Stop:               stop,
 		InformersStarted:   make(chan struct{}),
+		ResyncPeriod:       ResyncPeriod(s),
 	}
 	return ctx, nil
 }
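Note on the hunk above: the createCloudProvider helper is added elsewhere in this commit and is not shown here; it returns the provider together with the loop mode. A speculative reduction of just the mode decision, assuming the usual convention that --cloud-provider=external hands the cloud loops to a separate cloud-controller-manager:

package main

import "fmt"

type LoopMode int

const (
	IncludeCloudLoops LoopMode = iota
	ExternalLoops
)

// pickLoopMode sketches the choice createCloudProvider feeds into LoopMode:
// "external" defers cloud loops to a cloud-controller-manager. The real helper
// also initializes the provider itself; this shows only the mode selection.
func pickLoopMode(cloudProvider string) LoopMode {
	if cloudProvider == "external" {
		return ExternalLoops
	}
	return IncludeCloudLoops
}

func main() {
	fmt.Println(pickLoopMode("external") == ExternalLoops) // true
}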
@@ -478,7 +429,7 @@ func StartControllers(ctx ControllerContext, startSATokenController InitFunc, co
 			continue
 		}
 
-		time.Sleep(wait.Jitter(ctx.Options.ControllerStartInterval.Duration, ControllerStartJitter))
+		time.Sleep(wait.Jitter(ctx.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter))
 
 		glog.V(1).Infof("Starting %q", controllerName)
 		started, err := initFn(ctx)
@@ -509,23 +460,23 @@ func (c serviceAccountTokenControllerStarter) startServiceAccountTokenController
 		return false, nil
 	}
 
-	if len(ctx.Options.ServiceAccountKeyFile) == 0 {
+	if len(ctx.ComponentConfig.ServiceAccountKeyFile) == 0 {
 		glog.Warningf("%q is disabled because there is no private key", saTokenControllerName)
 		return false, nil
 	}
-	privateKey, err := certutil.PrivateKeyFromFile(ctx.Options.ServiceAccountKeyFile)
+	privateKey, err := certutil.PrivateKeyFromFile(ctx.ComponentConfig.ServiceAccountKeyFile)
 	if err != nil {
 		return true, fmt.Errorf("error reading key for service account token controller: %v", err)
 	}
 
 	var rootCA []byte
-	if ctx.Options.RootCAFile != "" {
-		rootCA, err = ioutil.ReadFile(ctx.Options.RootCAFile)
+	if ctx.ComponentConfig.RootCAFile != "" {
+		rootCA, err = ioutil.ReadFile(ctx.ComponentConfig.RootCAFile)
 		if err != nil {
-			return true, fmt.Errorf("error reading root-ca-file at %s: %v", ctx.Options.RootCAFile, err)
+			return true, fmt.Errorf("error reading root-ca-file at %s: %v", ctx.ComponentConfig.RootCAFile, err)
 		}
 		if _, err := certutil.ParseCertsPEM(rootCA); err != nil {
-			return true, fmt.Errorf("error parsing root-ca-file at %s: %v", ctx.Options.RootCAFile, err)
+			return true, fmt.Errorf("error parsing root-ca-file at %s: %v", ctx.ComponentConfig.RootCAFile, err)
 		}
 	} else {
 		rootCA = c.rootClientBuilder.ConfigOrDie("tokens-controller").CAData
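Note on the hunk above: the token controller reads the root CA file and rejects it unless it parses as PEM certificates. A standard-library sketch of that validation step (the path is a placeholder; certutil.ParseCertsPEM performs a similar check):

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
)

// readRootCA loads a CA bundle and requires at least one parseable
// CERTIFICATE block before returning the raw bytes.
func readRootCA(path string) ([]byte, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("error reading root-ca-file at %s: %v", path, err)
	}
	rest := data
	found := false
	for {
		var block *pem.Block
		block, rest = pem.Decode(rest)
		if block == nil {
			break
		}
		if block.Type == "CERTIFICATE" {
			if _, err := x509.ParseCertificate(block.Bytes); err != nil {
				return nil, fmt.Errorf("error parsing root-ca-file at %s: %v", path, err)
			}
			found = true
		}
	}
	if !found {
		return nil, fmt.Errorf("no certificates found in %s", path)
	}
	return data, nil
}

func main() {
	if _, err := readRootCA("/etc/kubernetes/ca.crt"); err != nil {
		fmt.Println(err)
	}
}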
@@ -536,14 +487,14 @@ func (c serviceAccountTokenControllerStarter) startServiceAccountTokenController
 		ctx.InformerFactory.Core().V1().Secrets(),
 		c.rootClientBuilder.ClientOrDie("tokens-controller"),
 		serviceaccountcontroller.TokensControllerOptions{
-			TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
+			TokenGenerator: serviceaccount.JWTTokenGenerator(serviceaccount.LegacyIssuer, privateKey),
 			RootCA:         rootCA,
 		},
 	)
 	if err != nil {
 		return true, fmt.Errorf("error creating Tokens controller: %v", err)
 	}
-	go controller.Run(int(ctx.Options.ConcurrentSATokenSyncs), ctx.Stop)
+	go controller.Run(int(ctx.ComponentConfig.ConcurrentSATokenSyncs), ctx.Stop)
 
 	// start the first set of informers now so that other controllers can start
 	ctx.InformerFactory.Start(ctx.Stop)