vendor updates

Author: Serguei Bezverkhi
Date:   2018-03-06 17:33:18 -05:00
Parent: 4b3ebc171b
Commit: e9033989a0
5854 changed files with 248382 additions and 119809 deletions


@@ -17,41 +17,35 @@ limitations under the License.
 package app

 import (
     "fmt"
     "math/rand"
     "net"
-    "net/http"
-    "net/http/pprof"
     "os"
-    goruntime "runtime"
-    "strconv"
     "strings"
     "time"

-    "k8s.io/api/core/v1"
+    "github.com/golang/glog"
+    "github.com/spf13/cobra"
+
     "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/apimachinery/pkg/util/wait"
-    "k8s.io/apiserver/pkg/server/healthz"
     "k8s.io/client-go/informers"
-    clientset "k8s.io/client-go/kubernetes"
-    v1core "k8s.io/client-go/kubernetes/typed/core/v1"
+    "k8s.io/client-go/kubernetes"
     restclient "k8s.io/client-go/rest"
-    "k8s.io/client-go/tools/clientcmd"
     "k8s.io/client-go/tools/leaderelection"
     "k8s.io/client-go/tools/leaderelection/resourcelock"
     "k8s.io/client-go/tools/record"
+    cloudcontrollerconfig "k8s.io/kubernetes/cmd/cloud-controller-manager/app/config"
     "k8s.io/kubernetes/cmd/cloud-controller-manager/app/options"
-    "k8s.io/kubernetes/pkg/api/legacyscheme"
+    genericcontrollermanager "k8s.io/kubernetes/cmd/controller-manager/app"
     "k8s.io/kubernetes/pkg/cloudprovider"
     "k8s.io/kubernetes/pkg/controller"
     cloudcontrollers "k8s.io/kubernetes/pkg/controller/cloud"
     routecontroller "k8s.io/kubernetes/pkg/controller/route"
     servicecontroller "k8s.io/kubernetes/pkg/controller/service"
     "k8s.io/kubernetes/pkg/util/configz"
-    "github.com/golang/glog"
-    "github.com/prometheus/client_golang/prometheus"
-    "github.com/spf13/cobra"
-    "github.com/spf13/pflag"
     utilflag "k8s.io/kubernetes/pkg/util/flag"
     "k8s.io/kubernetes/pkg/version/verflag"
 )

 const (
@@ -61,94 +55,101 @@ const (
 // NewCloudControllerManagerCommand creates a *cobra.Command object with default parameters
 func NewCloudControllerManagerCommand() *cobra.Command {
-    s := options.NewCloudControllerManagerServer()
-    s.AddFlags(pflag.CommandLine)
+    s := options.NewCloudControllerManagerOptions()
     cmd := &cobra.Command{
         Use: "cloud-controller-manager",
         Long: `The Cloud controller manager is a daemon that embeds
 the cloud specific control loops shipped with Kubernetes.`,
         Run: func(cmd *cobra.Command, args []string) {
+            verflag.PrintAndExitIfRequested()
+            utilflag.PrintFlags(cmd.Flags())
+
+            c, err := s.Config()
+            if err != nil {
+                fmt.Fprintf(os.Stderr, "%v\n", err)
+                os.Exit(1)
+            }
+
+            if err := Run(c.Complete()); err != nil {
+                fmt.Fprintf(os.Stderr, "%v\n", err)
+                os.Exit(1)
+            }
         },
     }
+    s.AddFlags(cmd.Flags())

     return cmd
 }
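
For context, the returned *cobra.Command is executed from the binary's main package. A minimal sketch of that wiring (illustrative only; the real main.go also sets up flag normalization and logging):

package main

import (
    "fmt"
    "os"

    "k8s.io/kubernetes/cmd/cloud-controller-manager/app"
)

func main() {
    command := app.NewCloudControllerManagerCommand()
    // Execute parses the flags registered via s.AddFlags and invokes
    // the Run closure defined above.
    if err := command.Execute(); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}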
 // resyncPeriod computes the time interval a shared informer waits before resyncing with the api server
-func resyncPeriod(s *options.CloudControllerManagerServer) func() time.Duration {
+func resyncPeriod(c *cloudcontrollerconfig.CompletedConfig) func() time.Duration {
     return func() time.Duration {
         factor := rand.Float64() + 1
-        return time.Duration(float64(s.MinResyncPeriod.Nanoseconds()) * factor)
+        return time.Duration(float64(c.Generic.ComponentConfig.MinResyncPeriod.Nanoseconds()) * factor)
     }
 }
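
The factor rand.Float64() + 1 is uniform in [1.0, 2.0), so each informer resyncs somewhere between MinResyncPeriod and twice that value, keeping replicas from hitting the apiserver in lockstep. The same idea as a standalone helper (a sketch; the name is illustrative):

// jitteredResync returns a random duration in [min, 2*min).
// Requires "math/rand" and "time", both already imported in this file.
func jitteredResync(min time.Duration) time.Duration {
    factor := rand.Float64() + 1 // uniform in [1.0, 2.0)
    return time.Duration(float64(min.Nanoseconds()) * factor)
}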
 // Run runs the ExternalCMServer. This should never exit.
-func Run(s *options.CloudControllerManagerServer) error {
-    if s.CloudProvider == "" {
-        glog.Fatalf("--cloud-provider cannot be empty")
-    }
-    cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
+func Run(c *cloudcontrollerconfig.CompletedConfig) error {
+    cloud, err := cloudprovider.InitCloudProvider(c.Generic.ComponentConfig.CloudProvider, c.Generic.ComponentConfig.CloudConfigFile)
     if err != nil {
         glog.Fatalf("Cloud provider could not be initialized: %v", err)
     }
     if cloud == nil {
         glog.Fatalf("cloud provider is nil")
     }

     if cloud.HasClusterID() == false {
-        if s.AllowUntaggedCloud == true {
+        if c.Generic.ComponentConfig.AllowUntaggedCloud == true {
             glog.Warning("detected a cluster without a ClusterID. A ClusterID will be required in the future. Please tag your cluster to avoid any future issues")
         } else {
             glog.Fatalf("no ClusterID found. A ClusterID is required for the cloud provider to function properly. This check can be bypassed by setting the allow-untagged-cloud option")
         }
     }
-    if c, err := configz.New("componentconfig"); err == nil {
-        c.Set(s.KubeControllerManagerConfiguration)
+    // setup /configz endpoint
+    if cz, err := configz.New("componentconfig"); err == nil {
+        cz.Set(c.Generic.ComponentConfig)
     } else {
-        glog.Errorf("unable to register configz: %s", err)
-    }
-    kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
-    if err != nil {
-        return err
+        glog.Errorf("unable to register configz: %v", err)
     }
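
configz keeps a process-wide registry of named configuration objects, and configz.InstallHandler (still visible in the old startHTTP below) serves that registry as JSON at /configz. A small sketch of the pattern, with an invented config struct:

// Register a named config object and expose it over HTTP.
// GET /configz would then return {"mycomponent":{"Verbosity":2}}.
mux := http.NewServeMux()
if cz, err := configz.New("mycomponent"); err == nil {
    cz.Set(struct{ Verbosity int }{Verbosity: 2})
    configz.InstallHandler(mux)
} else {
    glog.Errorf("unable to register configz: %v", err)
}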
-    // Set the ContentType of the requests from kube client
-    kubeconfig.ContentConfig.ContentType = s.ContentType
-    // Override kubeconfig qps/burst settings from flags
-    kubeconfig.QPS = s.KubeAPIQPS
-    kubeconfig.Burst = int(s.KubeAPIBurst)
-    kubeClient, err := clientset.NewForConfig(restclient.AddUserAgent(kubeconfig, "cloud-controller-manager"))
-    if err != nil {
-        glog.Fatalf("Invalid API configuration: %v", err)
-    }
-    leaderElectionClient := kubernetes.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "leader-election"))
-
-    // Start the external controller manager server
-    go startHTTP(s)
-
-    recorder := createRecorder(kubeClient)
+    // Start the controller manager HTTP server
+    stopCh := make(chan struct{})
+    if c.Generic.SecureServing != nil {
+        if err := genericcontrollermanager.Serve(&c.Generic, c.Generic.SecureServing.Serve, stopCh); err != nil {
+            return err
+        }
+    }
+    if c.Generic.InsecureServing != nil {
+        if err := genericcontrollermanager.Serve(&c.Generic, c.Generic.InsecureServing.Serve, stopCh); err != nil {
+            return err
+        }
+    }
     run := func(stop <-chan struct{}) {
         rootClientBuilder := controller.SimpleControllerClientBuilder{
-            ClientConfig: kubeconfig,
+            ClientConfig: c.Generic.Kubeconfig,
         }
         var clientBuilder controller.ControllerClientBuilder
-        if s.UseServiceAccountCredentials {
+        if c.Generic.ComponentConfig.UseServiceAccountCredentials {
             clientBuilder = controller.SAControllerClientBuilder{
-                ClientConfig:         restclient.AnonymousClientConfig(kubeconfig),
-                CoreClient:           kubeClient.CoreV1(),
-                AuthenticationClient: kubeClient.Authentication(),
+                ClientConfig:         restclient.AnonymousClientConfig(c.Generic.Kubeconfig),
+                CoreClient:           c.Generic.Client.CoreV1(),
+                AuthenticationClient: c.Generic.Client.AuthenticationV1(),
                 Namespace:            "kube-system",
             }
         } else {
             clientBuilder = rootClientBuilder
         }
-        err := StartControllers(s, kubeconfig, clientBuilder, stop, recorder, cloud)
-        glog.Fatalf("error running controllers: %v", err)
-        panic("unreachable")
+        if err := startControllers(c, c.Generic.Kubeconfig, rootClientBuilder, clientBuilder, stop, c.Generic.EventRecorder, cloud); err != nil {
+            glog.Fatalf("error running controllers: %v", err)
+        }
     }
-    if !s.LeaderElection.LeaderElect {
+    if !c.Generic.ComponentConfig.LeaderElection.LeaderElect {
         run(nil)
         panic("unreachable")
     }
@@ -158,15 +159,17 @@ func Run(s *options.CloudControllerManagerServer) error {
     if err != nil {
         return err
     }

+    // add a uniquifier so that two processes on the same host don't accidentally both become active
+    id = id + "_" + string(uuid.NewUUID())
+
     // Lock required for leader election
-    rl, err := resourcelock.New(s.LeaderElection.ResourceLock,
+    rl, err := resourcelock.New(c.Generic.ComponentConfig.LeaderElection.ResourceLock,
         "kube-system",
         "cloud-controller-manager",
-        leaderElectionClient.CoreV1(),
+        c.Generic.LeaderElectionClient.CoreV1(),
         resourcelock.ResourceLockConfig{
-            Identity:      id + "-external-cloud-controller",
-            EventRecorder: recorder,
+            Identity:      id,
+            EventRecorder: c.Generic.EventRecorder,
         })
     if err != nil {
         glog.Fatalf("error creating lock: %v", err)
@@ -175,9 +178,9 @@ func Run(s *options.CloudControllerManagerServer) error {
     // Try and become the leader and start cloud controller manager loops
     leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
         Lock: rl,
-        LeaseDuration: s.LeaderElection.LeaseDuration.Duration,
-        RenewDeadline: s.LeaderElection.RenewDeadline.Duration,
-        RetryPeriod:   s.LeaderElection.RetryPeriod.Duration,
+        LeaseDuration: c.Generic.ComponentConfig.LeaderElection.LeaseDuration.Duration,
+        RenewDeadline: c.Generic.ComponentConfig.LeaderElection.RenewDeadline.Duration,
+        RetryPeriod:   c.Generic.ComponentConfig.LeaderElection.RetryPeriod.Duration,
         Callbacks: leaderelection.LeaderCallbacks{
             OnStartedLeading: run,
             OnStoppedLeading: func() {
@@ -188,36 +191,36 @@ func Run(s *options.CloudControllerManagerServer) error {
     panic("unreachable")
 }
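
The leader election above follows the standard client-go pattern: acquire a named resource lock, then block in RunOrDie, which calls OnStartedLeading once leadership is won and OnStoppedLeading if it is lost. Condensed into one sketch (identifiers taken from the surrounding code; the durations shown are the commonly used defaults, not values this file pins down):

// Sketch of the leader-election wiring used above.
rl, err := resourcelock.New(
    resourcelock.EndpointsResourceLock, // one of the supported lock types
    "kube-system", "cloud-controller-manager",
    leaderElectionClient.CoreV1(),
    resourcelock.ResourceLockConfig{Identity: id, EventRecorder: recorder},
)
if err != nil {
    glog.Fatalf("error creating lock: %v", err)
}
leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
    Lock:          rl,
    LeaseDuration: 15 * time.Second, // non-leaders wait this long before trying to acquire
    RenewDeadline: 10 * time.Second, // leader must renew within this window
    RetryPeriod:   2 * time.Second,
    Callbacks: leaderelection.LeaderCallbacks{
        OnStartedLeading: run,
        OnStoppedLeading: func() { glog.Fatalf("leaderelection lost") },
    },
})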
-// StartControllers starts the cloud specific controller loops.
-func StartControllers(s *options.CloudControllerManagerServer, kubeconfig *restclient.Config, clientBuilder controller.ControllerClientBuilder, stop <-chan struct{}, recorder record.EventRecorder, cloud cloudprovider.Interface) error {
+// startControllers starts the cloud specific controller loops.
+func startControllers(c *cloudcontrollerconfig.CompletedConfig, kubeconfig *restclient.Config, rootClientBuilder, clientBuilder controller.ControllerClientBuilder, stop <-chan struct{}, recorder record.EventRecorder, cloud cloudprovider.Interface) error {
     // Function to build the kube client object
-    client := func(serviceAccountName string) clientset.Interface {
+    client := func(serviceAccountName string) kubernetes.Interface {
         return clientBuilder.ClientOrDie(serviceAccountName)
     }
     if cloud != nil {
         // Initialize the cloud provider with a reference to the clientBuilder
         cloud.Initialize(clientBuilder)
     }

-    versionedClient := client("shared-informers")
-    sharedInformers := informers.NewSharedInformerFactory(versionedClient, resyncPeriod(s)())
+    // TODO: move this setup into Config
+    versionedClient := rootClientBuilder.ClientOrDie("shared-informers")
+    sharedInformers := informers.NewSharedInformerFactory(versionedClient, resyncPeriod(c)())
     // Start the CloudNodeController
     nodeController := cloudcontrollers.NewCloudNodeController(
         sharedInformers.Core().V1().Nodes(),
         client("cloud-node-controller"), cloud,
-        s.NodeMonitorPeriod.Duration,
-        s.NodeStatusUpdateFrequency.Duration)
+        c.Generic.ComponentConfig.NodeMonitorPeriod.Duration,
+        c.Extra.NodeStatusUpdateFrequency)

     nodeController.Run()
-    time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
+    time.Sleep(wait.Jitter(c.Generic.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter))
     // Start the PersistentVolumeLabelController
     pvlController := cloudcontrollers.NewPersistentVolumeLabelController(client("pvl-controller"), cloud)
     threads := 5
     go pvlController.Run(threads, stop)
-    time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
+    time.Sleep(wait.Jitter(c.Generic.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter))
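
wait.Jitter(d, maxFactor) returns a duration drawn uniformly from [d, d + maxFactor*d), so these sleeps stagger controller start-up instead of launching every loop at the same instant. For example:

// With a 2s interval and a jitter factor of 1.0, this sleeps
// somewhere between 2s and 4s before the next controller launches.
time.Sleep(wait.Jitter(2*time.Second, 1.0))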
     // Start the service controller
     serviceController, err := servicecontroller.New(
@@ -225,79 +228,44 @@ func StartControllers(s *options.CloudControllerManagerServer, kubeconfig *restc
client("service-controller"),
sharedInformers.Core().V1().Services(),
sharedInformers.Core().V1().Nodes(),
s.ClusterName,
c.Generic.ComponentConfig.ClusterName,
)
if err != nil {
glog.Errorf("Failed to start service controller: %v", err)
} else {
go serviceController.Run(stop, int(s.ConcurrentServiceSyncs))
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
go serviceController.Run(stop, int(c.Generic.ComponentConfig.ConcurrentServiceSyncs))
time.Sleep(wait.Jitter(c.Generic.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter))
}
     // If CIDRs should be allocated for pods and set on the CloudProvider, then start the route controller
-    if s.AllocateNodeCIDRs && s.ConfigureCloudRoutes {
+    if c.Generic.ComponentConfig.AllocateNodeCIDRs && c.Generic.ComponentConfig.ConfigureCloudRoutes {
         if routes, ok := cloud.Routes(); !ok {
             glog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. Will not configure cloud provider routes.")
         } else {
             var clusterCIDR *net.IPNet
-            if len(strings.TrimSpace(s.ClusterCIDR)) != 0 {
-                _, clusterCIDR, err = net.ParseCIDR(s.ClusterCIDR)
+            if len(strings.TrimSpace(c.Generic.ComponentConfig.ClusterCIDR)) != 0 {
+                _, clusterCIDR, err = net.ParseCIDR(c.Generic.ComponentConfig.ClusterCIDR)
                 if err != nil {
-                    glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", s.ClusterCIDR, err)
+                    glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", c.Generic.ComponentConfig.ClusterCIDR, err)
                 }
             }

-            routeController := routecontroller.New(routes, client("route-controller"), sharedInformers.Core().V1().Nodes(), s.ClusterName, clusterCIDR)
-            go routeController.Run(stop, s.RouteReconciliationPeriod.Duration)
-            time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
+            routeController := routecontroller.New(routes, client("route-controller"), sharedInformers.Core().V1().Nodes(), c.Generic.ComponentConfig.ClusterName, clusterCIDR)
+            go routeController.Run(stop, c.Generic.ComponentConfig.RouteReconciliationPeriod.Duration)
+            time.Sleep(wait.Jitter(c.Generic.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter))
         }
     } else {
-        glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", s.AllocateNodeCIDRs, s.ConfigureCloudRoutes)
+        glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", c.Generic.ComponentConfig.AllocateNodeCIDRs, c.Generic.ComponentConfig.ConfigureCloudRoutes)
     }
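
net.ParseCIDR returns both the bare IP and the network; only the *net.IPNet matters here, which is why the first return value is discarded. For instance:

// Parses a typical pod range into a *net.IPNet.
_, clusterCIDR, err := net.ParseCIDR("10.244.0.0/16")
if err != nil {
    glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", "10.244.0.0/16", err)
}
fmt.Println(clusterCIDR.String()) // "10.244.0.0/16"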
     // If apiserver is not running we should wait for some time and fail only then. This is particularly
     // important when we start apiserver and controller manager at the same time.
-    err = wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
-        if _, err = restclient.ServerAPIVersions(kubeconfig); err == nil {
-            return true, nil
-        }
-        glog.Errorf("Failed to get api versions from server: %v", err)
-        return false, nil
-    })
+    err = genericcontrollermanager.WaitForAPIServer(versionedClient, 10*time.Second)
     if err != nil {
-        glog.Fatalf("Failed to get api versions from server: %v", err)
+        glog.Fatalf("Failed to wait for apiserver being healthy: %v", err)
     }

     sharedInformers.Start(stop)

     select {}
 }
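
genericcontrollermanager.WaitForAPIServer replaces the hand-rolled poll that previously lived here; assuming it probes the apiserver's health through the client (as its error message suggests), an equivalent sketch looks like:

// Hypothetical stand-in for the WaitForAPIServer call above: poll once a
// second until /healthz answers cleanly, giving up after ten seconds.
err := wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
    if result := versionedClient.Discovery().RESTClient().Get().AbsPath("/healthz").Do(); result.Error() != nil {
        glog.Errorf("apiserver not healthy yet: %v", result.Error())
        return false, nil
    }
    return true, nil
})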
-func startHTTP(s *options.CloudControllerManagerServer) {
-    mux := http.NewServeMux()
-    healthz.InstallHandler(mux)
-    if s.EnableProfiling {
-        mux.HandleFunc("/debug/pprof/", pprof.Index)
-        mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
-        mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
-        mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
-        if s.EnableContentionProfiling {
-            goruntime.SetBlockProfileRate(1)
-        }
-    }
-    configz.InstallHandler(mux)
-    mux.Handle("/metrics", prometheus.Handler())
-
-    server := &http.Server{
-        Addr:    net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))),
-        Handler: mux,
-    }
-    glog.Fatal(server.ListenAndServe())
-}
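
startHTTP bundled healthz, pprof, configz, and Prometheus metrics onto a single mux; after this change the generic controller-manager serving stack owns those endpoints. The stdlib pprof wiring it used remains the usual pattern, e.g.:

// Minimal standalone debug server with the same net/http/pprof handlers;
// the address is illustrative, not something this binary serves.
mux := http.NewServeMux()
mux.HandleFunc("/debug/pprof/", pprof.Index)
mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
glog.Fatal(http.ListenAndServe("127.0.0.1:6060", mux))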
-func createRecorder(kubeClient *clientset.Clientset) record.EventRecorder {
-    eventBroadcaster := record.NewBroadcaster()
-    eventBroadcaster.StartLogging(glog.Infof)
-    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
-    return eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "cloud-controller-manager"})
-}
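
createRecorder is gone as well: the completed config now carries a ready-made EventRecorder (c.Generic.EventRecorder). However the recorder is constructed, it is consumed the same way; for example (the object and reason here are illustrative):

// Emit a normal-severity event attributed to this component. node can be
// any runtime.Object the event should hang off, e.g. a *v1.Node.
recorder.Eventf(node, v1.EventTypeNormal, "CloudProviderReady",
    "cloud provider %s initialized", "gce")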