Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 18:53:35 +00:00

Commit: vendor updates
Changed file: vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go (generated, vendored), 149 changed lines
@@ -17,8 +17,10 @@ limitations under the License.
 package kubelet

 import (
+	"context"
 	"crypto/tls"
 	"fmt"
+	"math"
 	"net"
 	"net/http"
 	"net/url"
@@ -57,7 +59,6 @@ import (
 	"k8s.io/kubernetes/pkg/features"
 	internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
 	kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
-	kubeletconfigv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1"
 	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
 	kubeletcertificate "k8s.io/kubernetes/pkg/kubelet/certificate"
 	"k8s.io/kubernetes/pkg/kubelet/cm"
@@ -74,7 +75,9 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/kubeletconfig"
 	"k8s.io/kubernetes/pkg/kubelet/kuberuntime"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
+	"k8s.io/kubernetes/pkg/kubelet/logs"
 	"k8s.io/kubernetes/pkg/kubelet/metrics"
+	"k8s.io/kubernetes/pkg/kubelet/metrics/collectors"
 	"k8s.io/kubernetes/pkg/kubelet/network"
 	"k8s.io/kubernetes/pkg/kubelet/network/dns"
 	"k8s.io/kubernetes/pkg/kubelet/pleg"
@@ -96,6 +99,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/util/queue"
 	"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
 	"k8s.io/kubernetes/pkg/kubelet/volumemanager"
+	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	"k8s.io/kubernetes/pkg/security/apparmor"
 	utildbus "k8s.io/kubernetes/pkg/util/dbus"
 	kubeio "k8s.io/kubernetes/pkg/util/io"
@@ -104,7 +108,6 @@ import (
 	nodeutil "k8s.io/kubernetes/pkg/util/node"
 	"k8s.io/kubernetes/pkg/util/oom"
 	"k8s.io/kubernetes/pkg/volume"
-	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
 	utilexec "k8s.io/utils/exec"
 )

@@ -203,7 +206,6 @@ type Builder func(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	registerNode bool,
 	registerWithTaints []api.Taint,
 	allowedUnsafeSysctls []string,
-	containerized bool,
 	remoteRuntimeEndpoint string,
 	remoteImageEndpoint string,
 	experimentalMounterPath string,
@@ -225,27 +227,7 @@ type Builder func(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 // at runtime that are necessary for running the Kubelet. This is a temporary solution for grouping
 // these objects while we figure out a more comprehensive dependency injection story for the Kubelet.
 type Dependencies struct {
-	// TODO(mtaufen): KubeletBuilder:
-	// Mesos currently uses this as a hook to let them make their own call to
-	// let them wrap the KubeletBootstrap that CreateAndInitKubelet returns with
-	// their own KubeletBootstrap. It's a useful hook. I need to think about what
-	// a nice home for it would be. There seems to be a trend, between this and
-	// the Options fields below, of providing hooks where you can add extra functionality
-	// to the Kubelet for your solution. Maybe we should centralize these sorts of things?
-	Builder Builder
-
-	// TODO(mtaufen): ContainerRuntimeOptions and Options:
-	// Arrays of functions that can do arbitrary things to the Kubelet and the Runtime
-	// seem like a difficult path to trace when it's time to debug something.
-	// I'm leaving these fields here for now, but there is likely an easier-to-follow
-	// way to support their intended use cases. E.g. ContainerRuntimeOptions
-	// is used by Mesos to set an environment variable in containers which has
-	// some connection to their container GC. It seems that Mesos intends to use
-	// Options to add additional node conditions that are updated as part of the
-	// Kubelet lifecycle (see https://github.com/kubernetes/kubernetes/pull/21521).
-	// We should think about providing more explicit ways of doing these things.
-	ContainerRuntimeOptions []kubecontainer.Option
-	Options []Option
+	Options []Option

 	// Injected Dependencies
 	Auth server.AuthInterface
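
Note: after this hunk the Dependencies struct keeps only the functional-option hook (Options []Option) rather than the Mesos-era Builder and ContainerRuntimeOptions hooks. A minimal sketch of how such option hooks are typically applied, assuming a stripped-down Kubelet type (illustrative only, not the vendored code):

package main

import "fmt"

// Kubelet is a stand-in for the real struct; Option mirrors the
// `Options []Option` hook kept in the Dependencies struct above.
type Kubelet struct {
	nodeName string
}

type Option func(*Kubelet)

// WithNodeName is a hypothetical option used only for this example.
func WithNodeName(name string) Option {
	return func(k *Kubelet) { k.nodeName = name }
}

func newKubelet(opts ...Option) *Kubelet {
	k := &Kubelet{nodeName: "default"}
	for _, opt := range opts {
		opt(k) // each injected hook mutates the freshly built object
	}
	return k
}

func main() {
	k := newKubelet(WithNodeName("node-a"))
	fmt.Println(k.nodeName)
}
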
@@ -274,8 +256,8 @@ type Dependencies struct {
 // KubeletConfiguration or returns an error.
 func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, nodeName types.NodeName, bootstrapCheckpointPath string) (*config.PodConfig, error) {
 	manifestURLHeader := make(http.Header)
-	if len(kubeCfg.ManifestURLHeader) > 0 {
-		for k, v := range kubeCfg.ManifestURLHeader {
+	if len(kubeCfg.StaticPodURLHeader) > 0 {
+		for k, v := range kubeCfg.StaticPodURLHeader {
 			for i := range v {
 				manifestURLHeader.Add(k, v[i])
 			}
@@ -286,23 +268,25 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, nodeName types.NodeName, bootstrapCheckpointPath string) (*config.PodConfig, error) {
 	cfg := config.NewPodConfig(config.PodConfigNotificationIncremental, kubeDeps.Recorder)

 	// define file config source
-	if kubeCfg.PodManifestPath != "" {
-		glog.Infof("Adding manifest path: %v", kubeCfg.PodManifestPath)
-		config.NewSourceFile(kubeCfg.PodManifestPath, nodeName, kubeCfg.FileCheckFrequency.Duration, cfg.Channel(kubetypes.FileSource))
+	if kubeCfg.StaticPodPath != "" {
+		glog.Infof("Adding pod path: %v", kubeCfg.StaticPodPath)
+		config.NewSourceFile(kubeCfg.StaticPodPath, nodeName, kubeCfg.FileCheckFrequency.Duration, cfg.Channel(kubetypes.FileSource))
 	}

 	// define url config source
-	if kubeCfg.ManifestURL != "" {
-		glog.Infof("Adding manifest url %q with HTTP header %v", kubeCfg.ManifestURL, manifestURLHeader)
-		config.NewSourceURL(kubeCfg.ManifestURL, manifestURLHeader, nodeName, kubeCfg.HTTPCheckFrequency.Duration, cfg.Channel(kubetypes.HTTPSource))
+	if kubeCfg.StaticPodURL != "" {
+		glog.Infof("Adding pod url %q with HTTP header %v", kubeCfg.StaticPodURL, manifestURLHeader)
+		config.NewSourceURL(kubeCfg.StaticPodURL, manifestURLHeader, nodeName, kubeCfg.HTTPCheckFrequency.Duration, cfg.Channel(kubetypes.HTTPSource))
 	}

 	// Restore from the checkpoint path
 	// NOTE: This MUST happen before creating the apiserver source
 	// below, or the checkpoint would override the source of truth.
-	updatechannel := cfg.Channel(kubetypes.ApiserverSource)
+
+	var updatechannel chan<- interface{}
 	if bootstrapCheckpointPath != "" {
 		glog.Infof("Adding checkpoint path: %v", bootstrapCheckpointPath)
+		updatechannel = cfg.Channel(kubetypes.ApiserverSource)
 		err := cfg.Restore(bootstrapCheckpointPath, updatechannel)
 		if err != nil {
 			return nil, err
@@ -311,6 +295,9 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, nodeName types.NodeName, bootstrapCheckpointPath string) (*config.PodConfig, error) {

 	if kubeDeps.KubeClient != nil {
 		glog.Infof("Watching apiserver")
+		if updatechannel == nil {
+			updatechannel = cfg.Channel(kubetypes.ApiserverSource)
+		}
 		config.NewSourceApiserver(kubeDeps.KubeClient, nodeName, updatechannel)
 	}
 	return cfg, nil
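
Note: in the rewritten makePodSourceConfig above, the apiserver update channel is no longer created unconditionally; it is created lazily, either when restoring pods from a bootstrap checkpoint or when the apiserver source is actually wired up. A minimal sketch of that lazy-channel pattern, with hypothetical names standing in for cfg.Channel and the sources (illustrative only):

package main

import "fmt"

// podConfig simulates config.PodConfig: Channel creates the merge channel for a
// source the first time it is requested and reuses it afterwards.
type podConfig struct {
	channels map[string]chan interface{}
}

func (c *podConfig) Channel(source string) chan<- interface{} {
	if ch, ok := c.channels[source]; ok {
		return ch
	}
	ch := make(chan interface{}, 1)
	c.channels[source] = ch
	return ch
}

func main() {
	cfg := &podConfig{channels: map[string]chan interface{}{}}

	var updates chan<- interface{} // stays nil until someone actually needs it
	bootstrapCheckpointPath := ""  // no checkpoint in this example
	haveKubeClient := true

	if bootstrapCheckpointPath != "" {
		updates = cfg.Channel("api") // checkpointed pods would be replayed here
	}
	if haveKubeClient {
		if updates == nil {
			updates = cfg.Channel("api") // created only when the apiserver source is added
		}
		updates <- "initial pod list from the apiserver"
	}
	fmt.Println("sources registered:", len(cfg.channels))
}
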
@@ -344,7 +331,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	registerNode bool,
 	registerWithTaints []api.Taint,
 	allowedUnsafeSysctls []string,
-	containerized bool,
 	remoteRuntimeEndpoint string,
 	remoteImageEndpoint string,
 	experimentalMounterPath string,
@@ -392,7 +378,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 			return nil, fmt.Errorf("failed to get instances from cloud provider")
 		}

-		nodeName, err = instances.CurrentNodeName(hostname)
+		nodeName, err = instances.CurrentNodeName(context.TODO(), hostname)
 		if err != nil {
 			return nil, fmt.Errorf("error fetching current instance name from cloud provider: %v", err)
 		}
@@ -400,7 +386,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 		glog.V(2).Infof("cloud provider determined current node name to be %s", nodeName)

 		if utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletServerCertificate) {
-			nodeAddresses, err := instances.NodeAddresses(nodeName)
+			nodeAddresses, err := instances.NodeAddresses(context.TODO(), nodeName)
 			if err != nil {
 				return nil, fmt.Errorf("failed to get the addresses of the current instance from the cloud provider: %v", err)
 			}
@@ -521,7 +507,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 		recorder: kubeDeps.Recorder,
 		cadvisor: kubeDeps.CAdvisorInterface,
 		cloud: kubeDeps.Cloud,
-		autoDetectCloudProvider: (kubeletconfigv1alpha1.AutoDetectCloudProvider == cloudProvider),
 		externalCloudProvider: cloudprovider.IsExternal(cloudProvider),
 		providerID: providerID,
 		nodeRef: nodeRef,
@@ -614,6 +599,10 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	var nl *NoOpLegacyHost
 	pluginSettings.LegacyRuntimeHost = nl

+	if containerRuntime == kubetypes.RktContainerRuntime {
+		glog.Warningln("rktnetes has been deprecated in favor of rktlet. Please see https://github.com/kubernetes-incubator/rktlet for more information.")
+	}
+
 	// rktnetes cannot be run with CRI.
 	if containerRuntime != kubetypes.RktContainerRuntime {
 		// kubelet defers to the runtime shim to setup networking. Setting
@@ -634,9 +623,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 		if err != nil {
 			return nil, err
 		}
-		if err := ds.Start(); err != nil {
-			return nil, err
-		}
 		// For now, the CRI shim redirects the streaming requests to the
 		// kubelet, which handles the requests using DockerService..
 		klet.criHandler = ds
@@ -657,8 +643,8 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 			return nil, err
 		}
 		if !supported {
-			klet.dockerLegacyService = ds.NewDockerLegacyService()
-			legacyLogProvider = dockershim.NewLegacyLogProvider(klet.dockerLegacyService)
+			klet.dockerLegacyService = ds
+			legacyLogProvider = ds
 		}
 	case kubetypes.RemoteContainerRuntime:
 		// No-op.
@@ -711,7 +697,8 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 			klet.podManager,
 			klet.runtimeCache,
 			runtimeService,
-			imageService)
+			imageService,
+			stats.NewLogMetricsService())
 		}
 	} else {
 		// rkt uses the legacy, non-CRI, integration. Configure it the old way.
@@ -767,12 +754,27 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	klet.containerDeletor = newPodContainerDeletor(klet.containerRuntime, integer.IntMax(containerGCPolicy.MaxPerPodContainer, minDeadContainerInPod))

 	// setup imageManager
-	imageManager, err := images.NewImageGCManager(klet.containerRuntime, klet.StatsProvider, kubeDeps.Recorder, nodeRef, imageGCPolicy)
+	imageManager, err := images.NewImageGCManager(klet.containerRuntime, klet.StatsProvider, kubeDeps.Recorder, nodeRef, imageGCPolicy, crOptions.PodSandboxImage)
 	if err != nil {
 		return nil, fmt.Errorf("failed to initialize image manager: %v", err)
 	}
 	klet.imageManager = imageManager

+	if containerRuntime == kubetypes.RemoteContainerRuntime && utilfeature.DefaultFeatureGate.Enabled(features.CRIContainerLogRotation) {
+		// setup containerLogManager for CRI container runtime
+		containerLogManager, err := logs.NewContainerLogManager(
+			klet.runtimeService,
+			kubeCfg.ContainerLogMaxSize,
+			int(kubeCfg.ContainerLogMaxFiles),
+		)
+		if err != nil {
+			return nil, fmt.Errorf("failed to initialize container log manager: %v", err)
+		}
+		klet.containerLogManager = containerLogManager
+	} else {
+		klet.containerLogManager = logs.NewStubContainerLogManager()
+	}
+
 	klet.statusManager = status.NewManager(klet.kubeClient, klet.podManager, klet)

 	if utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletServerCertificate) && kubeDeps.TLSOptions != nil {
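
Note: the hunk above wires a working container log manager only when the runtime is a remote (CRI) runtime and the CRIContainerLogRotation feature gate is on, and falls back to a stub otherwise so later code can call Start() unconditionally. A minimal sketch of that gate-or-stub construction pattern, with hypothetical names rather than the vendored APIs:

package main

import "fmt"

// LogManager stands in for the container log manager interface.
type LogManager interface {
	Start()
}

type realLogManager struct{ maxFiles int }

func (r *realLogManager) Start() { fmt.Println("rotating logs, keeping", r.maxFiles, "files") }

type stubLogManager struct{}

func (stubLogManager) Start() {} // no-op when rotation is not enabled

// newLogManager mirrors the construction above: a working manager behind the
// feature gate, a stub everywhere else, so callers never need a nil check.
func newLogManager(criRuntime, gateEnabled bool, maxFiles int) LogManager {
	if criRuntime && gateEnabled {
		return &realLogManager{maxFiles: maxFiles}
	}
	return stubLogManager{}
}

func main() {
	newLogManager(true, true, 5).Start()  // real manager
	newLogManager(false, true, 5).Start() // stub, does nothing
}
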
@@ -797,7 +799,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 		kubeDeps.TLSOptions.Config.GetCertificate = func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
 			cert := klet.serverCertificateManager.Current()
 			if cert == nil {
-				return nil, fmt.Errorf("no certificate available")
+				return nil, fmt.Errorf("no serving certificate available for the kubelet")
 			}
 			return cert, nil
 		}
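
Note: the hunk above only rewords the error message, but the surrounding context shows the underlying mechanism: tls.Config.GetCertificate is resolved on every handshake and returns whatever certificate the rotating manager currently holds. A minimal, self-contained sketch of that callback; certSource here is a hypothetical stand-in for the kubelet's certificate manager, not the vendored type:

package main

import (
	"crypto/tls"
	"fmt"
	"sync"
)

// certSource simulates a rotating certificate manager: Current returns the
// latest certificate, or nil before the first certificate has been issued.
type certSource struct {
	mu   sync.RWMutex
	cert *tls.Certificate
}

func (c *certSource) Current() *tls.Certificate {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.cert
}

func main() {
	src := &certSource{}
	cfg := &tls.Config{
		// Called per handshake, so a rotated certificate is picked up
		// without rebuilding the listener or the tls.Config.
		GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
			cert := src.Current()
			if cert == nil {
				return nil, fmt.Errorf("no serving certificate available")
			}
			return cert, nil
		},
	}
	_, err := cfg.GetCertificate(nil)
	fmt.Println(err) // nothing loaded yet, so the callback reports an error
}
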
@@ -899,6 +901,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewNoNewPrivsAdmitHandler(klet.containerRuntime))
 	if utilfeature.DefaultFeatureGate.Enabled(features.Accelerators) {
 		if containerRuntime == kubetypes.DockerContainerRuntime {
+			glog.Warningln("Accelerators feature is deprecated and will be removed in v1.11. Please use device plugins instead. They can be enabled using the DevicePlugins feature gate.")
 			if klet.gpuManager, err = nvidia.NewNvidiaGPUManager(klet, kubeDeps.DockerClientConfig); err != nil {
 				return nil, err
 			}
@@ -1007,6 +1010,9 @@ type Kubelet struct {
 	// Manager for image garbage collection.
 	imageManager images.ImageGCManager

+	// Manager for container logs.
+	containerLogManager logs.ContainerLogManager
+
 	// Secret manager.
 	secretManager secret.Manager

@@ -1032,11 +1038,7 @@ type Kubelet struct {

 	// Cloud provider interface.
 	cloud cloudprovider.Interface
-	// DEPRECATED: auto detecting cloud providers goes against the initiative
-	// for out-of-tree cloud providers as we'll now depend on cAdvisor integrations
-	// with cloud providers instead of in the core repo.
-	// More details here: https://github.com/kubernetes/kubernetes/issues/50986
-	autoDetectCloudProvider bool

 	// Indicates that the node initialization happens in an external cloud controller
 	externalCloudProvider bool
 	// Reference to this node.
@@ -1291,7 +1293,7 @@ func (kl *Kubelet) StartGarbageCollection() {
 // Note that the modules here must not depend on modules that are not initialized here.
 func (kl *Kubelet) initializeModules() error {
 	// Prometheus metrics.
-	metrics.Register(kl.runtimeCache)
+	metrics.Register(kl.runtimeCache, collectors.NewVolumeStatsCollector(kl))

 	// Setup filesystem directories.
 	if err := kl.setupDataDirs(); err != nil {
@@ -1313,16 +1315,6 @@ func (kl *Kubelet) initializeModules() error {
 		kl.serverCertificateManager.Start()
 	}

-	// Start container manager.
-	node, err := kl.getNodeAnyWay()
-	if err != nil {
-		return fmt.Errorf("Kubelet failed to get node info: %v", err)
-	}
-
-	if err := kl.containerManager.Start(node, kl.GetActivePods, kl.sourcesReady, kl.statusManager, kl.runtimeService); err != nil {
-		return fmt.Errorf("Failed to start ContainerManager %v", err)
-	}
-
 	// Start out of memory watcher.
 	if err := kl.oomWatcher.Start(kl.nodeRef); err != nil {
 		return fmt.Errorf("Failed to start OOM watcher %v", err)
@@ -1347,7 +1339,25 @@ func (kl *Kubelet) initializeRuntimeDependentModules() {
 		glog.Fatalf("Failed to start cAdvisor %v", err)
 	}
 	// eviction manager must start after cadvisor because it needs to know if the container runtime has a dedicated imagefs
-	kl.evictionManager.Start(kl.cadvisor, kl.GetActivePods, kl.podResourcesAreReclaimed, kl.containerManager, evictionMonitoringPeriod)
+	kl.evictionManager.Start(kl.StatsProvider, kl.GetActivePods, kl.podResourcesAreReclaimed, evictionMonitoringPeriod)
+
+	// trigger on-demand stats collection once so that we have capacity information for ephemeral storage.
+	// ignore any errors, since if stats collection is not successful, the container manager will fail to start below.
+	kl.StatsProvider.GetCgroupStats("/", true)
+	// Start container manager.
+	node, err := kl.getNodeAnyWay()
+	if err != nil {
+		// Fail kubelet and rely on the babysitter to retry starting kubelet.
+		glog.Fatalf("Kubelet failed to get node info: %v", err)
+	}
+	// containerManager must start after cAdvisor because it needs filesystem capacity information
+	if err := kl.containerManager.Start(node, kl.GetActivePods, kl.sourcesReady, kl.statusManager, kl.runtimeService); err != nil {
+		// Fail kubelet and rely on the babysitter to retry starting kubelet.
+		glog.Fatalf("Failed to start ContainerManager %v", err)
+	}
+	// container log manager must start after container runtime is up to retrieve information from container runtime
+	// and inform container to reopen log file after log rotation.
+	kl.containerLogManager.Start()
 }

 // Run starts the kubelet reacting to config updates
@@ -1361,8 +1371,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {

 	if err := kl.initializeModules(); err != nil {
 		kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.KubeletSetupFailed, err.Error())
-		glog.Error(err)
-		kl.runtimeState.setInitError(err)
+		glog.Fatal(err)
 	}

 	// Start volume manager
@@ -1773,12 +1782,22 @@ func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHandler) {
 	housekeepingTicker := time.NewTicker(housekeepingPeriod)
 	defer housekeepingTicker.Stop()
 	plegCh := kl.pleg.Watch()
+	const (
+		base   = 100 * time.Millisecond
+		max    = 5 * time.Second
+		factor = 2
+	)
+	duration := base
 	for {
 		if rs := kl.runtimeState.runtimeErrors(); len(rs) != 0 {
 			glog.Infof("skipping pod synchronization - %v", rs)
-			time.Sleep(5 * time.Second)
+			// exponential backoff
+			time.Sleep(duration)
+			duration = time.Duration(math.Min(float64(max), factor*float64(duration)))
 			continue
 		}
+		// reset backoff if we have a success
+		duration = base

 		kl.syncLoopMonitor.Store(kl.clock.Now())
 		if !kl.syncLoopIteration(updates, handler, syncTicker.C, housekeepingTicker.C, plegCh) {
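
Note: the syncLoop hunk above replaces a fixed 5 second sleep with capped exponential backoff while runtime errors persist, resetting to the base interval on the first successful iteration. A minimal sketch of the same backoff arithmetic, standalone and not the vendored code:

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	const (
		base   = 100 * time.Millisecond
		max    = 5 * time.Second
		factor = 2
	)
	duration := base
	// While the runtime keeps reporting errors, each retry doubles the wait,
	// capped at max; a successful iteration would reset duration to base.
	for i := 0; i < 8; i++ {
		fmt.Println(duration) // 100ms, 200ms, 400ms, ... capped at 5s
		duration = time.Duration(math.Min(float64(max), factor*float64(duration)))
	}
}
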