vendor update for CSI 0.3.0
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go (vendored, generated; 572 lines changed)
@@ -27,7 +27,6 @@ import (
"os"
"path"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
@@ -61,6 +60,7 @@ import (
kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
kubeletcertificate "k8s.io/kubernetes/pkg/kubelet/certificate"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/configmap"
@@ -69,8 +69,6 @@ import (
dockerremote "k8s.io/kubernetes/pkg/kubelet/dockershim/remote"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/eviction"
"k8s.io/kubernetes/pkg/kubelet/gpu"
"k8s.io/kubernetes/pkg/kubelet/gpu/nvidia"
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig"
"k8s.io/kubernetes/pkg/kubelet/kuberuntime"
@@ -78,7 +76,6 @@ import (
"k8s.io/kubernetes/pkg/kubelet/logs"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/metrics/collectors"
"k8s.io/kubernetes/pkg/kubelet/network"
"k8s.io/kubernetes/pkg/kubelet/network/dns"
"k8s.io/kubernetes/pkg/kubelet/pleg"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
@@ -86,7 +83,6 @@ import (
"k8s.io/kubernetes/pkg/kubelet/prober"
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/kubelet/remote"
"k8s.io/kubernetes/pkg/kubelet/rkt"
"k8s.io/kubernetes/pkg/kubelet/secret"
"k8s.io/kubernetes/pkg/kubelet/server"
serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
@@ -94,13 +90,17 @@ import (
"k8s.io/kubernetes/pkg/kubelet/stats"
"k8s.io/kubernetes/pkg/kubelet/status"
"k8s.io/kubernetes/pkg/kubelet/sysctl"
"k8s.io/kubernetes/pkg/kubelet/token"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/kubelet/util/manager"
"k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher"
"k8s.io/kubernetes/pkg/kubelet/util/queue"
"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
"k8s.io/kubernetes/pkg/kubelet/volumemanager"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/pkg/security/apparmor"
sysctlwhitelist "k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl"
utildbus "k8s.io/kubernetes/pkg/util/dbus"
kubeio "k8s.io/kubernetes/pkg/util/io"
utilipt "k8s.io/kubernetes/pkg/util/iptables"
@@ -108,6 +108,7 @@ import (
nodeutil "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/csi"
utilexec "k8s.io/utils/exec"
)

@@ -221,7 +222,8 @@ type Builder func(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
keepTerminatedPodVolumes bool,
nodeLabels map[string]string,
seccompProfileRoot string,
bootstrapCheckpointPath string) (Bootstrap, error)
bootstrapCheckpointPath string,
nodeStatusMaxImages int32) (Bootstrap, error)

// Dependencies is a bin for things we might consider "injected dependencies" -- objects constructed
// at runtime that are necessary for running the Kubelet. This is a temporary solution for grouping
@@ -237,10 +239,10 @@ type Dependencies struct {
DockerClientConfig *dockershim.ClientConfig
EventClient v1core.EventsGetter
HeartbeatClient v1core.CoreV1Interface
OnHeartbeatFailure func()
KubeClient clientset.Interface
ExternalKubeClient clientset.Interface
Mounter mount.Interface
NetworkPlugins []network.NetworkPlugin
OOMAdjuster *oom.OOMAdjuster
OSInterface kubecontainer.OSInterface
PodConfig *config.PodConfig
@@ -346,7 +348,8 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
keepTerminatedPodVolumes bool,
nodeLabels map[string]string,
seccompProfileRoot string,
bootstrapCheckpointPath string) (*Kubelet, error) {
bootstrapCheckpointPath string,
nodeStatusMaxImages int32) (*Kubelet, error) {
if rootDirectory == "" {
return nil, fmt.Errorf("invalid root directory %q", rootDirectory)
}
@@ -443,6 +446,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
MaxPodGracePeriodSeconds: int64(kubeCfg.EvictionMaxPodGracePeriod),
Thresholds: thresholds,
KernelMemcgNotification: experimentalKernelMemcgNotification,
PodCgroupRoot: kubeDeps.ContainerManager.GetPodCgroupRoot(),
}

serviceIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
@@ -493,6 +497,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
nodeName: nodeName,
kubeClient: kubeDeps.KubeClient,
heartbeatClient: kubeDeps.HeartbeatClient,
onRepeatedHeartbeatFailure: kubeDeps.OnHeartbeatFailure,
rootDirectory: rootDirectory,
resyncInterval: kubeCfg.SyncFrequency.Duration,
sourcesReady: config.NewSourcesReady(kubeDeps.PodConfig.SeenAllSources),
@@ -512,20 +517,22 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
nodeRef: nodeRef,
nodeLabels: nodeLabels,
nodeStatusUpdateFrequency: kubeCfg.NodeStatusUpdateFrequency.Duration,
os: kubeDeps.OSInterface,
oomWatcher: oomWatcher,
cgroupsPerQOS: kubeCfg.CgroupsPerQOS,
cgroupRoot: kubeCfg.CgroupRoot,
mounter: kubeDeps.Mounter,
writer: kubeDeps.Writer,
maxPods: int(kubeCfg.MaxPods),
podsPerCore: int(kubeCfg.PodsPerCore),
syncLoopMonitor: atomic.Value{},
daemonEndpoints: daemonEndpoints,
containerManager: kubeDeps.ContainerManager,
containerRuntimeName: containerRuntime,
nodeIP: parsedNodeIP,
clock: clock.RealClock{},
os: kubeDeps.OSInterface,
oomWatcher: oomWatcher,
cgroupsPerQOS: kubeCfg.CgroupsPerQOS,
cgroupRoot: kubeCfg.CgroupRoot,
mounter: kubeDeps.Mounter,
writer: kubeDeps.Writer,
maxPods: int(kubeCfg.MaxPods),
podsPerCore: int(kubeCfg.PodsPerCore),
syncLoopMonitor: atomic.Value{},
daemonEndpoints: daemonEndpoints,
containerManager: kubeDeps.ContainerManager,
containerRuntimeName: containerRuntime,
redirectContainerStreaming: crOptions.RedirectContainerStreaming,
nodeIP: parsedNodeIP,
nodeIPValidator: validateNodeIP,
clock: clock.RealClock{},
enableControllerAttachDetach: kubeCfg.EnableControllerAttachDetach,
iptClient: utilipt.New(utilexec.New(), utildbus.New(), utilipt.ProtocolIpv4),
makeIPTablesUtilChains: kubeCfg.MakeIPTablesUtilChains,
@@ -533,46 +540,49 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
iptablesDropBit: int(kubeCfg.IPTablesDropBit),
experimentalHostUserNamespaceDefaulting: utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalHostUserNamespaceDefaultingGate),
keepTerminatedPodVolumes: keepTerminatedPodVolumes,
nodeStatusMaxImages: nodeStatusMaxImages,
enablePluginsWatcher: utilfeature.DefaultFeatureGate.Enabled(features.KubeletPluginsWatcher),
}

if klet.cloud != nil {
klet.cloudproviderRequestParallelism = make(chan int, 1)
klet.cloudproviderRequestSync = make(chan int)
// TODO(jchaloup): Make it configurable via --cloud-provider-request-timeout
klet.cloudproviderRequestTimeout = 10 * time.Second
}

secretManager := secret.NewCachingSecretManager(
kubeDeps.KubeClient, secret.GetObjectTTLFromNodeFunc(klet.GetNode))
kubeDeps.KubeClient, manager.GetObjectTTLFromNodeFunc(klet.GetNode))
klet.secretManager = secretManager

configMapManager := configmap.NewCachingConfigMapManager(
kubeDeps.KubeClient, configmap.GetObjectTTLFromNodeFunc(klet.GetNode))
kubeDeps.KubeClient, manager.GetObjectTTLFromNodeFunc(klet.GetNode))
klet.configMapManager = configMapManager

if klet.experimentalHostUserNamespaceDefaulting {
glog.Infof("Experimental host user namespace defaulting is enabled.")
}

hairpinMode, err := effectiveHairpinMode(kubeletconfiginternal.HairpinMode(kubeCfg.HairpinMode), containerRuntime, crOptions.NetworkPluginName)
if err != nil {
// This is a non-recoverable error. Returning it up the callstack will just
// lead to retries of the same failure, so just fail hard.
glog.Fatalf("Invalid hairpin mode: %v", err)
}
glog.Infof("Hairpin mode set to %q", hairpinMode)

plug, err := network.InitNetworkPlugin(kubeDeps.NetworkPlugins, crOptions.NetworkPluginName, &criNetworkHost{&networkHost{klet}, &network.NoopPortMappingGetter{}}, hairpinMode, nonMasqueradeCIDR, int(crOptions.NetworkPluginMTU))
if err != nil {
return nil, err
}
klet.networkPlugin = plug

machineInfo, err := klet.GetCachedMachineInfo()
machineInfo, err := klet.cadvisor.MachineInfo()
if err != nil {
return nil, err
}
klet.machineInfo = machineInfo

imageBackOff := flowcontrol.NewBackOff(backOffPeriod, MaxContainerBackOff)

klet.livenessManager = proberesults.NewManager()

klet.podCache = kubecontainer.NewCache()
var checkpointManager checkpointmanager.CheckpointManager
if bootstrapCheckpointPath != "" {
checkpointManager, err = checkpointmanager.NewCheckpointManager(bootstrapCheckpointPath)
if err != nil {
return nil, fmt.Errorf("failed to initialize checkpoint manager: %+v", err)
}
}
// podManager is also responsible for keeping secretManager and configMapManager contents up-to-date.
klet.podManager = kubepod.NewBasicPodManager(kubepod.NewBasicMirrorClient(klet.kubeClient), secretManager, configMapManager)
klet.podManager = kubepod.NewBasicPodManager(kubepod.NewBasicMirrorClient(klet.kubeClient), secretManager, configMapManager, checkpointManager)

if remoteRuntimeEndpoint != "" {
// remoteImageEndpoint is same as remoteRuntimeEndpoint if not explicitly specified
@@ -583,161 +593,110 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,

// TODO: These need to become arguments to a standalone docker shim.
pluginSettings := dockershim.NetworkPluginSettings{
HairpinMode: hairpinMode,
NonMasqueradeCIDR: nonMasqueradeCIDR,
PluginName: crOptions.NetworkPluginName,
PluginConfDir: crOptions.CNIConfDir,
PluginBinDir: crOptions.CNIBinDir,
MTU: int(crOptions.NetworkPluginMTU),
HairpinMode: kubeletconfiginternal.HairpinMode(kubeCfg.HairpinMode),
NonMasqueradeCIDR: nonMasqueradeCIDR,
PluginName: crOptions.NetworkPluginName,
PluginConfDir: crOptions.CNIConfDir,
PluginBinDirString: crOptions.CNIBinDir,
MTU: int(crOptions.NetworkPluginMTU),
}

klet.resourceAnalyzer = serverstats.NewResourceAnalyzer(klet, kubeCfg.VolumeStatsAggPeriod.Duration)

// Remote runtime shim just cannot talk back to kubelet, so it doesn't
// support bandwidth shaping or hostports till #35457. To enable legacy
// features, replace with networkHost.
var nl *NoOpLegacyHost
pluginSettings.LegacyRuntimeHost = nl

if containerRuntime == kubetypes.RktContainerRuntime {
glog.Warningln("rktnetes has been deprecated in favor of rktlet. Please see https://github.com/kubernetes-incubator/rktlet for more information.")
if containerRuntime == "rkt" {
glog.Fatalln("rktnetes has been deprecated in favor of rktlet. Please see https://github.com/kubernetes-incubator/rktlet for more information.")
}

// rktnetes cannot be run with CRI.
if containerRuntime != kubetypes.RktContainerRuntime {
// kubelet defers to the runtime shim to setup networking. Setting
// this to nil will prevent it from trying to invoke the plugin.
// It's easier to always probe and initialize plugins till cri
// becomes the default.
klet.networkPlugin = nil
// if left at nil, that means it is unneeded
var legacyLogProvider kuberuntime.LegacyLogProvider
// if left at nil, that means it is unneeded
var legacyLogProvider kuberuntime.LegacyLogProvider

switch containerRuntime {
case kubetypes.DockerContainerRuntime:
// Create and start the CRI shim running as a grpc server.
streamingConfig := getStreamingConfig(kubeCfg, kubeDeps)
ds, err := dockershim.NewDockerService(kubeDeps.DockerClientConfig, crOptions.PodSandboxImage, streamingConfig,
&pluginSettings, runtimeCgroups, kubeCfg.CgroupDriver, crOptions.DockershimRootDirectory,
crOptions.DockerDisableSharedPID)
if err != nil {
return nil, err
}
// For now, the CRI shim redirects the streaming requests to the
// kubelet, which handles the requests using DockerService..
switch containerRuntime {
case kubetypes.DockerContainerRuntime:
// Create and start the CRI shim running as a grpc server.
streamingConfig := getStreamingConfig(kubeCfg, kubeDeps, crOptions)
ds, err := dockershim.NewDockerService(kubeDeps.DockerClientConfig, crOptions.PodSandboxImage, streamingConfig,
&pluginSettings, runtimeCgroups, kubeCfg.CgroupDriver, crOptions.DockershimRootDirectory,
crOptions.DockerDisableSharedPID, !crOptions.RedirectContainerStreaming)
if err != nil {
return nil, err
}
if crOptions.RedirectContainerStreaming {
klet.criHandler = ds

// The unix socket for kubelet <-> dockershim communication.
glog.V(5).Infof("RemoteRuntimeEndpoint: %q, RemoteImageEndpoint: %q",
remoteRuntimeEndpoint,
remoteImageEndpoint)
glog.V(2).Infof("Starting the GRPC server for the docker CRI shim.")
server := dockerremote.NewDockerServer(remoteRuntimeEndpoint, ds)
if err := server.Start(); err != nil {
return nil, err
}

// Create dockerLegacyService when the logging driver is not supported.
supported, err := ds.IsCRISupportedLogDriver()
if err != nil {
return nil, err
}
if !supported {
klet.dockerLegacyService = ds
legacyLogProvider = ds
}
case kubetypes.RemoteContainerRuntime:
// No-op.
break
default:
return nil, fmt.Errorf("unsupported CRI runtime: %q", containerRuntime)
}
runtimeService, imageService, err := getRuntimeAndImageServices(remoteRuntimeEndpoint, remoteImageEndpoint, kubeCfg.RuntimeRequestTimeout)

// The unix socket for kubelet <-> dockershim communication.
glog.V(5).Infof("RemoteRuntimeEndpoint: %q, RemoteImageEndpoint: %q",
remoteRuntimeEndpoint,
remoteImageEndpoint)
glog.V(2).Infof("Starting the GRPC server for the docker CRI shim.")
server := dockerremote.NewDockerServer(remoteRuntimeEndpoint, ds)
if err := server.Start(); err != nil {
return nil, err
}

// Create dockerLegacyService when the logging driver is not supported.
supported, err := ds.IsCRISupportedLogDriver()
if err != nil {
return nil, err
}
klet.runtimeService = runtimeService
runtime, err := kuberuntime.NewKubeGenericRuntimeManager(
kubecontainer.FilterEventRecorder(kubeDeps.Recorder),
klet.livenessManager,
seccompProfileRoot,
containerRefManager,
machineInfo,
klet,
kubeDeps.OSInterface,
klet,
httpClient,
imageBackOff,
kubeCfg.SerializeImagePulls,
float32(kubeCfg.RegistryPullQPS),
int(kubeCfg.RegistryBurst),
kubeCfg.CPUCFSQuota,
runtimeService,
imageService,
kubeDeps.ContainerManager.InternalContainerLifecycle(),
legacyLogProvider,
)
if err != nil {
return nil, err
if !supported {
klet.dockerLegacyService = ds
legacyLogProvider = ds
}
klet.containerRuntime = runtime
klet.runner = runtime
case kubetypes.RemoteContainerRuntime:
// No-op.
break
default:
return nil, fmt.Errorf("unsupported CRI runtime: %q", containerRuntime)
}
runtimeService, imageService, err := getRuntimeAndImageServices(remoteRuntimeEndpoint, remoteImageEndpoint, kubeCfg.RuntimeRequestTimeout)
if err != nil {
return nil, err
}
klet.runtimeService = runtimeService
runtime, err := kuberuntime.NewKubeGenericRuntimeManager(
kubecontainer.FilterEventRecorder(kubeDeps.Recorder),
klet.livenessManager,
seccompProfileRoot,
containerRefManager,
machineInfo,
klet,
kubeDeps.OSInterface,
klet,
httpClient,
imageBackOff,
kubeCfg.SerializeImagePulls,
float32(kubeCfg.RegistryPullQPS),
int(kubeCfg.RegistryBurst),
kubeCfg.CPUCFSQuota,
runtimeService,
imageService,
kubeDeps.ContainerManager.InternalContainerLifecycle(),
legacyLogProvider,
)
if err != nil {
return nil, err
}
klet.containerRuntime = runtime
klet.streamingRuntime = runtime
klet.runner = runtime

if cadvisor.UsingLegacyCadvisorStats(containerRuntime, remoteRuntimeEndpoint) {
klet.StatsProvider = stats.NewCadvisorStatsProvider(
klet.cadvisor,
klet.resourceAnalyzer,
klet.podManager,
klet.runtimeCache,
klet.containerRuntime)
} else {
klet.StatsProvider = stats.NewCRIStatsProvider(
klet.cadvisor,
klet.resourceAnalyzer,
klet.podManager,
klet.runtimeCache,
runtimeService,
imageService,
stats.NewLogMetricsService())
}
} else {
// rkt uses the legacy, non-CRI, integration. Configure it the old way.
// TODO: Include hairpin mode settings in rkt?
conf := &rkt.Config{
Path: crOptions.RktPath,
Stage1Image: crOptions.RktStage1Image,
InsecureOptions: "image,ondisk",
}
runtime, err := rkt.New(
crOptions.RktAPIEndpoint,
conf,
klet,
kubeDeps.Recorder,
containerRefManager,
klet,
klet.livenessManager,
httpClient,
klet.networkPlugin,
hairpinMode == kubeletconfiginternal.HairpinVeth,
utilexec.New(),
kubecontainer.RealOS{},
imageBackOff,
kubeCfg.SerializeImagePulls,
float32(kubeCfg.RegistryPullQPS),
int(kubeCfg.RegistryBurst),
kubeCfg.RuntimeRequestTimeout.Duration,
)
if err != nil {
return nil, err
}
klet.containerRuntime = runtime
klet.runner = kubecontainer.DirectStreamingRunner(runtime)
if cadvisor.UsingLegacyCadvisorStats(containerRuntime, remoteRuntimeEndpoint) {
klet.StatsProvider = stats.NewCadvisorStatsProvider(
klet.cadvisor,
klet.resourceAnalyzer,
klet.podManager,
klet.runtimeCache,
klet.containerRuntime)
} else {
klet.StatsProvider = stats.NewCRIStatsProvider(
klet.cadvisor,
klet.resourceAnalyzer,
klet.podManager,
klet.runtimeCache,
runtimeService,
imageService,
stats.NewLogMetricsService())
}

klet.pleg = pleg.NewGenericPLEG(klet.containerRuntime, plegChannelCapacity, plegRelistPeriod, klet.podCache, clock.RealClock{})
@@ -777,21 +736,29 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,

klet.statusManager = status.NewManager(klet.kubeClient, klet.podManager, klet)

if utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletServerCertificate) && kubeDeps.TLSOptions != nil {
var ips []net.IP
cfgAddress := net.ParseIP(kubeCfg.Address)
if cfgAddress == nil || cfgAddress.IsUnspecified() {
localIPs, err := allLocalIPsWithoutLoopback()
if kubeCfg.ServerTLSBootstrap && kubeDeps.TLSOptions != nil && utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletServerCertificate) {
var (
ips []net.IP
names []string
)

// If the address was explicitly configured, use that. Otherwise, try to
// discover addresses from the cloudprovider. Otherwise, make a best guess.
if cfgAddress := net.ParseIP(kubeCfg.Address); cfgAddress != nil && !cfgAddress.IsUnspecified() {
ips = []net.IP{cfgAddress}
names = []string{klet.GetHostname(), hostnameOverride}
} else if len(cloudIPs) != 0 || len(cloudNames) != 0 {
ips = cloudIPs
names = cloudNames
} else {
localIPs, err := allGlobalUnicastIPs()
if err != nil {
return nil, err
}
ips = localIPs
} else {
ips = []net.IP{cfgAddress}
names = []string{klet.GetHostname(), hostnameOverride}
}

ips = append(ips, cloudIPs...)
names := append([]string{klet.GetHostname(), hostnameOverride}, cloudNames...)
klet.serverCertificateManager, err = kubeletcertificate.NewKubeletServerCertificateManager(klet.kubeClient, kubeCfg, klet.nodeName, ips, names, certDirectory)
if err != nil {
return nil, fmt.Errorf("failed to initialize certificate manager: %v", err)
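The rewritten bootstrap block above is only entered when server TLS bootstrap and certificate rotation are enabled, and it picks the IPs and DNS names that go into the kubelet's serving certificate with a clear precedence: an explicitly configured kubelet address wins, then addresses reported by the cloud provider, and only as a last resort every global unicast IP on the node. A minimal standalone sketch of that precedence follows; cfgAddress, cloudIPs, cloudNames and hostname are illustrative placeholders, not the kubelet's own variables.

package main

import (
    "fmt"
    "net"
)

// pickServingAddresses mirrors the precedence used above: explicit address,
// then cloud-provided addresses, then every global unicast IP on the host.
func pickServingAddresses(cfgAddress string, cloudIPs []net.IP, cloudNames []string, hostname string) ([]net.IP, []string, error) {
    if ip := net.ParseIP(cfgAddress); ip != nil && !ip.IsUnspecified() {
        return []net.IP{ip}, []string{hostname}, nil
    }
    if len(cloudIPs) != 0 || len(cloudNames) != 0 {
        return cloudIPs, cloudNames, nil
    }
    addrs, err := net.InterfaceAddrs()
    if err != nil {
        return nil, nil, err
    }
    var ips []net.IP
    for _, a := range addrs {
        if ipnet, ok := a.(*net.IPNet); ok && ipnet.IP.IsGlobalUnicast() {
            ips = append(ips, ipnet.IP)
        }
    }
    return ips, []string{hostname}, nil
}

func main() {
    ips, names, err := pickServingAddresses("192.0.2.10", nil, nil, "node-1")
    fmt.Println(ips, names, err)
}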
@@ -812,11 +779,16 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
containerRefManager,
kubeDeps.Recorder)

tokenManager := token.NewManager(kubeDeps.KubeClient)

klet.volumePluginMgr, err =
NewInitializedVolumePluginMgr(klet, secretManager, configMapManager, kubeDeps.VolumePlugins, kubeDeps.DynamicPluginProber)
NewInitializedVolumePluginMgr(klet, secretManager, configMapManager, tokenManager, kubeDeps.VolumePlugins, kubeDeps.DynamicPluginProber)
if err != nil {
return nil, err
}
if klet.enablePluginsWatcher {
klet.pluginWatcher = pluginwatcher.NewWatcher(klet.getPluginsDir())
}

// If the experimentalMounterPathFlag is set, we do not want to
// check node capabilities since the mount path is not the default
@@ -861,25 +833,23 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
klet.evictionManager = evictionManager
klet.admitHandlers.AddPodAdmitHandler(evictionAdmitHandler)

// add sysctl admission
runtimeSupport, err := sysctl.NewRuntimeAdmitHandler(klet.containerRuntime)
if err != nil {
return nil, err
if utilfeature.DefaultFeatureGate.Enabled(features.Sysctls) {
// add sysctl admission
runtimeSupport, err := sysctl.NewRuntimeAdmitHandler(klet.containerRuntime)
if err != nil {
return nil, err
}

// Safe, whitelisted sysctls can always be used as unsafe sysctls in the spec.
// Hence, we concatenate those two lists.
safeAndUnsafeSysctls := append(sysctlwhitelist.SafeSysctlWhitelist(), allowedUnsafeSysctls...)
sysctlsWhitelist, err := sysctl.NewWhitelist(safeAndUnsafeSysctls)
if err != nil {
return nil, err
}
klet.admitHandlers.AddPodAdmitHandler(runtimeSupport)
klet.admitHandlers.AddPodAdmitHandler(sysctlsWhitelist)
}
safeWhitelist, err := sysctl.NewWhitelist(sysctl.SafeSysctlWhitelist(), v1.SysctlsPodAnnotationKey)
if err != nil {
return nil, err
}
// Safe, whitelisted sysctls can always be used as unsafe sysctls in the spec
// Hence, we concatenate those two lists.
safeAndUnsafeSysctls := append(sysctl.SafeSysctlWhitelist(), allowedUnsafeSysctls...)
unsafeWhitelist, err := sysctl.NewWhitelist(safeAndUnsafeSysctls, v1.UnsafeSysctlsPodAnnotationKey)
if err != nil {
return nil, err
}
klet.admitHandlers.AddPodAdmitHandler(runtimeSupport)
klet.admitHandlers.AddPodAdmitHandler(safeWhitelist)
klet.admitHandlers.AddPodAdmitHandler(unsafeWhitelist)

// enable active deadline handler
activeDeadlineHandler, err := newActiveDeadlineHandler(klet.statusManager, kubeDeps.Recorder, klet.clock)
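The hunk above folds the old annotation-keyed safe/unsafe whitelist pair into a single whitelist and puts sysctl pod admission behind the Sysctls feature gate: the safe defaults and any operator-allowed unsafe sysctls are concatenated and checked by one admit handler. The following is a minimal sketch of that whitelist idea with made-up names (newWhitelist, admit); it is not the kubelet's sysctl package.

package main

import "fmt"

// whitelist reports whether a sysctl may be set, after concatenating the
// safe defaults with any explicitly allowed unsafe sysctls.
type whitelist map[string]bool

func newWhitelist(safeDefaults, allowedUnsafe []string) whitelist {
    w := whitelist{}
    for _, s := range append(append([]string{}, safeDefaults...), allowedUnsafe...) {
        w[s] = true
    }
    return w
}

func (w whitelist) admit(sysctls []string) error {
    for _, s := range sysctls {
        if !w[s] {
            return fmt.Errorf("sysctl %q is not whitelisted", s)
        }
    }
    return nil
}

func main() {
    w := newWhitelist([]string{"kernel.shm_rmid_forced", "net.ipv4.ip_local_port_range"}, []string{"net.core.somaxconn"})
    fmt.Println(w.admit([]string{"net.core.somaxconn"})) // allowed: nil error
    fmt.Println(w.admit([]string{"vm.swappiness"}))      // rejected: not whitelisted
}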
@@ -899,20 +869,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
klet.appArmorValidator = apparmor.NewValidator(containerRuntime)
klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewAppArmorAdmitHandler(klet.appArmorValidator))
klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewNoNewPrivsAdmitHandler(klet.containerRuntime))
if utilfeature.DefaultFeatureGate.Enabled(features.Accelerators) {
if containerRuntime == kubetypes.DockerContainerRuntime {
glog.Warningln("Accelerators feature is deprecated and will be removed in v1.11. Please use device plugins instead. They can be enabled using the DevicePlugins feature gate.")
if klet.gpuManager, err = nvidia.NewNvidiaGPUManager(klet, kubeDeps.DockerClientConfig); err != nil {
return nil, err
}
} else {
glog.Errorf("Accelerators feature is supported with docker runtime only. Disabling this feature internally.")
}
}
// Set GPU manager to a stub implementation if it is not enabled or cannot be supported.
if klet.gpuManager == nil {
klet.gpuManager = gpu.NewGPUManagerStub()
}
// Finally, put the most recent version of the config on the Kubelet, so
// people can see how it was configured.
klet.kubeletConfiguration = *kubeCfg
@@ -935,6 +891,9 @@ type Kubelet struct {
iptClient utilipt.Interface
rootDirectory string

// onRepeatedHeartbeatFailure is called when a heartbeat operation fails more than once. optional.
onRepeatedHeartbeatFailure func()

// podWorkers handle syncing Pods in response to events.
podWorkers PodWorkers

@@ -989,9 +948,6 @@ type Kubelet struct {
// Volume plugins.
volumePluginMgr *volume.VolumePluginMgr

// Network plugin.
networkPlugin network.NetworkPlugin

// Handles container probing.
probeManager prober.Manager
// Manages container health check results.
@@ -1038,6 +994,14 @@ type Kubelet struct {

// Cloud provider interface.
cloud cloudprovider.Interface
// To keep exclusive access to the cloudproviderRequestParallelism
cloudproviderRequestMux sync.Mutex
// Keep the count of requests processed in parallel (expected to be 1 at most at a given time)
cloudproviderRequestParallelism chan int
// Sync with finished requests
cloudproviderRequestSync chan int
// Request timeout
cloudproviderRequestTimeout time.Duration

// Indicates that the node initialization happens in an external cloud controller
externalCloudProvider bool
@@ -1047,9 +1011,15 @@ type Kubelet struct {
// The name of the container runtime
containerRuntimeName string

// redirectContainerStreaming enables container streaming redirect.
redirectContainerStreaming bool

// Container runtime.
containerRuntime kubecontainer.Runtime

// Streaming runtime handles container streaming.
streamingRuntime kubecontainer.StreamingRuntime

// Container runtime service (needed by container runtime Start()).
// TODO(CD): try to make this available without holding a reference in this
// struct. For example, by adding a getter to generic runtime.
@@ -1126,6 +1096,9 @@ type Kubelet struct {
// If non-nil, use this IP address for the node
nodeIP net.IP

// use this function to validate the kubelet nodeIP
nodeIPValidator func(net.IP) error

// If non-nil, this is a unique identifier for the node in an external database, eg. cloudprovider
providerID string

@@ -1185,9 +1158,6 @@ type Kubelet struct {
// experimental behavior is desired.
experimentalHostUserNamespaceDefaulting bool

// GPU Manager
gpuManager gpu.GPUManager

// dockerLegacyService contains some legacy methods for backward compatibility.
// It should be set only when docker is using non json-file logging driver.
dockerLegacyService dockershim.DockerLegacyService
@@ -1195,15 +1165,23 @@ type Kubelet struct {
// StatsProvider provides the node and the container stats.
*stats.StatsProvider

// containerized should be set to true if the kubelet is running in a container
containerized bool

// This flag, if set, instructs the kubelet to keep volumes from terminated pods mounted to the node.
// This can be useful for debugging volume related issues.
keepTerminatedPodVolumes bool // DEPRECATED

// pluginwatcher is a utility for Kubelet to register different types of node-level plugins
// such as device plugins or CSI plugins. It discovers plugins by monitoring inotify events under the
// directory returned by kubelet.getPluginsDir()
pluginWatcher pluginwatcher.Watcher

// This flag sets a maximum number of images to report in the node status.
nodeStatusMaxImages int32

// This flag indicates that kubelet should start plugin watcher utility server for discovering Kubelet plugins
enablePluginsWatcher bool
}

func allLocalIPsWithoutLoopback() ([]net.IP, error) {
func allGlobalUnicastIPs() ([]net.IP, error) {
interfaces, err := net.Interfaces()
if err != nil {
return nil, fmt.Errorf("could not list network interfaces: %v", err)
@@ -1217,7 +1195,7 @@ func allLocalIPsWithoutLoopback() ([]net.IP, error) {
for _, address := range addresses {
switch v := address.(type) {
case *net.IPNet:
if !v.IP.IsLoopback() {
if v.IP.IsGlobalUnicast() {
ips = append(ips, v.IP)
}
}
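In the hunk above, allLocalIPsWithoutLoopback becomes allGlobalUnicastIPs: instead of merely excluding loopback, the helper now keeps only global unicast addresses, which also drops link-local and multicast addresses from the certificate IP list. A small self-contained check of what net.IP.IsGlobalUnicast accepts:

package main

import (
    "fmt"
    "net"
)

func main() {
    for _, s := range []string{
        "192.0.2.10",   // global unicast: kept
        "2001:db8::1",  // global unicast: kept
        "127.0.0.1",    // loopback: dropped
        "169.254.10.1", // IPv4 link-local: dropped
        "fe80::1",      // IPv6 link-local: dropped
        "224.0.0.251",  // multicast: dropped
    } {
        ip := net.ParseIP(s)
        fmt.Printf("%-15s IsGlobalUnicast=%v\n", s, ip.IsGlobalUnicast())
    }
}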
@@ -1266,6 +1244,12 @@ func (kl *Kubelet) StartGarbageCollection() {
}
}, ContainerGCPeriod, wait.NeverStop)

// when the high threshold is set to 100, stub the image GC manager
if kl.kubeletConfiguration.ImageGCHighThresholdPercent == 100 {
glog.V(2).Infof("ImageGCHighThresholdPercent is set 100, Disable image GC")
return
}

prevImageGCFailed := false
go wait.Until(func() {
if err := kl.imageManager.GarbageCollect(); err != nil {
@@ -1310,8 +1294,8 @@ func (kl *Kubelet) initializeModules() error {
// Start the image manager.
kl.imageManager.Start()

// Start the certificate manager.
if utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletServerCertificate) {
// Start the certificate manager if it was enabled.
if kl.serverCertificateManager != nil {
kl.serverCertificateManager.Start()
}

@@ -1320,11 +1304,6 @@ func (kl *Kubelet) initializeModules() error {
return fmt.Errorf("Failed to start OOM watcher %v", err)
}

// Initialize GPUs
if err := kl.gpuManager.Start(); err != nil {
glog.Errorf("Failed to start gpuManager %v", err)
}

// Start resource analyzer
kl.resourceAnalyzer.Start()

@@ -1338,8 +1317,6 @@ func (kl *Kubelet) initializeRuntimeDependentModules() {
// TODO(random-liu): Add backoff logic in the babysitter
glog.Fatalf("Failed to start cAdvisor %v", err)
}
// eviction manager must start after cadvisor because it needs to know if the container runtime has a dedicated imagefs
kl.evictionManager.Start(kl.StatsProvider, kl.GetActivePods, kl.podResourcesAreReclaimed, evictionMonitoringPeriod)

// trigger on-demand stats collection once so that we have capacity information for ephemeral storage.
// ignore any errors, since if stats collection is not successful, the container manager will fail to start below.
@@ -1355,9 +1332,22 @@ func (kl *Kubelet) initializeRuntimeDependentModules() {
// Fail kubelet and rely on the babysitter to retry starting kubelet.
glog.Fatalf("Failed to start ContainerManager %v", err)
}
// eviction manager must start after cadvisor because it needs to know if the container runtime has a dedicated imagefs
kl.evictionManager.Start(kl.StatsProvider, kl.GetActivePods, kl.podResourcesAreReclaimed, evictionMonitoringPeriod)

// container log manager must start after container runtime is up to retrieve information from container runtime
// and inform container to reopen log file after log rotation.
kl.containerLogManager.Start()
if kl.enablePluginsWatcher {
// Adding Registration Callback function for CSI Driver
kl.pluginWatcher.AddHandler("CSIPlugin", csi.RegistrationCallback)
// Start the plugin watcher
glog.V(4).Infof("starting watcher")
if err := kl.pluginWatcher.Start(); err != nil {
kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.KubeletSetupFailed, err.Error())
glog.Fatalf("failed to start Plugin Watcher. err: %v", err)
}
}
}

// Run starts the kubelet reacting to config updates
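This is the CSI-relevant piece of the vendor bump: when the KubeletPluginsWatcher feature gate is on, the kubelet registers csi.RegistrationCallback for the "CSIPlugin" type and starts a watcher over its plugins directory, so CSI drivers announce themselves by creating a registration socket there instead of being compiled into the kubelet. Purely as an illustration of that discovery mechanism (not the kubelet's pluginwatcher API), here is a directory watch with fsnotify that reacts to new sockets appearing under a plugins directory; the path and the ".sock" suffix check are assumptions for the sketch.

package main

import (
    "log"
    "path/filepath"

    "github.com/fsnotify/fsnotify"
)

func main() {
    // pluginsDir stands in for the directory the kubelet watches on a node.
    pluginsDir := "/var/lib/kubelet/plugins"

    watcher, err := fsnotify.NewWatcher()
    if err != nil {
        log.Fatal(err)
    }
    defer watcher.Close()

    if err := watcher.Add(pluginsDir); err != nil {
        log.Fatal(err)
    }

    for {
        select {
        case event := <-watcher.Events:
            // A plugin announces itself by creating a unix socket in the
            // watched directory; a real implementation would then dial the
            // socket and run a registration handshake over gRPC.
            if event.Op&fsnotify.Create == fsnotify.Create && filepath.Ext(event.Name) == ".sock" {
                log.Printf("plugin socket appeared: %s", event.Name)
            }
        case err := <-watcher.Errors:
            log.Printf("watch error: %v", err)
        }
    }
}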
@@ -1381,7 +1371,6 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
// Start syncing node status immediately, this may set up things the runtime needs to run.
go wait.Until(kl.syncNodeStatus, kl.nodeStatusUpdateFrequency, wait.NeverStop)
}
go wait.Until(kl.syncNetworkStatus, 30*time.Second, wait.NeverStop)
go wait.Until(kl.updateRuntimeUp, 5*time.Second, wait.NeverStop)

// Start loop to sync iptables util rules
@@ -1407,13 +1396,6 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
kl.syncLoop(updates, kl)
}

// GetKubeClient returns the Kubernetes client.
// TODO: This is currently only required by network plugins. Replace
// with more specific methods.
func (kl *Kubelet) GetKubeClient() clientset.Interface {
return kl.kubeClient
}

// syncPod is the transaction script for the sync of a single pod.
//
// Arguments:
@@ -2065,11 +2047,18 @@ func (kl *Kubelet) HandlePodRemoves(pods []*v1.Pod) {
// HandlePodReconcile is the callback in the SyncHandler interface for pods
// that should be reconciled.
func (kl *Kubelet) HandlePodReconcile(pods []*v1.Pod) {
start := kl.clock.Now()
for _, pod := range pods {
// Update the pod in pod manager, status manager will do periodically reconcile according
// to the pod manager.
kl.podManager.UpdatePod(pod)

// Reconcile Pod "Ready" condition if necessary. Trigger sync pod for reconciliation.
if status.NeedToReconcilePodReadiness(pod) {
mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
kl.dispatchWork(pod, kubetypes.SyncPodSync, mirrorPod, start)
}

// After an evicted pod is synced, all dead containers in the pod can be removed.
if eviction.PodIsEvicted(pod.Status) {
if podStatus, err := kl.podCache.Get(pod.UID); err == nil {
@@ -2108,53 +2097,35 @@ func (kl *Kubelet) updateRuntimeUp() {
glog.Errorf("Container runtime sanity check failed: %v", err)
return
}
// rkt uses the legacy, non-CRI integration. Don't check the runtime
// conditions for it.
if kl.containerRuntimeName != kubetypes.RktContainerRuntime {
if s == nil {
glog.Errorf("Container runtime status is nil")
return
}
// Periodically log the whole runtime status for debugging.
// TODO(random-liu): Consider to send node event when optional
// condition is unmet.
glog.V(4).Infof("Container runtime status: %v", s)
networkReady := s.GetRuntimeCondition(kubecontainer.NetworkReady)
if networkReady == nil || !networkReady.Status {
glog.Errorf("Container runtime network not ready: %v", networkReady)
kl.runtimeState.setNetworkState(fmt.Errorf("runtime network not ready: %v", networkReady))
} else {
// Set nil if the container runtime network is ready.
kl.runtimeState.setNetworkState(nil)
}
// TODO(random-liu): Add runtime error in runtimeState, and update it
// when runtime is not ready, so that the information in RuntimeReady
// condition will be propagated to NodeReady condition.
runtimeReady := s.GetRuntimeCondition(kubecontainer.RuntimeReady)
// If RuntimeReady is not set or is false, report an error.
if runtimeReady == nil || !runtimeReady.Status {
glog.Errorf("Container runtime not ready: %v", runtimeReady)
return
}
if s == nil {
glog.Errorf("Container runtime status is nil")
return
}
// Periodically log the whole runtime status for debugging.
// TODO(random-liu): Consider to send node event when optional
// condition is unmet.
glog.V(4).Infof("Container runtime status: %v", s)
networkReady := s.GetRuntimeCondition(kubecontainer.NetworkReady)
if networkReady == nil || !networkReady.Status {
glog.Errorf("Container runtime network not ready: %v", networkReady)
kl.runtimeState.setNetworkState(fmt.Errorf("runtime network not ready: %v", networkReady))
} else {
// Set nil if the container runtime network is ready.
kl.runtimeState.setNetworkState(nil)
}
// TODO(random-liu): Add runtime error in runtimeState, and update it
// when runtime is not ready, so that the information in RuntimeReady
// condition will be propagated to NodeReady condition.
runtimeReady := s.GetRuntimeCondition(kubecontainer.RuntimeReady)
// If RuntimeReady is not set or is false, report an error.
if runtimeReady == nil || !runtimeReady.Status {
glog.Errorf("Container runtime not ready: %v", runtimeReady)
return
}
kl.oneTimeInitializer.Do(kl.initializeRuntimeDependentModules)
kl.runtimeState.setRuntimeSync(kl.clock.Now())
}

// updateCloudProviderFromMachineInfo updates the node's provider ID field
// from the given cadvisor machine info.
func (kl *Kubelet) updateCloudProviderFromMachineInfo(node *v1.Node, info *cadvisorapi.MachineInfo) {
if info.CloudProvider != cadvisorapi.UnknownProvider &&
info.CloudProvider != cadvisorapi.Baremetal {
// The cloud providers from pkg/cloudprovider/providers/* that update ProviderID
// will use the format of cloudprovider://project/availability_zone/instance_name
// here we only have the cloudprovider and the instance name so we leave project
// and availability zone empty for compatibility.
node.Spec.ProviderID = strings.ToLower(string(info.CloudProvider)) +
":////" + string(info.InstanceID)
}
}

// GetConfiguration returns the KubeletConfiguration used to configure the kubelet.
func (kl *Kubelet) GetConfiguration() kubeletconfiginternal.KubeletConfiguration {
return kl.kubeletConfiguration
@@ -2166,11 +2137,6 @@ func (kl *Kubelet) BirthCry() {
kl.recorder.Eventf(kl.nodeRef, v1.EventTypeNormal, events.StartingKubelet, "Starting kubelet.")
}

// StreamingConnectionIdleTimeout returns the timeout for streaming connections to the HTTP server.
func (kl *Kubelet) StreamingConnectionIdleTimeout() time.Duration {
return kl.streamingConnectionIdleTimeout
}

// ResyncInterval returns the interval used for periodic syncs.
func (kl *Kubelet) ResyncInterval() time.Duration {
return kl.resyncInterval
@@ -2178,12 +2144,12 @@ func (kl *Kubelet) ResyncInterval() time.Duration {

// ListenAndServe runs the kubelet HTTP server.
func (kl *Kubelet) ListenAndServe(address net.IP, port uint, tlsOptions *server.TLSOptions, auth server.AuthInterface, enableDebuggingHandlers, enableContentionProfiling bool) {
server.ListenAndServeKubeletServer(kl, kl.resourceAnalyzer, address, port, tlsOptions, auth, enableDebuggingHandlers, enableContentionProfiling, kl.containerRuntime, kl.criHandler)
server.ListenAndServeKubeletServer(kl, kl.resourceAnalyzer, address, port, tlsOptions, auth, enableDebuggingHandlers, enableContentionProfiling, kl.redirectContainerStreaming, kl.criHandler)
}

// ListenAndServeReadOnly runs the kubelet HTTP server in read-only mode.
func (kl *Kubelet) ListenAndServeReadOnly(address net.IP, port uint) {
server.ListenAndServeKubeletReadOnlyServer(kl, kl.resourceAnalyzer, address, port, kl.containerRuntime)
server.ListenAndServeKubeletReadOnlyServer(kl, kl.resourceAnalyzer, address, port)
}

// Delete the eligible dead container instances in a pod. Depending on the configuration, the latest dead containers may be kept around.
@@ -2207,19 +2173,23 @@ func isSyncPodWorthy(event *pleg.PodLifecycleEvent) bool {
}

// Gets the streaming server configuration to use with in-process CRI shims.
func getStreamingConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies) *streaming.Config {
func getStreamingConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, crOptions *config.ContainerRuntimeOptions) *streaming.Config {
config := &streaming.Config{
// Use a relative redirect (no scheme or host).
BaseURL: &url.URL{
Path: "/cri/",
},
StreamIdleTimeout: kubeCfg.StreamingConnectionIdleTimeout.Duration,
StreamCreationTimeout: streaming.DefaultConfig.StreamCreationTimeout,
SupportedRemoteCommandProtocols: streaming.DefaultConfig.SupportedRemoteCommandProtocols,
SupportedPortForwardProtocols: streaming.DefaultConfig.SupportedPortForwardProtocols,
}
if kubeDeps.TLSOptions != nil {
config.TLSConfig = kubeDeps.TLSOptions.Config
if !crOptions.RedirectContainerStreaming {
config.Addr = net.JoinHostPort("localhost", "0")
} else {
// Use a relative redirect (no scheme or host).
config.BaseURL = &url.URL{
Path: "/cri/",
}
if kubeDeps.TLSOptions != nil {
config.TLSConfig = kubeDeps.TLSOptions.Config
}
}
return config
}
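getStreamingConfig now branches on RedirectContainerStreaming: with redirection disabled, the in-process CRI shim's streaming server binds to an ephemeral localhost port and the kubelet itself proxies exec/attach/port-forward traffic to it, keeping the kubelet (and its TLS and auth handling) in the request path; with redirection enabled, the kubelet keeps the old behaviour and answers with a redirect to the relative /cri/ endpoint. The following is a rough sketch of those two modes in an ordinary HTTP handler; proxyTarget, redirectPath, and the listen address are illustrative values, not kubelet internals.

package main

import (
    "log"
    "net/http"
    "net/http/httputil"
    "net/url"
)

func main() {
    redirectContainerStreaming := false // toggle to compare the two modes

    // Where the in-process CRI shim would serve streams when we proxy.
    proxyTarget, _ := url.Parse("http://localhost:12345")
    redirectPath := "/cri/exec/abc123"

    var handler http.Handler
    if redirectContainerStreaming {
        // Old behaviour: tell the client where to go and step out of the path.
        handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            http.Redirect(w, r, redirectPath, http.StatusFound)
        })
    } else {
        // New behaviour: terminate the request here and proxy it to the shim,
        // so this server stays in the data path for every stream.
        handler = httputil.NewSingleHostReverseProxy(proxyTarget)
    }

    log.Fatal(http.ListenAndServe("127.0.0.1:10250", handler))
}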