mirror of https://github.com/ceph/ceph-csi.git
synced 2025-06-14 18:53:35 +00:00

vendor update for CSI 0.3.0

vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go (generated, vendored): 406 changed lines
@@ -30,7 +30,6 @@ import (
 	"sort"
 	"strings"
 	"sync"
-	"time"
 
 	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
@@ -41,7 +40,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	utilvalidation "k8s.io/apimachinery/pkg/util/validation"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	"k8s.io/client-go/tools/remotecommand"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/pkg/api/v1/resource"
 	podshelper "k8s.io/kubernetes/pkg/apis/core/pods"
@@ -60,13 +58,18 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/status"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
-	utilfile "k8s.io/kubernetes/pkg/util/file"
+	mountutil "k8s.io/kubernetes/pkg/util/mount"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
 	volumevalidation "k8s.io/kubernetes/pkg/volume/validation"
 	"k8s.io/kubernetes/third_party/forked/golang/expansion"
 )
 
+const (
+	managedHostsHeader                = "# Kubernetes-managed hosts file.\n"
+	managedHostsHeaderWithHostNetwork = "# Kubernetes-managed hosts file (host network).\n"
+)
+
 // Get a list of pods that have data directories.
 func (kl *Kubelet) listPodsFromDisk() ([]types.UID, error) {
 	podInfos, err := ioutil.ReadDir(kl.getPodsDir())
@@ -89,43 +92,6 @@ func (kl *Kubelet) GetActivePods() []*v1.Pod {
 	return activePods
 }
 
-// makeGPUDevices determines the devices for the given container.
-// Experimental.
-func (kl *Kubelet) makeGPUDevices(pod *v1.Pod, container *v1.Container) ([]kubecontainer.DeviceInfo, error) {
-	if container.Resources.Limits.NvidiaGPU().IsZero() {
-		return nil, nil
-	}
-
-	nvidiaGPUPaths, err := kl.gpuManager.AllocateGPU(pod, container)
-	if err != nil {
-		return nil, err
-	}
-	var devices []kubecontainer.DeviceInfo
-	for _, path := range nvidiaGPUPaths {
-		// Devices have to be mapped one to one because of nvidia CUDA library requirements.
-		devices = append(devices, kubecontainer.DeviceInfo{PathOnHost: path, PathInContainer: path, Permissions: "mrw"})
-	}
-
-	return devices, nil
-}
-
-func makeAbsolutePath(goos, path string) string {
-	if goos != "windows" {
-		return "/" + path
-	}
-	// These are all for windows
-	// If there is a colon, give up.
-	if strings.Contains(path, ":") {
-		return path
-	}
-	// If there is a slash, but no drive, add 'c:'
-	if strings.HasPrefix(path, "/") || strings.HasPrefix(path, "\\") {
-		return "c:" + path
-	}
-	// Otherwise, add 'c:\'
-	return "c:\\" + path
-}
-
 // makeBlockVolumes maps the raw block devices specified in the path of the container
 // Experimental
 func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVolumes kubecontainer.VolumeMap, blkutil volumepathhandler.BlockVolumePathHandler) ([]kubecontainer.DeviceInfo, error) {
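The deleted makeAbsolutePath helper is not dropped from the vendored tree: a later hunk shows its call site switching to volumeutil.MakeAbsolutePath. As a reference, here is a self-contained sketch of the same normalization rules, with the function body copied from the removed lines above and sample inputs in main (the local makeAbsolutePath name exists only for this illustration):

```go
package main

import (
	"fmt"
	"strings"
)

// Copy of the removed helper shown above, for illustration only.
func makeAbsolutePath(goos, path string) string {
	if goos != "windows" {
		return "/" + path
	}
	// Windows: a colon means the path already names a drive; give up.
	if strings.Contains(path, ":") {
		return path
	}
	// A rooted path without a drive letter gets 'c:' prepended.
	if strings.HasPrefix(path, "/") || strings.HasPrefix(path, "\\") {
		return "c:" + path
	}
	// Otherwise assume a path relative to the C drive root.
	return "c:\\" + path
}

func main() {
	fmt.Println(makeAbsolutePath("linux", "var/lib"))   // /var/lib
	fmt.Println(makeAbsolutePath("windows", "d:/data")) // d:/data
	fmt.Println(makeAbsolutePath("windows", "/data"))   // c:/data
	fmt.Println(makeAbsolutePath("windows", "data"))    // c:\data
}
```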
@@ -160,7 +126,8 @@ func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVol
 }
 
 // makeMounts determines the mount points for the given container.
-func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap) ([]kubecontainer.Mount, error) {
+func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap, mounter mountutil.Interface, expandEnvs []kubecontainer.EnvVar) ([]kubecontainer.Mount, func(), error) {
+
 	// Kubernetes only mounts on /etc/hosts if:
 	// - container is not an infrastructure (pause) container
 	// - container is not already mounting on /etc/hosts
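The new makeMounts signature threads two extras through the call: a mountutil.Interface for host filesystem operations and expandEnvs, which feeds the VolumeSubpathEnvExpansion path shown two hunks below. A minimal sketch of what $(VAR) expansion in a subPath amounts to, assuming a local EnvVar type and a simplified stand-in for the real kubecontainer.ExpandContainerVolumeMounts helper:

```go
package main

import (
	"fmt"
	"strings"
)

// EnvVar mirrors the name/value pairs passed to makeMounts via expandEnvs.
type EnvVar struct {
	Name, Value string
}

// expandSubPath is a simplified stand-in for
// kubecontainer.ExpandContainerVolumeMounts: it replaces $(VAR)
// references in a subPath with values from the env list.
func expandSubPath(subPath string, envs []EnvVar) string {
	for _, e := range envs {
		subPath = strings.ReplaceAll(subPath, "$("+e.Name+")", e.Value)
	}
	return subPath
}

func main() {
	envs := []EnvVar{{Name: "POD_NAME", Value: "web-0"}}
	fmt.Println(expandSubPath("logs/$(POD_NAME)", envs)) // logs/web-0
}
```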
@@ -170,13 +137,14 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
 	mountEtcHostsFile := len(podIP) > 0 && runtime.GOOS != "windows"
 	glog.V(3).Infof("container: %v/%v/%v podIP: %q creating hosts mount: %v", pod.Namespace, pod.Name, container.Name, podIP, mountEtcHostsFile)
 	mounts := []kubecontainer.Mount{}
-	for _, mount := range container.VolumeMounts {
+	var cleanupAction func() = nil
+	for i, mount := range container.VolumeMounts {
 		// do not mount /etc/hosts if container is already mounting on the path
 		mountEtcHostsFile = mountEtcHostsFile && (mount.MountPath != etcHostsPath)
 		vol, ok := podVolumes[mount.Name]
 		if !ok || vol.Mounter == nil {
 			glog.Errorf("Mount cannot be satisfied for container %q, because the volume is missing or the volume mounter is nil: %+v", container.Name, mount)
-			return nil, fmt.Errorf("cannot find volume %q to mount into container %q", mount.Name, container.Name)
+			return nil, cleanupAction, fmt.Errorf("cannot find volume %q to mount into container %q", mount.Name, container.Name)
 		}
 
 		relabelVolume := false
@@ -189,44 +157,61 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
 		}
 		hostPath, err := volumeutil.GetPath(vol.Mounter)
 		if err != nil {
-			return nil, err
+			return nil, cleanupAction, err
 		}
 		if mount.SubPath != "" {
+			if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeSubpath) {
+				return nil, cleanupAction, fmt.Errorf("volume subpaths are disabled")
+			}
+
+			// Expand subpath variables
+			if utilfeature.DefaultFeatureGate.Enabled(features.VolumeSubpathEnvExpansion) {
+				mount.SubPath = kubecontainer.ExpandContainerVolumeMounts(mount, expandEnvs)
+			}
+
 			if filepath.IsAbs(mount.SubPath) {
-				return nil, fmt.Errorf("error SubPath `%s` must not be an absolute path", mount.SubPath)
+				return nil, cleanupAction, fmt.Errorf("error SubPath `%s` must not be an absolute path", mount.SubPath)
 			}
 
 			err = volumevalidation.ValidatePathNoBacksteps(mount.SubPath)
 			if err != nil {
-				return nil, fmt.Errorf("unable to provision SubPath `%s`: %v", mount.SubPath, err)
+				return nil, cleanupAction, fmt.Errorf("unable to provision SubPath `%s`: %v", mount.SubPath, err)
 			}
 
-			fileinfo, err := os.Lstat(hostPath)
-			if err != nil {
-				return nil, err
-			}
-			perm := fileinfo.Mode()
+			volumePath := hostPath
+			hostPath = filepath.Join(volumePath, mount.SubPath)
 
-			hostPath = filepath.Join(hostPath, mount.SubPath)
-
-			if subPathExists, err := utilfile.FileOrSymlinkExists(hostPath); err != nil {
+			if subPathExists, err := mounter.ExistsPath(hostPath); err != nil {
 				glog.Errorf("Could not determine if subPath %s exists; will not attempt to change its permissions", hostPath)
 			} else if !subPathExists {
 				// Create the sub path now because if it's auto-created later when referenced, it may have an
 				// incorrect ownership and mode. For example, the sub path directory must have at least g+rwx
 				// when the pod specifies an fsGroup, and if the directory is not created here, Docker will
 				// later auto-create it with the incorrect mode 0750
-				if err := os.MkdirAll(hostPath, perm); err != nil {
-					glog.Errorf("failed to mkdir:%s", hostPath)
-					return nil, err
+				// Make extra care not to escape the volume!
+				perm, err := mounter.GetMode(volumePath)
+				if err != nil {
+					return nil, cleanupAction, err
 				}
-
-				// chmod the sub path because umask may have prevented us from making the sub path with the same
-				// permissions as the mounter path
-				if err := os.Chmod(hostPath, perm); err != nil {
-					return nil, err
+				if err := mounter.SafeMakeDir(mount.SubPath, volumePath, perm); err != nil {
+					// Don't pass detailed error back to the user because it could give information about host filesystem
+					glog.Errorf("failed to create subPath directory for volumeMount %q of container %q: %v", mount.Name, container.Name, err)
+					return nil, cleanupAction, fmt.Errorf("failed to create subPath directory for volumeMount %q of container %q", mount.Name, container.Name)
 				}
 			}
+			hostPath, cleanupAction, err = mounter.PrepareSafeSubpath(mountutil.Subpath{
+				VolumeMountIndex: i,
+				Path:             hostPath,
+				VolumeName:       vol.InnerVolumeSpecName,
+				VolumePath:       volumePath,
+				PodDir:           podDir,
+				ContainerName:    container.Name,
+			})
+			if err != nil {
+				// Don't pass detailed error back to the user because it could give information about host filesystem
+				glog.Errorf("failed to prepare subPath for volumeMount %q of container %q: %v", mount.Name, container.Name, err)
+				return nil, cleanupAction, fmt.Errorf("failed to prepare subPath for volumeMount %q of container %q", mount.Name, container.Name)
+			}
 		}
 
 		// Docker Volume Mounts fail on Windows if it is not of the form C:/
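Replacing os.MkdirAll/os.Chmod with mounter.GetMode, mounter.SafeMakeDir, and mounter.PrepareSafeSubpath is the subpath-escape hardening: the sub path is created and prepared so it cannot follow a symlink out of the volume. The real implementation in pkg/util/mount is considerably more involved (it resolves symlinks and works on file descriptors to defeat races); the sketch below only illustrates the basic containment invariant behind "make extra care not to escape the volume", using a hypothetical withinVolume check:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// withinVolume resolves subPath under volumePath and rejects any result
// that lexically escapes the volume root. Illustrative only: it does not
// resolve symlinks, which is what the vendored SafeMakeDir handles.
func withinVolume(volumePath, subPath string) (string, error) {
	hostPath := filepath.Join(volumePath, subPath) // Join also Cleans the result
	if hostPath != volumePath && !strings.HasPrefix(hostPath, volumePath+string(filepath.Separator)) {
		return "", fmt.Errorf("subpath %q escapes volume %q", subPath, volumePath)
	}
	return hostPath, nil
}

func main() {
	fmt.Println(withinVolume("/var/lib/kubelet/vol1", "logs"))      // /var/lib/kubelet/vol1/logs <nil>
	fmt.Println(withinVolume("/var/lib/kubelet/vol1", "../../etc")) // error: escapes volume
}
```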
@@ -237,12 +222,12 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
 			}
 		}
 		if !filepath.IsAbs(containerPath) {
-			containerPath = makeAbsolutePath(runtime.GOOS, containerPath)
+			containerPath = volumeutil.MakeAbsolutePath(runtime.GOOS, containerPath)
 		}
 
 		propagation, err := translateMountPropagation(mount.MountPropagation)
 		if err != nil {
-			return nil, err
+			return nil, cleanupAction, err
 		}
 		glog.V(5).Infof("Pod %q container %q mount %q has propagation %q", format.Pod(pod), container.Name, mount.Name, propagation)
 
@@ -261,11 +246,11 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
 		hostAliases := pod.Spec.HostAliases
 		hostsMount, err := makeHostsMount(podDir, podIP, hostName, hostDomain, hostAliases, pod.Spec.HostNetwork)
 		if err != nil {
-			return nil, err
+			return nil, cleanupAction, err
 		}
 		mounts = append(mounts, *hostsMount)
 	}
-	return mounts, nil
+	return mounts, cleanupAction, nil
 }
 
 // translateMountPropagation transforms v1.MountPropagationMode to
@@ -283,12 +268,14 @@ func translateMountPropagation(mountMode *v1.MountPropagationMode) (runtimeapi.M
 	}
 	switch {
 	case mountMode == nil:
-		// HostToContainer is the default
-		return runtimeapi.MountPropagation_PROPAGATION_HOST_TO_CONTAINER, nil
+		// PRIVATE is the default
+		return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil
 	case *mountMode == v1.MountPropagationHostToContainer:
 		return runtimeapi.MountPropagation_PROPAGATION_HOST_TO_CONTAINER, nil
 	case *mountMode == v1.MountPropagationBidirectional:
 		return runtimeapi.MountPropagation_PROPAGATION_BIDIRECTIONAL, nil
+	case *mountMode == v1.MountPropagationNone:
+		return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil
 	default:
 		return 0, fmt.Errorf("invalid MountPropagation mode: %q", mountMode)
 	}
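This hunk changes the default mount propagation: a nil MountPropagation now maps to PROPAGATION_PRIVATE rather than PROPAGATION_HOST_TO_CONTAINER, and the explicit None mode is handled. A runnable sketch of the same switch, with simplified local stand-ins for v1.MountPropagationMode and the runtimeapi constants:

```go
package main

import "fmt"

// Simplified stand-ins for the v1 and runtimeapi types used above.
type MountPropagationMode string

const (
	MountPropagationHostToContainer MountPropagationMode = "HostToContainer"
	MountPropagationBidirectional   MountPropagationMode = "Bidirectional"
	MountPropagationNone            MountPropagationMode = "None"
)

type RuntimePropagation int

const (
	PropagationPrivate RuntimePropagation = iota
	PropagationHostToContainer
	PropagationBidirectional
)

// translate mirrors the post-change behavior: nil now maps to PRIVATE,
// and None is an explicit case.
func translate(mode *MountPropagationMode) (RuntimePropagation, error) {
	switch {
	case mode == nil:
		return PropagationPrivate, nil
	case *mode == MountPropagationHostToContainer:
		return PropagationHostToContainer, nil
	case *mode == MountPropagationBidirectional:
		return PropagationBidirectional, nil
	case *mode == MountPropagationNone:
		return PropagationPrivate, nil
	default:
		return 0, fmt.Errorf("invalid MountPropagation mode: %q", *mode)
	}
}

func main() {
	p, _ := translate(nil)
	fmt.Println(p == PropagationPrivate) // true: the new default
}
```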
@@ -338,15 +325,18 @@ func nodeHostsFileContent(hostsFilePath string, hostAliases []v1.HostAlias) ([]b
 	if err != nil {
 		return nil, err
 	}
-	hostsFileContent = append(hostsFileContent, hostsEntriesFromHostAliases(hostAliases)...)
-	return hostsFileContent, nil
+	var buffer bytes.Buffer
+	buffer.WriteString(managedHostsHeaderWithHostNetwork)
+	buffer.Write(hostsFileContent)
+	buffer.Write(hostsEntriesFromHostAliases(hostAliases))
+	return buffer.Bytes(), nil
 }
 
 // managedHostsFileContent generates the content of the managed etc hosts based on Pod IP and other
 // information.
 func managedHostsFileContent(hostIP, hostName, hostDomainName string, hostAliases []v1.HostAlias) []byte {
 	var buffer bytes.Buffer
-	buffer.WriteString("# Kubernetes-managed hosts file.\n")
+	buffer.WriteString(managedHostsHeader)
 	buffer.WriteString("127.0.0.1\tlocalhost\n") // ipv4 localhost
 	buffer.WriteString("::1\tlocalhost ip6-localhost ip6-loopback\n") // ipv6 localhost
 	buffer.WriteString("fe00::0\tip6-localnet\n")
@@ -358,9 +348,8 @@ func managedHostsFileContent(hostIP, hostName, hostDomainName string, hostAliase
 	} else {
 		buffer.WriteString(fmt.Sprintf("%s\t%s\n", hostIP, hostName))
 	}
-	hostsFileContent := buffer.Bytes()
-	hostsFileContent = append(hostsFileContent, hostsEntriesFromHostAliases(hostAliases)...)
-	return hostsFileContent
+	buffer.Write(hostsEntriesFromHostAliases(hostAliases))
+	return buffer.Bytes()
 }
 
 func hostsEntriesFromHostAliases(hostAliases []v1.HostAlias) []byte {
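Both hosts-file builders now write through a single bytes.Buffer and the shared header constants instead of concatenating byte slices. A condensed, runnable sketch of that pattern, with a simplified HostAlias stand-in for v1.HostAlias:

```go
package main

import (
	"bytes"
	"fmt"
)

const managedHostsHeader = "# Kubernetes-managed hosts file.\n"

// HostAlias is a simplified stand-in for v1.HostAlias.
type HostAlias struct {
	IP        string
	Hostnames []string
}

// hostsFileContent appends everything, including alias entries, to one
// buffer, mirroring the single-buffer style the change above adopts.
func hostsFileContent(hostIP, hostName string, aliases []HostAlias) []byte {
	var buffer bytes.Buffer
	buffer.WriteString(managedHostsHeader)
	buffer.WriteString("127.0.0.1\tlocalhost\n")
	buffer.WriteString(fmt.Sprintf("%s\t%s\n", hostIP, hostName))
	for _, a := range aliases {
		for _, h := range a.Hostnames {
			buffer.WriteString(fmt.Sprintf("%s\t%s\n", a.IP, h))
		}
	}
	return buffer.Bytes()
}

func main() {
	out := hostsFileContent("10.0.0.5", "web-0",
		[]HostAlias{{IP: "10.0.0.9", Hostnames: []string{"db"}}})
	fmt.Print(string(out))
}
```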
@@ -437,52 +426,44 @@ func (kl *Kubelet) GetPodCgroupParent(pod *v1.Pod) string {
 
 // GenerateRunContainerOptions generates the RunContainerOptions, which can be used by
 // the container runtime to set parameters for launching a container.
-func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, error) {
+func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, func(), error) {
 	opts, err := kl.containerManager.GetResources(pod, container)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
 	cgroupParent := kl.GetPodCgroupParent(pod)
 	opts.CgroupParent = cgroupParent
 	hostname, hostDomainName, err := kl.GeneratePodHostNameAndDomain(pod)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 	opts.Hostname = hostname
 	podName := volumeutil.GetUniquePodName(pod)
 	volumes := kl.volumeManager.GetMountedVolumesForPod(podName)
 
 	opts.PortMappings = kubecontainer.MakePortMappings(container)
-	// TODO(random-liu): Move following convert functions into pkg/kubelet/container
-	devices, err := kl.makeGPUDevices(pod, container)
-	if err != nil {
-		return nil, err
-	}
-	opts.Devices = append(opts.Devices, devices...)
-
 	// TODO: remove feature gate check after no longer needed
 	if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
 		blkutil := volumepathhandler.NewBlockVolumePathHandler()
 		blkVolumes, err := kl.makeBlockVolumes(pod, container, volumes, blkutil)
 		if err != nil {
-			return nil, err
+			return nil, nil, err
 		}
 		opts.Devices = append(opts.Devices, blkVolumes...)
 	}
 
-	mounts, err := makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIP, volumes)
-	if err != nil {
-		return nil, err
-	}
-	opts.Mounts = append(opts.Mounts, mounts...)
-
 	envs, err := kl.makeEnvironmentVariables(pod, container, podIP)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 	opts.Envs = append(opts.Envs, envs...)
 
+	mounts, cleanupAction, err := makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIP, volumes, kl.mounter, opts.Envs)
+	if err != nil {
+		return nil, cleanupAction, err
+	}
+	opts.Mounts = append(opts.Mounts, mounts...)
+
 	// Disabling adding TerminationMessagePath on Windows as these files would be mounted as docker volume and
 	// Docker for Windows has a bug where only directories can be mounted
 	if len(container.TerminationMessagePath) != 0 && runtime.GOOS != "windows" {
@@ -499,7 +480,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai
 		opts.EnableHostUserNamespace = kl.enableHostUserNamespace(pod)
 	}
 
-	return opts, nil
+	return opts, cleanupAction, nil
 }
 
 var masterServices = sets.NewString("kubernetes")
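GenerateRunContainerOptions now returns a cleanup func() alongside the options, and the diff shows it handed back even on the error path out of makeMounts, so callers are expected to invoke it regardless of whether the container ultimately starts. A toy sketch of that caller-side contract, with prepare as a hypothetical stand-in:

```go
package main

import "fmt"

// prepare is a hypothetical stand-in for GenerateRunContainerOptions:
// it may hand back a cleanup callback even alongside an error, which is
// why callers must check cleanup independently of err.
func prepare() (opts string, cleanup func(), err error) {
	cleanup = func() { fmt.Println("releasing prepared subpath mounts") }
	return "opts", cleanup, nil
}

func main() {
	opts, cleanup, err := prepare()
	if cleanup != nil {
		defer cleanup() // runs after the start attempt, success or not
	}
	if err != nil {
		fmt.Println("start failed:", err)
		return
	}
	fmt.Println("starting container with", opts)
}
```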
@@ -921,7 +902,11 @@ func (kl *Kubelet) PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bo
 		return false
 	}
 	if len(runtimeStatus.ContainerStatuses) > 0 {
-		glog.V(3).Infof("Pod %q is terminated, but some containers have not been cleaned up: %+v", format.Pod(pod), runtimeStatus.ContainerStatuses)
+		var statusStr string
+		for _, status := range runtimeStatus.ContainerStatuses {
+			statusStr += fmt.Sprintf("%+v ", *status)
+		}
+		glog.V(3).Infof("Pod %q is terminated, but some containers have not been cleaned up: %s", format.Pod(pod), statusStr)
 		return false
 	}
 	if kl.podVolumesExist(pod.UID) && !kl.keepTerminatedPodVolumes {
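The logging change here works around fmt's behavior on slices of pointers: %+v on a []*T prints the pointer values, not the struct fields, so the new code dereferences each status while building the message. A small demonstration:

```go
package main

import "fmt"

type containerStatus struct{ Name, State string }

func main() {
	statuses := []*containerStatus{{Name: "app", State: "exited"}}

	// Printing a slice of pointers with %+v shows addresses, not fields:
	fmt.Printf("%+v\n", statuses) // e.g. [0xc000010030]

	// Dereferencing each element, as the change above does, shows the data:
	var statusStr string
	for _, s := range statuses {
		statusStr += fmt.Sprintf("%+v ", *s)
	}
	fmt.Println(statusStr) // {Name:app State:exited}
}
```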
@@ -1081,35 +1066,28 @@ func (kl *Kubelet) podKiller() {
 	killing := sets.NewString()
 	// guard for the killing set
 	lock := sync.Mutex{}
-	for {
-		select {
-		case podPair, ok := <-kl.podKillingCh:
-			if !ok {
-				return
-			}
+	for podPair := range kl.podKillingCh {
+		runningPod := podPair.RunningPod
+		apiPod := podPair.APIPod
 
-			runningPod := podPair.RunningPod
-			apiPod := podPair.APIPod
+		lock.Lock()
+		exists := killing.Has(string(runningPod.ID))
+		if !exists {
+			killing.Insert(string(runningPod.ID))
+		}
+		lock.Unlock()
 
-			lock.Lock()
-			exists := killing.Has(string(runningPod.ID))
-			if !exists {
-				killing.Insert(string(runningPod.ID))
-			}
-			lock.Unlock()
-
-			if !exists {
-				go func(apiPod *v1.Pod, runningPod *kubecontainer.Pod) {
-					glog.V(2).Infof("Killing unwanted pod %q", runningPod.Name)
-					err := kl.killPod(apiPod, runningPod, nil, nil)
-					if err != nil {
-						glog.Errorf("Failed killing the pod %q: %v", runningPod.Name, err)
-					}
-					lock.Lock()
-					killing.Delete(string(runningPod.ID))
-					lock.Unlock()
-				}(apiPod, runningPod)
-			}
+		if !exists {
+			go func(apiPod *v1.Pod, runningPod *kubecontainer.Pod) {
+				glog.V(2).Infof("Killing unwanted pod %q", runningPod.Name)
+				err := kl.killPod(apiPod, runningPod, nil, nil)
+				if err != nil {
+					glog.Errorf("Failed killing the pod %q: %v", runningPod.Name, err)
+				}
+				lock.Lock()
+				killing.Delete(string(runningPod.ID))
+				lock.Unlock()
+			}(apiPod, runningPod)
+		}
 	}
 }
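The podKiller rewrite replaces a for/select loop that had a single receive case with a range over the channel, which is equivalent and exits when the channel is closed. A minimal demonstration of the equivalence:

```go
package main

import "fmt"

func main() {
	ch := make(chan int)
	go func() {
		for i := 0; i < 3; i++ {
			ch <- i
		}
		close(ch)
	}()

	// Equivalent to: for { select { case v, ok := <-ch: if !ok { return } ... } }
	// when the select has only one case; range exits once ch is closed.
	for v := range ch {
		fmt.Println("got", v)
	}
}
```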
@@ -1375,14 +1353,12 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
 	}
 	kl.probeManager.UpdatePodStatus(pod.UID, s)
 	s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase))
-	s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.ContainerStatuses, s.Phase))
-	// s (the PodStatus we are creating) will not have a PodScheduled condition yet, because converStatusToAPIStatus()
-	// does not create one. If the existing PodStatus has a PodScheduled condition, then copy it into s and make sure
-	// it is set to true. If the existing PodStatus does not have a PodScheduled condition, then create one that is set to true.
-	if _, oldPodScheduled := podutil.GetPodCondition(&pod.Status, v1.PodScheduled); oldPodScheduled != nil {
-		s.Conditions = append(s.Conditions, *oldPodScheduled)
-	}
-	podutil.UpdatePodCondition(s, &v1.PodCondition{
+	s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.Conditions, s.ContainerStatuses, s.Phase))
+	s.Conditions = append(s.Conditions, status.GenerateContainersReadyCondition(spec, s.ContainerStatuses, s.Phase))
+	// Status manager will take care of the LastTransitionTimestamp, either preserve
+	// the timestamp from apiserver, or set a new one. When kubelet sees the pod,
+	// `PodScheduled` condition must be true.
+	s.Conditions = append(s.Conditions, v1.PodCondition{
 		Type:   v1.PodScheduled,
 		Status: v1.ConditionTrue,
 	})
@@ -1431,6 +1407,12 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontaine
 		true,
 	)
 
+	// Preserves conditions not controlled by kubelet
+	for _, c := range pod.Status.Conditions {
+		if !kubetypes.PodConditionByKubelet(c.Type) {
+			apiPodStatus.Conditions = append(apiPodStatus.Conditions, c)
+		}
+	}
 	return &apiPodStatus
 }
 
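The added loop carries over any pod conditions the kubelet does not own, such as readiness-gate conditions set by external controllers. A sketch of the filter-and-preserve pattern, with ownedByKubelet as an illustrative stand-in for kubetypes.PodConditionByKubelet (the condition list here is an assumption for the example):

```go
package main

import "fmt"

type PodConditionType string

// ownedByKubelet is an illustrative stand-in for
// kubetypes.PodConditionByKubelet.
func ownedByKubelet(t PodConditionType) bool {
	switch t {
	case "Ready", "ContainersReady", "Initialized", "PodScheduled":
		return true
	}
	return false
}

func main() {
	existing := []PodConditionType{"Ready", "example.com/custom-gate", "PodScheduled"}
	var preserved []PodConditionType
	for _, c := range existing {
		if !ownedByKubelet(c) {
			preserved = append(preserved, c) // carried over untouched
		}
	}
	fmt.Println(preserved) // [example.com/custom-gate]
}
```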
@@ -1612,142 +1594,60 @@ func (kl *Kubelet) RunInContainer(podFullName string, podUID types.UID, containe
 	return kl.runner.RunInContainer(container.ID, cmd, 0)
 }
 
-// ExecInContainer executes a command in a container, connecting the supplied
-// stdin/stdout/stderr to the command's IO streams.
-func (kl *Kubelet) ExecInContainer(podFullName string, podUID types.UID, containerName string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error {
-	streamingRuntime, ok := kl.containerRuntime.(kubecontainer.DirectStreamingRuntime)
-	if !ok {
-		return fmt.Errorf("streaming methods not supported by runtime")
-	}
-
-	container, err := kl.findContainer(podFullName, podUID, containerName)
-	if err != nil {
-		return err
-	}
-	if container == nil {
-		return fmt.Errorf("container not found (%q)", containerName)
-	}
-	return streamingRuntime.ExecInContainer(container.ID, cmd, stdin, stdout, stderr, tty, resize, timeout)
-}
-
-// AttachContainer uses the container runtime to attach the given streams to
-// the given container.
-func (kl *Kubelet) AttachContainer(podFullName string, podUID types.UID, containerName string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
-	streamingRuntime, ok := kl.containerRuntime.(kubecontainer.DirectStreamingRuntime)
-	if !ok {
-		return fmt.Errorf("streaming methods not supported by runtime")
-	}
-
-	container, err := kl.findContainer(podFullName, podUID, containerName)
-	if err != nil {
-		return err
-	}
-	if container == nil {
-		return fmt.Errorf("container not found (%q)", containerName)
-	}
-	return streamingRuntime.AttachContainer(container.ID, stdin, stdout, stderr, tty, resize)
-}
-
-// PortForward connects to the pod's port and copies data between the port
-// and the stream.
-func (kl *Kubelet) PortForward(podFullName string, podUID types.UID, port int32, stream io.ReadWriteCloser) error {
-	streamingRuntime, ok := kl.containerRuntime.(kubecontainer.DirectStreamingRuntime)
-	if !ok {
-		return fmt.Errorf("streaming methods not supported by runtime")
-	}
-
-	pods, err := kl.containerRuntime.GetPods(false)
-	if err != nil {
-		return err
-	}
-	// Resolve and type convert back again.
-	// We need the static pod UID but the kubecontainer API works with types.UID.
-	podUID = types.UID(kl.podManager.TranslatePodUID(podUID))
-	pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID)
-	if pod.IsEmpty() {
-		return fmt.Errorf("pod not found (%q)", podFullName)
-	}
-	return streamingRuntime.PortForward(&pod, port, stream)
-}
-
 // GetExec gets the URL the exec will be served from, or nil if the Kubelet will serve it.
 func (kl *Kubelet) GetExec(podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) {
-	switch streamingRuntime := kl.containerRuntime.(type) {
-	case kubecontainer.DirectStreamingRuntime:
-		// Kubelet will serve the exec directly.
-		return nil, nil
-	case kubecontainer.IndirectStreamingRuntime:
-		container, err := kl.findContainer(podFullName, podUID, containerName)
-		if err != nil {
-			return nil, err
-		}
-		if container == nil {
-			return nil, fmt.Errorf("container not found (%q)", containerName)
-		}
-		return streamingRuntime.GetExec(container.ID, cmd, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, streamOpts.TTY)
-	default:
-		return nil, fmt.Errorf("container runtime does not support exec")
-	}
+	container, err := kl.findContainer(podFullName, podUID, containerName)
+	if err != nil {
+		return nil, err
+	}
+	if container == nil {
+		return nil, fmt.Errorf("container not found (%q)", containerName)
+	}
+	return kl.streamingRuntime.GetExec(container.ID, cmd, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, streamOpts.TTY)
 }
 
 // GetAttach gets the URL the attach will be served from, or nil if the Kubelet will serve it.
 func (kl *Kubelet) GetAttach(podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) {
-	switch streamingRuntime := kl.containerRuntime.(type) {
-	case kubecontainer.DirectStreamingRuntime:
-		// Kubelet will serve the attach directly.
-		return nil, nil
-	case kubecontainer.IndirectStreamingRuntime:
-		container, err := kl.findContainer(podFullName, podUID, containerName)
-		if err != nil {
-			return nil, err
-		}
-		if container == nil {
-			return nil, fmt.Errorf("container %s not found in pod %s", containerName, podFullName)
-		}
-
-		// The TTY setting for attach must match the TTY setting in the initial container configuration,
-		// since whether the process is running in a TTY cannot be changed after it has started. We
-		// need the api.Pod to get the TTY status.
-		pod, found := kl.GetPodByFullName(podFullName)
-		if !found || (string(podUID) != "" && pod.UID != podUID) {
-			return nil, fmt.Errorf("pod %s not found", podFullName)
-		}
-		containerSpec := kubecontainer.GetContainerSpec(pod, containerName)
-		if containerSpec == nil {
-			return nil, fmt.Errorf("container %s not found in pod %s", containerName, podFullName)
-		}
-		tty := containerSpec.TTY
-
-		return streamingRuntime.GetAttach(container.ID, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, tty)
-	default:
-		return nil, fmt.Errorf("container runtime does not support attach")
-	}
+	container, err := kl.findContainer(podFullName, podUID, containerName)
+	if err != nil {
+		return nil, err
+	}
+	if container == nil {
+		return nil, fmt.Errorf("container %s not found in pod %s", containerName, podFullName)
+	}
+
+	// The TTY setting for attach must match the TTY setting in the initial container configuration,
+	// since whether the process is running in a TTY cannot be changed after it has started. We
+	// need the api.Pod to get the TTY status.
+	pod, found := kl.GetPodByFullName(podFullName)
+	if !found || (string(podUID) != "" && pod.UID != podUID) {
+		return nil, fmt.Errorf("pod %s not found", podFullName)
+	}
+	containerSpec := kubecontainer.GetContainerSpec(pod, containerName)
+	if containerSpec == nil {
+		return nil, fmt.Errorf("container %s not found in pod %s", containerName, podFullName)
+	}
+	tty := containerSpec.TTY
+
+	return kl.streamingRuntime.GetAttach(container.ID, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, tty)
 }
 
 // GetPortForward gets the URL the port-forward will be served from, or nil if the Kubelet will serve it.
 func (kl *Kubelet) GetPortForward(podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) {
-	switch streamingRuntime := kl.containerRuntime.(type) {
-	case kubecontainer.DirectStreamingRuntime:
-		// Kubelet will serve the attach directly.
-		return nil, nil
-	case kubecontainer.IndirectStreamingRuntime:
-		pods, err := kl.containerRuntime.GetPods(false)
-		if err != nil {
-			return nil, err
-		}
-		// Resolve and type convert back again.
-		// We need the static pod UID but the kubecontainer API works with types.UID.
-		podUID = types.UID(kl.podManager.TranslatePodUID(podUID))
-		podFullName := kubecontainer.BuildPodFullName(podName, podNamespace)
-		pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID)
-		if pod.IsEmpty() {
-			return nil, fmt.Errorf("pod not found (%q)", podFullName)
-		}
-
-		return streamingRuntime.GetPortForward(podName, podNamespace, podUID, portForwardOpts.Ports)
-	default:
-		return nil, fmt.Errorf("container runtime does not support port-forward")
-	}
+	pods, err := kl.containerRuntime.GetPods(false)
+	if err != nil {
+		return nil, err
+	}
+	// Resolve and type convert back again.
+	// We need the static pod UID but the kubecontainer API works with types.UID.
+	podUID = types.UID(kl.podManager.TranslatePodUID(podUID))
+	podFullName := kubecontainer.BuildPodFullName(podName, podNamespace)
+	pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID)
+	if pod.IsEmpty() {
+		return nil, fmt.Errorf("pod not found (%q)", podFullName)
+	}
+
+	return kl.streamingRuntime.GetPortForward(podName, podNamespace, podUID, portForwardOpts.Ports)
 }
 
 // cleanupOrphanedPodCgroups removes cgroups that should no longer exist.
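The final hunk removes the DirectStreamingRuntime/IndirectStreamingRuntime type switches: streaming is now always served through the runtime, so the kubelet calls a stored kl.streamingRuntime field directly instead of type-asserting at every call site. A structural sketch of that change, using hypothetical local types:

```go
package main

import "fmt"

// streamer stands in for the streaming-runtime interface the kubelet keeps.
type streamer interface{ GetExec() string }

type grpcStreamer struct{}

func (grpcStreamer) GetExec() string { return "https://node:10250/exec/abc" }

// kubeletLike stores the streaming implementation once in a dedicated
// field, the shape the diff above moves to, rather than type-switching
// on a general runtime interface per call.
type kubeletLike struct {
	streamingRuntime streamer
}

func main() {
	kl := kubeletLike{streamingRuntime: grpcStreamer{}}
	fmt.Println(kl.streamingRuntime.GetExec())
}
```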