vendor updates

Serguei Bezverkhi
2018-03-06 17:33:18 -05:00
parent 4b3ebc171b
commit e9033989a0
5854 changed files with 248382 additions and 119809 deletions

@@ -40,7 +40,6 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
utilvalidation "k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/remotecommand"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@@ -48,10 +47,9 @@ import (
podshelper "k8s.io/kubernetes/pkg/apis/core/pods"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/apis/core/v1/validation"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/fieldpath"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/envvars"
@@ -63,9 +61,8 @@ import (
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/format"
utilfile "k8s.io/kubernetes/pkg/util/file"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
volumevalidation "k8s.io/kubernetes/pkg/volume/validation"
"k8s.io/kubernetes/third_party/forked/golang/expansion"
)
@@ -131,7 +128,7 @@ func makeAbsolutePath(goos, path string) string {
// makeBlockVolumes maps the raw block devices specified in the path of the container
// Experimental
func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVolumes kubecontainer.VolumeMap, blkutil volumeutil.BlockVolumePathHandler) ([]kubecontainer.DeviceInfo, error) {
func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVolumes kubecontainer.VolumeMap, blkutil volumepathhandler.BlockVolumePathHandler) ([]kubecontainer.DeviceInfo, error) {
var devices []kubecontainer.DeviceInfo
for _, device := range container.VolumeDevices {
// check path is absolute
@@ -190,7 +187,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
vol.SELinuxLabeled = true
relabelVolume = true
}
hostPath, err := volume.GetPath(vol.Mounter)
hostPath, err := volumeutil.GetPath(vol.Mounter)
if err != nil {
return nil, err
}
@@ -249,11 +246,13 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
}
glog.V(5).Infof("Pod %q container %q mount %q has propagation %q", format.Pod(pod), container.Name, mount.Name, propagation)
mustMountRO := vol.Mounter.GetAttributes().ReadOnly && utilfeature.DefaultFeatureGate.Enabled(features.ReadOnlyAPIDataVolumes)
mounts = append(mounts, kubecontainer.Mount{
Name: mount.Name,
ContainerPath: containerPath,
HostPath: hostPath,
ReadOnly: mount.ReadOnly,
ReadOnly: mount.ReadOnly || mustMountRO,
SELinuxRelabel: relabelVolume,
Propagation: propagation,
})
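
As an aside, here is a minimal standalone sketch of the read-only decision introduced in this hunk; mountReadOnly and its boolean parameters are simplified stand-ins for the kubelet's mount, volume-plugin, and feature-gate plumbing, not the real API.

package main

import "fmt"

// mountReadOnly mirrors the mustMountRO logic above: a mount ends up read-only
// when the container's volumeMount asked for it, or when the volume plugin
// reports ReadOnly and the ReadOnlyAPIDataVolumes feature gate is enabled.
func mountReadOnly(requestedRO, volumeRO, gateEnabled bool) bool {
	mustMountRO := volumeRO && gateEnabled
	return requestedRO || mustMountRO
}

func main() {
	// A secret/configmap-style volume whose plugin reports ReadOnly is forced
	// read-only once the gate is on, even if the mount did not request it.
	fmt.Println(mountReadOnly(false, true, true))  // true
	fmt.Println(mountReadOnly(false, true, false)) // false
}
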
@@ -272,6 +271,12 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
// translateMountPropagation transforms v1.MountPropagationMode to
// runtimeapi.MountPropagation.
func translateMountPropagation(mountMode *v1.MountPropagationMode) (runtimeapi.MountPropagation, error) {
if runtime.GOOS == "windows" {
// Windows containers don't support mount propagation; use private for it.
// Refer https://docs.docker.com/storage/bind-mounts/#configure-bind-propagation.
return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil
}
if !utilfeature.DefaultFeatureGate.Enabled(features.MountPropagation) {
// mount propagation is disabled, use private as in the old versions
return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil
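
For illustration, a trimmed-down sketch of the propagation selection added above; the propagation type and gateEnabled flag are local stand-ins for runtimeapi.MountPropagation and the MountPropagation feature gate, and the nil fallback is just a conservative choice for the sketch.

package main

import (
	"fmt"
	"runtime"
)

// propagation is a local stand-in for runtimeapi.MountPropagation.
type propagation string

const (
	propagationPrivate         propagation = "PRIVATE"
	propagationHostToContainer propagation = "HOST_TO_CONTAINER"
)

// pickPropagation mirrors the order of checks above: Windows and a disabled
// feature gate both short-circuit to private propagation before the requested
// mode is consulted.
func pickPropagation(gateEnabled bool, requested *propagation) propagation {
	if runtime.GOOS == "windows" {
		// Windows containers do not support mount propagation.
		return propagationPrivate
	}
	if !gateEnabled {
		return propagationPrivate
	}
	if requested == nil {
		return propagationPrivate // conservative default for this sketch
	}
	return *requested
}

func main() {
	mode := propagationHostToContainer
	fmt.Println(pickPropagation(false, &mode)) // PRIVATE: the disabled gate wins
	fmt.Println(pickPropagation(true, &mode))  // HOST_TO_CONTAINER (on Linux)
}
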
@@ -445,7 +450,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai
return nil, err
}
opts.Hostname = hostname
podName := volumehelper.GetUniquePodName(pod)
podName := volumeutil.GetUniquePodName(pod)
volumes := kl.volumeManager.GetMountedVolumesForPod(podName)
opts.PortMappings = kubecontainer.MakePortMappings(container)
@@ -458,7 +463,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai
// TODO: remove feature gate check after no longer needed
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
blkutil := volumeutil.NewBlockVolumePathHandler()
blkutil := volumepathhandler.NewBlockVolumePathHandler()
blkVolumes, err := kl.makeBlockVolumes(pod, container, volumes, blkutil)
if err != nil {
return nil, err
@@ -814,7 +819,7 @@ func (kl *Kubelet) killPod(pod *v1.Pod, runningPod *kubecontainer.Pod, status *k
if runningPod != nil {
p = *runningPod
} else if status != nil {
p = kubecontainer.ConvertPodStatusToRunningPod(kl.GetRuntime().Type(), status)
p = kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), status)
} else {
return fmt.Errorf("one of the two arguments must be non-nil: runningPod, status")
}
@@ -1109,22 +1114,6 @@ func (kl *Kubelet) podKiller() {
}
}
// hasHostPortConflicts detects pods with conflicted host ports.
func hasHostPortConflicts(pods []*v1.Pod) bool {
ports := sets.String{}
for _, pod := range pods {
if errs := validation.AccumulateUniqueHostPorts(pod.Spec.Containers, &ports, field.NewPath("spec", "containers")); len(errs) > 0 {
glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs)
return true
}
if errs := validation.AccumulateUniqueHostPorts(pod.Spec.InitContainers, &ports, field.NewPath("spec", "initContainers")); len(errs) > 0 {
glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs)
return true
}
}
return false
}
// validateContainerLogStatus returns the container ID for the desired container to retrieve logs for, based on the state
// of the container. The previous flag will only return the logs for the last terminated container, otherwise, the current
// running container is preferred over a previous termination. If info about the container is not available then a specific
@@ -1145,7 +1134,7 @@ func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *v1.PodS
switch {
case previous:
if lastState.Terminated == nil {
if lastState.Terminated == nil || lastState.Terminated.ContainerID == "" {
return kubecontainer.ContainerID{}, fmt.Errorf("previous terminated container %q in pod %q not found", containerName, podName)
}
cID = lastState.Terminated.ContainerID
@@ -1154,9 +1143,21 @@ func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *v1.PodS
cID = cStatus.ContainerID
case terminated != nil:
cID = terminated.ContainerID
// in cases where the next container didn't start, terminated.ContainerID will be empty, so get logs from the lastState.Terminated.
if terminated.ContainerID == "" {
if lastState.Terminated != nil && lastState.Terminated.ContainerID != "" {
cID = lastState.Terminated.ContainerID
} else {
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is terminated", containerName, podName)
}
} else {
cID = terminated.ContainerID
}
case lastState.Terminated != nil:
if lastState.Terminated.ContainerID == "" {
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is terminated", containerName, podName)
}
cID = lastState.Terminated.ContainerID
case waiting != nil:
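
As a side note, a compact sketch of the fallback order this hunk adds when picking which container's logs to return; terminatedState is a simplified stand-in for v1.ContainerStateTerminated, not the real API type.

package main

import (
	"errors"
	"fmt"
)

// terminatedState is a simplified stand-in for v1.ContainerStateTerminated.
type terminatedState struct {
	ContainerID string
}

// pickLogContainerID mirrors the fallback above: prefer the current terminated
// state's container ID, fall back to the last terminated state when the current
// one never recorded an ID (e.g. the container never started), and fail only
// when neither has an ID.
func pickLogContainerID(terminated, lastTerminated *terminatedState) (string, error) {
	if terminated != nil && terminated.ContainerID != "" {
		return terminated.ContainerID, nil
	}
	if lastTerminated != nil && lastTerminated.ContainerID != "" {
		return lastTerminated.ContainerID, nil
	}
	return "", errors.New("container is terminated but no container ID was recorded")
}

func main() {
	id, err := pickLogContainerID(&terminatedState{}, &terminatedState{ContainerID: "docker://abc123"})
	fmt.Println(id, err) // docker://abc123 <nil>
}
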
@@ -1231,10 +1232,8 @@ func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName string, lo
return kl.containerRuntime.GetContainerLogs(pod, containerID, logOptions, stdout, stderr)
}
// GetPhase returns the phase of a pod given its container info.
// This func is exported to simplify integration with 3rd party kubelet
// integrations like kubernetes-mesos.
func GetPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase {
// getPhase returns the phase of a pod given its container info.
func getPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase {
initialized := 0
pendingInitialization := 0
failedInitialization := 0
@@ -1364,7 +1363,16 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
// Assume info is ready to process
spec := &pod.Spec
allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
s.Phase = GetPhase(spec, allStatus)
s.Phase = getPhase(spec, allStatus)
// Check for illegal phase transition
if pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded {
// API server shows terminal phase; transitions are not allowed
if s.Phase != pod.Status.Phase {
glog.Errorf("Pod attempted illegal phase transition from %s to %s: %v", pod.Status.Phase, s.Phase, s)
// Force back to phase from the API server
s.Phase = pod.Status.Phase
}
}
kl.probeManager.UpdatePodStatus(pod.UID, s)
s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase))
s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.ContainerStatuses, s.Phase))
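
Purely as an illustration of the terminal-phase guard added in this hunk, a small sketch with a local podPhase type standing in for v1.PodPhase.

package main

import "fmt"

// podPhase is a local stand-in for v1.PodPhase.
type podPhase string

const (
	podRunning   podPhase = "Running"
	podSucceeded podPhase = "Succeeded"
	podFailed    podPhase = "Failed"
)

// guardPhase mirrors the check above: once the API server has recorded a
// terminal phase (Failed or Succeeded), the kubelet keeps it rather than
// letting a freshly computed status regress the pod to a non-terminal phase.
func guardPhase(apiPhase, computed podPhase) podPhase {
	if apiPhase == podFailed || apiPhase == podSucceeded {
		if computed != apiPhase {
			return apiPhase
		}
	}
	return computed
}

func main() {
	fmt.Println(guardPhase(podFailed, podRunning))    // Failed
	fmt.Println(guardPhase(podRunning, podSucceeded)) // Succeeded
}
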
@@ -1374,7 +1382,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
if _, oldPodScheduled := podutil.GetPodCondition(&pod.Status, v1.PodScheduled); oldPodScheduled != nil {
s.Conditions = append(s.Conditions, *oldPodScheduled)
}
podutil.UpdatePodCondition(&pod.Status, &v1.PodCondition{
podutil.UpdatePodCondition(s, &v1.PodCondition{
Type: v1.PodScheduled,
Status: v1.ConditionTrue,
})