vendor update for CSI 0.3.0

gman
2018-07-18 16:47:22 +02:00
parent 6f484f92fc
commit 8ea659f0d5
6810 changed files with 438061 additions and 193861 deletions

View File

@ -72,7 +72,6 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/kubelet/kuberuntime",
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/credentialprovider:go_default_library",
"//pkg/credentialprovider/secrets:go_default_library",
"//pkg/features:go_default_library",

View File

@ -26,7 +26,6 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@ -191,24 +190,6 @@ func toKubeRuntimeStatus(status *runtimeapi.RuntimeStatus) *kubecontainer.Runtim
return &kubecontainer.RuntimeStatus{Conditions: conditions}
}
- // getSysctlsFromAnnotations gets sysctls and unsafeSysctls from annotations.
- func getSysctlsFromAnnotations(annotations map[string]string) (map[string]string, error) {
- apiSysctls, apiUnsafeSysctls, err := v1helper.SysctlsFromPodAnnotations(annotations)
- if err != nil {
- return nil, err
- }
- sysctls := make(map[string]string)
- for _, c := range apiSysctls {
- sysctls[c.Name] = c.Value
- }
- for _, c := range apiUnsafeSysctls {
- sysctls[c.Name] = c.Value
- }
- return sysctls, nil
- }
// getSeccompProfileFromAnnotations gets seccomp profile from annotations.
// It gets pod's profile if containerName is empty.
func (m *kubeGenericRuntimeManager) getSeccompProfileFromAnnotations(annotations map[string]string, containerName string) string {
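For context on the removal above: with this update, sysctls are no longer parsed out of pod annotations; the sandbox code reads them from the typed pod.Spec.SecurityContext.Sysctls field behind the Sysctls feature gate (see the generatePodSandboxLinuxConfig hunk below). A minimal sketch, not part of this diff, of a pod declaring sysctls the new way:

package example

import "k8s.io/api/core/v1"

func podWithSysctls() *v1.Pod {
	return &v1.Pod{
		Spec: v1.PodSpec{
			SecurityContext: &v1.PodSecurityContext{
				Sysctls: []v1.Sysctl{
					{Name: "kernel.shmmax", Value: "1000000000"},
					// Unsafe sysctls additionally require kubelet opt-in.
					{Name: "net.ipv4.route.min_pmtu", Value: "1000"},
				},
			},
		},
	}
}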

View File

@ -56,46 +56,6 @@ func TestStableKey(t *testing.T) {
assert.NotEqual(t, oldKey, newKey)
}
- // TestGetSystclsFromAnnotations tests the logic of getting sysctls from annotations.
- func TestGetSystclsFromAnnotations(t *testing.T) {
- tests := []struct {
- annotations map[string]string
- expectedSysctls map[string]string
- }{{
- annotations: map[string]string{
- v1.SysctlsPodAnnotationKey: "kernel.shmmni=32768,kernel.shmmax=1000000000",
- v1.UnsafeSysctlsPodAnnotationKey: "knet.ipv4.route.min_pmtu=1000",
- },
- expectedSysctls: map[string]string{
- "kernel.shmmni": "32768",
- "kernel.shmmax": "1000000000",
- "knet.ipv4.route.min_pmtu": "1000",
- },
- }, {
- annotations: map[string]string{
- v1.SysctlsPodAnnotationKey: "kernel.shmmni=32768,kernel.shmmax=1000000000",
- },
- expectedSysctls: map[string]string{
- "kernel.shmmni": "32768",
- "kernel.shmmax": "1000000000",
- },
- }, {
- annotations: map[string]string{
- v1.UnsafeSysctlsPodAnnotationKey: "knet.ipv4.route.min_pmtu=1000",
- },
- expectedSysctls: map[string]string{
- "knet.ipv4.route.min_pmtu": "1000",
- },
- }}
- for i, test := range tests {
- actualSysctls, err := getSysctlsFromAnnotations(test.annotations)
- assert.NoError(t, err, "TestCase[%d]", i)
- assert.Len(t, actualSysctls, len(test.expectedSysctls), "TestCase[%d]", i)
- assert.Equal(t, test.expectedSysctls, actualSysctls, "TestCase[%d]", i)
- }
- }
func TestToKubeContainer(t *testing.T) {
c := &runtimeapi.Container{
Id: "test-id",
@ -231,20 +191,35 @@ func TestGetSeccompProfileFromAnnotations(t *testing.T) {
containerName: "container1",
expectedProfile: "",
},
+ {
+ description: "pod runtime/default seccomp profile should return runtime/default",
+ annotation: map[string]string{
+ v1.SeccompPodAnnotationKey: v1.SeccompProfileRuntimeDefault,
+ },
+ expectedProfile: v1.SeccompProfileRuntimeDefault,
+ },
{
description: "pod docker/default seccomp profile should return docker/default",
annotation: map[string]string{
- v1.SeccompPodAnnotationKey: "docker/default",
+ v1.SeccompPodAnnotationKey: v1.DeprecatedSeccompProfileDockerDefault,
},
- expectedProfile: "docker/default",
+ expectedProfile: v1.DeprecatedSeccompProfileDockerDefault,
},
+ {
+ description: "pod runtime/default seccomp profile with containerName should return runtime/default",
+ annotation: map[string]string{
+ v1.SeccompPodAnnotationKey: v1.SeccompProfileRuntimeDefault,
+ },
+ containerName: "container1",
+ expectedProfile: v1.SeccompProfileRuntimeDefault,
+ },
{
description: "pod docker/default seccomp profile with containerName should return docker/default",
annotation: map[string]string{
- v1.SeccompPodAnnotationKey: "docker/default",
+ v1.SeccompPodAnnotationKey: v1.DeprecatedSeccompProfileDockerDefault,
},
containerName: "container1",
- expectedProfile: "docker/default",
+ expectedProfile: v1.DeprecatedSeccompProfileDockerDefault,
},
{
description: "pod unconfined seccomp profile should return unconfined",

View File

@ -106,11 +106,15 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
restartCount = containerStatus.RestartCount + 1
}
- containerConfig, err := m.generateContainerConfig(container, pod, restartCount, podIP, imageRef, containerType)
+ containerConfig, cleanupAction, err := m.generateContainerConfig(container, pod, restartCount, podIP, imageRef, containerType)
+ if cleanupAction != nil {
+ defer cleanupAction()
+ }
if err != nil {
m.recordContainerEvent(pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", grpc.ErrorDesc(err))
return grpc.ErrorDesc(err), ErrCreateContainerConfig
}
containerID, err := m.runtimeService.CreateContainer(podSandboxID, containerConfig, podSandboxConfig)
if err != nil {
m.recordContainerEvent(pod, container, containerID, v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", grpc.ErrorDesc(err))
@ -146,9 +150,15 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
legacySymlink := legacyLogSymlink(containerID, containerMeta.Name, sandboxMeta.Name,
sandboxMeta.Namespace)
containerLog := filepath.Join(podSandboxConfig.LogDirectory, containerConfig.LogPath)
- if err := m.osInterface.Symlink(containerLog, legacySymlink); err != nil {
- glog.Errorf("Failed to create legacy symbolic link %q to container %q log %q: %v",
- legacySymlink, containerID, containerLog, err)
+ // Only create the legacy symlink if the containerLog path exists (or the error is not IsNotExist).
+ // If the containerLog path does not exist, only a dangling legacySymlink would be created.
+ // This dangling legacySymlink is later removed by container gc, so it does not make sense
+ // to create it in the first place. This happens when the journald logging driver is used with docker.
+ if _, err := m.osInterface.Stat(containerLog); !os.IsNotExist(err) {
+ if err := m.osInterface.Symlink(containerLog, legacySymlink); err != nil {
+ glog.Errorf("Failed to create legacy symbolic link %q to container %q log %q: %v",
+ legacySymlink, containerID, containerLog, err)
+ }
}
// Step 4: execute the post start hook.
@ -172,27 +182,27 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
}
// generateContainerConfig generates container config for kubelet runtime v1.
- func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string, containerType kubecontainer.ContainerType) (*runtimeapi.ContainerConfig, error) {
- opts, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP)
+ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string, containerType kubecontainer.ContainerType) (*runtimeapi.ContainerConfig, func(), error) {
+ opts, cleanupAction, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP)
if err != nil {
- return nil, err
+ return nil, nil, err
}
uid, username, err := m.getImageUser(container.Image)
if err != nil {
- return nil, err
+ return nil, cleanupAction, err
}
// Verify RunAsNonRoot. Non-root verification only supports numeric user.
if err := verifyRunAsNonRoot(pod, container, uid, username); err != nil {
- return nil, err
+ return nil, cleanupAction, err
}
}
command, args := kubecontainer.ExpandContainerCommandAndArgs(container, opts.Envs)
logDir := BuildContainerLogsDirectory(kubetypes.UID(pod.UID), container.Name)
err = m.osInterface.MkdirAll(logDir, 0755)
if err != nil {
- return nil, fmt.Errorf("create container log directory for container %s failed: %v", container.Name, err)
+ return nil, cleanupAction, fmt.Errorf("create container log directory for container %s failed: %v", container.Name, err)
}
containerLogsPath := buildContainerLogsPath(container.Name, restartCount)
restartCountUint32 := uint32(restartCount)
@ -217,7 +227,7 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai
// set platform specific configurations.
if err := m.applyPlatformSpecificContainerConfig(config, container, pod, uid, username); err != nil {
- return nil, err
+ return nil, cleanupAction, err
}
// set environment variables
@ -231,7 +241,7 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Contai
}
config.Envs = envs
- return config, nil
+ return config, cleanupAction, nil
}
// makeDevices generates container devices for kubelet runtime v1.
@ -726,10 +736,7 @@ func (m *kubeGenericRuntimeManager) GetContainerLogs(pod *v1.Pod, containerID ku
glog.V(4).Infof("failed to get container status for %v: %v", containerID.String(), err)
return fmt.Errorf("Unable to retrieve container logs for %v", containerID.String())
}
- labeledInfo := getContainerInfoFromLabels(status.Labels)
- annotatedInfo := getContainerInfoFromAnnotations(status.Annotations)
- path := buildFullContainerLogsPath(pod.UID, labeledInfo.ContainerName, annotatedInfo.RestartCount)
- return m.ReadLogs(path, containerID.ID, logOptions, stdout, stderr)
+ return m.ReadLogs(status.GetLogPath(), containerID.ID, logOptions, stdout, stderr)
}
// GetExec gets the endpoint the runtime will serve the exec request from.
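The pattern introduced throughout this file: generateContainerConfig now returns a cleanup function next to the config, and startContainer defers it before checking the error, so resources allocated while building the config are released on every path. A minimal self-contained sketch of the idiom (newTempWorkDir is hypothetical, not kubelet code):

package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// newTempWorkDir returns a directory plus a cleanup func that releases it.
func newTempWorkDir() (string, func(), error) {
	dir, err := ioutil.TempDir("", "work")
	if err != nil {
		return "", nil, err
	}
	return dir, func() { os.RemoveAll(dir) }, nil
}

func main() {
	dir, cleanup, err := newTempWorkDir()
	// Defer the cleanup before inspecting err, exactly as startContainer
	// defers cleanupAction: it must run even when a later step fails.
	if cleanup != nil {
		defer cleanup()
	}
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("working in", dir)
}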

View File

@ -32,7 +32,7 @@ func makeExpectedConfig(m *kubeGenericRuntimeManager, pod *v1.Pod, containerInde
container := &pod.Spec.Containers[containerIndex]
podIP := ""
restartCount := 0
- opts, _ := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP)
+ opts, _, _ := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP)
containerLogsPath := buildContainerLogsPath(container.Name, restartCount)
restartCountUint32 := uint32(restartCount)
envs := make([]*runtimeapi.KeyValue, len(opts.Envs))
@ -64,6 +64,8 @@ func TestGenerateContainerConfig(t *testing.T) {
_, imageService, m, err := createTestRuntimeManager()
assert.NoError(t, err)
+ runAsUser := int64(1000)
+ runAsGroup := int64(2000)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
@ -78,17 +80,23 @@ func TestGenerateContainerConfig(t *testing.T) {
ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{"testCommand"},
WorkingDir: "testWorkingDir",
+ SecurityContext: &v1.SecurityContext{
+ RunAsUser: &runAsUser,
+ RunAsGroup: &runAsGroup,
+ },
},
},
},
}
expectedConfig := makeExpectedConfig(m, pod, 0)
- containerConfig, err := m.generateContainerConfig(&pod.Spec.Containers[0], pod, 0, "", pod.Spec.Containers[0].Image, kubecontainer.ContainerTypeRegular)
+ containerConfig, _, err := m.generateContainerConfig(&pod.Spec.Containers[0], pod, 0, "", pod.Spec.Containers[0].Image, kubecontainer.ContainerTypeRegular)
assert.NoError(t, err)
assert.Equal(t, expectedConfig, containerConfig, "generate container config for kubelet runtime v1.")
+ assert.Equal(t, runAsUser, containerConfig.GetLinux().GetSecurityContext().GetRunAsUser().GetValue(), "RunAsUser should be set")
+ assert.Equal(t, runAsGroup, containerConfig.GetLinux().GetSecurityContext().GetRunAsGroup().GetValue(), "RunAsGroup should be set")
- runAsUser := int64(0)
+ runAsRoot := int64(0)
runAsNonRootTrue := true
podWithContainerSecurityContext := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -106,14 +114,14 @@ func TestGenerateContainerConfig(t *testing.T) {
WorkingDir: "testWorkingDir",
SecurityContext: &v1.SecurityContext{
RunAsNonRoot: &runAsNonRootTrue,
- RunAsUser: &runAsUser,
+ RunAsUser: &runAsRoot,
},
},
},
},
}
- _, err = m.generateContainerConfig(&podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, kubecontainer.ContainerTypeRegular)
+ _, _, err = m.generateContainerConfig(&podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, kubecontainer.ContainerTypeRegular)
assert.Error(t, err)
imageId, _ := imageService.PullImage(&runtimeapi.ImageSpec{Image: "busybox"}, nil)
@ -125,6 +133,6 @@ func TestGenerateContainerConfig(t *testing.T) {
podWithContainerSecurityContext.Spec.Containers[0].SecurityContext.RunAsUser = nil
podWithContainerSecurityContext.Spec.Containers[0].SecurityContext.RunAsNonRoot = &runAsNonRootTrue
- _, err = m.generateContainerConfig(&podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, kubecontainer.ContainerTypeRegular)
+ _, _, err = m.generateContainerConfig(&podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, kubecontainer.ContainerTypeRegular)
assert.Error(t, err, "RunAsNonRoot should fail for non-numeric username")
}

View File

@ -19,24 +19,32 @@ limitations under the License.
package kuberuntime
import (
"fmt"
"github.com/docker/docker/pkg/sysinfo"
"k8s.io/api/core/v1"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/securitycontext"
)
// applyPlatformSpecificContainerConfig applies platform specific configurations to runtimeapi.ContainerConfig.
func (m *kubeGenericRuntimeManager) applyPlatformSpecificContainerConfig(config *runtimeapi.ContainerConfig, container *v1.Container, pod *v1.Pod, uid *int64, username string) error {
- config.Windows = m.generateWindowsContainerConfig(container, pod, uid, username)
+ windowsConfig, err := m.generateWindowsContainerConfig(container, pod, uid, username)
+ if err != nil {
+ return err
+ }
+ config.Windows = windowsConfig
return nil
}
// generateWindowsContainerConfig generates windows container config for kubelet runtime v1.
// Refer https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/cri-windows.md.
- func (m *kubeGenericRuntimeManager) generateWindowsContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username string) *runtimeapi.WindowsContainerConfig {
+ func (m *kubeGenericRuntimeManager) generateWindowsContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username string) (*runtimeapi.WindowsContainerConfig, error) {
wc := &runtimeapi.WindowsContainerConfig{
- Resources: &runtimeapi.WindowsContainerResources{},
+ Resources:       &runtimeapi.WindowsContainerResources{},
+ SecurityContext: &runtimeapi.WindowsContainerSecurityContext{},
}
cpuRequest := container.Resources.Requests.Cpu()
@ -77,5 +85,15 @@ func (m *kubeGenericRuntimeManager) generateWindowsContainerConfig(container *v1
wc.Resources.MemoryLimitInBytes = memoryLimit
}
- return wc
+ // setup security context
+ effectiveSc := securitycontext.DetermineEffectiveSecurityContext(pod, container)
+ // RunAsUser only supports int64 from Kubernetes API, but Windows containers only support username.
+ if effectiveSc.RunAsUser != nil {
+ return nil, fmt.Errorf("run as uid (%d) is not supported on Windows", *effectiveSc.RunAsUser)
+ }
+ if username != "" {
+ wc.SecurityContext.RunAsUsername = username
+ }
+ return wc, nil
}
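The rule encoded above: the Kubernetes API expresses RunAsUser as a numeric UID, which has no meaning on Windows, so any effective RunAsUser is rejected and only a username (typically taken from the image config) is honored. A condensed, hypothetical restatement of the check:

package wincheck

import "fmt"

// windowsIdentity mirrors the check above: a numeric UID cannot be honored
// on Windows, so it is rejected; an empty username falls back to the
// runtime's default user.
func windowsIdentity(runAsUser *int64, username string) (string, error) {
	if runAsUser != nil {
		return "", fmt.Errorf("run as uid (%d) is not supported on Windows", *runAsUser)
	}
	return username, nil
}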

View File

@ -96,6 +96,28 @@ func TestSandboxGC(t *testing.T) {
remain: []int{0, 1},
evictTerminatedPods: false,
},
+ {
+ description: "older exited sandboxes without containers for existing pods should be garbage collected if there are more than one exited sandboxes.",
+ sandboxes: []sandboxTemplate{
+ makeGCSandbox(pods[0], 1, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, true, 1),
+ makeGCSandbox(pods[0], 0, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, true, 0),
+ },
+ containers: []containerTemplate{},
+ remain: []int{0},
+ evictTerminatedPods: false,
+ },
+ {
+ description: "older exited sandboxes with containers for existing pods should not be garbage collected even if there are more than one exited sandboxes.",
+ sandboxes: []sandboxTemplate{
+ makeGCSandbox(pods[0], 1, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, true, 1),
+ makeGCSandbox(pods[0], 0, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, true, 0),
+ },
+ containers: []containerTemplate{
+ {pod: pods[0], container: &pods[0].Spec.Containers[0], sandboxAttempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
+ },
+ remain: []int{0, 1},
+ evictTerminatedPods: false,
+ },
{
description: "non-running sandboxes for existing pods should be garbage collected if evictTerminatedPods is set.",
sandboxes: []sandboxTemplate{
@ -187,6 +209,7 @@ func TestContainerGC(t *testing.T) {
policy *kubecontainer.ContainerGCPolicy // container gc policy
remain []int // template indexes of remaining containers
evictTerminatedPods bool
+ allSourcesReady bool
}{
{
description: "all containers should be removed when max container limit is 0",
@ -196,6 +219,7 @@ func TestContainerGC(t *testing.T) {
policy: &kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: 1, MaxContainers: 0},
remain: []int{},
evictTerminatedPods: false,
+ allSourcesReady: true,
},
{
description: "max containers should be complied when no max per pod container limit is set",
@ -209,6 +233,7 @@ func TestContainerGC(t *testing.T) {
policy: &kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: 4},
remain: []int{0, 1, 2, 3},
evictTerminatedPods: false,
+ allSourcesReady: true,
},
{
description: "no containers should be removed if both max container and per pod container limits are not set",
@ -220,6 +245,7 @@ func TestContainerGC(t *testing.T) {
policy: &kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: -1},
remain: []int{0, 1, 2},
evictTerminatedPods: false,
+ allSourcesReady: true,
},
{
description: "recently started containers should not be removed",
@ -230,6 +256,7 @@ func TestContainerGC(t *testing.T) {
},
remain: []int{0, 1, 2},
evictTerminatedPods: false,
+ allSourcesReady: true,
},
{
description: "oldest containers should be removed when per pod container limit exceeded",
@ -240,6 +267,7 @@ func TestContainerGC(t *testing.T) {
},
remain: []int{0, 1},
evictTerminatedPods: false,
+ allSourcesReady: true,
},
{
description: "running containers should not be removed",
@ -250,6 +278,7 @@ func TestContainerGC(t *testing.T) {
},
remain: []int{0, 1, 2},
evictTerminatedPods: false,
+ allSourcesReady: true,
},
{
description: "no containers should be removed when limits are not exceeded",
@ -259,6 +288,7 @@ func TestContainerGC(t *testing.T) {
},
remain: []int{0, 1},
evictTerminatedPods: false,
+ allSourcesReady: true,
},
{
description: "max container count should apply per (UID, container) pair",
@ -275,6 +305,7 @@ func TestContainerGC(t *testing.T) {
},
remain: []int{0, 1, 3, 4, 6, 7},
evictTerminatedPods: false,
+ allSourcesReady: true,
},
{
description: "max limit should apply and try to keep from every pod",
@ -292,6 +323,7 @@ func TestContainerGC(t *testing.T) {
},
remain: []int{0, 2, 4, 6, 8},
evictTerminatedPods: false,
+ allSourcesReady: true,
},
{
description: "oldest pods should be removed if limit exceeded",
@ -309,6 +341,7 @@ func TestContainerGC(t *testing.T) {
},
remain: []int{0, 2, 4, 6, 8, 9},
evictTerminatedPods: false,
+ allSourcesReady: true,
},
{
description: "all non-running containers should be removed when evictTerminatedPods is set",
@ -322,6 +355,7 @@ func TestContainerGC(t *testing.T) {
},
remain: []int{4, 5},
evictTerminatedPods: true,
+ allSourcesReady: true,
},
{
description: "containers for deleted pods should be removed",
@ -335,6 +369,16 @@ func TestContainerGC(t *testing.T) {
},
remain: []int{0, 1, 2},
evictTerminatedPods: false,
+ allSourcesReady: true,
},
+ {
+ description: "containers for deleted pods may not be removed if allSourcesReady is set false",
+ containers: []containerTemplate{
+ makeGCContainer("deleted", "bar1", 0, 0, runtimeapi.ContainerState_CONTAINER_EXITED),
+ },
+ remain: []int{0},
+ evictTerminatedPods: true,
+ allSourcesReady: false,
+ },
} {
t.Logf("TestCase #%d: %+v", c, test)
@ -344,7 +388,7 @@ func TestContainerGC(t *testing.T) {
if test.policy == nil {
test.policy = &defaultGCPolicy
}
- err := m.containerGC.evictContainers(*test.policy, true, test.evictTerminatedPods)
+ err := m.containerGC.evictContainers(*test.policy, test.allSourcesReady, test.evictTerminatedPods)
assert.NoError(t, err)
realRemain, err := fakeRuntime.ListContainers(nil)
assert.NoError(t, err)
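What the new allSourcesReady flag guards, sketched as a simplified decision helper (an assumption condensed from evictContainers' behavior, not its actual code):

// evictionAllowed reports whether containers of a pod that looks deleted or
// terminated may be removed. "The pod is gone" is only trusted once every
// kubelet config source (API server, static manifests, ...) has synced;
// otherwise a pod that merely has not been seen yet would lose its containers.
func evictionAllowed(allSourcesReady, podDeleted, podTerminated, evictTerminatedPods bool) bool {
	if podDeleted || (evictTerminatedPods && podTerminated) {
		return allSourcesReady
	}
	return false
}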

View File

@ -120,7 +120,7 @@ type kubeGenericRuntimeManager struct {
type KubeGenericRuntime interface {
kubecontainer.Runtime
- kubecontainer.IndirectStreamingRuntime
+ kubecontainer.StreamingRuntime
kubecontainer.ContainerCommandRunner
}

View File

@ -143,7 +143,7 @@ func makeFakeContainer(t *testing.T, m *kubeGenericRuntimeManager, template cont
sandboxConfig, err := m.generatePodSandboxConfig(template.pod, template.sandboxAttempt)
assert.NoError(t, err, "generatePodSandboxConfig for container template %+v", template)
- containerConfig, err := m.generateContainerConfig(template.container, template.pod, template.attempt, "", template.container.Image, template.containerType)
+ containerConfig, _, err := m.generateContainerConfig(template.container, template.pod, template.attempt, "", template.container.Image, template.containerType)
assert.NoError(t, err, "generateContainerConfig for container template %+v", template)
podSandboxID := apitest.BuildSandboxName(sandboxConfig.Metadata)

View File

@ -25,6 +25,8 @@ import (
"github.com/golang/glog"
"k8s.io/api/core/v1"
kubetypes "k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/types"
@ -134,10 +136,15 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod) (
},
}
- sysctls, err := getSysctlsFromAnnotations(pod.Annotations)
- if err != nil {
- return nil, fmt.Errorf("failed to get sysctls from annotations %v for pod %q: %v", pod.Annotations, format.Pod(pod), err)
- }
+ sysctls := make(map[string]string)
+ if utilfeature.DefaultFeatureGate.Enabled(features.Sysctls) {
+ if pod.Spec.SecurityContext != nil {
+ for _, c := range pod.Spec.SecurityContext.Sysctls {
+ sysctls[c.Name] = c.Value
+ }
+ }
+ }
lc.Sysctls = sysctls
if pod.Spec.SecurityContext != nil {
@ -145,6 +152,9 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod) (
if sc.RunAsUser != nil {
lc.SecurityContext.RunAsUser = &runtimeapi.Int64Value{Value: int64(*sc.RunAsUser)}
}
+ if sc.RunAsGroup != nil {
+ lc.SecurityContext.RunAsGroup = &runtimeapi.Int64Value{Value: int64(*sc.RunAsGroup)}
+ }
lc.SecurityContext.NamespaceOptions = namespacesForPod(pod)
if sc.FSGroup != nil {

View File

@ -45,3 +45,14 @@ func TestLogSymLink(t *testing.T) {
expectedPath := path.Join(containerLogsDir, fmt.Sprintf("%s_%s-%s", podFullName, containerName, dockerId)[:251]+".log")
as.Equal(expectedPath, logSymlink(containerLogsDir, podFullName, containerName, dockerId))
}
+ func TestLegacyLogSymLink(t *testing.T) {
+ as := assert.New(t)
+ containerID := randStringBytes(80)
+ containerName := randStringBytes(70)
+ podName := randStringBytes(128)
+ podNamespace := randStringBytes(10)
+ // The file name cannot exceed 255 characters. Since the .log suffix is required, the prefix cannot exceed 251 characters.
+ expectedPath := path.Join(legacyContainerLogsDir, fmt.Sprintf("%s_%s_%s-%s", podName, podNamespace, containerName, containerID)[:251]+".log")
+ as.Equal(expectedPath, legacyLogSymlink(containerID, containerName, podName, podNamespace))
+ }

View File

@ -354,9 +354,28 @@ func ReadLogs(path, containerID string, opts *LogOptions, runtimeService interna
}
}
+ func isContainerRunning(id string, r internalapi.RuntimeService) (bool, error) {
+ s, err := r.ContainerStatus(id)
+ if err != nil {
+ return false, err
+ }
+ // Only keep following the container log while the container is running.
+ if s.State != runtimeapi.ContainerState_CONTAINER_RUNNING {
+ glog.V(5).Infof("Container %q is not running (state=%q)", id, s.State)
+ // Do not return an error because it is normal for the container to stop
+ // while we are waiting.
+ return false, nil
+ }
+ return true, nil
+ }
// waitLogs waits for the next log write. It returns a boolean and an error. The boolean
// indicates whether a new log write was observed; the error is any error that occurred while waiting.
func waitLogs(id string, w *fsnotify.Watcher, runtimeService internalapi.RuntimeService) (bool, error) {
+ // No need to wait if the container is not running.
+ if running, err := isContainerRunning(id, runtimeService); !running {
+ return false, err
+ }
errRetry := 5
for {
select {
@ -374,17 +393,9 @@ func waitLogs(id string, w *fsnotify.Watcher, runtimeService internalapi.Runtime
}
errRetry--
case <-time.After(stateCheckPeriod):
- s, err := runtimeService.ContainerStatus(id)
- if err != nil {
+ if running, err := isContainerRunning(id, runtimeService); !running {
return false, err
}
- // Only keep following container log when it is running.
- if s.State != runtimeapi.ContainerState_CONTAINER_RUNNING {
- glog.Errorf("Container %q is not running (state=%q)", id, s.State)
- // Do not return error because it's normal that the container stops
- // during waiting.
- return false, nil
- }
}
}
}
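The refactor above extracts isContainerRunning and also calls it once up front, so waitLogs returns immediately for an already-stopped container instead of blocking until the first stateCheckPeriod tick. A minimal standalone sketch of the same wait-loop shape, using the real fsnotify API with a hypothetical stillAlive callback standing in for isContainerRunning:

package logfollow

import (
	"log"
	"time"

	"github.com/fsnotify/fsnotify"
)

// waitWrite blocks until path is written to, the watcher errors, or the
// producer stops; it re-checks liveness every checkPeriod like waitLogs.
func waitWrite(path string, checkPeriod time.Duration, stillAlive func() bool) (bool, error) {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		return false, err
	}
	defer w.Close()
	if err := w.Add(path); err != nil {
		return false, err
	}
	if !stillAlive() { // no need to wait if the producer already stopped
		return false, nil
	}
	for {
		select {
		case e := <-w.Events:
			if e.Op&fsnotify.Write != 0 {
				return true, nil // new data to read
			}
		case err := <-w.Errors:
			return false, err
		case <-time.After(checkPeriod):
			if !stillAlive() {
				log.Printf("producer for %s stopped; done following", path)
				return false, nil
			}
		}
	}
}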

View File

@ -108,6 +108,9 @@ func convertToRuntimeSecurityContext(securityContext *v1.SecurityContext) *runti
if securityContext.RunAsUser != nil {
sc.RunAsUser = &runtimeapi.Int64Value{Value: int64(*securityContext.RunAsUser)}
}
+ if securityContext.RunAsGroup != nil {
+ sc.RunAsGroup = &runtimeapi.Int64Value{Value: int64(*securityContext.RunAsGroup)}
+ }
if securityContext.Privileged != nil {
sc.Privileged = *securityContext.Privileged
}
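With this hunk, RunAsGroup flows through to the CRI security context the same way RunAsUser does (RunAsGroup is an alpha field behind its own feature gate in this era). A sketch mirroring the updated test earlier in this commit, showing a container security context that exercises both fields:

package example

import "k8s.io/api/core/v1"

func securityContextWithIDs() *v1.SecurityContext {
	runAsUser, runAsGroup := int64(1000), int64(2000)
	return &v1.SecurityContext{
		RunAsUser:  &runAsUser,
		RunAsGroup: &runAsGroup,
	}
}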