Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 18:53:35 +00:00)

Commit: Fresh dep ensure
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/BUILD | 108 (generated, vendored)
@@ -12,8 +12,14 @@ go_library(
         "doc.go",
         "fake_kuberuntime_manager.go",
         "helpers.go",
+        "helpers_linux.go",
+        "helpers_unsupported.go",
+        "helpers_windows.go",
         "instrumented_services.go",
         "kuberuntime_container.go",
+        "kuberuntime_container_linux.go",
+        "kuberuntime_container_unsupported.go",
+        "kuberuntime_container_windows.go",
         "kuberuntime_gc.go",
         "kuberuntime_image.go",
         "kuberuntime_logs.go",
@@ -22,53 +28,7 @@ go_library(
         "labels.go",
         "legacy.go",
         "security_context.go",
-    ] + select({
-        "@io_bazel_rules_go//go/platform:android": [
-            "helpers_unsupported.go",
-            "kuberuntime_container_unsupported.go",
-        ],
-        "@io_bazel_rules_go//go/platform:darwin": [
-            "helpers_unsupported.go",
-            "kuberuntime_container_unsupported.go",
-        ],
-        "@io_bazel_rules_go//go/platform:dragonfly": [
-            "helpers_unsupported.go",
-            "kuberuntime_container_unsupported.go",
-        ],
-        "@io_bazel_rules_go//go/platform:freebsd": [
-            "helpers_unsupported.go",
-            "kuberuntime_container_unsupported.go",
-        ],
-        "@io_bazel_rules_go//go/platform:linux": [
-            "helpers_linux.go",
-            "kuberuntime_container_linux.go",
-        ],
-        "@io_bazel_rules_go//go/platform:nacl": [
-            "helpers_unsupported.go",
-            "kuberuntime_container_unsupported.go",
-        ],
-        "@io_bazel_rules_go//go/platform:netbsd": [
-            "helpers_unsupported.go",
-            "kuberuntime_container_unsupported.go",
-        ],
-        "@io_bazel_rules_go//go/platform:openbsd": [
-            "helpers_unsupported.go",
-            "kuberuntime_container_unsupported.go",
-        ],
-        "@io_bazel_rules_go//go/platform:plan9": [
-            "helpers_unsupported.go",
-            "kuberuntime_container_unsupported.go",
-        ],
-        "@io_bazel_rules_go//go/platform:solaris": [
-            "helpers_unsupported.go",
-            "kuberuntime_container_unsupported.go",
-        ],
-        "@io_bazel_rules_go//go/platform:windows": [
-            "helpers_windows.go",
-            "kuberuntime_container_windows.go",
-        ],
-        "//conditions:default": [],
-    }),
+    ],
     importpath = "k8s.io/kubernetes/pkg/kubelet/kuberuntime",
     deps = [
         "//pkg/api/legacyscheme:go_default_library",
@@ -85,6 +45,7 @@ go_library(
         "//pkg/kubelet/lifecycle:go_default_library",
         "//pkg/kubelet/metrics:go_default_library",
         "//pkg/kubelet/prober/results:go_default_library",
+        "//pkg/kubelet/runtimeclass:go_default_library",
         "//pkg/kubelet/types:go_default_library",
         "//pkg/kubelet/util/cache:go_default_library",
         "//pkg/kubelet/util/format:go_default_library",
@@ -93,21 +54,21 @@ go_library(
         "//pkg/util/parsers:go_default_library",
         "//pkg/util/selinux:go_default_library",
         "//pkg/util/tail:go_default_library",
-        "//pkg/util/version:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/reference:go_default_library",
+        "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
         "//vendor/github.com/armon/circbuf:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
         "//vendor/google.golang.org/grpc:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
-        "//vendor/k8s.io/client-go/tools/record:go_default_library",
-        "//vendor/k8s.io/client-go/tools/reference:go_default_library",
-        "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ] + select({
         "@io_bazel_rules_go//go/platform:linux": [
             "//pkg/kubelet/qos:go_default_library",
@@ -123,8 +84,10 @@ go_library(
 go_test(
     name = "go_default_test",
     srcs = [
+        "helpers_linux_test.go",
         "helpers_test.go",
         "instrumented_services_test.go",
+        "kuberuntime_container_linux_test.go",
         "kuberuntime_container_test.go",
         "kuberuntime_gc_test.go",
         "kuberuntime_image_test.go",
@@ -132,13 +95,9 @@ go_test(
         "kuberuntime_sandbox_test.go",
         "labels_test.go",
         "legacy_test.go",
+        "main_test.go",
         "security_context_test.go",
-    ] + select({
-        "@io_bazel_rules_go//go/platform:linux": [
-            "kuberuntime_container_linux_test.go",
-        ],
-        "//conditions:default": [],
-    }),
+    ],
     embed = [":go_default_library"],
     deps = [
         "//pkg/credentialprovider:go_default_library",
@@ -149,19 +108,22 @@ go_test(
         "//pkg/kubelet/container/testing:go_default_library",
         "//pkg/kubelet/lifecycle:go_default_library",
         "//pkg/kubelet/metrics:go_default_library",
+        "//pkg/kubelet/runtimeclass:go_default_library",
+        "//pkg/kubelet/runtimeclass/testing:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
+        "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
         "//vendor/github.com/golang/mock/gomock:go_default_library",
         "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
         "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
         "//vendor/github.com/stretchr/testify/require:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
-        "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
+        "//vendor/k8s.io/utils/pointer:go_default_library",
     ],
 )
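The dropped select() blocks had been restating, inside the BUILD file, a decision the Go toolchain already makes from file names: newer rules_go lists every platform-suffixed source and lets Go's build constraints pick. A minimal two-file sketch of that convention, with an invented package and function (not from this repo):

    // File cfsquota_linux.go: compiled only when GOOS=linux (filename suffix rule).
    package platformdemo

    func quotaSupported() bool { return true }

    // File cfsquota_other.go: the build tag excludes linux, so every other
    // platform compiles exactly one definition of quotaSupported.
    // +build !linux

    package platformdemo

    func quotaSupported() bool { return false }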
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go | 6 (generated, vendored)
@@ -21,6 +21,7 @@ import (
 	"time"
 
 	cadvisorapi "github.com/google/cadvisor/info/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/flowcontrol"
@@ -69,11 +70,12 @@ func (f *fakePodStateProvider) IsPodTerminated(uid types.UID) bool {
 	return !found
 }
 
-func NewFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageService internalapi.ImageManagerService, machineInfo *cadvisorapi.MachineInfo, osInterface kubecontainer.OSInterface, runtimeHelper kubecontainer.RuntimeHelper, keyring credentialprovider.DockerKeyring) (*kubeGenericRuntimeManager, error) {
+func newFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageService internalapi.ImageManagerService, machineInfo *cadvisorapi.MachineInfo, osInterface kubecontainer.OSInterface, runtimeHelper kubecontainer.RuntimeHelper, keyring credentialprovider.DockerKeyring) (*kubeGenericRuntimeManager, error) {
 	recorder := &record.FakeRecorder{}
 	kubeRuntimeManager := &kubeGenericRuntimeManager{
 		recorder:            recorder,
 		cpuCFSQuota:         false,
+		cpuCFSQuotaPeriod:   metav1.Duration{Duration: time.Microsecond * 100},
 		livenessManager:     proberesults.NewManager(),
 		containerRefManager: kubecontainer.NewRefManager(),
 		machineInfo:         machineInfo,
@@ -91,7 +93,7 @@ func NewFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageS
 		return nil, err
 	}
 
-	kubeRuntimeManager.containerGC = NewContainerGC(runtimeService, newFakePodStateProvider(), kubeRuntimeManager)
+	kubeRuntimeManager.containerGC = newContainerGC(runtimeService, newFakePodStateProvider(), kubeRuntimeManager)
 	kubeRuntimeManager.runtimeName = typedVersion.RuntimeName
 	kubeRuntimeManager.imagePuller = images.NewImageManager(
 		kubecontainer.FilterEventRecorder(recorder),
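Besides the lint-driven rename to the unexported newFakeKubeRuntimeManager, the fake now wires a cpuCFSQuotaPeriod (note the fixture uses time.Microsecond * 100, i.e. 100µs, not the 100ms production default, presumably just a test value). The field's metav1.Duration type is a JSON-friendly wrapper around time.Duration; a small standalone sketch of that behavior, not taken from the diff:

    package main

    import (
        "encoding/json"
        "fmt"
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
        // metav1.Duration marshals as the human-readable duration string,
        // which is why kubelet config can say "100ms" directly.
        d := metav1.Duration{Duration: 100 * time.Millisecond}
        b, _ := json.Marshal(d)
        fmt.Println(string(b)) // "100ms"
    }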
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go | 6 (generated, vendored)
@@ -22,10 +22,10 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/features"
 	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@@ -79,9 +79,11 @@ func toRuntimeProtocol(protocol v1.Protocol) runtimeapi.Protocol {
 		return runtimeapi.Protocol_TCP
 	case v1.ProtocolUDP:
 		return runtimeapi.Protocol_UDP
+	case v1.ProtocolSCTP:
+		return runtimeapi.Protocol_SCTP
 	}
 
-	glog.Warningf("Unknown protocol %q: defaulting to TCP", protocol)
+	klog.Warningf("Unknown protocol %q: defaulting to TCP", protocol)
 	return runtimeapi.Protocol_TCP
 }
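Two independent fixes land here: toRuntimeProtocol learns v1.ProtocolSCTP (SCTP was added to the CRI protocol enum in this release), and glog is swapped for klog. The klog move, repeated through the rest of this commit, is deliberately mechanical because klog preserves glog's call surface; a standalone sketch of the equivalent usage:

    // Sketch: klog keeps glog's printf-style API, so migration is an import swap
    // plus explicit flag registration.
    package main

    import (
        "flag"

        "k8s.io/klog"
    )

    func main() {
        klog.InitFlags(nil) // registers -v and friends; glog did this in init()
        flag.Parse()
        klog.V(4).Infof("verbose message, shown with -v=4 or higher")
        klog.Warningf("same surface as glog.Warningf")
        klog.Flush()
    }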
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers_linux.go | 18 (generated, vendored)
@@ -18,6 +18,11 @@ limitations under the License.
 
 package kuberuntime
 
+import (
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	kubefeatures "k8s.io/kubernetes/pkg/features"
+)
+
 const (
 	// Taken from lmctfy https://github.com/google/lmctfy/blob/master/lmctfy/controllers/cpu_controller.cc
 	minShares = 2
@@ -25,7 +30,7 @@ const (
 	milliCPUToCPU = 1000
 
 	// 100000 is equivalent to 100ms
-	quotaPeriod    = 100 * minQuotaPeriod
+	quotaPeriod    = 100000
 	minQuotaPeriod = 1000
 )
 
@@ -44,21 +49,22 @@ func milliCPUToShares(milliCPU int64) int64 {
 }
 
 // milliCPUToQuota converts milliCPU to CFS quota and period values
-func milliCPUToQuota(milliCPU int64) (quota int64, period int64) {
+func milliCPUToQuota(milliCPU int64, period int64) (quota int64) {
 	// CFS quota is measured in two values:
 	//  - cfs_period_us=100ms (the amount of time to measure usage across)
 	//  - cfs_quota=20ms (the amount of cpu time allowed to be used across a period)
 	// so in the above example, you are limited to 20% of a single CPU
 	// for multi-cpu environments, you just scale equivalent amounts
 	// see https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt for details
 	if milliCPU == 0 {
 		return
 	}
-	// we set the period to 100ms by default
-	period = quotaPeriod
+	if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CPUCFSQuotaPeriod) {
+		period = quotaPeriod
+	}
 
 	// we then convert your milliCPU to a value normalized over a period
-	quota = (milliCPU * quotaPeriod) / milliCPUToCPU
+	quota = (milliCPU * period) / milliCPUToCPU
 
 	// quota needs to be a minimum of 1ms.
 	if quota < minQuotaPeriod {
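The arithmetic being gated here is quota = milliCPU × period / 1000, floored at minQuotaPeriod (1ms). With the CPUCFSQuotaPeriod gate off, period is forced back to the default 100000µs, preserving the old behavior; with it on, the caller's period flows through. A standalone sketch of the same math (the helper name quotaFor is mine, constants copied from the hunk):

    package main

    import "fmt"

    const (
        milliCPUToCPU  = 1000
        quotaPeriod    = 100000 // 100ms expressed in microseconds
        minQuotaPeriod = 1000   // 1ms floor required by CFS
    )

    func quotaFor(milliCPU, period int64) int64 {
        if milliCPU == 0 {
            return 0 // no CPU limit set
        }
        quota := (milliCPU * period) / milliCPUToCPU
        if quota < minQuotaPeriod {
            quota = minQuotaPeriod
        }
        return quota
    }

    func main() {
        fmt.Println(quotaFor(500, quotaPeriod)) // 50000: half of each 100ms period
        fmt.Println(quotaFor(5, quotaPeriod))   // 1000: clamped to the 1ms floor
        fmt.Println(quotaFor(1500, 5000))       // 7500: 1.5 CPUs at a 5ms period
    }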
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers_linux_test.go | 204 (generated, vendored, new file)
@@ -0,0 +1,204 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kuberuntime
+
+import (
+	"testing"
+
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
+	"k8s.io/kubernetes/pkg/features"
+)
+
+func TestMilliCPUToQuota(t *testing.T) {
+	for _, testCase := range []struct {
+		msg      string
+		input    int64
+		expected int64
+		period   uint64
+	}{
+		{
+			msg:      "all-zero",
+			input:    int64(0),
+			expected: int64(0),
+			period:   uint64(0),
+		},
+		{
+			msg:      "5 input default quota and period",
+			input:    int64(5),
+			expected: int64(1000),
+			period:   uint64(100000),
+		},
+		{
+			msg:      "9 input default quota and period",
+			input:    int64(9),
+			expected: int64(1000),
+			period:   uint64(100000),
+		},
+		{
+			msg:      "10 input default quota and period",
+			input:    int64(10),
+			expected: int64(1000),
+			period:   uint64(100000),
+		},
+		{
+			msg:      "200 input 20k quota and default period",
+			input:    int64(200),
+			expected: int64(20000),
+			period:   uint64(100000),
+		},
+		{
+			msg:      "500 input 50k quota and default period",
+			input:    int64(500),
+			expected: int64(50000),
+			period:   uint64(100000),
+		},
+		{
+			msg:      "1k input 100k quota and default period",
+			input:    int64(1000),
+			expected: int64(100000),
+			period:   uint64(100000),
+		},
+		{
+			msg:      "1500 input 150k quota and default period",
+			input:    int64(1500),
+			expected: int64(150000),
+			period:   uint64(100000),
+		}} {
+		t.Run(testCase.msg, func(t *testing.T) {
+			quota := milliCPUToQuota(testCase.input, int64(testCase.period))
+			if quota != testCase.expected {
+				t.Errorf("Input %v and %v, expected quota %v, but got quota %v", testCase.input, testCase.period, testCase.expected, quota)
+			}
+		})
+	}
+}
+
+func TestMilliCPUToQuotaWithCustomCPUCFSQuotaPeriod(t *testing.T) {
+	utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CPUCFSQuotaPeriod, true)
+	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CPUCFSQuotaPeriod, false)
+
+	for _, testCase := range []struct {
+		msg      string
+		input    int64
+		expected int64
+		period   uint64
+	}{
+		{
+			msg:      "all-zero",
+			input:    int64(0),
+			expected: int64(0),
+			period:   uint64(0),
+		},
+		{
+			msg:      "5 input default quota and period",
+			input:    int64(5),
+			expected: minQuotaPeriod,
+			period:   uint64(100000),
+		},
+		{
+			msg:      "9 input default quota and period",
+			input:    int64(9),
+			expected: minQuotaPeriod,
+			period:   uint64(100000),
+		},
+		{
+			msg:      "10 input default quota and period",
+			input:    int64(10),
+			expected: minQuotaPeriod,
+			period:   uint64(100000),
+		},
+		{
+			msg:      "200 input 20k quota and default period",
+			input:    int64(200),
+			expected: int64(20000),
+			period:   uint64(100000),
+		},
+		{
+			msg:      "500 input 50k quota and default period",
+			input:    int64(500),
+			expected: int64(50000),
+			period:   uint64(100000),
+		},
+		{
+			msg:      "1k input 100k quota and default period",
+			input:    int64(1000),
+			expected: int64(100000),
+			period:   uint64(100000),
+		},
+		{
+			msg:      "1500 input 150k quota and default period",
+			input:    int64(1500),
+			expected: int64(150000),
+			period:   uint64(100000),
+		},
+		{
+			msg:      "5 input 10k period and default quota expected",
+			input:    int64(5),
+			period:   uint64(10000),
+			expected: minQuotaPeriod,
+		},
+		{
+			msg:      "5 input 5k period and default quota expected",
+			input:    int64(5),
+			period:   uint64(5000),
+			expected: minQuotaPeriod,
+		},
+		{
+			msg:      "9 input 10k period and default quota expected",
+			input:    int64(9),
+			period:   uint64(10000),
+			expected: minQuotaPeriod,
+		},
+		{
+			msg:      "10 input 200k period and 2000 quota expected",
+			input:    int64(10),
+			period:   uint64(200000),
+			expected: int64(2000),
+		},
+		{
+			msg:      "200 input 200k period and 40k quota",
+			input:    int64(200),
+			period:   uint64(200000),
+			expected: int64(40000),
+		},
+		{
+			msg:      "500 input 20k period and 20k expected quota",
+			input:    int64(500),
+			period:   uint64(20000),
+			expected: int64(10000),
+		},
+		{
+			msg:      "1000 input 10k period and 10k expected quota",
+			input:    int64(1000),
+			period:   uint64(10000),
+			expected: int64(10000),
+		},
+		{
+			msg:      "1500 input 5000 period and 7500 expected quota",
+			input:    int64(1500),
+			period:   uint64(5000),
+			expected: int64(7500),
+		}} {
+		t.Run(testCase.msg, func(t *testing.T) {
+			quota := milliCPUToQuota(testCase.input, int64(testCase.period))
+			if quota != testCase.expected {
+				t.Errorf("Input %v and %v, expected quota %v, but got quota %v", testCase.input, testCase.period, testCase.expected, quota)
+			}
+		})
+	}
+}
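Spot-checking the table against the formula: 10 mCPU at a 200000µs period gives (10 × 200000) / 1000 = 2000µs, and 1500 mCPU at a 5000µs period gives (1500 × 5000) / 1000 = 7500µs, matching the expected values, while the sub-1000 results (inputs 5, 9, 10 at the default period) all clamp to minQuotaPeriod. The all-zero case exercises the early return taken when a pod has no CPU limit.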
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/instrumented_services.go | 4 (generated, vendored)
@@ -176,11 +176,11 @@ func (in instrumentedRuntimeService) Attach(req *runtimeapi.AttachRequest) (*run
 	return resp, err
 }
 
-func (in instrumentedRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (string, error) {
+func (in instrumentedRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) {
 	const operation = "run_podsandbox"
 	defer recordOperation(operation, time.Now())
 
-	out, err := in.service.RunPodSandbox(config)
+	out, err := in.service.RunPodSandbox(config, runtimeHandler)
 	recordError(operation, err)
 	return out, err
 }
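RunPodSandbox gains a runtimeHandler string, plumbing the RuntimeClass-selected handler down to the CRI call; the wrapper itself remains a thin instrumentation decorator that times the call and records errors around a delegated service. A generic sketch of that decorator shape (all names invented):

    package main

    import (
        "fmt"
        "time"
    )

    type runner interface {
        Run(handler string) (string, error)
    }

    // instrumented wraps another runner, timing each call and noting failures,
    // exactly the shape instrumentedRuntimeService uses around the CRI client.
    type instrumented struct{ inner runner }

    func (i instrumented) Run(handler string) (string, error) {
        const operation = "run"
        start := time.Now()
        defer func() { fmt.Printf("%s took %v\n", operation, time.Since(start)) }()
        out, err := i.inner.Run(handler) // delegate, then record the outcome
        if err != nil {
            fmt.Printf("%s failed: %v\n", operation, err)
        }
        return out, err
    }

    type real struct{}

    func (real) Run(handler string) (string, error) { return "sandbox-id", nil }

    func main() {
        id, _ := instrumented{inner: real{}}.Run("runc")
        fmt.Println(id)
    }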
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/instrumented_services_test.go | 6 (generated, vendored)
@@ -38,7 +38,7 @@ func TestRecordOperation(t *testing.T) {
 	assert.NoError(t, err)
 	defer l.Close()
 
-	prometheusUrl := "http://" + temporalServer + "/metrics"
+	prometheusURL := "http://" + temporalServer + "/metrics"
 	mux := http.NewServeMux()
 	mux.Handle("/metrics", prometheus.Handler())
 	server := &http.Server{
@@ -55,11 +55,11 @@ func TestRecordOperation(t *testing.T) {
 
 	assert.HTTPBodyContains(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		mux.ServeHTTP(w, r)
-	}), "GET", prometheusUrl, nil, runtimeOperationsCounterExpected)
+	}), "GET", prometheusURL, nil, runtimeOperationsCounterExpected)
 
 	assert.HTTPBodyContains(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		mux.ServeHTTP(w, r)
-	}), "GET", prometheusUrl, nil, runtimeOperationsLatencyExpected)
+	}), "GET", prometheusURL, nil, runtimeOperationsLatencyExpected)
 }
 
 func TestInstrumentedVersion(t *testing.T) {
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container.go | 70 (generated, vendored)
@@ -17,6 +17,7 @@ limitations under the License.
 package kuberuntime
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"io"
@@ -32,7 +33,7 @@ import (
 	"google.golang.org/grpc"
 
 	"github.com/armon/circbuf"
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -49,9 +50,14 @@ import (
 )
 
 var (
+	// ErrCreateContainerConfig - failed to create container config
 	ErrCreateContainerConfig = errors.New("CreateContainerConfigError")
-	ErrCreateContainer       = errors.New("CreateContainerError")
-	ErrPostStartHook         = errors.New("PostStartHookError")
+	// ErrCreateContainer - failed to create container
+	ErrCreateContainer = errors.New("CreateContainerError")
+	// ErrPreStartHook - failed to execute PreStartHook
+	ErrPreStartHook = errors.New("PreStartHookError")
+	// ErrPostStartHook - failed to execute PostStartHook
+	ErrPostStartHook = errors.New("PostStartHookError")
 )
 
 // recordContainerEvent should be used by the runtime manager for all container related events.
@@ -62,7 +68,7 @@ var (
 func (m *kubeGenericRuntimeManager) recordContainerEvent(pod *v1.Pod, container *v1.Container, containerID, eventType, reason, message string, args ...interface{}) {
 	ref, err := kubecontainer.GenerateContainerRef(pod, container)
 	if err != nil {
-		glog.Errorf("Can't make a ref to pod %q, container %v: %v", format.Pod(pod), container.Name, err)
+		klog.Errorf("Can't make a ref to pod %q, container %v: %v", format.Pod(pod), container.Name, err)
 		return
 	}
 	eventMessage := message
@@ -95,9 +101,9 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
 	// Step 2: create the container.
 	ref, err := kubecontainer.GenerateContainerRef(pod, container)
 	if err != nil {
-		glog.Errorf("Can't make a ref to pod %q, container %v: %v", format.Pod(pod), container.Name, err)
+		klog.Errorf("Can't make a ref to pod %q, container %v: %v", format.Pod(pod), container.Name, err)
 	}
-	glog.V(4).Infof("Generating ref for container %s: %#v", container.Name, ref)
+	klog.V(4).Infof("Generating ref for container %s: %#v", container.Name, ref)
 
 	// For a new container, the RestartCount should be 0
 	restartCount := 0
@@ -122,8 +128,8 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
 	}
 	err = m.internalLifecycle.PreStartContainer(pod, container, containerID)
 	if err != nil {
-		m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedToStartContainer, "Internal PreStartContainer hook failed: %v", err)
-		return "Internal PreStartContainer hook failed", err
+		m.recordContainerEvent(pod, container, containerID, v1.EventTypeWarning, events.FailedToStartContainer, "Internal PreStartContainer hook failed: %v", grpc.ErrorDesc(err))
+		return grpc.ErrorDesc(err), ErrPreStartHook
 	}
 	m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.CreatedContainer, "Created container")
 
@@ -156,7 +162,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
 	// to create it in the first place. it happens when journald logging driver is used with docker.
 	if _, err := m.osInterface.Stat(containerLog); !os.IsNotExist(err) {
 		if err := m.osInterface.Symlink(containerLog, legacySymlink); err != nil {
-			glog.Errorf("Failed to create legacy symbolic link %q to container %q log %q: %v",
+			klog.Errorf("Failed to create legacy symbolic link %q to container %q log %q: %v",
 				legacySymlink, containerID, containerLog, err)
 		}
 	}
@@ -171,7 +177,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
 		if handlerErr != nil {
 			m.recordContainerEvent(pod, container, kubeContainerID.ID, v1.EventTypeWarning, events.FailedPostStartHook, msg)
 			if err := m.killContainer(pod, kubeContainerID, container.Name, "FailedPostStartHook", nil); err != nil {
-				glog.Errorf("Failed to kill container %q(id=%q) in pod %q: %v, %v",
+				klog.Errorf("Failed to kill container %q(id=%q) in pod %q: %v, %v",
 					container.Name, kubeContainerID.String(), format.Pod(pod), ErrPostStartHook, err)
 			}
 			return msg, fmt.Errorf("%s: %v", ErrPostStartHook, handlerErr)
@@ -326,7 +332,7 @@ func (m *kubeGenericRuntimeManager) getKubeletContainers(allContainers bool) ([]
 
 	containers, err := m.runtimeService.ListContainers(filter)
 	if err != nil {
-		glog.Errorf("getKubeletContainers failed: %v", err)
+		klog.Errorf("getKubeletContainers failed: %v", err)
 		return nil, err
 	}
 
@@ -366,7 +372,7 @@ func getTerminationMessage(status *runtimeapi.ContainerStatus, terminationMessag
 func (m *kubeGenericRuntimeManager) readLastStringFromContainerLogs(path string) string {
 	value := int64(kubecontainer.MaxContainerTerminationMessageLogLines)
 	buf, _ := circbuf.NewBuffer(kubecontainer.MaxContainerTerminationMessageLogLength)
-	if err := m.ReadLogs(path, "", &v1.PodLogOptions{TailLines: &value}, buf, buf); err != nil {
+	if err := m.ReadLogs(context.Background(), path, "", &v1.PodLogOptions{TailLines: &value}, buf, buf); err != nil {
 		return fmt.Sprintf("Error on reading termination message from logs: %v", err)
 	}
 	return buf.String()
@@ -379,7 +385,7 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
 		LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(uid)},
 	})
 	if err != nil {
-		glog.Errorf("ListContainers error: %v", err)
+		klog.Errorf("ListContainers error: %v", err)
 		return nil, err
 	}
 
@@ -388,7 +394,7 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
 	for i, c := range containers {
 		status, err := m.runtimeService.ContainerStatus(c.Id)
 		if err != nil {
-			glog.Errorf("ContainerStatus for %s error: %v", c.Id, err)
+			klog.Errorf("ContainerStatus for %s error: %v", c.Id, err)
 			return nil, err
 		}
 		cStatus := toKubeContainerStatus(status, m.runtimeName)
@@ -455,7 +461,7 @@ func toKubeContainerStatus(status *runtimeapi.ContainerStatus, runtimeName strin
 
 // executePreStopHook runs the pre-stop lifecycle hooks if applicable and returns the duration it takes.
 func (m *kubeGenericRuntimeManager) executePreStopHook(pod *v1.Pod, containerID kubecontainer.ContainerID, containerSpec *v1.Container, gracePeriod int64) int64 {
-	glog.V(3).Infof("Running preStop hook for container %q", containerID.String())
+	klog.V(3).Infof("Running preStop hook for container %q", containerID.String())
 
 	start := metav1.Now()
 	done := make(chan struct{})
@@ -463,16 +469,16 @@ func (m *kubeGenericRuntimeManager) executePreStopHook(pod *v1.Pod, containerID
 		defer close(done)
 		defer utilruntime.HandleCrash()
 		if msg, err := m.runner.Run(containerID, pod, containerSpec, containerSpec.Lifecycle.PreStop); err != nil {
-			glog.Errorf("preStop hook for container %q failed: %v", containerSpec.Name, err)
+			klog.Errorf("preStop hook for container %q failed: %v", containerSpec.Name, err)
 			m.recordContainerEvent(pod, containerSpec, containerID.ID, v1.EventTypeWarning, events.FailedPreStopHook, msg)
 		}
 	}()
 
 	select {
 	case <-time.After(time.Duration(gracePeriod) * time.Second):
-		glog.V(2).Infof("preStop hook for container %q did not complete in %d seconds", containerID, gracePeriod)
+		klog.V(2).Infof("preStop hook for container %q did not complete in %d seconds", containerID, gracePeriod)
 	case <-done:
-		glog.V(3).Infof("preStop hook for container %q completed", containerID)
+		klog.V(3).Infof("preStop hook for container %q completed", containerID)
 	}
 
 	return int64(metav1.Now().Sub(start.Time).Seconds())
@@ -510,8 +516,8 @@ func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID
 		},
 	}
 	container = &v1.Container{
-		Name:  l.ContainerName,
-		Ports: a.ContainerPorts,
+		Name:                   l.ContainerName,
+		Ports:                  a.ContainerPorts,
 		TerminationMessagePath: a.TerminationMessagePath,
 	}
 	if a.PreStopHandler != nil {
@@ -550,7 +556,7 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubec
 		gracePeriod = *pod.Spec.TerminationGracePeriodSeconds
 	}
 
-	glog.V(2).Infof("Killing container %q with %d second grace period", containerID.String(), gracePeriod)
+	klog.V(2).Infof("Killing container %q with %d second grace period", containerID.String(), gracePeriod)
 
 	// Run internal pre-stop lifecycle hook
 	if err := m.internalLifecycle.PreStopContainer(containerID.ID); err != nil {
@@ -567,14 +573,14 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubec
 	}
 	if gracePeriodOverride != nil {
 		gracePeriod = *gracePeriodOverride
-		glog.V(3).Infof("Killing container %q, but using %d second grace period override", containerID, gracePeriod)
+		klog.V(3).Infof("Killing container %q, but using %d second grace period override", containerID, gracePeriod)
 	}
 
 	err := m.runtimeService.StopContainer(containerID.ID, gracePeriod)
 	if err != nil {
-		glog.Errorf("Container %q termination failed with gracePeriod %d: %v", containerID.String(), gracePeriod, err)
+		klog.Errorf("Container %q termination failed with gracePeriod %d: %v", containerID.String(), gracePeriod, err)
 	} else {
-		glog.V(3).Infof("Container %q exited normally", containerID.String())
+		klog.V(3).Infof("Container %q exited normally", containerID.String())
 	}
 
 	message := fmt.Sprintf("Killing container with id %s", containerID.String())
@@ -637,7 +643,7 @@ func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(pod *v1.Pod,
 			continue
 		}
 		// prune all other init containers that match this container name
-		glog.V(4).Infof("Removing init container %q instance %q %d", status.Name, status.ID.ID, count)
+		klog.V(4).Infof("Removing init container %q instance %q %d", status.Name, status.ID.ID, count)
 		if err := m.removeContainer(status.ID.ID); err != nil {
 			utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", status.Name, err, format.Pod(pod)))
 			continue
@@ -647,7 +653,7 @@ func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(pod *v1.Pod,
 		if _, ok := m.containerRefManager.GetRef(status.ID); ok {
 			m.containerRefManager.ClearRef(status.ID)
 		} else {
-			glog.Warningf("No ref for container %q", status.ID)
+			klog.Warningf("No ref for container %q", status.ID)
 		}
 	}
 }
@@ -669,7 +675,7 @@ func (m *kubeGenericRuntimeManager) purgeInitContainers(pod *v1.Pod, podStatus *
 		}
 		count++
 		// Purge all init containers that match this container name
-		glog.V(4).Infof("Removing init container %q instance %q %d", status.Name, status.ID.ID, count)
+		klog.V(4).Infof("Removing init container %q instance %q %d", status.Name, status.ID.ID, count)
 		if err := m.removeContainer(status.ID.ID); err != nil {
 			utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", status.Name, err, format.Pod(pod)))
 			continue
@@ -678,7 +684,7 @@ func (m *kubeGenericRuntimeManager) purgeInitContainers(pod *v1.Pod, podStatus *
 		if _, ok := m.containerRefManager.GetRef(status.ID); ok {
 			m.containerRefManager.ClearRef(status.ID)
 		} else {
-			glog.Warningf("No ref for container %q", status.ID)
+			klog.Warningf("No ref for container %q", status.ID)
 		}
 	}
 }
@@ -730,13 +736,13 @@ func findNextInitContainerToRun(pod *v1.Pod, podStatus *kubecontainer.PodStatus)
 }
 
 // GetContainerLogs returns logs of a specific container.
-func (m *kubeGenericRuntimeManager) GetContainerLogs(pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
+func (m *kubeGenericRuntimeManager) GetContainerLogs(ctx context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
 	status, err := m.runtimeService.ContainerStatus(containerID.ID)
 	if err != nil {
-		glog.V(4).Infof("failed to get container status for %v: %v", containerID.String(), err)
+		klog.V(4).Infof("failed to get container status for %v: %v", containerID.String(), err)
 		return fmt.Errorf("Unable to retrieve container logs for %v", containerID.String())
 	}
-	return m.ReadLogs(status.GetLogPath(), containerID.ID, logOptions, stdout, stderr)
+	return m.ReadLogs(ctx, status.GetLogPath(), containerID.ID, logOptions, stdout, stderr)
 }
 
 // GetExec gets the endpoint the runtime will serve the exec request from.
@@ -789,7 +795,7 @@ func (m *kubeGenericRuntimeManager) RunInContainer(id kubecontainer.ContainerID,
 // Notice that we assume that the container should only be removed in non-running state, and
 // it will not write container logs anymore in that state.
 func (m *kubeGenericRuntimeManager) removeContainer(containerID string) error {
-	glog.V(4).Infof("Removing container %q", containerID)
+	klog.V(4).Infof("Removing container %q", containerID)
 	// Call internal container post-stop lifecycle hook.
 	if err := m.internalLifecycle.PostStopContainer(containerID); err != nil {
 		return err
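Beyond the pervasive glog-to-klog swap, the interesting change in this file is error handling: PreStartContainer failures now surface as the sentinel ErrPreStartHook, with the human-readable detail extracted via grpc.ErrorDesc, so callers can classify the failure by identity while events carry the message. A minimal sketch of the sentinel pattern with a hypothetical caller:

    package main

    import (
        "errors"
        "fmt"
    )

    // A sentinel error: one fixed value, compared by identity.
    var errPreStartHook = errors.New("PreStartHookError")

    func start() (string, error) {
        // Imagine the pre-start hook failing here; the message travels
        // separately from the typed error, as startContainer now does.
        return "hook refused to run", errPreStartHook
    }

    func main() {
        msg, err := start()
        if err == errPreStartHook { // identity comparison against the sentinel
            fmt.Printf("pre-start hook failed: %s\n", msg)
        }
    }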
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container_linux.go | 5 (generated, vendored)
@@ -19,6 +19,8 @@ limitations under the License.
 package kuberuntime
 
 import (
+	"time"
+
 	"k8s.io/api/core/v1"
 	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
 	"k8s.io/kubernetes/pkg/kubelet/qos"
@@ -65,7 +67,8 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.C
 	if m.cpuCFSQuota {
 		// if cpuLimit.Amount is nil, then the appropriate default value is returned
 		// to allow full usage of cpu resource.
-		cpuQuota, cpuPeriod := milliCPUToQuota(cpuLimit.MilliValue())
+		cpuPeriod := int64(m.cpuCFSQuotaPeriod.Duration / time.Microsecond)
+		cpuQuota := milliCPUToQuota(cpuLimit.MilliValue(), cpuPeriod)
 		lc.Resources.CpuQuota = cpuQuota
 		lc.Resources.CpuPeriod = cpuPeriod
 	}
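With the period now configurable, the kubelet derives the cpu.cfs_period_us integer from the metav1.Duration setting by integer-dividing by time.Microsecond, so the 100ms default becomes 100000. A standalone sketch of that conversion:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        period := 100 * time.Millisecond
        // Dividing two Durations yields the dimensionless count;
        // the int64 cast matches the CRI field type.
        us := int64(period / time.Microsecond)
        fmt.Println(us) // 100000, i.e. cpu.cfs_period_us
    }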
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go | 4 (generated, vendored)
@@ -124,8 +124,8 @@ func TestGenerateContainerConfig(t *testing.T) {
 	_, _, err = m.generateContainerConfig(&podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, kubecontainer.ContainerTypeRegular)
 	assert.Error(t, err)
 
-	imageId, _ := imageService.PullImage(&runtimeapi.ImageSpec{Image: "busybox"}, nil)
-	image, _ := imageService.ImageStatus(&runtimeapi.ImageSpec{Image: imageId})
+	imageID, _ := imageService.PullImage(&runtimeapi.ImageSpec{Image: "busybox"}, nil)
+	image, _ := imageService.ImageStatus(&runtimeapi.ImageSpec{Image: imageID})
 
 	image.Uid = nil
 	image.Username = "test"
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_container_test.go | 18 (generated, vendored)
@@ -56,17 +56,17 @@ func TestRemoveContainer(t *testing.T) {
 	_, fakeContainers := makeAndSetFakePod(t, m, fakeRuntime, pod)
 	assert.Equal(t, len(fakeContainers), 1)
 
-	containerId := fakeContainers[0].Id
+	containerID := fakeContainers[0].Id
 	fakeOS := m.osInterface.(*containertest.FakeOS)
-	err = m.removeContainer(containerId)
+	err = m.removeContainer(containerID)
 	assert.NoError(t, err)
 	// Verify container log is removed
 	expectedContainerLogPath := filepath.Join(podLogsRootDirectory, "12345678", "foo", "0.log")
-	expectedContainerLogSymlink := legacyLogSymlink(containerId, "foo", "bar", "new")
+	expectedContainerLogSymlink := legacyLogSymlink(containerID, "foo", "bar", "new")
 	assert.Equal(t, fakeOS.Removes, []string{expectedContainerLogPath, expectedContainerLogSymlink})
 	// Verify container is removed
 	assert.Contains(t, fakeRuntime.Called, "RemoveContainer")
-	containers, err := fakeRuntime.ListContainers(&runtimeapi.ContainerFilter{Id: containerId})
+	containers, err := fakeRuntime.ListContainers(&runtimeapi.ContainerFilter{Id: containerID})
 	assert.NoError(t, err)
 	assert.Empty(t, containers)
 }
@@ -257,10 +257,10 @@ func TestLifeCycleHook(t *testing.T) {
 	}
 
 	fakeRunner := &containertest.FakeContainerCommandRunner{}
-	fakeHttp := &fakeHTTP{}
+	fakeHTTP := &fakeHTTP{}
 
 	lcHanlder := lifecycle.NewHandlerRunner(
-		fakeHttp,
+		fakeHTTP,
 		fakeRunner,
 		nil)
 
@@ -277,11 +277,11 @@ func TestLifeCycleHook(t *testing.T) {
 
 	// Configured and working HTTP hook
 	t.Run("PreStop-HTTPGet", func(t *testing.T) {
-		defer func() { fakeHttp.url = "" }()
+		defer func() { fakeHTTP.url = "" }()
 		testPod.Spec.Containers[0].Lifecycle = httpLifeCycle
 		m.killContainer(testPod, cID, "foo", "testKill", &gracePeriod)
 
-		if !strings.Contains(fakeHttp.url, httpLifeCycle.PreStop.HTTPGet.Host) {
+		if !strings.Contains(fakeHTTP.url, httpLifeCycle.PreStop.HTTPGet.Host) {
 			t.Errorf("HTTP Prestop hook was not invoked")
 		}
 	})
@@ -295,7 +295,7 @@ func TestLifeCycleHook(t *testing.T) {
 
 		m.killContainer(testPod, cID, "foo", "testKill", &gracePeriodLocal)
 
-		if strings.Contains(fakeHttp.url, httpLifeCycle.PreStop.HTTPGet.Host) {
+		if strings.Contains(fakeHTTP.url, httpLifeCycle.PreStop.HTTPGet.Host) {
 			t.Errorf("HTTP Should not execute when gracePeriod is 0")
 		}
 	})
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_gc.go | 16 (generated, vendored)
@@ -23,9 +23,9 @@ import (
 	"sort"
 	"time"
 
-	"github.com/golang/glog"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/klog"
 	internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
 	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@@ -39,7 +39,7 @@ type containerGC struct {
 }
 
 // NewContainerGC creates a new containerGC.
-func NewContainerGC(client internalapi.RuntimeService, podStateProvider podStateProvider, manager *kubeGenericRuntimeManager) *containerGC {
+func newContainerGC(client internalapi.RuntimeService, podStateProvider podStateProvider, manager *kubeGenericRuntimeManager) *containerGC {
 	return &containerGC{
 		client:  client,
 		manager: manager,
@@ -123,7 +123,7 @@ func (cgc *containerGC) removeOldestN(containers []containerGCInfo, toRemove int
 	numToKeep := len(containers) - toRemove
 	for i := len(containers) - 1; i >= numToKeep; i-- {
 		if err := cgc.manager.removeContainer(containers[i].id); err != nil {
-			glog.Errorf("Failed to remove container %q: %v", containers[i].id, err)
+			klog.Errorf("Failed to remove container %q: %v", containers[i].id, err)
 		}
 	}
 
@@ -145,16 +145,16 @@ func (cgc *containerGC) removeOldestNSandboxes(sandboxes []sandboxGCInfo, toRemo
 
 // removeSandbox removes the sandbox by sandboxID.
 func (cgc *containerGC) removeSandbox(sandboxID string) {
-	glog.V(4).Infof("Removing sandbox %q", sandboxID)
+	klog.V(4).Infof("Removing sandbox %q", sandboxID)
 	// In normal cases, kubelet should've already called StopPodSandbox before
 	// GC kicks in. To guard against the rare cases where this is not true, try
 	// stopping the sandbox before removing it.
 	if err := cgc.client.StopPodSandbox(sandboxID); err != nil {
-		glog.Errorf("Failed to stop sandbox %q before removing: %v", sandboxID, err)
+		klog.Errorf("Failed to stop sandbox %q before removing: %v", sandboxID, err)
 		return
 	}
 	if err := cgc.client.RemovePodSandbox(sandboxID); err != nil {
-		glog.Errorf("Failed to remove sandbox %q: %v", sandboxID, err)
+		klog.Errorf("Failed to remove sandbox %q: %v", sandboxID, err)
 	}
 }
 
@@ -328,7 +328,7 @@ func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error {
 			}
 			err := osInterface.RemoveAll(filepath.Join(podLogsRootDirectory, name))
 			if err != nil {
-				glog.Errorf("Failed to remove pod logs directory %q: %v", name, err)
+				klog.Errorf("Failed to remove pod logs directory %q: %v", name, err)
 			}
 		}
 	}
@@ -340,7 +340,7 @@ func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error {
 		if _, err := osInterface.Stat(logSymlink); os.IsNotExist(err) {
 			err := osInterface.Remove(logSymlink)
 			if err != nil {
-				glog.Errorf("Failed to remove container log dead symlink %q: %v", logSymlink, err)
+				klog.Errorf("Failed to remove container log dead symlink %q: %v", logSymlink, err)
 			}
 		}
 	}
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_image.go | 14 (generated, vendored)
@@ -17,9 +17,9 @@ limitations under the License.
 package kuberuntime
 
 import (
-	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/credentialprovider"
 	credentialprovidersecrets "k8s.io/kubernetes/pkg/credentialprovider/secrets"
 	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
@@ -44,11 +44,11 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul
 	imgSpec := &runtimeapi.ImageSpec{Image: img}
 	creds, withCredentials := keyring.Lookup(repoToPull)
 	if !withCredentials {
-		glog.V(3).Infof("Pulling image %q without credentials", img)
+		klog.V(3).Infof("Pulling image %q without credentials", img)
 
 		imageRef, err := m.imageService.PullImage(imgSpec, nil)
 		if err != nil {
-			glog.Errorf("Pull image %q failed: %v", img, err)
+			klog.Errorf("Pull image %q failed: %v", img, err)
 			return "", err
 		}
 
@@ -84,7 +84,7 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul
 func (m *kubeGenericRuntimeManager) GetImageRef(image kubecontainer.ImageSpec) (string, error) {
 	status, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: image.Image})
 	if err != nil {
-		glog.Errorf("ImageStatus for image %q failed: %v", image, err)
+		klog.Errorf("ImageStatus for image %q failed: %v", image, err)
 		return "", err
 	}
 	if status == nil {
@@ -99,7 +99,7 @@ func (m *kubeGenericRuntimeManager) ListImages() ([]kubecontainer.Image, error)
 
 	allImages, err := m.imageService.ListImages(nil)
 	if err != nil {
-		glog.Errorf("ListImages failed: %v", err)
+		klog.Errorf("ListImages failed: %v", err)
 		return nil, err
 	}
 
@@ -119,7 +119,7 @@ func (m *kubeGenericRuntimeManager) ListImages() ([]kubecontainer.Image, error)
 func (m *kubeGenericRuntimeManager) RemoveImage(image kubecontainer.ImageSpec) error {
 	err := m.imageService.RemoveImage(&runtimeapi.ImageSpec{Image: image.Image})
 	if err != nil {
-		glog.Errorf("Remove image %q failed: %v", image.Image, err)
+		klog.Errorf("Remove image %q failed: %v", image.Image, err)
 		return err
 	}
 
@@ -133,7 +133,7 @@ func (m *kubeGenericRuntimeManager) RemoveImage(image kubecontainer.ImageSpec) e
 func (m *kubeGenericRuntimeManager) ImageStats() (*kubecontainer.ImageStats, error) {
 	allImages, err := m.imageService.ListImages(nil)
 	if err != nil {
-		glog.Errorf("ListImages failed: %v", err)
+		klog.Errorf("ListImages failed: %v", err)
 		return nil, err
 	}
 	stats := &kubecontainer.ImageStats{}
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_image_test.go | 6 (generated, vendored)
@@ -109,8 +109,8 @@ func TestPullWithSecrets(t *testing.T) {
 		t.Errorf("unexpected error: %v", err)
 	}
 
-	dockerConfigJson := map[string]map[string]map[string]string{"auths": dockerCfg}
-	dockerConfigJsonContent, err := json.Marshal(dockerConfigJson)
+	dockerConfigJSON := map[string]map[string]map[string]string{"auths": dockerCfg}
+	dockerConfigJSONContent, err := json.Marshal(dockerConfigJSON)
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
@@ -153,7 +153,7 @@ func TestPullWithSecrets(t *testing.T) {
 		},
 		"builtin keyring secrets, but use passed with new docker config": {
 			"ubuntu",
-			[]v1.Secret{{Type: v1.SecretTypeDockerConfigJson, Data: map[string][]byte{v1.DockerConfigJsonKey: dockerConfigJsonContent}}},
+			[]v1.Secret{{Type: v1.SecretTypeDockerConfigJson, Data: map[string][]byte{v1.DockerConfigJsonKey: dockerConfigJSONContent}}},
 			credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{
 				"index.docker.io/v1/": {Username: "built-in", Password: "password", Provider: nil},
 			}),
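Beyond the Json-to-JSON rename, it may help to see what that triple-nested map serializes to: the standard docker config.json "auths" layout keyed by registry. A sketch with invented placeholder credentials:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        // Mirrors the nested-map shape used in the test above.
        cfg := map[string]map[string]map[string]string{
            "auths": {
                "index.docker.io/v1/": {
                    "username": "user", // placeholder values, not from the test
                    "password": "password",
                    "email":    "user@example.com",
                },
            },
        }
        b, _ := json.MarshalIndent(cfg, "", "  ")
        fmt.Println(string(b))
    }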
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_logs.go | 5 (generated, vendored)
@@ -17,6 +17,7 @@ limitations under the License.
 package kuberuntime
 
 import (
+	"context"
 	"io"
 	"time"
 
@@ -27,9 +28,9 @@ import (
 // ReadLogs read the container log and redirect into stdout and stderr.
 // Note that containerID is only needed when following the log, or else
 // just pass in empty string "".
-func (m *kubeGenericRuntimeManager) ReadLogs(path, containerID string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer) error {
+func (m *kubeGenericRuntimeManager) ReadLogs(ctx context.Context, path, containerID string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer) error {
 	// Convert v1.PodLogOptions into internal log options.
 	opts := logs.NewLogOptions(apiOpts, time.Now())
 
-	return logs.ReadLogs(path, containerID, opts, m.runtimeService, stdout, stderr)
+	return logs.ReadLogs(ctx, path, containerID, opts, m.runtimeService, stdout, stderr)
 }
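Threading a context.Context into ReadLogs matters mostly for follow-mode reads: when the requesting client disconnects, cancelling the context unblocks the reader instead of leaving it tailing forever. A sketch of that calling convention, with a stand-in reader loop (readLogs here is illustrative, not the kubelet's implementation):

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    func readLogs(ctx context.Context) error {
        for {
            select {
            case <-ctx.Done():
                return ctx.Err() // stop following once the caller goes away
            case <-time.After(50 * time.Millisecond):
                fmt.Println("log line")
            }
        }
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
        defer cancel()
        fmt.Println(readLogs(ctx)) // context deadline exceeded
    }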
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go | 140 (generated, vendored)
@@ -22,13 +22,14 @@ import (
 	"os"
 	"time"
 
-	"github.com/golang/glog"
 	cadvisorapi "github.com/google/cadvisor/info/v1"
+	"k8s.io/klog"
 
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kubetypes "k8s.io/apimachinery/pkg/types"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	utilversion "k8s.io/apimachinery/pkg/util/version"
 	"k8s.io/client-go/tools/record"
 	ref "k8s.io/client-go/tools/reference"
 	"k8s.io/client-go/util/flowcontrol"
@@ -42,10 +43,10 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/images"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
+	"k8s.io/kubernetes/pkg/kubelet/runtimeclass"
 	"k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/kubelet/util/cache"
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
-	utilversion "k8s.io/kubernetes/pkg/util/version"
 )
 
 const (
@@ -98,6 +99,9 @@ type kubeGenericRuntimeManager struct {
 	// If true, enforce container cpu limits with CFS quota support
 	cpuCFSQuota bool
 
+	// CPUCFSQuotaPeriod sets the CPU CFS quota period value, cpu.cfs_period_us, defaults to 100ms
+	cpuCFSQuotaPeriod metav1.Duration
+
 	// wrapped image puller.
 	imagePuller images.ImageManager
 
@@ -116,8 +120,12 @@ type kubeGenericRuntimeManager struct {
 
 	// A shim to legacy functions for backward compatibility.
 	legacyLogProvider LegacyLogProvider
+
+	// Manage RuntimeClass resources.
+	runtimeClassManager *runtimeclass.Manager
 }
 
+// KubeGenericRuntime is a interface contains interfaces for container runtime and command.
 type KubeGenericRuntime interface {
 	kubecontainer.Runtime
 	kubecontainer.StreamingRuntime
@@ -146,14 +154,17 @@ func NewKubeGenericRuntimeManager(
 	imagePullQPS float32,
 	imagePullBurst int,
 	cpuCFSQuota bool,
+	cpuCFSQuotaPeriod metav1.Duration,
 	runtimeService internalapi.RuntimeService,
 	imageService internalapi.ImageManagerService,
 	internalLifecycle cm.InternalContainerLifecycle,
 	legacyLogProvider LegacyLogProvider,
+	runtimeClassManager *runtimeclass.Manager,
 ) (KubeGenericRuntime, error) {
 	kubeRuntimeManager := &kubeGenericRuntimeManager{
 		recorder:            recorder,
 		cpuCFSQuota:         cpuCFSQuota,
+		cpuCFSQuotaPeriod:   cpuCFSQuotaPeriod,
 		seccompProfileRoot:  seccompProfileRoot,
 		livenessManager:     livenessManager,
 		containerRefManager: containerRefManager,
@@ -165,25 +176,26 @@ func NewKubeGenericRuntimeManager(
 		keyring:             credentialprovider.NewDockerKeyring(),
 		internalLifecycle:   internalLifecycle,
 		legacyLogProvider:   legacyLogProvider,
+		runtimeClassManager: runtimeClassManager,
 	}
 
 	typedVersion, err := kubeRuntimeManager.runtimeService.Version(kubeRuntimeAPIVersion)
 	if err != nil {
-		glog.Errorf("Get runtime version failed: %v", err)
+		klog.Errorf("Get runtime version failed: %v", err)
 		return nil, err
 	}
 
 	// Only matching kubeRuntimeAPIVersion is supported now
 	// TODO: Runtime API machinery is under discussion at https://github.com/kubernetes/kubernetes/issues/28642
 	if typedVersion.Version != kubeRuntimeAPIVersion {
-		glog.Errorf("Runtime api version %s is not supported, only %s is supported now",
+		klog.Errorf("Runtime api version %s is not supported, only %s is supported now",
 			typedVersion.Version,
 			kubeRuntimeAPIVersion)
 		return nil, ErrVersionNotSupported
 	}
 
 	kubeRuntimeManager.runtimeName = typedVersion.RuntimeName
-	glog.Infof("Container runtime %s initialized, version: %s, apiVersion: %s",
+	klog.Infof("Container runtime %s initialized, version: %s, apiVersion: %s",
 		typedVersion.RuntimeName,
 		typedVersion.RuntimeVersion,
 		typedVersion.RuntimeApiVersion)
@@ -193,7 +205,7 @@ func NewKubeGenericRuntimeManager(
 	// new runtime interface
 	if _, err := osInterface.Stat(podLogsRootDirectory); os.IsNotExist(err) {
 		if err := osInterface.MkdirAll(podLogsRootDirectory, 0755); err != nil {
-			glog.Errorf("Failed to create directory %q: %v", podLogsRootDirectory, err)
+			klog.Errorf("Failed to create directory %q: %v", podLogsRootDirectory, err)
 		}
 	}
 
@@ -205,7 +217,7 @@ func NewKubeGenericRuntimeManager(
 		imagePullQPS,
 		imagePullBurst)
 	kubeRuntimeManager.runner = lifecycle.NewHandlerRunner(httpClient, kubeRuntimeManager, kubeRuntimeManager)
-	kubeRuntimeManager.containerGC = NewContainerGC(runtimeService, podStateProvider, kubeRuntimeManager)
+	kubeRuntimeManager.containerGC = newContainerGC(runtimeService, podStateProvider, kubeRuntimeManager)
 
 	kubeRuntimeManager.versionCache = cache.NewObjectCache(
 		func() (interface{}, error) {
@@ -232,7 +244,7 @@ func newRuntimeVersion(version string) (*utilversion.Version, error) {
 func (m *kubeGenericRuntimeManager) getTypedVersion() (*runtimeapi.VersionResponse, error) {
 	typedVersion, err := m.runtimeService.Version(kubeRuntimeAPIVersion)
 	if err != nil {
-		glog.Errorf("Get remote runtime typed version failed: %v", err)
+		klog.Errorf("Get remote runtime typed version failed: %v", err)
 		return nil, err
 	}
 	return typedVersion, nil
@@ -242,7 +254,7 @@ func (m *kubeGenericRuntimeManager) getTypedVersion() (*runtimeapi.VersionRespon
 func (m *kubeGenericRuntimeManager) Version() (kubecontainer.Version, error) {
 	typedVersion, err := m.runtimeService.Version(kubeRuntimeAPIVersion)
 	if err != nil {
-		glog.Errorf("Get remote runtime version failed: %v", err)
+		klog.Errorf("Get remote runtime version failed: %v", err)
 		return nil, err
 	}
 
@@ -284,7 +296,7 @@ func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, err
 	for i := range sandboxes {
 		s := sandboxes[i]
 		if s.Metadata == nil {
-			glog.V(4).Infof("Sandbox does not have metadata: %+v", s)
+			klog.V(4).Infof("Sandbox does not have metadata: %+v", s)
 			continue
 		}
 		podUID := kubetypes.UID(s.Metadata.Uid)
@@ -298,7 +310,7 @@ func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, err
 		p := pods[podUID]
 		converted, err := m.sandboxToKubeContainer(s)
 		if err != nil {
-			glog.V(4).Infof("Convert %q sandbox %v of pod %q failed: %v", m.runtimeName, s, podUID, err)
+			klog.V(4).Infof("Convert %q sandbox %v of pod %q failed: %v", m.runtimeName, s, podUID, err)
 			continue
 		}
 		p.Sandboxes = append(p.Sandboxes, converted)
@@ -311,7 +323,7 @@ func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, err
 	for i := range containers {
 		c := containers[i]
 		if c.Metadata == nil {
-			glog.V(4).Infof("Container does not have metadata: %+v", c)
+			klog.V(4).Infof("Container does not have metadata: %+v", c)
 			continue
 		}
 
@@ -328,7 +340,7 @@ func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, err
 
 		converted, err := m.toKubeContainer(c)
 		if err != nil {
-			glog.V(4).Infof("Convert %s container %v of pod %q failed: %v", m.runtimeName, c, labelledInfo.PodUID, err)
+			klog.V(4).Infof("Convert %s container %v of pod %q failed: %v", m.runtimeName, c, labelledInfo.PodUID, err)
 			continue
 		}
 
@@ -382,7 +394,7 @@ type podActions struct {
 // (changed, new attempt, original sandboxID if exist).
 func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, uint32, string) {
 	if len(podStatus.SandboxStatuses) == 0 {
-		glog.V(2).Infof("No sandbox for pod %q can be found. Need to start a new one", format.Pod(pod))
+		klog.V(2).Infof("No sandbox for pod %q can be found. Need to start a new one", format.Pod(pod))
 		return true, 0, ""
 	}
 
@@ -396,23 +408,23 @@ func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *v1.Pod, podStatus *ku
 	// Needs to create a new sandbox when readySandboxCount > 1 or the ready sandbox is not the latest one.
 	sandboxStatus := podStatus.SandboxStatuses[0]
 	if readySandboxCount > 1 {
-		glog.V(2).Infof("More than 1 sandboxes for pod %q are ready. Need to reconcile them", format.Pod(pod))
+		klog.V(2).Infof("More than 1 sandboxes for pod %q are ready. Need to reconcile them", format.Pod(pod))
 		return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id
 	}
 	if sandboxStatus.State != runtimeapi.PodSandboxState_SANDBOX_READY {
-		glog.V(2).Infof("No ready sandbox for pod %q can be found. Need to start a new one", format.Pod(pod))
+		klog.V(2).Infof("No ready sandbox for pod %q can be found. Need to start a new one", format.Pod(pod))
 		return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id
 	}
 
 	// Needs to create a new sandbox when network namespace changed.
 	if sandboxStatus.GetLinux().GetNamespaces().GetOptions().GetNetwork() != networkNamespaceForPod(pod) {
-		glog.V(2).Infof("Sandbox for pod %q has changed. Need to start a new one", format.Pod(pod))
+		klog.V(2).Infof("Sandbox for pod %q has changed. Need to start a new one", format.Pod(pod))
 		return true, sandboxStatus.Metadata.Attempt + 1, ""
 	}
 
 	// Needs to create a new sandbox when the sandbox does not have an IP address.
 	if !kubecontainer.IsHostNetworkPod(pod) && sandboxStatus.Network.Ip == "" {
-		glog.V(2).Infof("Sandbox for pod %q has no IP address. Need to start a new one", format.Pod(pod))
+		klog.V(2).Infof("Sandbox for pod %q has no IP address. Need to start a new one", format.Pod(pod))
 		return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id
 	}
 
@@ -438,7 +450,7 @@ func containerSucceeded(c *v1.Container, podStatus *kubecontainer.PodStatus) boo
|
||||
|
||||
// computePodActions checks whether the pod spec has changed and returns the changes if true.
|
||||
func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *kubecontainer.PodStatus) podActions {
|
||||
glog.V(5).Infof("Syncing Pod %q: %+v", format.Pod(pod), pod)
|
||||
klog.V(5).Infof("Syncing Pod %q: %+v", format.Pod(pod), pod)
|
||||
|
||||
createPodSandbox, attempt, sandboxID := m.podSandboxChanged(pod, podStatus)
|
||||
changes := podActions{
|
||||
@ -455,6 +467,10 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku
|
||||
if createPodSandbox {
|
||||
if !shouldRestartOnFailure(pod) && attempt != 0 {
|
||||
// Should not restart the pod, just return.
|
||||
// we should not create a sandbox for a pod if it is already done.
|
||||
// if all containers are done and should not be started, there is no need to create a new sandbox.
|
||||
// this stops confusing logs on pods whose containers all have exit codes, but we recreate a sandbox before terminating it.
|
||||
changes.CreateSandbox = false
|
||||
return changes
|
||||
}
|
||||
if len(pod.Spec.InitContainers) != 0 {
|
||||
@ -500,7 +516,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku
|
||||
// to it.
|
||||
if containerStatus != nil && containerStatus.State != kubecontainer.ContainerStateRunning {
|
||||
if err := m.internalLifecycle.PostStopContainer(containerStatus.ID.ID); err != nil {
|
||||
glog.Errorf("internal container post-stop lifecycle hook failed for container %v in pod %v with error %v",
|
||||
klog.Errorf("internal container post-stop lifecycle hook failed for container %v in pod %v with error %v",
|
||||
container.Name, pod.Name, err)
|
||||
}
|
||||
}
|
||||
@ -510,7 +526,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku
|
||||
if containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning {
|
||||
if kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {
|
||||
message := fmt.Sprintf("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
|
||||
glog.Info(message)
|
||||
klog.V(3).Infof(message)
|
||||
changes.ContainersToStart = append(changes.ContainersToStart, idx)
|
||||
}
|
||||
continue
|
||||
@ -528,7 +544,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku
|
||||
reason = "Container failed liveness probe."
|
||||
} else {
|
||||
// Keep the container.
|
||||
keepCount += 1
|
||||
keepCount++
|
||||
continue
|
||||
}
|
||||
|
||||
@ -546,7 +562,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku
|
||||
container: &pod.Spec.Containers[idx],
|
||||
message: message,
|
||||
}
|
||||
glog.V(2).Infof("Container %q (%q) of pod %s: %s", container.Name, containerStatus.ID, format.Pod(pod), message)
|
||||
klog.V(2).Infof("Container %q (%q) of pod %s: %s", container.Name, containerStatus.ID, format.Pod(pod), message)
|
||||
}
|
||||
|
||||
if keepCount == 0 && len(changes.ContainersToStart) == 0 {
|
||||
@ -567,31 +583,31 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku
|
||||
func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
|
||||
// Step 1: Compute sandbox and container changes.
|
||||
podContainerChanges := m.computePodActions(pod, podStatus)
|
||||
glog.V(3).Infof("computePodActions got %+v for pod %q", podContainerChanges, format.Pod(pod))
|
||||
klog.V(3).Infof("computePodActions got %+v for pod %q", podContainerChanges, format.Pod(pod))
|
||||
if podContainerChanges.CreateSandbox {
|
||||
ref, err := ref.GetReference(legacyscheme.Scheme, pod)
|
||||
if err != nil {
|
||||
glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), err)
|
||||
klog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), err)
|
||||
}
|
||||
if podContainerChanges.SandboxID != "" {
|
||||
m.recorder.Eventf(ref, v1.EventTypeNormal, events.SandboxChanged, "Pod sandbox changed, it will be killed and re-created.")
|
||||
} else {
|
||||
glog.V(4).Infof("SyncPod received new pod %q, will create a sandbox for it", format.Pod(pod))
|
||||
klog.V(4).Infof("SyncPod received new pod %q, will create a sandbox for it", format.Pod(pod))
|
||||
}
|
||||
}
|
||||
|
||||
// Step 2: Kill the pod if the sandbox has changed.
|
||||
if podContainerChanges.KillPod {
|
||||
if !podContainerChanges.CreateSandbox {
|
||||
glog.V(4).Infof("Stopping PodSandbox for %q because all other containers are dead.", format.Pod(pod))
|
||||
klog.V(4).Infof("Stopping PodSandbox for %q because all other containers are dead.", format.Pod(pod))
|
||||
} else {
|
||||
glog.V(4).Infof("Stopping PodSandbox for %q, will start new one", format.Pod(pod))
|
||||
klog.V(4).Infof("Stopping PodSandbox for %q, will start new one", format.Pod(pod))
|
||||
}
|
||||
|
||||
killResult := m.killPodWithSyncResult(pod, kubecontainer.ConvertPodStatusToRunningPod(m.runtimeName, podStatus), nil)
|
||||
result.AddPodSyncResult(killResult)
|
||||
if killResult.Error() != nil {
|
||||
glog.Errorf("killPodWithSyncResult failed: %v", killResult.Error())
|
||||
klog.Errorf("killPodWithSyncResult failed: %v", killResult.Error())
|
||||
return
|
||||
}
|
||||
|
||||
@ -601,12 +617,12 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat
|
||||
} else {
|
||||
// Step 3: kill any running containers in this pod which are not to keep.
|
||||
for containerID, containerInfo := range podContainerChanges.ContainersToKill {
|
||||
glog.V(3).Infof("Killing unwanted container %q(id=%q) for pod %q", containerInfo.name, containerID, format.Pod(pod))
|
||||
klog.V(3).Infof("Killing unwanted container %q(id=%q) for pod %q", containerInfo.name, containerID, format.Pod(pod))
|
||||
killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, containerInfo.name)
|
||||
result.AddSyncResult(killContainerResult)
|
||||
if err := m.killContainer(pod, containerID, containerInfo.name, containerInfo.message, nil); err != nil {
|
||||
killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error())
|
||||
glog.Errorf("killContainer %q(id=%q) for pod %q failed: %v", containerInfo.name, containerID, format.Pod(pod), err)
|
||||
klog.Errorf("killContainer %q(id=%q) for pod %q failed: %v", containerInfo.name, containerID, format.Pod(pod), err)
|
||||
return
|
||||
}
|
||||
}
|
||||
@ -637,30 +653,30 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat
|
||||
var msg string
|
||||
var err error
|
||||
|
||||
glog.V(4).Infof("Creating sandbox for pod %q", format.Pod(pod))
|
||||
klog.V(4).Infof("Creating sandbox for pod %q", format.Pod(pod))
|
||||
createSandboxResult := kubecontainer.NewSyncResult(kubecontainer.CreatePodSandbox, format.Pod(pod))
|
||||
result.AddSyncResult(createSandboxResult)
|
||||
podSandboxID, msg, err = m.createPodSandbox(pod, podContainerChanges.Attempt)
|
||||
if err != nil {
|
||||
createSandboxResult.Fail(kubecontainer.ErrCreatePodSandbox, msg)
|
||||
glog.Errorf("createPodSandbox for pod %q failed: %v", format.Pod(pod), err)
|
||||
klog.Errorf("createPodSandbox for pod %q failed: %v", format.Pod(pod), err)
|
||||
ref, referr := ref.GetReference(legacyscheme.Scheme, pod)
|
||||
if referr != nil {
|
||||
glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), referr)
|
||||
klog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), referr)
|
||||
}
|
||||
m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedCreatePodSandBox, "Failed create pod sandbox: %v", err)
|
||||
return
|
||||
}
|
||||
glog.V(4).Infof("Created PodSandbox %q for pod %q", podSandboxID, format.Pod(pod))
|
||||
klog.V(4).Infof("Created PodSandbox %q for pod %q", podSandboxID, format.Pod(pod))
|
||||
|
||||
podSandboxStatus, err := m.runtimeService.PodSandboxStatus(podSandboxID)
|
||||
if err != nil {
|
||||
ref, referr := ref.GetReference(legacyscheme.Scheme, pod)
|
||||
if referr != nil {
|
||||
glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), referr)
|
||||
klog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), referr)
|
||||
}
|
||||
m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedStatusPodSandBox, "Unable to get pod sandbox status: %v", err)
|
||||
glog.Errorf("Failed to get pod sandbox status: %v; Skipping pod %q", err, format.Pod(pod))
|
||||
klog.Errorf("Failed to get pod sandbox status: %v; Skipping pod %q", err, format.Pod(pod))
|
||||
result.Fail(err)
|
||||
return
|
||||
}
|
||||
@ -670,7 +686,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat
|
||||
if !kubecontainer.IsHostNetworkPod(pod) {
|
||||
// Overwrite the podIP passed in the pod status, since we just started the pod sandbox.
|
||||
podIP = m.determinePodSandboxIP(pod.Namespace, pod.Name, podSandboxStatus)
|
||||
glog.V(4).Infof("Determined the ip %q for pod %q after sandbox changed", podIP, format.Pod(pod))
|
||||
klog.V(4).Infof("Determined the ip %q for pod %q after sandbox changed", podIP, format.Pod(pod))
|
||||
}
|
||||
}
|
||||
|
||||
@ -680,7 +696,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat
|
||||
podSandboxConfig, err := m.generatePodSandboxConfig(pod, podContainerChanges.Attempt)
|
||||
if err != nil {
|
||||
message := fmt.Sprintf("GeneratePodSandboxConfig for pod %q failed: %v", format.Pod(pod), err)
|
||||
glog.Error(message)
|
||||
klog.Error(message)
|
||||
configPodSandboxResult.Fail(kubecontainer.ErrConfigPodSandbox, message)
|
||||
return
|
||||
}
|
||||
@ -693,11 +709,11 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat
|
||||
isInBackOff, msg, err := m.doBackOff(pod, container, podStatus, backOff)
|
||||
if isInBackOff {
|
||||
startContainerResult.Fail(err, msg)
|
||||
glog.V(4).Infof("Backing Off restarting init container %+v in pod %v", container, format.Pod(pod))
|
||||
klog.V(4).Infof("Backing Off restarting init container %+v in pod %v", container, format.Pod(pod))
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(4).Infof("Creating init container %+v in pod %v", container, format.Pod(pod))
|
||||
klog.V(4).Infof("Creating init container %+v in pod %v", container, format.Pod(pod))
|
||||
if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP, kubecontainer.ContainerTypeInit); err != nil {
|
||||
startContainerResult.Fail(err, msg)
|
||||
utilruntime.HandleError(fmt.Errorf("init container start failed: %v: %s", err, msg))
|
||||
@ -705,7 +721,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat
|
||||
}
|
||||
|
||||
// Successfully started the container; clear the entry in the failure
|
||||
glog.V(4).Infof("Completed init container %q for pod %q", container.Name, format.Pod(pod))
|
||||
klog.V(4).Infof("Completed init container %q for pod %q", container.Name, format.Pod(pod))
|
||||
}
|
||||
|
||||
// Step 6: start containers in podContainerChanges.ContainersToStart.
|
||||
@ -717,18 +733,18 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat
|
||||
isInBackOff, msg, err := m.doBackOff(pod, container, podStatus, backOff)
|
||||
if isInBackOff {
|
||||
startContainerResult.Fail(err, msg)
|
||||
glog.V(4).Infof("Backing Off restarting container %+v in pod %v", container, format.Pod(pod))
|
||||
klog.V(4).Infof("Backing Off restarting container %+v in pod %v", container, format.Pod(pod))
|
||||
continue
|
||||
}
|
||||
|
||||
glog.V(4).Infof("Creating container %+v in pod %v", container, format.Pod(pod))
|
||||
klog.V(4).Infof("Creating container %+v in pod %v", container, format.Pod(pod))
|
||||
if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP, kubecontainer.ContainerTypeRegular); err != nil {
|
||||
startContainerResult.Fail(err, msg)
|
||||
// known errors that are logged in other places are logged at higher levels here to avoid
|
||||
// repetitive log spam
|
||||
switch {
|
||||
case err == images.ErrImagePullBackOff:
|
||||
glog.V(3).Infof("container start failed: %v: %s", err, msg)
|
||||
klog.V(3).Infof("container start failed: %v: %s", err, msg)
|
||||
default:
|
||||
utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg))
|
||||
}
|
||||
@ -754,7 +770,7 @@ func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Contain
|
||||
return false, "", nil
|
||||
}
|
||||
|
||||
glog.Infof("checking backoff for container %q in pod %q", container.Name, format.Pod(pod))
|
||||
klog.V(3).Infof("checking backoff for container %q in pod %q", container.Name, format.Pod(pod))
|
||||
// Use the finished time of the latest exited container as the start point to calculate whether to do back-off.
|
||||
ts := cStatus.FinishedAt
|
||||
// backOff requires a unique key to identify the container.
|
||||
@ -764,7 +780,7 @@ func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Contain
|
||||
m.recorder.Eventf(ref, v1.EventTypeWarning, events.BackOffStartContainer, "Back-off restarting failed container")
|
||||
}
|
||||
err := fmt.Errorf("Back-off %s restarting failed container=%s pod=%s", backOff.Get(key), container.Name, format.Pod(pod))
|
||||
glog.Infof("%s", err.Error())
|
||||
klog.V(3).Infof("%s", err.Error())
|
||||
return true, err.Error(), kubecontainer.ErrCrashLoopBackOff
|
||||
}
|
||||
|
||||
@ -796,31 +812,13 @@ func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *v1.Pod, runningPo
|
||||
for _, podSandbox := range runningPod.Sandboxes {
|
||||
if err := m.runtimeService.StopPodSandbox(podSandbox.ID.ID); err != nil {
|
||||
killSandboxResult.Fail(kubecontainer.ErrKillPodSandbox, err.Error())
|
||||
glog.Errorf("Failed to stop sandbox %q", podSandbox.ID)
|
||||
klog.Errorf("Failed to stop sandbox %q", podSandbox.ID)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// isHostNetwork checks whether the pod is running in host-network mode.
|
||||
func (m *kubeGenericRuntimeManager) isHostNetwork(podSandBoxID string, pod *v1.Pod) (bool, error) {
|
||||
if pod != nil {
|
||||
return kubecontainer.IsHostNetworkPod(pod), nil
|
||||
}
|
||||
|
||||
podStatus, err := m.runtimeService.PodSandboxStatus(podSandBoxID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if podStatus.GetLinux().GetNamespaces().GetOptions().GetNetwork() == runtimeapi.NamespaceMode_NODE {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// GetPodStatus retrieves the status of the pod, including the
|
||||
// information of all containers in the pod that are visible in Runtime.
|
||||
func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
|
||||
@ -849,14 +847,14 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
|
||||
UID: uid,
|
||||
},
|
||||
})
|
||||
glog.V(4).Infof("getSandboxIDByPodUID got sandbox IDs %q for pod %q", podSandboxIDs, podFullName)
|
||||
klog.V(4).Infof("getSandboxIDByPodUID got sandbox IDs %q for pod %q", podSandboxIDs, podFullName)
|
||||
|
||||
sandboxStatuses := make([]*runtimeapi.PodSandboxStatus, len(podSandboxIDs))
|
||||
podIP := ""
|
||||
for idx, podSandboxID := range podSandboxIDs {
|
||||
podSandboxStatus, err := m.runtimeService.PodSandboxStatus(podSandboxID)
|
||||
if err != nil {
|
||||
glog.Errorf("PodSandboxStatus of sandbox %q for pod %q error: %v", podSandboxID, podFullName, err)
|
||||
klog.Errorf("PodSandboxStatus of sandbox %q for pod %q error: %v", podSandboxID, podFullName, err)
|
||||
return nil, err
|
||||
}
|
||||
sandboxStatuses[idx] = podSandboxStatus
|
||||
@ -870,7 +868,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
|
||||
// Get statuses of all containers visible in the pod.
|
||||
containerStatuses, err := m.getPodContainerStatuses(uid, name, namespace)
|
||||
if err != nil {
|
||||
glog.Errorf("getPodContainerStatuses for pod %q failed: %v", podFullName, err)
|
||||
klog.Errorf("getPodContainerStatuses for pod %q failed: %v", podFullName, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -901,7 +899,7 @@ func (m *kubeGenericRuntimeManager) GarbageCollect(gcPolicy kubecontainer.Contai
|
||||
func (m *kubeGenericRuntimeManager) GetPodContainerID(pod *kubecontainer.Pod) (kubecontainer.ContainerID, error) {
|
||||
formattedPod := kubecontainer.FormatPod(pod)
|
||||
if len(pod.Sandboxes) == 0 {
|
||||
glog.Errorf("No sandboxes are found for pod %q", formattedPod)
|
||||
klog.Errorf("No sandboxes are found for pod %q", formattedPod)
|
||||
return kubecontainer.ContainerID{}, fmt.Errorf("sandboxes for pod %q not found", formattedPod)
|
||||
}
|
||||
|
||||
@ -914,7 +912,7 @@ func (m *kubeGenericRuntimeManager) GetPodContainerID(pod *kubecontainer.Pod) (k
|
||||
func (m *kubeGenericRuntimeManager) UpdatePodCIDR(podCIDR string) error {
|
||||
// TODO(#35531): do we really want to write a method on this manager for each
|
||||
// field of the config?
|
||||
glog.Infof("updating runtime config through cri with podcidr %v", podCIDR)
|
||||
klog.Infof("updating runtime config through cri with podcidr %v", podCIDR)
|
||||
return m.runtimeService.UpdateRuntimeConfig(
|
||||
&runtimeapi.RuntimeConfig{
|
||||
NetworkConfig: &runtimeapi.NetworkConfig{
|
||||
|
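The doBackOff hunk above only changes log verbosity, but the decision it logs is easy to lose in the diff: the kubelet keys its restart back-off on pod plus container name and measures the wait from the finish time of the latest exited container. Below is a minimal, self-contained sketch of that decision; the backoffEntry type, delays, and cap are illustrative assumptions, not the vendored flowcontrol.Backoff API.

package main

import (
	"fmt"
	"time"
)

// backoffEntry is a stand-in for what flowcontrol.Backoff tracks per key
// (assumed shape, not the real type): last update time and current delay.
type backoffEntry struct {
	lastUpdate time.Time
	delay      time.Duration
}

// inBackOff reports whether a container restart should be refused for now.
// finishedAt is the finish time of the latest exited container, the same
// reference point the vendored doBackOff uses.
func inBackOff(entries map[string]*backoffEntry, key string, finishedAt, now time.Time) bool {
	e, ok := entries[key]
	if !ok {
		// First failure: record it and allow the restart immediately.
		entries[key] = &backoffEntry{lastUpdate: now, delay: 10 * time.Second}
		return false
	}
	// Still inside the back-off window: refuse the restart (CrashLoopBackOff).
	if finishedAt.Add(e.delay).After(now) {
		return true
	}
	// Window elapsed: allow the restart and grow the delay, capped.
	e.delay *= 2
	if e.delay > 5*time.Minute {
		e.delay = 5 * time.Minute
	}
	e.lastUpdate = now
	return false
}

func main() {
	entries := map[string]*backoffEntry{}
	key := "new/bar_foo" // pod full name + container name, as doBackOff builds it
	now := time.Now()
	fmt.Println(inBackOff(entries, key, now, now))                    // false: first attempt
	fmt.Println(inBackOff(entries, key, now, now.Add(2*time.Second))) // true: within the 10s window
}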
25
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go
generated
vendored
@ -54,7 +54,7 @@ func customTestRuntimeManager(keyring *credentialprovider.BasicDockerKeyring) (*
// we may want to set memory capacity.
machineInfo := &cadvisorapi.MachineInfo{}
osInterface := &containertest.FakeOS{}
manager, err := NewFakeKubeRuntimeManager(fakeRuntimeService, fakeImageService, machineInfo, osInterface, &containertest.FakeRuntimeHelper{}, keyring)
manager, err := newFakeKubeRuntimeManager(fakeRuntimeService, fakeImageService, machineInfo, osInterface, &containertest.FakeRuntimeHelper{}, keyring)
return fakeRuntimeService, fakeImageService, manager, err
}

@ -906,6 +906,29 @@ func TestComputePodActions(t *testing.T) {
// TODO: Add a test case for containers which failed the liveness
// check. Will need to fake the liveness check result.
},
"Verify we do not create a pod sandbox if no ready sandbox for pod with RestartPolicy=Never and all containers exited": {
mutatePodFn: func(pod *v1.Pod) {
pod.Spec.RestartPolicy = v1.RestartPolicyNever
},
mutateStatusFn: func(status *kubecontainer.PodStatus) {
// no ready sandbox
status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
status.SandboxStatuses[0].Metadata.Attempt = uint32(1)
// all containers exited
for i := range status.ContainerStatuses {
status.ContainerStatuses[i].State = kubecontainer.ContainerStateExited
status.ContainerStatuses[i].ExitCode = 0
}
},
actions: podActions{
SandboxID: baseStatus.SandboxStatuses[0].Id,
Attempt: uint32(2),
CreateSandbox: false,
KillPod: true,
ContainersToStart: []int{},
ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{},
},
},
} {
pod, status := makeBasePodAndStatus()
if test.mutatePodFn != nil {
27
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go
generated
vendored
@ -22,10 +22,10 @@ import (
"net/url"
"sort"

"github.com/golang/glog"
"k8s.io/api/core/v1"
kubetypes "k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/features"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@ -38,7 +38,7 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32
podSandboxConfig, err := m.generatePodSandboxConfig(pod, attempt)
if err != nil {
message := fmt.Sprintf("GeneratePodSandboxConfig for pod %q failed: %v", format.Pod(pod), err)
glog.Error(message)
klog.Error(message)
return "", message, err
}

@ -46,14 +46,23 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32
err = m.osInterface.MkdirAll(podSandboxConfig.LogDirectory, 0755)
if err != nil {
message := fmt.Sprintf("Create pod log directory for pod %q failed: %v", format.Pod(pod), err)
glog.Errorf(message)
klog.Errorf(message)
return "", message, err
}

podSandBoxID, err := m.runtimeService.RunPodSandbox(podSandboxConfig)
runtimeHandler := ""
if utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClass) && m.runtimeClassManager != nil {
runtimeHandler, err = m.runtimeClassManager.LookupRuntimeHandler(pod.Spec.RuntimeClassName)
if err != nil {
message := fmt.Sprintf("CreatePodSandbox for pod %q failed: %v", format.Pod(pod), err)
return "", message, err
}
}

podSandBoxID, err := m.runtimeService.RunPodSandbox(podSandboxConfig, runtimeHandler)
if err != nil {
message := fmt.Sprintf("CreatePodSandbox for pod %q failed: %v", format.Pod(pod), err)
glog.Error(message)
klog.Error(message)
return "", message, err
}

@ -195,7 +204,7 @@ func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeapi

resp, err := m.runtimeService.ListPodSandbox(filter)
if err != nil {
glog.Errorf("ListPodSandbox failed: %v", err)
klog.Errorf("ListPodSandbox failed: %v", err)
return nil, err
}

@ -205,14 +214,14 @@ func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeapi
// determinePodSandboxIP determines the IP address of the given pod sandbox.
func (m *kubeGenericRuntimeManager) determinePodSandboxIP(podNamespace, podName string, podSandbox *runtimeapi.PodSandboxStatus) string {
if podSandbox.Network == nil {
glog.Warningf("Pod Sandbox status doesn't have network information, cannot report IP")
klog.Warningf("Pod Sandbox status doesn't have network information, cannot report IP")
return ""
}
ip := podSandbox.Network.Ip
if len(ip) != 0 && net.ParseIP(ip) == nil {
// ip could be an empty string if runtime is not responsible for the
// IP (e.g., host networking).
glog.Warningf("Pod Sandbox reported an unparseable IP %v", ip)
klog.Warningf("Pod Sandbox reported an unparseable IP %v", ip)
return ""
}
return ip
@ -231,7 +240,7 @@ func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(podUID kubetypes.UID, s
}
sandboxes, err := m.runtimeService.ListPodSandbox(filter)
if err != nil {
glog.Errorf("ListPodSandbox with pod UID %q failed: %v", podUID, err)
klog.Errorf("ListPodSandbox with pod UID %q failed: %v", podUID, err)
return nil, err
}

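The createPodSandbox hunk above threads a runtimeHandler, resolved from the pod's RuntimeClassName, into RunPodSandbox whenever the RuntimeClass feature gate is on. Here is a hedged, self-contained sketch of that lookup shape; runtimeClassStore and errNotFound are invented stand-ins for the real runtimeclass.Manager, not its API.

package main

import (
	"errors"
	"fmt"
)

// runtimeClassStore is a stand-in for the kubelet's RuntimeClass manager:
// it maps a RuntimeClass name to the CRI handler that should run the sandbox.
type runtimeClassStore map[string]string

var errNotFound = errors.New("RuntimeClass not found")

// lookupRuntimeHandler mirrors the shape of LookupRuntimeHandler in the diff:
// a nil or empty class name means "use the default handler" (empty string),
// and an unknown name is an error that aborts sandbox creation.
func (s runtimeClassStore) lookupRuntimeHandler(runtimeClassName *string) (string, error) {
	if runtimeClassName == nil || *runtimeClassName == "" {
		return "", nil
	}
	handler, ok := s[*runtimeClassName]
	if !ok {
		return "", fmt.Errorf("%v: %q", errNotFound, *runtimeClassName)
	}
	return handler, nil
}

func main() {
	store := runtimeClassStore{"gvisor": "runsc"} // example mapping, not from the source
	name := "gvisor"
	handler, err := store.lookupRuntimeHandler(&name)
	if err != nil {
		fmt.Println("sandbox creation would abort:", err)
		return
	}
	// The resolved handler is then passed as the second argument to
	// RunPodSandbox(config, handler), as in the hunk above.
	fmt.Println("runtime handler:", handler)
}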
82
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_sandbox_test.go
generated
vendored
@ -22,31 +22,24 @@ import (
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/kubernetes/pkg/features"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/runtimeclass"
rctest "k8s.io/kubernetes/pkg/kubelet/runtimeclass/testing"
"k8s.io/utils/pointer"
)

// TestCreatePodSandbox tests creating sandbox and its corresponding pod log directory.
func TestCreatePodSandbox(t *testing.T) {
fakeRuntime, _, m, err := createTestRuntimeManager()
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "new",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Image: "busybox",
ImagePullPolicy: v1.PullIfNotPresent,
},
},
},
}
require.NoError(t, err)
pod := newTestPod()

fakeOS := m.osInterface.(*containertest.FakeOS)
fakeOS.MkdirAllFn = func(path string, perm os.FileMode) error {
@ -63,3 +56,60 @@ func TestCreatePodSandbox(t *testing.T) {
assert.Equal(t, len(sandboxes), 1)
// TODO Check pod sandbox configuration
}

// TestCreatePodSandbox_RuntimeClass tests creating sandbox with RuntimeClasses enabled.
func TestCreatePodSandbox_RuntimeClass(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.RuntimeClass, true)()

rcm := runtimeclass.NewManager(rctest.NewPopulatedDynamicClient())
defer rctest.StartManagerSync(t, rcm)()

fakeRuntime, _, m, err := createTestRuntimeManager()
require.NoError(t, err)
m.runtimeClassManager = rcm

tests := map[string]struct {
rcn *string
expectedHandler string
expectError bool
}{
"unspecified RuntimeClass": {rcn: nil, expectedHandler: ""},
"valid RuntimeClass": {rcn: pointer.StringPtr(rctest.SandboxRuntimeClass), expectedHandler: rctest.SandboxRuntimeHandler},
"missing RuntimeClass": {rcn: pointer.StringPtr("phantom"), expectError: true},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
pod := newTestPod()
pod.Spec.RuntimeClassName = test.rcn

id, _, err := m.createPodSandbox(pod, 1)
if test.expectError {
assert.Error(t, err)
assert.Contains(t, fakeRuntime.Called, "RunPodSandbox")
} else {
assert.NoError(t, err)
assert.Contains(t, fakeRuntime.Called, "RunPodSandbox")
assert.Equal(t, test.expectedHandler, fakeRuntime.Sandboxes[id].RuntimeHandler)
}
})
}
}

func newTestPod() *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "new",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Image: "busybox",
ImagePullPolicy: v1.PullIfNotPresent,
},
},
},
}
}

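Alongside the tests above, the determinePodSandboxIP hunk in kuberuntime_sandbox.go is worth restating in isolation: a missing network status or an unparseable address is reported as "no IP" rather than propagated as an error, because host-network pods legitimately have none. A minimal sketch of that same validation using only the standard library (the function name is illustrative):

package main

import (
	"fmt"
	"net"
)

// sandboxIP mirrors the checks in determinePodSandboxIP: an empty string can
// mean the runtime is not responsible for the IP (e.g. host networking), and
// an unparseable address is dropped instead of being reported as garbage.
func sandboxIP(reported string) string {
	if reported == "" {
		return ""
	}
	if net.ParseIP(reported) == nil {
		return ""
	}
	return reported
}

func main() {
	fmt.Printf("%q\n", sandboxIP("10.244.1.17")) // "10.244.1.17"
	fmt.Printf("%q\n", sandboxIP("not-an-ip"))   // ""
	fmt.Printf("%q\n", sandboxIP(""))            // ""
}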
24
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/labels.go
generated
vendored
@ -20,10 +20,10 @@ import (
"encoding/json"
"strconv"

"github.com/golang/glog"
"k8s.io/api/core/v1"
kubetypes "k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/types"
@ -135,7 +135,7 @@ func newContainerAnnotations(container *v1.Container, pod *v1.Pod, restartCount
// Using json encoding so that the PreStop handler object is readable after writing as a label
rawPreStop, err := json.Marshal(container.Lifecycle.PreStop)
if err != nil {
glog.Errorf("Unable to marshal lifecycle PreStop handler for container %q of pod %q: %v", container.Name, format.Pod(pod), err)
klog.Errorf("Unable to marshal lifecycle PreStop handler for container %q of pod %q: %v", container.Name, format.Pod(pod), err)
} else {
annotations[containerPreStopHandlerLabel] = string(rawPreStop)
}
@ -144,7 +144,7 @@ func newContainerAnnotations(container *v1.Container, pod *v1.Pod, restartCount
if len(container.Ports) > 0 {
rawContainerPorts, err := json.Marshal(container.Ports)
if err != nil {
glog.Errorf("Unable to marshal container ports for container %q for pod %q: %v", container.Name, format.Pod(pod), err)
klog.Errorf("Unable to marshal container ports for container %q for pod %q: %v", container.Name, format.Pod(pod), err)
} else {
annotations[containerPortsLabel] = string(rawContainerPorts)
}
@ -203,28 +203,28 @@ func getContainerInfoFromAnnotations(annotations map[string]string) *annotatedCo
}

if containerInfo.Hash, err = getUint64ValueFromLabel(annotations, containerHashLabel); err != nil {
glog.Errorf("Unable to get %q from annotations %q: %v", containerHashLabel, annotations, err)
klog.Errorf("Unable to get %q from annotations %q: %v", containerHashLabel, annotations, err)
}
if containerInfo.RestartCount, err = getIntValueFromLabel(annotations, containerRestartCountLabel); err != nil {
glog.Errorf("Unable to get %q from annotations %q: %v", containerRestartCountLabel, annotations, err)
klog.Errorf("Unable to get %q from annotations %q: %v", containerRestartCountLabel, annotations, err)
}
if containerInfo.PodDeletionGracePeriod, err = getInt64PointerFromLabel(annotations, podDeletionGracePeriodLabel); err != nil {
glog.Errorf("Unable to get %q from annotations %q: %v", podDeletionGracePeriodLabel, annotations, err)
klog.Errorf("Unable to get %q from annotations %q: %v", podDeletionGracePeriodLabel, annotations, err)
}
if containerInfo.PodTerminationGracePeriod, err = getInt64PointerFromLabel(annotations, podTerminationGracePeriodLabel); err != nil {
glog.Errorf("Unable to get %q from annotations %q: %v", podTerminationGracePeriodLabel, annotations, err)
klog.Errorf("Unable to get %q from annotations %q: %v", podTerminationGracePeriodLabel, annotations, err)
}

preStopHandler := &v1.Handler{}
if found, err := getJSONObjectFromLabel(annotations, containerPreStopHandlerLabel, preStopHandler); err != nil {
glog.Errorf("Unable to get %q from annotations %q: %v", containerPreStopHandlerLabel, annotations, err)
klog.Errorf("Unable to get %q from annotations %q: %v", containerPreStopHandlerLabel, annotations, err)
} else if found {
containerInfo.PreStopHandler = preStopHandler
}

containerPorts := []v1.ContainerPort{}
if found, err := getJSONObjectFromLabel(annotations, containerPortsLabel, &containerPorts); err != nil {
glog.Errorf("Unable to get %q from annotations %q: %v", containerPortsLabel, annotations, err)
klog.Errorf("Unable to get %q from annotations %q: %v", containerPortsLabel, annotations, err)
} else if found {
containerInfo.ContainerPorts = containerPorts
}
@ -237,7 +237,7 @@ func getStringValueFromLabel(labels map[string]string, label string) string {
return value
}
// Do not report error, because there should be many old containers without label now.
glog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label)
klog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label)
// Return empty string "" for these containers, the caller will get value by other ways.
return ""
}
@ -252,7 +252,7 @@ func getIntValueFromLabel(labels map[string]string, label string) (int, error) {
return intValue, nil
}
// Do not report error, because there should be many old containers without label now.
glog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label)
klog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label)
// Just set the value to 0
return 0, nil
}
@ -267,7 +267,7 @@ func getUint64ValueFromLabel(labels map[string]string, label string) (uint64, er
return intValue, nil
}
// Do not report error, because there should be many old containers without label now.
glog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label)
klog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label)
// Just set the value to 0
return 0, nil
}
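The labels.go hunks above all follow one pattern: structured data (PreStop handlers, container ports, grace periods) is JSON-encoded into container annotations on write, and missing keys are tolerated on read because old containers predate the labels. A self-contained sketch of that round-trip follows; the trimmed ContainerPort type and the annotation key are illustrative assumptions, not the vendored constants.

package main

import (
	"encoding/json"
	"fmt"
)

// ContainerPort is a trimmed stand-in for v1.ContainerPort.
type ContainerPort struct {
	Name          string `json:"name,omitempty"`
	ContainerPort int32  `json:"containerPort"`
}

// Illustrative key; the real code uses containerPortsLabel.
const portsAnnotation = "example.io/container-ports"

// writePorts marshals ports into the annotation map, as newContainerAnnotations does.
func writePorts(annotations map[string]string, ports []ContainerPort) error {
	raw, err := json.Marshal(ports)
	if err != nil {
		return err
	}
	annotations[portsAnnotation] = string(raw)
	return nil
}

// readPorts tolerates a missing annotation, mirroring getJSONObjectFromLabel:
// an old container without the key is reported as not-found, not as an error.
func readPorts(annotations map[string]string) ([]ContainerPort, bool, error) {
	raw, ok := annotations[portsAnnotation]
	if !ok {
		return nil, false, nil
	}
	var ports []ContainerPort
	if err := json.Unmarshal([]byte(raw), &ports); err != nil {
		return nil, false, err
	}
	return ports, true, nil
}

func main() {
	ann := map[string]string{}
	if err := writePorts(ann, []ContainerPort{{Name: "http", ContainerPort: 8080}}); err != nil {
		panic(err)
	}
	ports, found, _ := readPorts(ann)
	fmt.Println(found, ports[0].ContainerPort) // true 8080
}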
26
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/labels_test.go
generated
vendored
@ -48,15 +48,15 @@ func TestContainerLabels(t *testing.T) {
},
}
container := &v1.Container{
Name: "test_container",
Name: "test_container",
TerminationMessagePath: "/somepath",
Lifecycle: lifecycle,
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test_pod",
Namespace: "test_pod_namespace",
UID: "test_pod_uid",
Name: "test_pod",
Namespace: "test_pod_namespace",
UID: "test_pod_uid",
DeletionGracePeriodSeconds: &deletionGracePeriod,
},
Spec: v1.PodSpec{
@ -193,16 +193,16 @@ func TestContainerAnnotations(t *testing.T) {
},
}
container := &v1.Container{
Name: "test_container",
Ports: containerPorts,
Name: "test_container",
Ports: containerPorts,
TerminationMessagePath: "/somepath",
Lifecycle: lifecycle,
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test_pod",
Namespace: "test_pod_namespace",
UID: "test_pod_uid",
Name: "test_pod",
Namespace: "test_pod_namespace",
UID: "test_pod_uid",
DeletionGracePeriodSeconds: &deletionGracePeriod,
},
Spec: v1.PodSpec{
@ -214,10 +214,10 @@ func TestContainerAnnotations(t *testing.T) {
ContainerPorts: containerPorts,
PodDeletionGracePeriod: pod.DeletionGracePeriodSeconds,
PodTerminationGracePeriod: pod.Spec.TerminationGracePeriodSeconds,
Hash: kubecontainer.HashContainer(container),
RestartCount: restartCount,
TerminationMessagePath: container.TerminationMessagePath,
PreStopHandler: container.Lifecycle.PreStop,
Hash: kubecontainer.HashContainer(container),
RestartCount: restartCount,
TerminationMessagePath: container.TerminationMessagePath,
PreStopHandler: container.Lifecycle.PreStop,
}

// Test whether we can get right information from label
4
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/legacy.go
generated
vendored
@ -44,9 +44,9 @@ func legacyLogSymlink(containerID string, containerName, podName, podNamespace s
containerName, containerID)
}

func logSymlink(containerLogsDir, podFullName, containerName, dockerId string) string {
func logSymlink(containerLogsDir, podFullName, containerName, dockerID string) string {
suffix := fmt.Sprintf(".%s", legacyLogSuffix)
logPath := fmt.Sprintf("%s_%s-%s", podFullName, containerName, dockerId)
logPath := fmt.Sprintf("%s_%s-%s", podFullName, containerName, dockerID)
// Length of a filename cannot exceed 255 characters in ext4 on Linux.
if len(logPath) > ext4MaxFileNameLen-len(suffix) {
logPath = logPath[:ext4MaxFileNameLen-len(suffix)]
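The logSymlink rename above (dockerId to dockerID) sits next to a subtlety worth spelling out: the symlink name is truncated so that the name plus its .log suffix never exceeds ext4's 255-character filename limit. Here is a standalone sketch of that arithmetic; the function name is illustrative, but the truncation mirrors the vendored code shown above.

package main

import (
	"fmt"
	"strings"
)

const ext4MaxFileNameLen = 255

// truncatedLogName reproduces the truncation in logSymlink: cap the prefix
// so that prefix+suffix stays within the ext4 filename limit.
func truncatedLogName(podFullName, containerName, containerID string) string {
	suffix := ".log"
	name := fmt.Sprintf("%s_%s-%s", podFullName, containerName, containerID)
	if len(name) > ext4MaxFileNameLen-len(suffix) {
		name = name[:ext4MaxFileNameLen-len(suffix)]
	}
	return name + suffix
}

func main() {
	// Same magnitudes as the legacy test above: 128 + 70 + 80 characters
	// plus separators is 280, so the prefix is cut to 251 and ".log" added.
	long := truncatedLogName(strings.Repeat("p", 128), strings.Repeat("c", 70), strings.Repeat("d", 80))
	fmt.Println(len(long)) // 255
}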
6
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/legacy_test.go
generated
vendored
@ -40,10 +40,10 @@ func TestLogSymLink(t *testing.T) {
containerLogsDir := "/foo/bar"
podFullName := randStringBytes(128)
containerName := randStringBytes(70)
dockerId := randStringBytes(80)
dockerID := randStringBytes(80)
// The file name cannot exceed 255 characters. Since .log suffix is required, the prefix cannot exceed 251 characters.
expectedPath := path.Join(containerLogsDir, fmt.Sprintf("%s_%s-%s", podFullName, containerName, dockerId)[:251]+".log")
as.Equal(expectedPath, logSymlink(containerLogsDir, podFullName, containerName, dockerId))
expectedPath := path.Join(containerLogsDir, fmt.Sprintf("%s_%s-%s", podFullName, containerName, dockerID)[:251]+".log")
as.Equal(expectedPath, logSymlink(containerLogsDir, podFullName, containerName, dockerID))
}

func TestLegacyLogSymLink(t *testing.T) {
10
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/BUILD
generated
vendored
@ -9,10 +9,10 @@ go_library(
"//pkg/kubelet/apis/cri:go_default_library",
"//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
"//pkg/util/tail:go_default_library",
"//vendor/github.com/docker/docker/pkg/jsonlog:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog:go_default_library",
"//vendor/github.com/fsnotify/fsnotify:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@ -22,9 +22,9 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
],
)

29
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/logs.go
generated
vendored
@ -19,6 +19,7 @@ package logs
import (
"bufio"
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
@ -27,9 +28,9 @@ import (
"os"
"time"

"github.com/docker/docker/pkg/jsonlog"
"github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog"
"github.com/fsnotify/fsnotify"
"github.com/golang/glog"
"k8s.io/klog"

"k8s.io/api/core/v1"
internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
@ -266,7 +267,7 @@ func (w *logWriter) write(msg *logMessage) error {
// ReadLogs read the container log and redirect into stdout and stderr.
// Note that containerID is only needed when following the log, or else
// just pass in empty string "".
func ReadLogs(path, containerID string, opts *LogOptions, runtimeService internalapi.RuntimeService, stdout, stderr io.Writer) error {
func ReadLogs(ctx context.Context, path, containerID string, opts *LogOptions, runtimeService internalapi.RuntimeService, stdout, stderr io.Writer) error {
f, err := os.Open(path)
if err != nil {
return fmt.Errorf("failed to open log file %q: %v", path, err)
@ -292,7 +293,7 @@ func ReadLogs(path, containerID string, opts *LogOptions, runtimeService interna
msg := &logMessage{}
for {
if stop {
glog.V(2).Infof("Finish parsing log file %q", path)
klog.V(2).Infof("Finish parsing log file %q", path)
return nil
}
l, err := r.ReadBytes(eol[0])
@ -317,7 +318,7 @@ func ReadLogs(path, containerID string, opts *LogOptions, runtimeService interna
}
}
// Wait until the next log change.
if found, err := waitLogs(containerID, watcher, runtimeService); !found {
if found, err := waitLogs(ctx, containerID, watcher, runtimeService); !found {
return err
}
continue
@ -327,7 +328,7 @@ func ReadLogs(path, containerID string, opts *LogOptions, runtimeService interna
if len(l) == 0 {
continue
}
glog.Warningf("Incomplete line in log file %q: %q", path, l)
klog.Warningf("Incomplete line in log file %q: %q", path, l)
}
if parse == nil {
// Initialize the log parsing function.
@ -339,16 +340,16 @@ func ReadLogs(path, containerID string, opts *LogOptions, runtimeService interna
// Parse the log line.
msg.reset()
if err := parse(l, msg); err != nil {
glog.Errorf("Failed with err %v when parsing log for log file %q: %q", err, path, l)
klog.Errorf("Failed with err %v when parsing log for log file %q: %q", err, path, l)
continue
}
// Write the log line into the stream.
if err := writer.write(msg); err != nil {
if err == errMaximumWrite {
glog.V(2).Infof("Finish parsing log file %q, hit bytes limit %d(bytes)", path, opts.bytes)
klog.V(2).Infof("Finish parsing log file %q, hit bytes limit %d(bytes)", path, opts.bytes)
return nil
}
glog.Errorf("Failed with err %v when writing log for log file %q: %+v", err, path, msg)
klog.Errorf("Failed with err %v when writing log for log file %q: %+v", err, path, msg)
return err
}
}
@ -361,7 +362,7 @@ func isContainerRunning(id string, r internalapi.RuntimeService) (bool, error) {
}
// Only keep following container log when it is running.
if s.State != runtimeapi.ContainerState_CONTAINER_RUNNING {
glog.V(5).Infof("Container %q is not running (state=%q)", id, s.State)
klog.V(5).Infof("Container %q is not running (state=%q)", id, s.State)
// Do not return error because it's normal that the container stops
// during waiting.
return false, nil
@ -371,7 +372,7 @@ func isContainerRunning(id string, r internalapi.RuntimeService) (bool, error) {

// waitLogs waits for the next log write. It returns a boolean and an error. The boolean
// indicates whether a new log is found; the error is any error that happened while waiting for new logs.
func waitLogs(id string, w *fsnotify.Watcher, runtimeService internalapi.RuntimeService) (bool, error) {
func waitLogs(ctx context.Context, id string, w *fsnotify.Watcher, runtimeService internalapi.RuntimeService) (bool, error) {
// no need to wait if the pod is not running
if running, err := isContainerRunning(id, runtimeService); !running {
return false, err
@ -379,15 +380,17 @@ func waitLogs(id string, w *fsnotify.Watcher, runtimeService internalapi.Runtime
errRetry := 5
for {
select {
case <-ctx.Done():
return false, fmt.Errorf("context cancelled")
case e := <-w.Events:
switch e.Op {
case fsnotify.Write:
return true, nil
default:
glog.Errorf("Unexpected fsnotify event: %v, retrying...", e)
klog.Errorf("Unexpected fsnotify event: %v, retrying...", e)
}
case err := <-w.Errors:
glog.Errorf("Fsnotify watch error: %v, %d error retries remaining", err, errRetry)
klog.Errorf("Fsnotify watch error: %v, %d error retries remaining", err, errRetry)
if errRetry == 0 {
return false, err
}
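The waitLogs hunk above adds a ctx.Done() case so that a caller who stops following logs can cancel the fsnotify wait instead of blocking forever. Below is a simplified, self-contained sketch of that three-way select, using plain channels in place of the fsnotify watcher; it is a sketch of the shape, not the vendored function.

package main

import (
	"context"
	"fmt"
	"time"
)

// waitEvent blocks until an event arrives, an error arrives, or the caller
// cancels; the same three-way select waitLogs now performs.
func waitEvent(ctx context.Context, events <-chan string, errs <-chan error) (bool, error) {
	for {
		select {
		case <-ctx.Done():
			// Caller stopped following the log: unblock promptly.
			return false, fmt.Errorf("context cancelled")
		case e := <-events:
			if e == "write" {
				return true, nil // new log data to read
			}
			// Other event kinds: keep waiting, as waitLogs retries.
		case err := <-errs:
			return false, err
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	// No events or errors ever arrive, so only the cancellation fires.
	found, err := waitEvent(ctx, make(chan string), make(chan error))
	fmt.Println(found, err) // false context cancelled
}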
29
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/main_test.go
generated
vendored
Normal file
@ -0,0 +1,29 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kuberuntime

import (
"testing"

utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
_ "k8s.io/kubernetes/pkg/features"
)

func TestMain(m *testing.M) {
utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run)
}
8
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/security_context.go
generated
vendored
@ -30,7 +30,10 @@ func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Po
effectiveSc := securitycontext.DetermineEffectiveSecurityContext(pod, container)
synthesized := convertToRuntimeSecurityContext(effectiveSc)
if synthesized == nil {
synthesized = &runtimeapi.LinuxContainerSecurityContext{}
synthesized = &runtimeapi.LinuxContainerSecurityContext{
MaskedPaths: securitycontext.ConvertToRuntimeMaskedPaths(effectiveSc.ProcMount),
ReadonlyPaths: securitycontext.ConvertToRuntimeReadonlyPaths(effectiveSc.ProcMount),
}
}

// set SeccompProfilePath.
@ -67,6 +70,9 @@ func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Po

synthesized.NoNewPrivs = securitycontext.AddNoNewPrivileges(effectiveSc)

synthesized.MaskedPaths = securitycontext.ConvertToRuntimeMaskedPaths(effectiveSc.ProcMount)
synthesized.ReadonlyPaths = securitycontext.ConvertToRuntimeReadonlyPaths(effectiveSc.ProcMount)

return synthesized
}
