ceph/ceph-csi (mirror of https://github.com/ceph/ceph-csi.git)

Commit: vendor files

vendor/k8s.io/kubernetes/pkg/kubelet/status/BUILD (generated, vendored, new file, 78 lines)
@@ -0,0 +1,78 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "generate.go",
        "status_manager.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/kubelet/status",
    deps = [
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/kubelet/container:go_default_library",
        "//pkg/kubelet/pod:go_default_library",
        "//pkg/kubelet/types:go_default_library",
        "//pkg/kubelet/util/format:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "generate_test.go",
        "status_manager_test.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/kubelet/status",
    library = ":go_default_library",
    deps = [
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/kubelet/configmap:go_default_library",
        "//pkg/kubelet/container:go_default_library",
        "//pkg/kubelet/pod:go_default_library",
        "//pkg/kubelet/pod/testing:go_default_library",
        "//pkg/kubelet/secret:go_default_library",
        "//pkg/kubelet/status/testing:go_default_library",
        "//pkg/kubelet/types:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/testing:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/kubelet/status/testing:all-srcs",
    ],
    tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/kubelet/status/generate.go (generated, vendored, new file, 142 lines)
@@ -0,0 +1,142 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package status

import (
    "fmt"
    "strings"

    "k8s.io/api/core/v1"
    podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

const (
    UnknownContainerStatuses = "UnknownContainerStatuses"
    PodCompleted             = "PodCompleted"
    ContainersNotReady       = "ContainersNotReady"
    ContainersNotInitialized = "ContainersNotInitialized"
)

// GeneratePodReadyCondition returns ready condition if all containers in a pod are ready, else it
// returns an unready condition.
func GeneratePodReadyCondition(spec *v1.PodSpec, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
    // Find if all containers are ready or not.
    if containerStatuses == nil {
        return v1.PodCondition{
            Type:   v1.PodReady,
            Status: v1.ConditionFalse,
            Reason: UnknownContainerStatuses,
        }
    }
    unknownContainers := []string{}
    unreadyContainers := []string{}
    for _, container := range spec.Containers {
        if containerStatus, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok {
            if !containerStatus.Ready {
                unreadyContainers = append(unreadyContainers, container.Name)
            }
        } else {
            unknownContainers = append(unknownContainers, container.Name)
        }
    }

    // If all containers are known and succeeded, just return PodCompleted.
    if podPhase == v1.PodSucceeded && len(unknownContainers) == 0 {
        return v1.PodCondition{
            Type:   v1.PodReady,
            Status: v1.ConditionFalse,
            Reason: PodCompleted,
        }
    }

    unreadyMessages := []string{}
    if len(unknownContainers) > 0 {
        unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with unknown status: %s", unknownContainers))
    }
    if len(unreadyContainers) > 0 {
        unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with unready status: %s", unreadyContainers))
    }
    unreadyMessage := strings.Join(unreadyMessages, ", ")
    if unreadyMessage != "" {
        return v1.PodCondition{
            Type:    v1.PodReady,
            Status:  v1.ConditionFalse,
            Reason:  ContainersNotReady,
            Message: unreadyMessage,
        }
    }

    return v1.PodCondition{
        Type:   v1.PodReady,
        Status: v1.ConditionTrue,
    }
}

// GeneratePodInitializedCondition returns initialized condition if all init containers in a pod are ready, else it
// returns an uninitialized condition.
func GeneratePodInitializedCondition(spec *v1.PodSpec, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
    // Find if all containers are ready or not.
    if containerStatuses == nil && len(spec.InitContainers) > 0 {
        return v1.PodCondition{
            Type:   v1.PodInitialized,
            Status: v1.ConditionFalse,
            Reason: UnknownContainerStatuses,
        }
    }
    unknownContainers := []string{}
    unreadyContainers := []string{}
    for _, container := range spec.InitContainers {
        if containerStatus, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok {
            if !containerStatus.Ready {
                unreadyContainers = append(unreadyContainers, container.Name)
            }
        } else {
            unknownContainers = append(unknownContainers, container.Name)
        }
    }

    // If all init containers are known and succeeded, just return PodCompleted.
    if podPhase == v1.PodSucceeded && len(unknownContainers) == 0 {
        return v1.PodCondition{
            Type:   v1.PodInitialized,
            Status: v1.ConditionTrue,
            Reason: PodCompleted,
        }
    }

    unreadyMessages := []string{}
    if len(unknownContainers) > 0 {
        unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with unknown status: %s", unknownContainers))
    }
    if len(unreadyContainers) > 0 {
        unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with incomplete status: %s", unreadyContainers))
    }
    unreadyMessage := strings.Join(unreadyMessages, ", ")
    if unreadyMessage != "" {
        return v1.PodCondition{
            Type:    v1.PodInitialized,
            Status:  v1.ConditionFalse,
            Reason:  ContainersNotInitialized,
            Message: unreadyMessage,
        }
    }

    return v1.PodCondition{
        Type:   v1.PodInitialized,
        Status: v1.ConditionTrue,
    }
}
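
Not part of the vendored diff: a minimal caller sketch showing how GeneratePodReadyCondition behaves when one container has no status yet. It assumes the vendored package is importable as k8s.io/kubernetes/pkg/kubelet/status; the pod/container names are made up for illustration.

// usage_sketch.go (hypothetical example, not in this commit)
package main

import (
    "fmt"

    "k8s.io/api/core/v1"
    kubeletstatus "k8s.io/kubernetes/pkg/kubelet/status"
)

func main() {
    // Two containers declared in the spec, but only "app" has reported a status.
    spec := &v1.PodSpec{Containers: []v1.Container{{Name: "app"}, {Name: "sidecar"}}}
    statuses := []v1.ContainerStatus{
        {Name: "app", Ready: true},
        // "sidecar" has no status yet, so it is counted as unknown.
    }

    cond := kubeletstatus.GeneratePodReadyCondition(spec, statuses, v1.PodRunning)
    fmt.Println(cond.Status, cond.Reason, cond.Message)
    // Per the code above this should print:
    // False ContainersNotReady containers with unknown status: [sidecar]
}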

vendor/k8s.io/kubernetes/pkg/kubelet/status/generate_test.go (generated, vendored, new file, 248 lines)
@@ -0,0 +1,248 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package status

import (
    "reflect"
    "testing"

    "github.com/stretchr/testify/assert"
    "k8s.io/api/core/v1"
)

func TestGeneratePodReadyCondition(t *testing.T) {
    tests := []struct {
        spec              *v1.PodSpec
        containerStatuses []v1.ContainerStatus
        podPhase          v1.PodPhase
        expected          v1.PodCondition
    }{
        {
            spec:              nil,
            containerStatuses: nil,
            podPhase:          v1.PodRunning,
            expected:          getReadyCondition(false, UnknownContainerStatuses, ""),
        },
        {
            spec:              &v1.PodSpec{},
            containerStatuses: []v1.ContainerStatus{},
            podPhase:          v1.PodRunning,
            expected:          getReadyCondition(true, "", ""),
        },
        {
            spec: &v1.PodSpec{
                Containers: []v1.Container{
                    {Name: "1234"},
                },
            },
            containerStatuses: []v1.ContainerStatus{},
            podPhase:          v1.PodRunning,
            expected:          getReadyCondition(false, ContainersNotReady, "containers with unknown status: [1234]"),
        },
        {
            spec: &v1.PodSpec{
                Containers: []v1.Container{
                    {Name: "1234"},
                    {Name: "5678"},
                },
            },
            containerStatuses: []v1.ContainerStatus{
                getReadyStatus("1234"),
                getReadyStatus("5678"),
            },
            podPhase: v1.PodRunning,
            expected: getReadyCondition(true, "", ""),
        },
        {
            spec: &v1.PodSpec{
                Containers: []v1.Container{
                    {Name: "1234"},
                    {Name: "5678"},
                },
            },
            containerStatuses: []v1.ContainerStatus{
                getReadyStatus("1234"),
            },
            podPhase: v1.PodRunning,
            expected: getReadyCondition(false, ContainersNotReady, "containers with unknown status: [5678]"),
        },
        {
            spec: &v1.PodSpec{
                Containers: []v1.Container{
                    {Name: "1234"},
                    {Name: "5678"},
                },
            },
            containerStatuses: []v1.ContainerStatus{
                getReadyStatus("1234"),
                getNotReadyStatus("5678"),
            },
            podPhase: v1.PodRunning,
            expected: getReadyCondition(false, ContainersNotReady, "containers with unready status: [5678]"),
        },
        {
            spec: &v1.PodSpec{
                Containers: []v1.Container{
                    {Name: "1234"},
                },
            },
            containerStatuses: []v1.ContainerStatus{
                getNotReadyStatus("1234"),
            },
            podPhase: v1.PodSucceeded,
            expected: getReadyCondition(false, PodCompleted, ""),
        },
    }

    for i, test := range tests {
        condition := GeneratePodReadyCondition(test.spec, test.containerStatuses, test.podPhase)
        if !reflect.DeepEqual(condition, test.expected) {
            t.Errorf("On test case %v, expected:\n%+v\ngot\n%+v\n", i, test.expected, condition)
        }
    }
}

func TestGeneratePodInitializedCondition(t *testing.T) {
    noInitContainer := &v1.PodSpec{}
    oneInitContainer := &v1.PodSpec{
        InitContainers: []v1.Container{
            {Name: "1234"},
        },
    }
    twoInitContainer := &v1.PodSpec{
        InitContainers: []v1.Container{
            {Name: "1234"},
            {Name: "5678"},
        },
    }
    tests := []struct {
        spec              *v1.PodSpec
        containerStatuses []v1.ContainerStatus
        podPhase          v1.PodPhase
        expected          v1.PodCondition
    }{
        {
            spec:              twoInitContainer,
            containerStatuses: nil,
            podPhase:          v1.PodRunning,
            expected: v1.PodCondition{
                Status: v1.ConditionFalse,
                Reason: UnknownContainerStatuses,
            },
        },
        {
            spec:              noInitContainer,
            containerStatuses: []v1.ContainerStatus{},
            podPhase:          v1.PodRunning,
            expected: v1.PodCondition{
                Status: v1.ConditionTrue,
                Reason: "",
            },
        },
        {
            spec:              oneInitContainer,
            containerStatuses: []v1.ContainerStatus{},
            podPhase:          v1.PodRunning,
            expected: v1.PodCondition{
                Status: v1.ConditionFalse,
                Reason: ContainersNotInitialized,
            },
        },
        {
            spec: twoInitContainer,
            containerStatuses: []v1.ContainerStatus{
                getReadyStatus("1234"),
                getReadyStatus("5678"),
            },
            podPhase: v1.PodRunning,
            expected: v1.PodCondition{
                Status: v1.ConditionTrue,
                Reason: "",
            },
        },
        {
            spec: twoInitContainer,
            containerStatuses: []v1.ContainerStatus{
                getReadyStatus("1234"),
            },
            podPhase: v1.PodRunning,
            expected: v1.PodCondition{
                Status: v1.ConditionFalse,
                Reason: ContainersNotInitialized,
            },
        },
        {
            spec: twoInitContainer,
            containerStatuses: []v1.ContainerStatus{
                getReadyStatus("1234"),
                getNotReadyStatus("5678"),
            },
            podPhase: v1.PodRunning,
            expected: v1.PodCondition{
                Status: v1.ConditionFalse,
                Reason: ContainersNotInitialized,
            },
        },
        {
            spec: oneInitContainer,
            containerStatuses: []v1.ContainerStatus{
                getReadyStatus("1234"),
            },
            podPhase: v1.PodSucceeded,
            expected: v1.PodCondition{
                Status: v1.ConditionTrue,
                Reason: PodCompleted,
            },
        },
    }

    for _, test := range tests {
        test.expected.Type = v1.PodInitialized
        condition := GeneratePodInitializedCondition(test.spec, test.containerStatuses, test.podPhase)
        assert.Equal(t, test.expected.Type, condition.Type)
        assert.Equal(t, test.expected.Status, condition.Status)
        assert.Equal(t, test.expected.Reason, condition.Reason)

    }
}

func getReadyCondition(ready bool, reason, message string) v1.PodCondition {
    status := v1.ConditionFalse
    if ready {
        status = v1.ConditionTrue
    }
    return v1.PodCondition{
        Type:    v1.PodReady,
        Status:  status,
        Reason:  reason,
        Message: message,
    }
}

func getReadyStatus(cName string) v1.ContainerStatus {
    return v1.ContainerStatus{
        Name:  cName,
        Ready: true,
    }
}

func getNotReadyStatus(cName string) v1.ContainerStatus {
    return v1.ContainerStatus{
        Name:  cName,
        Ready: false,
    }
}

vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go (generated, vendored, new file, 613 lines)
@@ -0,0 +1,613 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package status

import (
    "fmt"
    "sort"
    "sync"
    "time"

    clientset "k8s.io/client-go/kubernetes"

    "github.com/golang/glog"
    "k8s.io/api/core/v1"
    apiequality "k8s.io/apimachinery/pkg/api/equality"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/diff"
    "k8s.io/apimachinery/pkg/util/wait"
    podutil "k8s.io/kubernetes/pkg/api/v1/pod"
    kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
    kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
    "k8s.io/kubernetes/pkg/kubelet/util/format"
)

// A wrapper around v1.PodStatus that includes a version to enforce that stale pod statuses are
// not sent to the API server.
type versionedPodStatus struct {
    status v1.PodStatus
    // Monotonically increasing version number (per pod).
    version uint64
    // Pod name & namespace, for sending updates to API server.
    podName      string
    podNamespace string
}

type podStatusSyncRequest struct {
    podUID types.UID
    status versionedPodStatus
}

// Updates pod statuses in apiserver. Writes only when new status has changed.
// All methods are thread-safe.
type manager struct {
    kubeClient clientset.Interface
    podManager kubepod.Manager
    // Map from pod UID to sync status of the corresponding pod.
    podStatuses      map[types.UID]versionedPodStatus
    podStatusesLock  sync.RWMutex
    podStatusChannel chan podStatusSyncRequest
    // Map from (mirror) pod UID to latest status version successfully sent to the API server.
    // apiStatusVersions must only be accessed from the sync thread.
    apiStatusVersions map[kubetypes.MirrorPodUID]uint64
    podDeletionSafety PodDeletionSafetyProvider
}

// PodStatusProvider knows how to provide status for a pod. It's intended to be used by other components
// that need to introspect status.
type PodStatusProvider interface {
    // GetPodStatus returns the cached status for the provided pod UID, as well as whether it
    // was a cache hit.
    GetPodStatus(uid types.UID) (v1.PodStatus, bool)
}

// An object which provides guarantees that a pod can be safely deleted.
type PodDeletionSafetyProvider interface {
    // A function which returns true if the pod can safely be deleted
    PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bool
}

// Manager is the Source of truth for kubelet pod status, and should be kept up-to-date with
// the latest v1.PodStatus. It also syncs updates back to the API server.
type Manager interface {
    PodStatusProvider

    // Start the API server status sync loop.
    Start()

    // SetPodStatus caches updates the cached status for the given pod, and triggers a status update.
    SetPodStatus(pod *v1.Pod, status v1.PodStatus)

    // SetContainerReadiness updates the cached container status with the given readiness, and
    // triggers a status update.
    SetContainerReadiness(podUID types.UID, containerID kubecontainer.ContainerID, ready bool)

    // TerminatePod resets the container status for the provided pod to terminated and triggers
    // a status update.
    TerminatePod(pod *v1.Pod)

    // RemoveOrphanedStatuses scans the status cache and removes any entries for pods not included in
    // the provided podUIDs.
    RemoveOrphanedStatuses(podUIDs map[types.UID]bool)
}

const syncPeriod = 10 * time.Second

func NewManager(kubeClient clientset.Interface, podManager kubepod.Manager, podDeletionSafety PodDeletionSafetyProvider) Manager {
    return &manager{
        kubeClient:        kubeClient,
        podManager:        podManager,
        podStatuses:       make(map[types.UID]versionedPodStatus),
        podStatusChannel:  make(chan podStatusSyncRequest, 1000), // Buffer up to 1000 statuses
        apiStatusVersions: make(map[kubetypes.MirrorPodUID]uint64),
        podDeletionSafety: podDeletionSafety,
    }
}

// isStatusEqual returns true if the given pod statuses are equal, false otherwise.
// This method normalizes the status before comparing so as to make sure that meaningless
// changes will be ignored.
func isStatusEqual(oldStatus, status *v1.PodStatus) bool {
    return apiequality.Semantic.DeepEqual(status, oldStatus)
}

func (m *manager) Start() {
    // Don't start the status manager if we don't have a client. This will happen
    // on the master, where the kubelet is responsible for bootstrapping the pods
    // of the master components.
    if m.kubeClient == nil {
        glog.Infof("Kubernetes client is nil, not starting status manager.")
        return
    }

    glog.Info("Starting to sync pod status with apiserver")
    syncTicker := time.Tick(syncPeriod)
    // syncPod and syncBatch share the same go routine to avoid sync races.
    go wait.Forever(func() {
        select {
        case syncRequest := <-m.podStatusChannel:
            glog.V(5).Infof("Status Manager: syncing pod: %q, with status: (%d, %v) from podStatusChannel",
                syncRequest.podUID, syncRequest.status.version, syncRequest.status.status)
            m.syncPod(syncRequest.podUID, syncRequest.status)
        case <-syncTicker:
            m.syncBatch()
        }
    }, 0)
}

func (m *manager) GetPodStatus(uid types.UID) (v1.PodStatus, bool) {
    m.podStatusesLock.RLock()
    defer m.podStatusesLock.RUnlock()
    status, ok := m.podStatuses[types.UID(m.podManager.TranslatePodUID(uid))]
    return status.status, ok
}

func (m *manager) SetPodStatus(pod *v1.Pod, status v1.PodStatus) {
    m.podStatusesLock.Lock()
    defer m.podStatusesLock.Unlock()
    // Make sure we're caching a deep copy.
    status = *status.DeepCopy()

    // Force a status update if deletion timestamp is set. This is necessary
    // because if the pod is in the non-running state, the pod worker still
    // needs to be able to trigger an update and/or deletion.
    m.updateStatusInternal(pod, status, pod.DeletionTimestamp != nil)
}

func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontainer.ContainerID, ready bool) {
    m.podStatusesLock.Lock()
    defer m.podStatusesLock.Unlock()

    pod, ok := m.podManager.GetPodByUID(podUID)
    if !ok {
        glog.V(4).Infof("Pod %q has been deleted, no need to update readiness", string(podUID))
        return
    }

    oldStatus, found := m.podStatuses[pod.UID]
    if !found {
        glog.Warningf("Container readiness changed before pod has synced: %q - %q",
            format.Pod(pod), containerID.String())
        return
    }

    // Find the container to update.
    containerStatus, _, ok := findContainerStatus(&oldStatus.status, containerID.String())
    if !ok {
        glog.Warningf("Container readiness changed for unknown container: %q - %q",
            format.Pod(pod), containerID.String())
        return
    }

    if containerStatus.Ready == ready {
        glog.V(4).Infof("Container readiness unchanged (%v): %q - %q", ready,
            format.Pod(pod), containerID.String())
        return
    }

    // Make sure we're not updating the cached version.
    status := *oldStatus.status.DeepCopy()
    containerStatus, _, _ = findContainerStatus(&status, containerID.String())
    containerStatus.Ready = ready

    // Update pod condition.
    readyConditionIndex := -1
    for i, condition := range status.Conditions {
        if condition.Type == v1.PodReady {
            readyConditionIndex = i
            break
        }
    }
    readyCondition := GeneratePodReadyCondition(&pod.Spec, status.ContainerStatuses, status.Phase)
    if readyConditionIndex != -1 {
        status.Conditions[readyConditionIndex] = readyCondition
    } else {
        glog.Warningf("PodStatus missing PodReady condition: %+v", status)
        status.Conditions = append(status.Conditions, readyCondition)
    }

    m.updateStatusInternal(pod, status, false)
}

func findContainerStatus(status *v1.PodStatus, containerID string) (containerStatus *v1.ContainerStatus, init bool, ok bool) {
    // Find the container to update.
    for i, c := range status.ContainerStatuses {
        if c.ContainerID == containerID {
            return &status.ContainerStatuses[i], false, true
        }
    }

    for i, c := range status.InitContainerStatuses {
        if c.ContainerID == containerID {
            return &status.InitContainerStatuses[i], true, true
        }
    }

    return nil, false, false

}

func (m *manager) TerminatePod(pod *v1.Pod) {
    m.podStatusesLock.Lock()
    defer m.podStatusesLock.Unlock()
    oldStatus := &pod.Status
    if cachedStatus, ok := m.podStatuses[pod.UID]; ok {
        oldStatus = &cachedStatus.status
    }
    status := *oldStatus.DeepCopy()
    for i := range status.ContainerStatuses {
        status.ContainerStatuses[i].State = v1.ContainerState{
            Terminated: &v1.ContainerStateTerminated{},
        }
    }
    for i := range status.InitContainerStatuses {
        status.InitContainerStatuses[i].State = v1.ContainerState{
            Terminated: &v1.ContainerStateTerminated{},
        }
    }
    m.updateStatusInternal(pod, status, true)
}

// checkContainerStateTransition ensures that no container is trying to transition
// from a terminated to non-terminated state, which is illegal and indicates a
// logical error in the kubelet.
func checkContainerStateTransition(oldStatuses, newStatuses []v1.ContainerStatus, restartPolicy v1.RestartPolicy) error {
    // If we should always restart, containers are allowed to leave the terminated state
    if restartPolicy == v1.RestartPolicyAlways {
        return nil
    }
    for _, oldStatus := range oldStatuses {
        // Skip any container that wasn't terminated
        if oldStatus.State.Terminated == nil {
            continue
        }
        // Skip any container that failed but is allowed to restart
        if oldStatus.State.Terminated.ExitCode != 0 && restartPolicy == v1.RestartPolicyOnFailure {
            continue
        }
        for _, newStatus := range newStatuses {
            if oldStatus.Name == newStatus.Name && newStatus.State.Terminated == nil {
                return fmt.Errorf("terminated container %v attempted illegal transition to non-terminated state", newStatus.Name)
            }
        }
    }
    return nil
}

// updateStatusInternal updates the internal status cache, and queues an update to the api server if
// necessary. Returns whether an update was triggered.
// This method IS NOT THREAD SAFE and must be called from a locked function.
func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUpdate bool) bool {
    var oldStatus v1.PodStatus
    cachedStatus, isCached := m.podStatuses[pod.UID]
    if isCached {
        oldStatus = cachedStatus.status
    } else if mirrorPod, ok := m.podManager.GetMirrorPodByPod(pod); ok {
        oldStatus = mirrorPod.Status
    } else {
        oldStatus = pod.Status
    }

    // Check for illegal state transition in containers
    if err := checkContainerStateTransition(oldStatus.ContainerStatuses, status.ContainerStatuses, pod.Spec.RestartPolicy); err != nil {
        glog.Errorf("Status update on pod %v/%v aborted: %v", pod.Namespace, pod.Name, err)
        return false
    }
    if err := checkContainerStateTransition(oldStatus.InitContainerStatuses, status.InitContainerStatuses, pod.Spec.RestartPolicy); err != nil {
        glog.Errorf("Status update on pod %v/%v aborted: %v", pod.Namespace, pod.Name, err)
        return false
    }

    // Set ReadyCondition.LastTransitionTime.
    if _, readyCondition := podutil.GetPodCondition(&status, v1.PodReady); readyCondition != nil {
        // Need to set LastTransitionTime.
        lastTransitionTime := metav1.Now()
        _, oldReadyCondition := podutil.GetPodCondition(&oldStatus, v1.PodReady)
        if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status {
            lastTransitionTime = oldReadyCondition.LastTransitionTime
        }
        readyCondition.LastTransitionTime = lastTransitionTime
    }

    // Set InitializedCondition.LastTransitionTime.
    if _, initCondition := podutil.GetPodCondition(&status, v1.PodInitialized); initCondition != nil {
        // Need to set LastTransitionTime.
        lastTransitionTime := metav1.Now()
        _, oldInitCondition := podutil.GetPodCondition(&oldStatus, v1.PodInitialized)
        if oldInitCondition != nil && initCondition.Status == oldInitCondition.Status {
            lastTransitionTime = oldInitCondition.LastTransitionTime
        }
        initCondition.LastTransitionTime = lastTransitionTime
    }

    // ensure that the start time does not change across updates.
    if oldStatus.StartTime != nil && !oldStatus.StartTime.IsZero() {
        status.StartTime = oldStatus.StartTime
    } else if status.StartTime.IsZero() {
        // if the status has no start time, we need to set an initial time
        now := metav1.Now()
        status.StartTime = &now
    }

    normalizeStatus(pod, &status)
    // The intent here is to prevent concurrent updates to a pod's status from
    // clobbering each other so the phase of a pod progresses monotonically.
    if isCached && isStatusEqual(&cachedStatus.status, &status) && !forceUpdate {
        glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", format.Pod(pod), status)
        return false // No new status.
    }

    newStatus := versionedPodStatus{
        status:       status,
        version:      cachedStatus.version + 1,
        podName:      pod.Name,
        podNamespace: pod.Namespace,
    }
    m.podStatuses[pod.UID] = newStatus

    select {
    case m.podStatusChannel <- podStatusSyncRequest{pod.UID, newStatus}:
        glog.V(5).Infof("Status Manager: adding pod: %q, with status: (%q, %v) to podStatusChannel",
            pod.UID, newStatus.version, newStatus.status)
        return true
    default:
        // Let the periodic syncBatch handle the update if the channel is full.
        // We can't block, since we hold the mutex lock.
        glog.V(4).Infof("Skipping the status update for pod %q for now because the channel is full; status: %+v",
            format.Pod(pod), status)
        return false
    }
}

// deletePodStatus simply removes the given pod from the status cache.
func (m *manager) deletePodStatus(uid types.UID) {
    m.podStatusesLock.Lock()
    defer m.podStatusesLock.Unlock()
    delete(m.podStatuses, uid)
}

// TODO(filipg): It'd be cleaner if we can do this without signal from user.
func (m *manager) RemoveOrphanedStatuses(podUIDs map[types.UID]bool) {
    m.podStatusesLock.Lock()
    defer m.podStatusesLock.Unlock()
    for key := range m.podStatuses {
        if _, ok := podUIDs[key]; !ok {
            glog.V(5).Infof("Removing %q from status map.", key)
            delete(m.podStatuses, key)
        }
    }
}

// syncBatch syncs pods statuses with the apiserver.
func (m *manager) syncBatch() {
    var updatedStatuses []podStatusSyncRequest
    podToMirror, mirrorToPod := m.podManager.GetUIDTranslations()
    func() { // Critical section
        m.podStatusesLock.RLock()
        defer m.podStatusesLock.RUnlock()

        // Clean up orphaned versions.
        for uid := range m.apiStatusVersions {
            _, hasPod := m.podStatuses[types.UID(uid)]
            _, hasMirror := mirrorToPod[uid]
            if !hasPod && !hasMirror {
                delete(m.apiStatusVersions, uid)
            }
        }

        for uid, status := range m.podStatuses {
            syncedUID := kubetypes.MirrorPodUID(uid)
            if mirrorUID, ok := podToMirror[kubetypes.ResolvedPodUID(uid)]; ok {
                if mirrorUID == "" {
                    glog.V(5).Infof("Static pod %q (%s/%s) does not have a corresponding mirror pod; skipping", uid, status.podName, status.podNamespace)
                    continue
                }
                syncedUID = mirrorUID
            }
            if m.needsUpdate(types.UID(syncedUID), status) {
                updatedStatuses = append(updatedStatuses, podStatusSyncRequest{uid, status})
            } else if m.needsReconcile(uid, status.status) {
                // Delete the apiStatusVersions here to force an update on the pod status
                // In most cases the deleted apiStatusVersions here should be filled
                // soon after the following syncPod() [If the syncPod() sync an update
                // successfully].
                delete(m.apiStatusVersions, syncedUID)
                updatedStatuses = append(updatedStatuses, podStatusSyncRequest{uid, status})
            }
        }
    }()

    for _, update := range updatedStatuses {
        glog.V(5).Infof("Status Manager: syncPod in syncbatch. pod UID: %q", update.podUID)
        m.syncPod(update.podUID, update.status)
    }
}

// syncPod syncs the given status with the API server. The caller must not hold the lock.
func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
    if !m.needsUpdate(uid, status) {
        glog.V(1).Infof("Status for pod %q is up-to-date; skipping", uid)
        return
    }

    // TODO: make me easier to express from client code
    pod, err := m.kubeClient.CoreV1().Pods(status.podNamespace).Get(status.podName, metav1.GetOptions{})
    if errors.IsNotFound(err) {
        glog.V(3).Infof("Pod %q (%s) does not exist on the server", status.podName, uid)
        // If the Pod is deleted the status will be cleared in
        // RemoveOrphanedStatuses, so we just ignore the update here.
        return
    }
    if err != nil {
        glog.Warningf("Failed to get status for pod %q: %v", format.PodDesc(status.podName, status.podNamespace, uid), err)
        return
    }

    translatedUID := m.podManager.TranslatePodUID(pod.UID)
    // Type convert original uid just for the purpose of comparison.
    if len(translatedUID) > 0 && translatedUID != kubetypes.ResolvedPodUID(uid) {
        glog.V(2).Infof("Pod %q was deleted and then recreated, skipping status update; old UID %q, new UID %q", format.Pod(pod), uid, translatedUID)
        m.deletePodStatus(uid)
        return
    }
    pod.Status = status.status
    // TODO: handle conflict as a retry, make that easier too.
    newPod, err := m.kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(pod)
    if err != nil {
        glog.Warningf("Failed to update status for pod %q: %v", format.Pod(pod), err)
        return
    }
    pod = newPod

    glog.V(3).Infof("Status for pod %q updated successfully: (%d, %+v)", format.Pod(pod), status.version, status.status)
    m.apiStatusVersions[kubetypes.MirrorPodUID(pod.UID)] = status.version

    // We don't handle graceful deletion of mirror pods.
    if m.canBeDeleted(pod, status.status) {
        deleteOptions := metav1.NewDeleteOptions(0)
        // Use the pod UID as the precondition for deletion to prevent deleting a newly created pod with the same name and namespace.
        deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod.UID))
        err = m.kubeClient.CoreV1().Pods(pod.Namespace).Delete(pod.Name, deleteOptions)
        if err != nil {
            glog.Warningf("Failed to delete status for pod %q: %v", format.Pod(pod), err)
            return
        }
        glog.V(3).Infof("Pod %q fully terminated and removed from etcd", format.Pod(pod))
        m.deletePodStatus(uid)
    }
}

// needsUpdate returns whether the status is stale for the given pod UID.
// This method is not thread safe, and must only be accessed by the sync thread.
func (m *manager) needsUpdate(uid types.UID, status versionedPodStatus) bool {
    latest, ok := m.apiStatusVersions[kubetypes.MirrorPodUID(uid)]
    if !ok || latest < status.version {
        return true
    }
    pod, ok := m.podManager.GetPodByUID(uid)
    if !ok {
        return false
    }
    return m.canBeDeleted(pod, status.status)
}

func (m *manager) canBeDeleted(pod *v1.Pod, status v1.PodStatus) bool {
    if pod.DeletionTimestamp == nil || kubepod.IsMirrorPod(pod) {
        return false
    }
    return m.podDeletionSafety.PodResourcesAreReclaimed(pod, status)
}

// needsReconcile compares the given status with the status in the pod manager (which
// in fact comes from apiserver), returns whether the status needs to be reconciled with
// the apiserver. Now when pod status is inconsistent between apiserver and kubelet,
// kubelet should forcibly send an update to reconcile the inconsistence, because kubelet
// should be the source of truth of pod status.
// NOTE(random-liu): It's simpler to pass in mirror pod uid and get mirror pod by uid, but
// now the pod manager only supports getting mirror pod by static pod, so we have to pass
// static pod uid here.
// TODO(random-liu): Simplify the logic when mirror pod manager is added.
func (m *manager) needsReconcile(uid types.UID, status v1.PodStatus) bool {
    // The pod could be a static pod, so we should translate first.
    pod, ok := m.podManager.GetPodByUID(uid)
    if !ok {
        glog.V(4).Infof("Pod %q has been deleted, no need to reconcile", string(uid))
        return false
    }
    // If the pod is a static pod, we should check its mirror pod, because only status in mirror pod is meaningful to us.
    if kubepod.IsStaticPod(pod) {
        mirrorPod, ok := m.podManager.GetMirrorPodByPod(pod)
        if !ok {
            glog.V(4).Infof("Static pod %q has no corresponding mirror pod, no need to reconcile", format.Pod(pod))
            return false
        }
        pod = mirrorPod
    }

    podStatus := pod.Status.DeepCopy()
    normalizeStatus(pod, podStatus)

    if isStatusEqual(podStatus, &status) {
        // If the status from the source is the same with the cached status,
        // reconcile is not needed. Just return.
        return false
    }
    glog.V(3).Infof("Pod status is inconsistent with cached status for pod %q, a reconciliation should be triggered:\n %+v", format.Pod(pod),
        diff.ObjectDiff(podStatus, status))

    return true
}

// We add this function, because apiserver only supports *RFC3339* now, which means that the timestamp returned by
// apiserver has no nanosecond information. However, the timestamp returned by metav1.Now() contains nanosecond,
// so when we do comparison between status from apiserver and cached status, isStatusEqual() will always return false.
// There is related issue #15262 and PR #15263 about this.
// In fact, the best way to solve this is to do it on api side. However, for now, we normalize the status locally in
// kubelet temporarily.
// TODO(random-liu): Remove timestamp related logic after apiserver supports nanosecond or makes it consistent.
func normalizeStatus(pod *v1.Pod, status *v1.PodStatus) *v1.PodStatus {
    bytesPerStatus := kubecontainer.MaxPodTerminationMessageLogLength
    if containers := len(pod.Spec.Containers) + len(pod.Spec.InitContainers); containers > 0 {
        bytesPerStatus = bytesPerStatus / containers
    }
    normalizeTimeStamp := func(t *metav1.Time) {
        *t = t.Rfc3339Copy()
    }
    normalizeContainerState := func(c *v1.ContainerState) {
        if c.Running != nil {
            normalizeTimeStamp(&c.Running.StartedAt)
        }
        if c.Terminated != nil {
            normalizeTimeStamp(&c.Terminated.StartedAt)
            normalizeTimeStamp(&c.Terminated.FinishedAt)
            if len(c.Terminated.Message) > bytesPerStatus {
                c.Terminated.Message = c.Terminated.Message[:bytesPerStatus]
            }
        }
    }

    if status.StartTime != nil {
        normalizeTimeStamp(status.StartTime)
    }
    for i := range status.Conditions {
        condition := &status.Conditions[i]
        normalizeTimeStamp(&condition.LastProbeTime)
        normalizeTimeStamp(&condition.LastTransitionTime)
    }

    // update container statuses
    for i := range status.ContainerStatuses {
        cstatus := &status.ContainerStatuses[i]
        normalizeContainerState(&cstatus.State)
        normalizeContainerState(&cstatus.LastTerminationState)
    }
    // Sort the container statuses, so that the order won't affect the result of comparison
    sort.Sort(kubetypes.SortedContainerStatuses(status.ContainerStatuses))

    // update init container statuses
    for i := range status.InitContainerStatuses {
        cstatus := &status.InitContainerStatuses[i]
        normalizeContainerState(&cstatus.State)
        normalizeContainerState(&cstatus.LastTerminationState)
    }
    // Sort the container statuses, so that the order won't affect the result of comparison
    kubetypes.SortInitContainerStatuses(pod, status.InitContainerStatuses)
    return status
}
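
Not part of the vendored diff: a minimal wiring sketch for the status manager, mirroring what the package's own tests (below) do with a fake clientset. The concrete pod-manager and deletion-safety fakes are test helpers reused here for illustration; a real kubelet wires in its own implementations.

// wiring_sketch.go (hypothetical example, not in this commit)
package main

import (
    "k8s.io/client-go/kubernetes/fake"
    kubeconfigmap "k8s.io/kubernetes/pkg/kubelet/configmap"
    kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
    podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
    kubesecret "k8s.io/kubernetes/pkg/kubelet/secret"
    kubeletstatus "k8s.io/kubernetes/pkg/kubelet/status"
    statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
)

func main() {
    // Fake client and fake pod-manager dependencies, as used by newTestManager in the tests.
    client := fake.NewSimpleClientset()
    podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), kubesecret.NewFakeManager(), kubeconfigmap.NewFakeManager())

    // NewManager returns the Manager interface; Start launches the sync goroutine that
    // drains podStatusChannel and runs syncBatch every 10 seconds (syncPeriod).
    m := kubeletstatus.NewManager(client, podManager, &statustest.FakePodDeletionSafetyProvider{})
    m.Start()

    // Callers then feed it via SetPodStatus / SetContainerReadiness / TerminatePod,
    // and the manager pushes deduplicated, versioned updates to the API server.
}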
811
vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager_test.go
generated
vendored
Normal file
811
vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager_test.go
generated
vendored
Normal file
@ -0,0 +1,811 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package status
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
kubeconfigmap "k8s.io/kubernetes/pkg/kubelet/configmap"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
|
||||
podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
|
||||
kubesecret "k8s.io/kubernetes/pkg/kubelet/secret"
|
||||
statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
)
|
||||
|
||||
// Generate new instance of test pod with the same initial value.
|
||||
func getTestPod() *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
Namespace: "new",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// After adding reconciliation, if status in pod manager is different from the cached status, a reconciliation
|
||||
// will be triggered, which will mess up all the old unit test.
|
||||
// To simplify the implementation of unit test, we add testSyncBatch() here, it will make sure the statuses in
|
||||
// pod manager the same with cached ones before syncBatch() so as to avoid reconciling.
|
||||
func (m *manager) testSyncBatch() {
|
||||
for uid, status := range m.podStatuses {
|
||||
pod, ok := m.podManager.GetPodByUID(uid)
|
||||
if ok {
|
||||
pod.Status = status.status
|
||||
}
|
||||
pod, ok = m.podManager.GetMirrorPodByPod(pod)
|
||||
if ok {
|
||||
pod.Status = status.status
|
||||
}
|
||||
}
|
||||
m.syncBatch()
|
||||
}
|
||||
|
||||
func newTestManager(kubeClient clientset.Interface) *manager {
|
||||
podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), kubesecret.NewFakeManager(), kubeconfigmap.NewFakeManager())
|
||||
podManager.AddPod(getTestPod())
|
||||
return NewManager(kubeClient, podManager, &statustest.FakePodDeletionSafetyProvider{}).(*manager)
|
||||
}
|
||||
|
||||
func generateRandomMessage() string {
|
||||
return strconv.Itoa(rand.Int())
|
||||
}
|
||||
|
||||
func getRandomPodStatus() v1.PodStatus {
|
||||
return v1.PodStatus{
|
||||
Message: generateRandomMessage(),
|
||||
}
|
||||
}
|
||||
|
||||
func verifyActions(t *testing.T, manager *manager, expectedActions []core.Action) {
|
||||
manager.consumeUpdates()
|
||||
actions := manager.kubeClient.(*fake.Clientset).Actions()
|
||||
defer manager.kubeClient.(*fake.Clientset).ClearActions()
|
||||
if len(actions) != len(expectedActions) {
|
||||
t.Fatalf("unexpected actions, got: %+v expected: %+v", actions, expectedActions)
|
||||
return
|
||||
}
|
||||
for i := 0; i < len(actions); i++ {
|
||||
e := expectedActions[i]
|
||||
a := actions[i]
|
||||
if !a.Matches(e.GetVerb(), e.GetResource().Resource) || a.GetSubresource() != e.GetSubresource() {
|
||||
t.Errorf("unexpected actions, got: %+v expected: %+v", actions, expectedActions)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func verifyUpdates(t *testing.T, manager *manager, expectedUpdates int) {
|
||||
// Consume all updates in the channel.
|
||||
numUpdates := manager.consumeUpdates()
|
||||
if numUpdates != expectedUpdates {
|
||||
t.Errorf("unexpected number of updates %d, expected %d", numUpdates, expectedUpdates)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *manager) consumeUpdates() int {
|
||||
updates := 0
|
||||
for {
|
||||
select {
|
||||
case syncRequest := <-m.podStatusChannel:
|
||||
m.syncPod(syncRequest.podUID, syncRequest.status)
|
||||
updates++
|
||||
default:
|
||||
return updates
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewStatus(t *testing.T) {
|
||||
syncer := newTestManager(&fake.Clientset{})
|
||||
testPod := getTestPod()
|
||||
syncer.SetPodStatus(testPod, getRandomPodStatus())
|
||||
verifyUpdates(t, syncer, 1)
|
||||
|
||||
status := expectPodStatus(t, syncer, testPod)
|
||||
if status.StartTime.IsZero() {
|
||||
t.Errorf("SetPodStatus did not set a proper start time value")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewStatusPreservesPodStartTime(t *testing.T) {
|
||||
syncer := newTestManager(&fake.Clientset{})
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
Namespace: "new",
|
||||
},
|
||||
Status: v1.PodStatus{},
|
||||
}
|
||||
now := metav1.Now()
|
||||
startTime := metav1.NewTime(now.Time.Add(-1 * time.Minute))
|
||||
pod.Status.StartTime = &startTime
|
||||
syncer.SetPodStatus(pod, getRandomPodStatus())
|
||||
|
||||
status := expectPodStatus(t, syncer, pod)
|
||||
if !status.StartTime.Time.Equal(startTime.Time) {
|
||||
t.Errorf("Unexpected start time, expected %v, actual %v", startTime, status.StartTime)
|
||||
}
|
||||
}
|
||||
|
||||
func getReadyPodStatus() v1.PodStatus {
|
||||
return v1.PodStatus{
|
||||
Conditions: []v1.PodCondition{
|
||||
{
|
||||
Type: v1.PodReady,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewStatusSetsReadyTransitionTime(t *testing.T) {
|
||||
syncer := newTestManager(&fake.Clientset{})
|
||||
podStatus := getReadyPodStatus()
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
Namespace: "new",
|
||||
},
|
||||
Status: v1.PodStatus{},
|
||||
}
|
||||
syncer.SetPodStatus(pod, podStatus)
|
||||
verifyUpdates(t, syncer, 1)
|
||||
status := expectPodStatus(t, syncer, pod)
|
||||
readyCondition := podutil.GetPodReadyCondition(status)
|
||||
if readyCondition.LastTransitionTime.IsZero() {
|
||||
t.Errorf("Unexpected: last transition time not set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestChangedStatus(t *testing.T) {
|
||||
syncer := newTestManager(&fake.Clientset{})
|
||||
testPod := getTestPod()
|
||||
syncer.SetPodStatus(testPod, getRandomPodStatus())
|
||||
syncer.SetPodStatus(testPod, getRandomPodStatus())
|
||||
verifyUpdates(t, syncer, 2)
|
||||
}
|
||||
|
||||
func TestChangedStatusKeepsStartTime(t *testing.T) {
|
||||
syncer := newTestManager(&fake.Clientset{})
|
||||
testPod := getTestPod()
|
||||
now := metav1.Now()
|
||||
firstStatus := getRandomPodStatus()
|
||||
firstStatus.StartTime = &now
|
||||
syncer.SetPodStatus(testPod, firstStatus)
|
||||
syncer.SetPodStatus(testPod, getRandomPodStatus())
|
||||
verifyUpdates(t, syncer, 2)
|
||||
finalStatus := expectPodStatus(t, syncer, testPod)
|
||||
if finalStatus.StartTime.IsZero() {
|
||||
t.Errorf("StartTime should not be zero")
|
||||
}
|
||||
expected := now.Rfc3339Copy()
|
||||
if !finalStatus.StartTime.Equal(&expected) {
|
||||
t.Errorf("Expected %v, but got %v", expected, finalStatus.StartTime)
|
||||
}
|
||||
}
|
||||
|
||||
func TestChangedStatusUpdatesLastTransitionTime(t *testing.T) {
|
||||
syncer := newTestManager(&fake.Clientset{})
|
||||
podStatus := getReadyPodStatus()
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
Namespace: "new",
|
||||
},
|
||||
Status: v1.PodStatus{},
|
||||
}
|
||||
syncer.SetPodStatus(pod, podStatus)
|
||||
verifyUpdates(t, syncer, 1)
|
||||
oldStatus := expectPodStatus(t, syncer, pod)
|
||||
anotherStatus := getReadyPodStatus()
|
||||
anotherStatus.Conditions[0].Status = v1.ConditionFalse
|
||||
syncer.SetPodStatus(pod, anotherStatus)
|
||||
verifyUpdates(t, syncer, 1)
|
||||
newStatus := expectPodStatus(t, syncer, pod)
|
||||
|
||||
oldReadyCondition := podutil.GetPodReadyCondition(oldStatus)
|
||||
newReadyCondition := podutil.GetPodReadyCondition(newStatus)
|
||||
if newReadyCondition.LastTransitionTime.IsZero() {
|
||||
t.Errorf("Unexpected: last transition time not set")
|
||||
}
|
||||
if newReadyCondition.LastTransitionTime.Before(&oldReadyCondition.LastTransitionTime) {
|
||||
t.Errorf("Unexpected: new transition time %s, is before old transition time %s", newReadyCondition.LastTransitionTime, oldReadyCondition.LastTransitionTime)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnchangedStatus(t *testing.T) {
|
||||
syncer := newTestManager(&fake.Clientset{})
|
||||
testPod := getTestPod()
|
||||
podStatus := getRandomPodStatus()
|
||||
syncer.SetPodStatus(testPod, podStatus)
|
||||
syncer.SetPodStatus(testPod, podStatus)
|
||||
verifyUpdates(t, syncer, 1)
|
||||
}
|
||||
|
||||
func TestUnchangedStatusPreservesLastTransitionTime(t *testing.T) {
|
||||
syncer := newTestManager(&fake.Clientset{})
|
||||
podStatus := getReadyPodStatus()
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "12345678",
|
||||
Name: "foo",
|
||||
Namespace: "new",
|
||||
},
|
||||
Status: v1.PodStatus{},
|
||||
}
|
||||
syncer.SetPodStatus(pod, podStatus)
|
||||
verifyUpdates(t, syncer, 1)
|
||||
oldStatus := expectPodStatus(t, syncer, pod)
|
||||
anotherStatus := getReadyPodStatus()
|
||||
syncer.SetPodStatus(pod, anotherStatus)
|
||||
// No update.
|
||||
verifyUpdates(t, syncer, 0)
|
||||
newStatus := expectPodStatus(t, syncer, pod)
|
||||
|
||||
oldReadyCondition := podutil.GetPodReadyCondition(oldStatus)
|
||||
newReadyCondition := podutil.GetPodReadyCondition(newStatus)
|
||||
if newReadyCondition.LastTransitionTime.IsZero() {
|
||||
t.Errorf("Unexpected: last transition time not set")
|
||||
}
|
||||
if !oldReadyCondition.LastTransitionTime.Equal(&newReadyCondition.LastTransitionTime) {
|
||||
t.Errorf("Unexpected: new transition time %s, is not equal to old transition time %s", newReadyCondition.LastTransitionTime, oldReadyCondition.LastTransitionTime)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSyncPodIgnoresNotFound(t *testing.T) {
|
||||
client := fake.Clientset{}
|
||||
syncer := newTestManager(&client)
|
||||
client.AddReactor("get", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, nil, errors.NewNotFound(api.Resource("pods"), "test-pod")
|
||||
})
|
||||
syncer.SetPodStatus(getTestPod(), getRandomPodStatus())
|
||||
verifyActions(t, syncer, []core.Action{getAction()})
|
||||
}
|
||||
|
||||
func TestSyncPod(t *testing.T) {
|
||||
syncer := newTestManager(&fake.Clientset{})
|
||||
testPod := getTestPod()
|
||||
syncer.kubeClient = fake.NewSimpleClientset(testPod)
|
||||
syncer.SetPodStatus(testPod, getRandomPodStatus())
|
||||
verifyActions(t, syncer, []core.Action{getAction(), updateAction()})
|
||||
}
|
||||
|
||||
func TestSyncPodChecksMismatchedUID(t *testing.T) {
|
||||
syncer := newTestManager(&fake.Clientset{})
|
||||
pod := getTestPod()
|
||||
pod.UID = "first"
|
||||
syncer.podManager.AddPod(pod)
|
||||
differentPod := getTestPod()
|
||||
differentPod.UID = "second"
|
||||
syncer.podManager.AddPod(differentPod)
|
||||
syncer.kubeClient = fake.NewSimpleClientset(pod)
|
||||
syncer.SetPodStatus(differentPod, getRandomPodStatus())
|
||||
verifyActions(t, syncer, []core.Action{getAction()})
|
||||
}
|
||||
|
||||
func TestSyncPodNoDeadlock(t *testing.T) {
	client := &fake.Clientset{}
	m := newTestManager(client)
	pod := getTestPod()

	// Setup fake client.
	var ret *v1.Pod
	var err error
	client.AddReactor("*", "pods", func(action core.Action) (bool, runtime.Object, error) {
		switch action := action.(type) {
		case core.GetAction:
			assert.Equal(t, pod.Name, action.GetName(), "Unexpected GetAction: %+v", action)
		case core.UpdateAction:
			assert.Equal(t, pod.Name, action.GetObject().(*v1.Pod).Name, "Unexpected UpdateAction: %+v", action)
		default:
			assert.Fail(t, "Unexpected Action: %+v", action)
		}
		return true, ret, err
	})

	pod.Status.ContainerStatuses = []v1.ContainerStatus{{State: v1.ContainerState{Running: &v1.ContainerStateRunning{}}}}

	t.Logf("Pod not found.")
	ret = nil
	err = errors.NewNotFound(api.Resource("pods"), pod.Name)
	m.SetPodStatus(pod, getRandomPodStatus())
	verifyActions(t, m, []core.Action{getAction()})

	t.Logf("Pod was recreated.")
	ret = getTestPod()
	ret.UID = "other_pod"
	err = nil
	m.SetPodStatus(pod, getRandomPodStatus())
	verifyActions(t, m, []core.Action{getAction()})

	t.Logf("Pod not deleted (success case).")
	ret = getTestPod()
	m.SetPodStatus(pod, getRandomPodStatus())
	verifyActions(t, m, []core.Action{getAction(), updateAction()})

	t.Logf("Pod is terminated, but still running.")
	pod.DeletionTimestamp = new(metav1.Time)
	m.SetPodStatus(pod, getRandomPodStatus())
	verifyActions(t, m, []core.Action{getAction(), updateAction()})

	t.Logf("Pod is terminated successfully.")
	pod.Status.ContainerStatuses[0].State.Running = nil
	pod.Status.ContainerStatuses[0].State.Terminated = &v1.ContainerStateTerminated{}
	m.SetPodStatus(pod, getRandomPodStatus())
	verifyActions(t, m, []core.Action{getAction(), updateAction()})

	t.Logf("Error case.")
	ret = nil
	err = fmt.Errorf("intentional test error")
	m.SetPodStatus(pod, getRandomPodStatus())
	verifyActions(t, m, []core.Action{getAction()})
}

func TestStaleUpdates(t *testing.T) {
	pod := getTestPod()
	client := fake.NewSimpleClientset(pod)
	m := newTestManager(client)

	status := v1.PodStatus{Message: "initial status"}
	m.SetPodStatus(pod, status)
	status.Message = "first version bump"
	m.SetPodStatus(pod, status)
	status.Message = "second version bump"
	m.SetPodStatus(pod, status)

	t.Logf("syncBatch pushes only the latest status, so we should see three statuses in the channel, but only one update")
	m.syncBatch()
	verifyUpdates(t, m, 3)
	verifyActions(t, m, []core.Action{getAction(), updateAction()})
	t.Logf("Nothing left in the channel to sync")
	verifyActions(t, m, []core.Action{})

	t.Log("Unchanged status should not send an update.")
	m.SetPodStatus(pod, status)
	verifyUpdates(t, m, 0)

	t.Log("... unless it's stale.")
	mirrorPodUID := kubetypes.MirrorPodUID(pod.UID)
	m.apiStatusVersions[mirrorPodUID] = m.apiStatusVersions[mirrorPodUID] - 1

	m.SetPodStatus(pod, status)
	m.syncBatch()
	verifyActions(t, m, []core.Action{getAction(), updateAction()})

	t.Logf("Nothing stuck in the pipe.")
	verifyUpdates(t, m, 0)
}

// shuffle returns a new shuffled list of container statuses.
func shuffle(statuses []v1.ContainerStatus) []v1.ContainerStatus {
	numStatuses := len(statuses)
	randIndexes := rand.Perm(numStatuses)
	shuffled := make([]v1.ContainerStatus, numStatuses)
	for i := 0; i < numStatuses; i++ {
		shuffled[i] = statuses[randIndexes[i]]
	}
	return shuffled
}

func TestStatusEquality(t *testing.T) {
	pod := v1.Pod{
		Spec: v1.PodSpec{},
	}
	containerStatus := []v1.ContainerStatus{}
	for i := 0; i < 10; i++ {
		s := v1.ContainerStatus{
			Name: fmt.Sprintf("container%d", i),
		}
		containerStatus = append(containerStatus, s)
	}
	podStatus := v1.PodStatus{
		ContainerStatuses: containerStatus,
	}
	for i := 0; i < 10; i++ {
		oldPodStatus := v1.PodStatus{
			ContainerStatuses: shuffle(podStatus.ContainerStatuses),
		}
		normalizeStatus(&pod, &oldPodStatus)
		normalizeStatus(&pod, &podStatus)
		if !isStatusEqual(&oldPodStatus, &podStatus) {
			t.Fatalf("Order of container statuses should not affect normalized equality.")
		}
	}
}

func TestStatusNormalizationEnforcesMaxBytes(t *testing.T) {
	pod := v1.Pod{
		Spec: v1.PodSpec{},
	}
	containerStatus := []v1.ContainerStatus{}
	for i := 0; i < 48; i++ {
		// Each termination message is "abcdefgh" (8 bytes) repeated 24-26 times,
		// i.e. 192-208 bytes before normalization.
		s := v1.ContainerStatus{
			Name: fmt.Sprintf("container%d", i),
			LastTerminationState: v1.ContainerState{
				Terminated: &v1.ContainerStateTerminated{
					Message: strings.Repeat("abcdefgh", int(24+i%3)),
				},
			},
		}
		containerStatus = append(containerStatus, s)
	}
	podStatus := v1.PodStatus{
		InitContainerStatuses: containerStatus[:24],
		ContainerStatuses:     containerStatus[24:],
	}
	result := normalizeStatus(&pod, &podStatus)
	count := 0
	for _, s := range result.InitContainerStatuses {
		l := len(s.LastTerminationState.Terminated.Message)
		if l < 192 || l > 256 {
			t.Errorf("container message had length %d", l)
		}
		count += l
	}
	if count > kubecontainer.MaxPodTerminationMessageLogLength {
		t.Errorf("message length not truncated")
	}
}

func TestStaticPod(t *testing.T) {
	staticPod := getTestPod()
	staticPod.Annotations = map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"}
	mirrorPod := getTestPod()
	mirrorPod.UID = "mirror-12345678"
	mirrorPod.Annotations = map[string]string{
		kubetypes.ConfigSourceAnnotationKey: "api",
		kubetypes.ConfigMirrorAnnotationKey: "mirror",
	}
	client := fake.NewSimpleClientset(mirrorPod)
	m := newTestManager(client)

	t.Logf("Create the static pod")
	m.podManager.AddPod(staticPod)
	assert.True(t, kubepod.IsStaticPod(staticPod), "SetUp error: staticPod")

	status := getRandomPodStatus()
	now := metav1.Now()
	status.StartTime = &now
	m.SetPodStatus(staticPod, status)

	t.Logf("Should be able to get the static pod status from status manager")
	retrievedStatus := expectPodStatus(t, m, staticPod)
	normalizeStatus(staticPod, &status)
	assert.True(t, isStatusEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus)

	t.Logf("Should not sync pod in syncBatch because there is no corresponding mirror pod for the static pod.")
	m.syncBatch()
	assert.Equal(t, len(m.kubeClient.(*fake.Clientset).Actions()), 0, "Expected no updates after syncBatch, got %+v", m.kubeClient.(*fake.Clientset).Actions())

	t.Logf("Create the mirror pod")
	m.podManager.AddPod(mirrorPod)
	assert.True(t, kubepod.IsMirrorPod(mirrorPod), "SetUp error: mirrorPod")
	assert.Equal(t, m.podManager.TranslatePodUID(mirrorPod.UID), kubetypes.ResolvedPodUID(staticPod.UID))

	t.Logf("Should be able to get the mirror pod status from status manager")
	retrievedStatus, _ = m.GetPodStatus(mirrorPod.UID)
	assert.True(t, isStatusEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus)

	t.Logf("Should sync pod because the corresponding mirror pod is created")
	verifyActions(t, m, []core.Action{getAction(), updateAction()})

	t.Logf("syncBatch should not sync any pods because nothing is changed.")
	m.testSyncBatch()
	verifyActions(t, m, []core.Action{})

	t.Logf("Change mirror pod identity.")
	m.podManager.DeletePod(mirrorPod)
	mirrorPod.UID = "new-mirror-pod"
	mirrorPod.Status = v1.PodStatus{}
	m.podManager.AddPod(mirrorPod)

	t.Logf("Should not update to mirror pod, because UID has changed.")
	m.syncBatch()
	verifyActions(t, m, []core.Action{getAction()})
}

func TestTerminatePod(t *testing.T) {
	syncer := newTestManager(&fake.Clientset{})
	testPod := getTestPod()
	t.Logf("update the pod's status to Failed. TerminatePod should preserve this status update.")
	firstStatus := getRandomPodStatus()
	firstStatus.Phase = v1.PodFailed
	syncer.SetPodStatus(testPod, firstStatus)

	t.Logf("set the testPod to a pod with Phase running, to simulate a stale pod")
	testPod.Status = getRandomPodStatus()
	testPod.Status.Phase = v1.PodRunning

	syncer.TerminatePod(testPod)

	t.Logf("we expect the container statuses to have changed to terminated")
	newStatus := expectPodStatus(t, syncer, testPod)
	for i := range newStatus.ContainerStatuses {
		assert.False(t, newStatus.ContainerStatuses[i].State.Terminated == nil, "expected containers to be terminated")
	}
	for i := range newStatus.InitContainerStatuses {
		assert.False(t, newStatus.InitContainerStatuses[i].State.Terminated == nil, "expected init containers to be terminated")
	}

	t.Logf("we expect the previous status update to be preserved.")
	assert.Equal(t, newStatus.Phase, firstStatus.Phase)
	assert.Equal(t, newStatus.Message, firstStatus.Message)
}

func TestSetContainerReadiness(t *testing.T) {
	cID1 := kubecontainer.ContainerID{Type: "test", ID: "1"}
	cID2 := kubecontainer.ContainerID{Type: "test", ID: "2"}
	containerStatuses := []v1.ContainerStatus{
		{
			Name:        "c1",
			ContainerID: cID1.String(),
			Ready:       false,
		}, {
			Name:        "c2",
			ContainerID: cID2.String(),
			Ready:       false,
		},
	}
	status := v1.PodStatus{
		ContainerStatuses: containerStatuses,
		Conditions: []v1.PodCondition{{
			Type:   v1.PodReady,
			Status: v1.ConditionFalse,
		}},
	}
	pod := getTestPod()
	pod.Spec.Containers = []v1.Container{{Name: "c1"}, {Name: "c2"}}

	// Verify expected readiness of containers & pod.
	verifyReadiness := func(step string, status *v1.PodStatus, c1Ready, c2Ready, podReady bool) {
		for _, c := range status.ContainerStatuses {
			switch c.ContainerID {
			case cID1.String():
				if c.Ready != c1Ready {
					t.Errorf("[%s] Expected readiness of c1 to be %v but was %v", step, c1Ready, c.Ready)
				}
			case cID2.String():
				if c.Ready != c2Ready {
					t.Errorf("[%s] Expected readiness of c2 to be %v but was %v", step, c2Ready, c.Ready)
				}
			default:
				t.Fatalf("[%s] Unexpected container: %+v", step, c)
			}
		}
		if status.Conditions[0].Type != v1.PodReady {
			t.Fatalf("[%s] Unexpected condition: %+v", step, status.Conditions[0])
		} else if ready := (status.Conditions[0].Status == v1.ConditionTrue); ready != podReady {
			t.Errorf("[%s] Expected readiness of pod to be %v but was %v", step, podReady, ready)
		}
	}

	m := newTestManager(&fake.Clientset{})
	// Add test pod because the container spec has been changed.
	m.podManager.AddPod(pod)

	t.Log("Setting readiness before status should fail.")
	m.SetContainerReadiness(pod.UID, cID1, true)
	verifyUpdates(t, m, 0)
	if status, ok := m.GetPodStatus(pod.UID); ok {
		t.Errorf("Unexpected PodStatus: %+v", status)
	}

	t.Log("Setting initial status.")
	m.SetPodStatus(pod, status)
	verifyUpdates(t, m, 1)
	status = expectPodStatus(t, m, pod)
	verifyReadiness("initial", &status, false, false, false)

	t.Log("Setting unchanged readiness should do nothing.")
	m.SetContainerReadiness(pod.UID, cID1, false)
	verifyUpdates(t, m, 0)
	status = expectPodStatus(t, m, pod)
	verifyReadiness("unchanged", &status, false, false, false)

	t.Log("Setting container readiness should generate update but not pod readiness.")
	m.SetContainerReadiness(pod.UID, cID1, true)
	verifyUpdates(t, m, 1)
	status = expectPodStatus(t, m, pod)
	verifyReadiness("c1 ready", &status, true, false, false)

	t.Log("Setting both containers to ready should update pod readiness.")
	m.SetContainerReadiness(pod.UID, cID2, true)
	verifyUpdates(t, m, 1)
	status = expectPodStatus(t, m, pod)
	verifyReadiness("all ready", &status, true, true, true)

	t.Log("Setting non-existent container readiness should fail.")
	m.SetContainerReadiness(pod.UID, kubecontainer.ContainerID{Type: "test", ID: "foo"}, true)
	verifyUpdates(t, m, 0)
	status = expectPodStatus(t, m, pod)
	verifyReadiness("ignore non-existent", &status, true, true, true)
}

func TestSyncBatchCleanupVersions(t *testing.T) {
	m := newTestManager(&fake.Clientset{})
	testPod := getTestPod()
	mirrorPod := getTestPod()
	mirrorPod.UID = "mirror-uid"
	mirrorPod.Name = "mirror_pod"
	mirrorPod.Annotations = map[string]string{
		kubetypes.ConfigSourceAnnotationKey: "api",
		kubetypes.ConfigMirrorAnnotationKey: "mirror",
	}

	t.Logf("Orphaned pods should be removed.")
	m.apiStatusVersions[kubetypes.MirrorPodUID(testPod.UID)] = 100
	m.apiStatusVersions[kubetypes.MirrorPodUID(mirrorPod.UID)] = 200
	m.syncBatch()
	if _, ok := m.apiStatusVersions[kubetypes.MirrorPodUID(testPod.UID)]; ok {
		t.Errorf("Should have cleared status for testPod")
	}
	if _, ok := m.apiStatusVersions[kubetypes.MirrorPodUID(mirrorPod.UID)]; ok {
		t.Errorf("Should have cleared status for mirrorPod")
	}

	t.Logf("Non-orphaned pods should not be removed.")
	m.SetPodStatus(testPod, getRandomPodStatus())
	m.podManager.AddPod(mirrorPod)
	staticPod := mirrorPod
	staticPod.UID = "static-uid"
	staticPod.Annotations = map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"}
	m.podManager.AddPod(staticPod)
	m.apiStatusVersions[kubetypes.MirrorPodUID(testPod.UID)] = 100
	m.apiStatusVersions[kubetypes.MirrorPodUID(mirrorPod.UID)] = 200
	m.testSyncBatch()
	if _, ok := m.apiStatusVersions[kubetypes.MirrorPodUID(testPod.UID)]; !ok {
		t.Errorf("Should not have cleared status for testPod")
	}
	if _, ok := m.apiStatusVersions[kubetypes.MirrorPodUID(mirrorPod.UID)]; !ok {
		t.Errorf("Should not have cleared status for mirrorPod")
	}
}

func TestReconcilePodStatus(t *testing.T) {
	testPod := getTestPod()
	client := fake.NewSimpleClientset(testPod)
	syncer := newTestManager(client)
	syncer.SetPodStatus(testPod, getRandomPodStatus())
	t.Logf("Call syncBatch directly to test reconcile")
	syncer.syncBatch() // The apiStatusVersions should be set now.
	client.ClearActions()

	podStatus, ok := syncer.GetPodStatus(testPod.UID)
	if !ok {
		t.Fatalf("Should find pod status for pod: %#v", testPod)
	}
	testPod.Status = podStatus

	t.Logf("If the pod status is the same, a reconciliation is not needed and syncBatch should do nothing")
	syncer.podManager.UpdatePod(testPod)
	if syncer.needsReconcile(testPod.UID, podStatus) {
		t.Errorf("Pod status is the same, a reconciliation is not needed")
	}
	syncer.syncBatch()
	verifyActions(t, syncer, []core.Action{})

	// If the pod status is the same and only the timestamp differs in its RFC3339
	// form (lower precision, without nanoseconds), a reconciliation is not needed
	// and syncBatch should do nothing.
	// The StartTime should have been set in SetPodStatus().
	// TODO(random-liu): Remove this later when the API becomes consistent for timestamps.
	t.Logf("syncBatch should do nothing, as a reconciliation is not required")
	normalizedStartTime := testPod.Status.StartTime.Rfc3339Copy()
	testPod.Status.StartTime = &normalizedStartTime
	syncer.podManager.UpdatePod(testPod)
	if syncer.needsReconcile(testPod.UID, podStatus) {
		t.Errorf("Pod status only differs in timestamp format, a reconciliation is not needed")
	}
	syncer.syncBatch()
	verifyActions(t, syncer, []core.Action{})

	t.Logf("If the pod status is different, a reconciliation is needed and syncBatch should trigger an update")
	testPod.Status = getRandomPodStatus()
	syncer.podManager.UpdatePod(testPod)
	if !syncer.needsReconcile(testPod.UID, podStatus) {
		t.Errorf("Pod status is different, a reconciliation is needed")
	}
	syncer.syncBatch()
	verifyActions(t, syncer, []core.Action{getAction(), updateAction()})
}

func expectPodStatus(t *testing.T, m *manager, pod *v1.Pod) v1.PodStatus {
	status, ok := m.GetPodStatus(pod.UID)
	if !ok {
		t.Fatalf("Expected PodStatus for %q not found", pod.UID)
	}
	return status
}

func TestDeletePods(t *testing.T) {
	pod := getTestPod()
	t.Logf("Set the deletion timestamp.")
	pod.DeletionTimestamp = new(metav1.Time)
	client := fake.NewSimpleClientset(pod)
	m := newTestManager(client)
	m.podManager.AddPod(pod)

	status := getRandomPodStatus()
	now := metav1.Now()
	status.StartTime = &now
	m.SetPodStatus(pod, status)

	t.Logf("Expect to see a delete action.")
	verifyActions(t, m, []core.Action{getAction(), updateAction(), deleteAction()})
}

func TestDoNotDeleteMirrorPods(t *testing.T) {
	staticPod := getTestPod()
	staticPod.Annotations = map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"}
	mirrorPod := getTestPod()
	mirrorPod.UID = "mirror-12345678"
	mirrorPod.Annotations = map[string]string{
		kubetypes.ConfigSourceAnnotationKey: "api",
		kubetypes.ConfigMirrorAnnotationKey: "mirror",
	}
	t.Logf("Set the deletion timestamp.")
	mirrorPod.DeletionTimestamp = new(metav1.Time)
	client := fake.NewSimpleClientset(mirrorPod)
	m := newTestManager(client)
	m.podManager.AddPod(staticPod)
	m.podManager.AddPod(mirrorPod)
	t.Logf("Verify setup.")
	assert.True(t, kubepod.IsStaticPod(staticPod), "SetUp error: staticPod")
	assert.True(t, kubepod.IsMirrorPod(mirrorPod), "SetUp error: mirrorPod")
	assert.Equal(t, m.podManager.TranslatePodUID(mirrorPod.UID), kubetypes.ResolvedPodUID(staticPod.UID))

	status := getRandomPodStatus()
	now := metav1.Now()
	status.StartTime = &now
	m.SetPodStatus(staticPod, status)

	t.Logf("Expect not to see a delete action.")
	verifyActions(t, m, []core.Action{getAction(), updateAction()})
}

func getAction() core.GetAction {
	return core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}}
}

func updateAction() core.UpdateAction {
	return core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}}
}

func deleteAction() core.DeleteAction {
	return core.DeleteActionImpl{ActionImpl: core.ActionImpl{Verb: "delete", Resource: schema.GroupVersionResource{Resource: "pods"}}}
}
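
The three helpers above only fix the verb, resource, and subresource that a test expects; the fake clientset records what the manager actually did. As a rough illustration only (the helper name expectVerbSequence is invented here, it is not part of the vendored file, and it assumes the test file's existing testing, fake, and core imports), the recorded and expected actions can be compared field by field through the k8s.io/client-go/testing Action interface:

func expectVerbSequence(t *testing.T, client *fake.Clientset, expected []core.Action) {
	// Actions() returns every call the fake clientset has recorded so far.
	actions := client.Actions()
	if len(actions) != len(expected) {
		t.Fatalf("expected %d actions, got %d: %+v", len(expected), len(actions), actions)
	}
	for i, action := range actions {
		e := expected[i]
		// Compare only the coarse identity of each call, as the helpers above do.
		if action.GetVerb() != e.GetVerb() ||
			action.GetResource() != e.GetResource() ||
			action.GetSubresource() != e.GetSubresource() {
			t.Errorf("action %d: expected %+v, got %+v", i, e, action)
		}
	}
	// Clear the recording so the next step of a test starts fresh.
	client.ClearActions()
}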
26
vendor/k8s.io/kubernetes/pkg/kubelet/status/testing/BUILD
generated
vendored
Normal file
@ -0,0 +1,26 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["fake_pod_deletion_safety.go"],
    importpath = "k8s.io/kubernetes/pkg/kubelet/status/testing",
    deps = ["//vendor/k8s.io/api/core/v1:go_default_library"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
25
vendor/k8s.io/kubernetes/pkg/kubelet/status/testing/fake_pod_deletion_safety.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testing

import "k8s.io/api/core/v1"

type FakePodDeletionSafetyProvider struct{}

func (f *FakePodDeletionSafetyProvider) PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bool {
	return true
}
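
The fake provider always answers true, so status-manager tests can finish pod deletion without waiting on real resource cleanup. A rough wiring sketch follows; the NewManager signature shown is an assumption about the parent status package, not something defined in this file:

// Hypothetical test wiring (assumed constructor, shown for illustration only):
//
//	provider := &testing.FakePodDeletionSafetyProvider{}
//	manager := status.NewManager(kubeClient, podManager, provider)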