Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 02:43:36 +00:00)
vendor update for CSI 0.3.0
vendor/k8s.io/kubernetes/pkg/kubelet/status/BUILD (generated, vendored; 1 line changed)

@@ -19,6 +19,7 @@ go_library(
         "//pkg/kubelet/pod:go_default_library",
         "//pkg/kubelet/types:go_default_library",
         "//pkg/kubelet/util/format:go_default_library",
+        "//pkg/util/pod:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
vendor/k8s.io/kubernetes/pkg/kubelet/status/generate.go (generated, vendored; 58 lines changed)

@@ -29,15 +29,16 @@ const (
     PodCompleted = "PodCompleted"
     ContainersNotReady = "ContainersNotReady"
     ContainersNotInitialized = "ContainersNotInitialized"
+    ReadinessGatesNotReady = "ReadinessGatesNotReady"
 )
 
-// GeneratePodReadyCondition returns ready condition if all containers in a pod are ready, else it
-// returns an unready condition.
-func GeneratePodReadyCondition(spec *v1.PodSpec, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
+// GenerateContainersReadyCondition returns the status of "ContainersReady" condition.
+// The status of "ContainersReady" condition is true when all containers are ready.
+func GenerateContainersReadyCondition(spec *v1.PodSpec, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
     // Find if all containers are ready or not.
     if containerStatuses == nil {
         return v1.PodCondition{
-            Type:   v1.PodReady,
+            Type:   v1.ContainersReady,
             Status: v1.ConditionFalse,
             Reason: UnknownContainerStatuses,
         }
@@ -57,12 +58,13 @@ func GeneratePodReadyCondition(spec *v1.PodSpec, containerStatuses []v1.Containe
     // If all containers are known and succeeded, just return PodCompleted.
     if podPhase == v1.PodSucceeded && len(unknownContainers) == 0 {
         return v1.PodCondition{
-            Type:   v1.PodReady,
+            Type:   v1.ContainersReady,
             Status: v1.ConditionFalse,
             Reason: PodCompleted,
         }
     }
 
     // Generate message for containers in unknown condition.
     unreadyMessages := []string{}
     if len(unknownContainers) > 0 {
         unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with unknown status: %s", unknownContainers))
@@ -73,13 +75,57 @@ func GeneratePodReadyCondition(spec *v1.PodSpec, containerStatuses []v1.Containe
     unreadyMessage := strings.Join(unreadyMessages, ", ")
     if unreadyMessage != "" {
         return v1.PodCondition{
-            Type:    v1.PodReady,
+            Type:    v1.ContainersReady,
             Status:  v1.ConditionFalse,
             Reason:  ContainersNotReady,
             Message: unreadyMessage,
         }
     }
 
     return v1.PodCondition{
-        Type:   v1.PodReady,
+        Type:   v1.ContainersReady,
         Status: v1.ConditionTrue,
     }
 }
 
+// GeneratePodReadyCondition returns "Ready" condition of a pod.
+// The status of "Ready" condition is "True", if all containers in a pod are ready
+// AND all matching conditions specified in the ReadinessGates have status equal to "True".
+func GeneratePodReadyCondition(spec *v1.PodSpec, conditions []v1.PodCondition, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
+    containersReady := GenerateContainersReadyCondition(spec, containerStatuses, podPhase)
+    // If the status of ContainersReady is not True, return the same status, reason and message as ContainersReady.
+    if containersReady.Status != v1.ConditionTrue {
+        return v1.PodCondition{
+            Type:    v1.PodReady,
+            Status:  containersReady.Status,
+            Reason:  containersReady.Reason,
+            Message: containersReady.Message,
+        }
+    }
+
+    // Evaluate corresponding conditions specified in readiness gate
+    // Generate message if any readiness gate is not satisfied.
+    unreadyMessages := []string{}
+    for _, rg := range spec.ReadinessGates {
+        _, c := podutil.GetPodConditionFromList(conditions, rg.ConditionType)
+        if c == nil {
+            unreadyMessages = append(unreadyMessages, fmt.Sprintf("corresponding condition of pod readiness gate %q does not exist.", string(rg.ConditionType)))
+        } else if c.Status != v1.ConditionTrue {
+            unreadyMessages = append(unreadyMessages, fmt.Sprintf("the status of pod readiness gate %q is not \"True\", but %v", string(rg.ConditionType), c.Status))
+        }
+    }
+
+    // Set "Ready" condition to "False" if any readiness gate is not ready.
+    if len(unreadyMessages) != 0 {
+        unreadyMessage := strings.Join(unreadyMessages, ", ")
+        return v1.PodCondition{
+            Type:    v1.PodReady,
+            Status:  v1.ConditionFalse,
+            Reason:  ReadinessGatesNotReady,
+            Message: unreadyMessage,
+        }
+    }
+
+    return v1.PodCondition{
+        Type:   v1.PodReady,
+        Status: v1.ConditionTrue,
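Note on the split above: the kubelet now derives "ContainersReady" from container state alone and then folds the readiness gates into "Ready". Below is a minimal, self-contained sketch of the gate-evaluation half, assuming only the public k8s.io/api types are available; podutil.GetPodConditionFromList is internal to Kubernetes, so the lookup helper here is a local stand-in, and the reason strings simply mirror the constants introduced in this diff.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
)

// findCondition is a local stand-in for podutil.GetPodConditionFromList,
// which is internal to k8s.io/kubernetes and not imported here.
func findCondition(conditions []v1.PodCondition, t v1.PodConditionType) *v1.PodCondition {
    for i := range conditions {
        if conditions[i].Type == t {
            return &conditions[i]
        }
    }
    return nil
}

// readyWithGates mirrors the logic of GeneratePodReadyCondition above:
// the pod is Ready only if containersReady is True and every readiness
// gate listed in the spec has a matching condition with status True.
func readyWithGates(spec *v1.PodSpec, conditions []v1.PodCondition, containersReady v1.ConditionStatus) v1.PodCondition {
    if containersReady != v1.ConditionTrue {
        return v1.PodCondition{Type: v1.PodReady, Status: containersReady, Reason: "ContainersNotReady"}
    }
    for _, rg := range spec.ReadinessGates {
        c := findCondition(conditions, rg.ConditionType)
        if c == nil || c.Status != v1.ConditionTrue {
            return v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionFalse, Reason: "ReadinessGatesNotReady"}
        }
    }
    return v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
}

func main() {
    spec := &v1.PodSpec{
        ReadinessGates: []v1.PodReadinessGate{{ConditionType: "example.com/feature"}},
    }

    // Containers are ready, but the gate condition has not been set yet.
    fmt.Println(readyWithGates(spec, nil, v1.ConditionTrue).Reason) // ReadinessGatesNotReady

    // Once a controller sets the gate condition to True, the pod becomes Ready.
    conds := []v1.PodCondition{{Type: "example.com/feature", Status: v1.ConditionTrue}}
    fmt.Println(readyWithGates(spec, conds, v1.ConditionTrue).Status) // True
}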
vendor/k8s.io/kubernetes/pkg/kubelet/status/generate_test.go (generated, vendored; 237 lines changed)

@@ -24,24 +24,24 @@ import (
     "k8s.io/api/core/v1"
 )
 
-func TestGeneratePodReadyCondition(t *testing.T) {
+func TestGenerateContainersReadyCondition(t *testing.T) {
     tests := []struct {
         spec *v1.PodSpec
         containerStatuses []v1.ContainerStatus
         podPhase v1.PodPhase
-        expected v1.PodCondition
+        expectReady v1.PodCondition
     }{
         {
             spec: nil,
             containerStatuses: nil,
             podPhase: v1.PodRunning,
-            expected: getReadyCondition(false, UnknownContainerStatuses, ""),
+            expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, UnknownContainerStatuses, ""),
         },
         {
             spec: &v1.PodSpec{},
             containerStatuses: []v1.ContainerStatus{},
             podPhase: v1.PodRunning,
-            expected: getReadyCondition(true, "", ""),
+            expectReady: getPodCondition(v1.ContainersReady, v1.ConditionTrue, "", ""),
         },
         {
             spec: &v1.PodSpec{
@@ -51,7 +51,7 @@ func TestGeneratePodReadyCondition(t *testing.T) {
             },
             containerStatuses: []v1.ContainerStatus{},
             podPhase: v1.PodRunning,
-            expected: getReadyCondition(false, ContainersNotReady, "containers with unknown status: [1234]"),
+            expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, ContainersNotReady, "containers with unknown status: [1234]"),
         },
         {
             spec: &v1.PodSpec{
@@ -64,8 +64,8 @@ func TestGeneratePodReadyCondition(t *testing.T) {
                 getReadyStatus("1234"),
                 getReadyStatus("5678"),
             },
-            podPhase: v1.PodRunning,
-            expected: getReadyCondition(true, "", ""),
+            podPhase: v1.PodRunning,
+            expectReady: getPodCondition(v1.ContainersReady, v1.ConditionTrue, "", ""),
         },
         {
             spec: &v1.PodSpec{
@@ -77,8 +77,8 @@ func TestGeneratePodReadyCondition(t *testing.T) {
             containerStatuses: []v1.ContainerStatus{
                 getReadyStatus("1234"),
             },
-            podPhase: v1.PodRunning,
-            expected: getReadyCondition(false, ContainersNotReady, "containers with unknown status: [5678]"),
+            podPhase: v1.PodRunning,
+            expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, ContainersNotReady, "containers with unknown status: [5678]"),
         },
         {
             spec: &v1.PodSpec{
@@ -91,8 +91,8 @@ func TestGeneratePodReadyCondition(t *testing.T) {
                 getReadyStatus("1234"),
                 getNotReadyStatus("5678"),
             },
-            podPhase: v1.PodRunning,
-            expected: getReadyCondition(false, ContainersNotReady, "containers with unready status: [5678]"),
+            podPhase: v1.PodRunning,
+            expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, ContainersNotReady, "containers with unready status: [5678]"),
         },
         {
             spec: &v1.PodSpec{
@@ -103,15 +103,212 @@ func TestGeneratePodReadyCondition(t *testing.T) {
             containerStatuses: []v1.ContainerStatus{
                 getNotReadyStatus("1234"),
             },
-            podPhase: v1.PodSucceeded,
-            expected: getReadyCondition(false, PodCompleted, ""),
+            podPhase: v1.PodSucceeded,
+            expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, PodCompleted, ""),
         },
     }
 
     for i, test := range tests {
-        condition := GeneratePodReadyCondition(test.spec, test.containerStatuses, test.podPhase)
-        if !reflect.DeepEqual(condition, test.expected) {
-            t.Errorf("On test case %v, expected:\n%+v\ngot\n%+v\n", i, test.expected, condition)
+        ready := GenerateContainersReadyCondition(test.spec, test.containerStatuses, test.podPhase)
+        if !reflect.DeepEqual(ready, test.expectReady) {
+            t.Errorf("On test case %v, expectReady:\n%+v\ngot\n%+v\n", i, test.expectReady, ready)
         }
     }
 }
+
+func TestGeneratePodReadyCondition(t *testing.T) {
+    tests := []struct {
+        spec *v1.PodSpec
+        conditions []v1.PodCondition
+        containerStatuses []v1.ContainerStatus
+        podPhase v1.PodPhase
+        expectReady v1.PodCondition
+    }{
+        {
+            spec: nil,
+            conditions: nil,
+            containerStatuses: nil,
+            podPhase: v1.PodRunning,
+            expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, UnknownContainerStatuses, ""),
+        },
+        {
+            spec: &v1.PodSpec{},
+            conditions: nil,
+            containerStatuses: []v1.ContainerStatus{},
+            podPhase: v1.PodRunning,
+            expectReady: getPodCondition(v1.PodReady, v1.ConditionTrue, "", ""),
+        },
+        {
+            spec: &v1.PodSpec{
+                Containers: []v1.Container{
+                    {Name: "1234"},
+                },
+            },
+            conditions: nil,
+            containerStatuses: []v1.ContainerStatus{},
+            podPhase: v1.PodRunning,
+            expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ContainersNotReady, "containers with unknown status: [1234]"),
+        },
+        {
+            spec: &v1.PodSpec{
+                Containers: []v1.Container{
+                    {Name: "1234"},
+                    {Name: "5678"},
+                },
+            },
+            conditions: nil,
+            containerStatuses: []v1.ContainerStatus{
+                getReadyStatus("1234"),
+                getReadyStatus("5678"),
+            },
+            podPhase: v1.PodRunning,
+            expectReady: getPodCondition(v1.PodReady, v1.ConditionTrue, "", ""),
+        },
+        {
+            spec: &v1.PodSpec{
+                Containers: []v1.Container{
+                    {Name: "1234"},
+                    {Name: "5678"},
+                },
+            },
+            conditions: nil,
+            containerStatuses: []v1.ContainerStatus{
+                getReadyStatus("1234"),
+            },
+            podPhase: v1.PodRunning,
+            expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ContainersNotReady, "containers with unknown status: [5678]"),
+        },
+        {
+            spec: &v1.PodSpec{
+                Containers: []v1.Container{
+                    {Name: "1234"},
+                    {Name: "5678"},
+                },
+            },
+            conditions: nil,
+            containerStatuses: []v1.ContainerStatus{
+                getReadyStatus("1234"),
+                getNotReadyStatus("5678"),
+            },
+            podPhase: v1.PodRunning,
+            expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ContainersNotReady, "containers with unready status: [5678]"),
+        },
+        {
+            spec: &v1.PodSpec{
+                Containers: []v1.Container{
+                    {Name: "1234"},
+                },
+            },
+            conditions: nil,
+            containerStatuses: []v1.ContainerStatus{
+                getNotReadyStatus("1234"),
+            },
+            podPhase: v1.PodSucceeded,
+            expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, PodCompleted, ""),
+        },
+        {
+            spec: &v1.PodSpec{
+                ReadinessGates: []v1.PodReadinessGate{
+                    {ConditionType: v1.PodConditionType("gate1")},
+                },
+            },
+            conditions: nil,
+            containerStatuses: []v1.ContainerStatus{},
+            podPhase: v1.PodRunning,
+            expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ReadinessGatesNotReady, `corresponding condition of pod readiness gate "gate1" does not exist.`),
+        },
+        {
+            spec: &v1.PodSpec{
+                ReadinessGates: []v1.PodReadinessGate{
+                    {ConditionType: v1.PodConditionType("gate1")},
+                },
+            },
+            conditions: []v1.PodCondition{
+                getPodCondition("gate1", v1.ConditionFalse, "", ""),
+            },
+            containerStatuses: []v1.ContainerStatus{},
+            podPhase: v1.PodRunning,
+            expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ReadinessGatesNotReady, `the status of pod readiness gate "gate1" is not "True", but False`),
+        },
+        {
+            spec: &v1.PodSpec{
+                ReadinessGates: []v1.PodReadinessGate{
+                    {ConditionType: v1.PodConditionType("gate1")},
+                },
+            },
+            conditions: []v1.PodCondition{
+                getPodCondition("gate1", v1.ConditionTrue, "", ""),
+            },
+            containerStatuses: []v1.ContainerStatus{},
+            podPhase: v1.PodRunning,
+            expectReady: getPodCondition(v1.PodReady, v1.ConditionTrue, "", ""),
+        },
+        {
+            spec: &v1.PodSpec{
+                ReadinessGates: []v1.PodReadinessGate{
+                    {ConditionType: v1.PodConditionType("gate1")},
+                    {ConditionType: v1.PodConditionType("gate2")},
+                },
+            },
+            conditions: []v1.PodCondition{
+                getPodCondition("gate1", v1.ConditionTrue, "", ""),
+            },
+            containerStatuses: []v1.ContainerStatus{},
+            podPhase: v1.PodRunning,
+            expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ReadinessGatesNotReady, `corresponding condition of pod readiness gate "gate2" does not exist.`),
+        },
+        {
+            spec: &v1.PodSpec{
+                ReadinessGates: []v1.PodReadinessGate{
+                    {ConditionType: v1.PodConditionType("gate1")},
+                    {ConditionType: v1.PodConditionType("gate2")},
+                },
+            },
+            conditions: []v1.PodCondition{
+                getPodCondition("gate1", v1.ConditionTrue, "", ""),
+                getPodCondition("gate2", v1.ConditionFalse, "", ""),
+            },
+            containerStatuses: []v1.ContainerStatus{},
+            podPhase: v1.PodRunning,
+            expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ReadinessGatesNotReady, `the status of pod readiness gate "gate2" is not "True", but False`),
+        },
+        {
+            spec: &v1.PodSpec{
+                ReadinessGates: []v1.PodReadinessGate{
+                    {ConditionType: v1.PodConditionType("gate1")},
+                    {ConditionType: v1.PodConditionType("gate2")},
+                },
+            },
+            conditions: []v1.PodCondition{
+                getPodCondition("gate1", v1.ConditionTrue, "", ""),
+                getPodCondition("gate2", v1.ConditionTrue, "", ""),
+            },
+            containerStatuses: []v1.ContainerStatus{},
+            podPhase: v1.PodRunning,
+            expectReady: getPodCondition(v1.PodReady, v1.ConditionTrue, "", ""),
+        },
+        {
+            spec: &v1.PodSpec{
+                Containers: []v1.Container{
+                    {Name: "1234"},
+                },
+                ReadinessGates: []v1.PodReadinessGate{
+                    {ConditionType: v1.PodConditionType("gate1")},
+                },
+            },
+            conditions: []v1.PodCondition{
+                getPodCondition("gate1", v1.ConditionTrue, "", ""),
+            },
+            containerStatuses: []v1.ContainerStatus{getNotReadyStatus("1234")},
+            podPhase: v1.PodRunning,
+            expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ContainersNotReady, "containers with unready status: [1234]"),
+        },
+    }
+
+    for i, test := range tests {
+        ready := GeneratePodReadyCondition(test.spec, test.conditions, test.containerStatuses, test.podPhase)
+        if !reflect.DeepEqual(ready, test.expectReady) {
+            t.Errorf("On test case %v, expectReady:\n%+v\ngot\n%+v\n", i, test.expectReady, ready)
+        }
+    }
+}
@@ -220,13 +417,9 @@ func TestGeneratePodInitializedCondition(t *testing.T) {
     }
 }
 
-func getReadyCondition(ready bool, reason, message string) v1.PodCondition {
-    status := v1.ConditionFalse
-    if ready {
-        status = v1.ConditionTrue
-    }
+func getPodCondition(conditionType v1.PodConditionType, status v1.ConditionStatus, reason, message string) v1.PodCondition {
     return v1.PodCondition{
-        Type:    v1.PodReady,
+        Type:    conditionType,
         Status:  status,
         Reason:  reason,
         Message: message,
vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go (generated, vendored; 139 lines changed)

@@ -37,6 +37,7 @@ import (
     kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
     kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
     "k8s.io/kubernetes/pkg/kubelet/util/format"
+    statusutil "k8s.io/kubernetes/pkg/util/pod"
 )
 
 // A wrapper around v1.PodStatus that includes a version to enforce that stale pod statuses are
@@ -121,11 +122,22 @@ func NewManager(kubeClient clientset.Interface, podManager kubepod.Manager, podD
     }
 }
 
-// isStatusEqual returns true if the given pod statuses are equal, false otherwise.
+// isPodStatusByKubeletEqual returns true if the given pod statuses are equal when non-kubelet-owned
+// pod conditions are excluded.
 // This method normalizes the status before comparing so as to make sure that meaningless
 // changes will be ignored.
-func isStatusEqual(oldStatus, status *v1.PodStatus) bool {
-    return apiequality.Semantic.DeepEqual(status, oldStatus)
+func isPodStatusByKubeletEqual(oldStatus, status *v1.PodStatus) bool {
+    oldCopy := oldStatus.DeepCopy()
+    for _, c := range status.Conditions {
+        if kubetypes.PodConditionByKubelet(c.Type) {
+            _, oc := podutil.GetPodCondition(oldCopy, c.Type)
+            if oc == nil || oc.Status != c.Status {
+                return false
+            }
+        }
+    }
+    oldCopy.Conditions = status.Conditions
+    return apiequality.Semantic.DeepEqual(oldCopy, status)
 }
 
 func (m *manager) Start() {
@@ -162,6 +174,13 @@ func (m *manager) GetPodStatus(uid types.UID) (v1.PodStatus, bool) {
 func (m *manager) SetPodStatus(pod *v1.Pod, status v1.PodStatus) {
     m.podStatusesLock.Lock()
     defer m.podStatusesLock.Unlock()
+
+    for _, c := range pod.Status.Conditions {
+        if !kubetypes.PodConditionByKubelet(c.Type) {
+            glog.Errorf("Kubelet is trying to update pod condition %q for pod %q. "+
+                "But it is not owned by kubelet.", string(c.Type), format.Pod(pod))
+        }
+    }
     // Make sure we're caching a deep copy.
     status = *status.DeepCopy()
 
@@ -207,22 +226,24 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai
     containerStatus, _, _ = findContainerStatus(&status, containerID.String())
     containerStatus.Ready = ready
 
-    // Update pod condition.
-    readyConditionIndex := -1
-    for i, condition := range status.Conditions {
-        if condition.Type == v1.PodReady {
-            readyConditionIndex = i
-            break
-        }
-    }
-    readyCondition := GeneratePodReadyCondition(&pod.Spec, status.ContainerStatuses, status.Phase)
-    if readyConditionIndex != -1 {
-        status.Conditions[readyConditionIndex] = readyCondition
-    } else {
-        glog.Warningf("PodStatus missing PodReady condition: %+v", status)
-        status.Conditions = append(status.Conditions, readyCondition)
-    }
-
+    // updateConditionFunc updates the corresponding type of condition
+    updateConditionFunc := func(conditionType v1.PodConditionType, condition v1.PodCondition) {
+        conditionIndex := -1
+        for i, condition := range status.Conditions {
+            if condition.Type == conditionType {
+                conditionIndex = i
+                break
+            }
+        }
+        if conditionIndex != -1 {
+            status.Conditions[conditionIndex] = condition
+        } else {
+            glog.Warningf("PodStatus missing %s type condition: %+v", conditionType, status)
+            status.Conditions = append(status.Conditions, condition)
+        }
+    }
+    updateConditionFunc(v1.PodReady, GeneratePodReadyCondition(&pod.Spec, status.Conditions, status.ContainerStatuses, status.Phase))
+    updateConditionFunc(v1.ContainersReady, GenerateContainersReadyCondition(&pod.Spec, status.ContainerStatuses, status.Phase))
     m.updateStatusInternal(pod, status, false)
 }
 
@@ -316,26 +337,13 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp
     }
 
     // Set ReadyCondition.LastTransitionTime.
-    if _, readyCondition := podutil.GetPodCondition(&status, v1.PodReady); readyCondition != nil {
-        // Need to set LastTransitionTime.
-        lastTransitionTime := metav1.Now()
-        _, oldReadyCondition := podutil.GetPodCondition(&oldStatus, v1.PodReady)
-        if oldReadyCondition != nil && readyCondition.Status == oldReadyCondition.Status {
-            lastTransitionTime = oldReadyCondition.LastTransitionTime
-        }
-        readyCondition.LastTransitionTime = lastTransitionTime
-    }
+    updateLastTransitionTime(&status, &oldStatus, v1.PodReady)
 
     // Set InitializedCondition.LastTransitionTime.
-    if _, initCondition := podutil.GetPodCondition(&status, v1.PodInitialized); initCondition != nil {
-        // Need to set LastTransitionTime.
-        lastTransitionTime := metav1.Now()
-        _, oldInitCondition := podutil.GetPodCondition(&oldStatus, v1.PodInitialized)
-        if oldInitCondition != nil && initCondition.Status == oldInitCondition.Status {
-            lastTransitionTime = oldInitCondition.LastTransitionTime
-        }
-        initCondition.LastTransitionTime = lastTransitionTime
-    }
+    updateLastTransitionTime(&status, &oldStatus, v1.PodInitialized)
+
+    // Set PodScheduledCondition.LastTransitionTime.
+    updateLastTransitionTime(&status, &oldStatus, v1.PodScheduled)
 
     // ensure that the start time does not change across updates.
     if oldStatus.StartTime != nil && !oldStatus.StartTime.IsZero() {
@@ -349,7 +357,7 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp
     normalizeStatus(pod, &status)
     // The intent here is to prevent concurrent updates to a pod's status from
     // clobbering each other so the phase of a pod progresses monotonically.
-    if isCached && isStatusEqual(&cachedStatus.status, &status) && !forceUpdate {
+    if isCached && isPodStatusByKubeletEqual(&cachedStatus.status, &status) && !forceUpdate {
         glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", format.Pod(pod), status)
         return false // No new status.
     }
@@ -376,6 +384,21 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp
     }
 }
 
+// updateLastTransitionTime updates the LastTransitionTime of a pod condition.
+func updateLastTransitionTime(status, oldStatus *v1.PodStatus, conditionType v1.PodConditionType) {
+    _, condition := podutil.GetPodCondition(status, conditionType)
+    if condition == nil {
+        return
+    }
+    // Need to set LastTransitionTime.
+    lastTransitionTime := metav1.Now()
+    _, oldCondition := podutil.GetPodCondition(oldStatus, conditionType)
+    if oldCondition != nil && condition.Status == oldCondition.Status {
+        lastTransitionTime = oldCondition.LastTransitionTime
+    }
+    condition.LastTransitionTime = lastTransitionTime
+}
+
 // deletePodStatus simply removes the given pod from the status cache.
 func (m *manager) deletePodStatus(uid types.UID) {
     m.podStatusesLock.Lock()
@@ -467,9 +490,10 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
         m.deletePodStatus(uid)
         return
     }
-    pod.Status = status.status
-    // TODO: handle conflict as a retry, make that easier too.
-    newPod, err := m.kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(pod)
+
+    oldStatus := pod.Status.DeepCopy()
+    newPod, patchBytes, err := statusutil.PatchPodStatus(m.kubeClient, pod.Namespace, pod.Name, *oldStatus, mergePodStatus(*oldStatus, status.status))
+    glog.V(3).Infof("Patch status for pod %q with %q", format.Pod(pod), patchBytes)
     if err != nil {
         glog.Warningf("Failed to update status for pod %q: %v", format.Pod(pod), err)
         return
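Aside on the hunk above: the status manager stops issuing a full UpdateStatus and instead sends a strategic merge patch of the pod's status subresource via statusutil.PatchPodStatus. That helper lives in the internal pkg/util/pod package, so the sketch below only illustrates the idea by rebuilding an equivalent patch with the public strategicpatch package; the exact bytes the kubelet sends may differ.

package main

import (
    "encoding/json"
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/strategicpatch"
)

// buildStatusPatch marshals the pod with the old status and with the new
// status, then diffs the two into a strategic merge patch, which is roughly
// what statusutil.PatchPodStatus prepares before calling Patch on the
// "status" subresource.
func buildStatusPatch(name string, oldStatus, newStatus v1.PodStatus) ([]byte, error) {
    oldData, err := json.Marshal(v1.Pod{
        ObjectMeta: metav1.ObjectMeta{Name: name},
        Status:     oldStatus,
    })
    if err != nil {
        return nil, err
    }
    newData, err := json.Marshal(v1.Pod{
        ObjectMeta: metav1.ObjectMeta{Name: name},
        Status:     newStatus,
    })
    if err != nil {
        return nil, err
    }
    return strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Pod{})
}

func main() {
    oldStatus := v1.PodStatus{Phase: v1.PodPending}
    newStatus := v1.PodStatus{Phase: v1.PodRunning}
    patch, err := buildStatusPatch("foo", oldStatus, newStatus)
    if err != nil {
        panic(err)
    }
    // Only the changed fields end up in the patch, e.g. {"status":{"phase":"Running"}}.
    fmt.Println(string(patch))
}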
@@ -544,7 +568,7 @@ func (m *manager) needsReconcile(uid types.UID, status v1.PodStatus) bool {
     podStatus := pod.Status.DeepCopy()
     normalizeStatus(pod, podStatus)
 
-    if isStatusEqual(podStatus, &status) {
+    if isPodStatusByKubeletEqual(podStatus, &status) {
         // If the status from the source is the same with the cached status,
         // reconcile is not needed. Just return.
         return false
@@ -557,7 +581,7 @@ func (m *manager) needsReconcile(uid types.UID, status v1.PodStatus) bool {
 
 // We add this function, because apiserver only supports *RFC3339* now, which means that the timestamp returned by
 // apiserver has no nanosecond information. However, the timestamp returned by metav1.Now() contains nanosecond,
-// so when we do comparison between status from apiserver and cached status, isStatusEqual() will always return false.
+// so when we do comparison between status from apiserver and cached status, isPodStatusByKubeletEqual() will always return false.
 // There is related issue #15262 and PR #15263 about this.
 // In fact, the best way to solve this is to do it on api side. However, for now, we normalize the status locally in
 // kubelet temporarily.
@@ -611,3 +635,36 @@ func normalizeStatus(pod *v1.Pod, status *v1.PodStatus) *v1.PodStatus {
     kubetypes.SortInitContainerStatuses(pod, status.InitContainerStatuses)
     return status
 }
+
+// mergePodStatus merges oldPodStatus and newPodStatus where pod conditions
+// not owned by kubelet is preserved from oldPodStatus
+func mergePodStatus(oldPodStatus, newPodStatus v1.PodStatus) v1.PodStatus {
+    podConditions := []v1.PodCondition{}
+    for _, c := range oldPodStatus.Conditions {
+        if !kubetypes.PodConditionByKubelet(c.Type) {
+            podConditions = append(podConditions, c)
+        }
+    }
+
+    for _, c := range newPodStatus.Conditions {
+        if kubetypes.PodConditionByKubelet(c.Type) {
+            podConditions = append(podConditions, c)
+        }
+    }
+    newPodStatus.Conditions = podConditions
+    return newPodStatus
+}
+
+// NeedToReconcilePodReadiness returns if the pod "Ready" condition need to be reconcile
+func NeedToReconcilePodReadiness(pod *v1.Pod) bool {
+    if len(pod.Spec.ReadinessGates) == 0 {
+        return false
+    }
+    podReadyCondition := GeneratePodReadyCondition(&pod.Spec, pod.Status.Conditions, pod.Status.ContainerStatuses, pod.Status.Phase)
+    i, curCondition := podutil.GetPodConditionFromList(pod.Status.Conditions, v1.PodReady)
+    // Only reconcile if "Ready" condition is present
+    if i >= 0 && curCondition.Status != podReadyCondition.Status {
+        return true
+    }
+    return false
+}
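mergePodStatus above is what lets conditions written by other controllers (for example readiness-gate conditions) survive a kubelet status patch: kubelet-owned condition types are taken from the new status, everything else is carried over from the old one. A small self-contained sketch of that merge follows, with a locally assumed ownedByKubelet predicate standing in for the internal kubetypes.PodConditionByKubelet.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
)

// ownedByKubelet is a stand-in for kubetypes.PodConditionByKubelet, which is
// internal to k8s.io/kubernetes; the exact set of owned types lives there.
func ownedByKubelet(t v1.PodConditionType) bool {
    switch t {
    case v1.PodReady, v1.ContainersReady, v1.PodInitialized, v1.PodScheduled:
        return true
    }
    return false
}

// merge keeps kubelet-owned conditions from the new status and everything
// else (such as readiness-gate conditions set by external controllers) from
// the old status, mirroring the mergePodStatus function in the diff above.
func merge(oldStatus, newStatus v1.PodStatus) v1.PodStatus {
    conditions := []v1.PodCondition{}
    for _, c := range oldStatus.Conditions {
        if !ownedByKubelet(c.Type) {
            conditions = append(conditions, c)
        }
    }
    for _, c := range newStatus.Conditions {
        if ownedByKubelet(c.Type) {
            conditions = append(conditions, c)
        }
    }
    newStatus.Conditions = conditions
    return newStatus
}

func main() {
    oldStatus := v1.PodStatus{Conditions: []v1.PodCondition{
        {Type: "example.com/feature", Status: v1.ConditionTrue}, // set by an external controller
        {Type: v1.PodReady, Status: v1.ConditionTrue},
    }}
    newStatus := v1.PodStatus{Conditions: []v1.PodCondition{
        {Type: v1.PodReady, Status: v1.ConditionFalse}, // kubelet's fresher view
    }}
    for _, c := range merge(oldStatus, newStatus).Conditions {
        fmt.Printf("%s=%s\n", c.Type, c.Status)
    }
    // Output:
    // example.com/feature=True
    // Ready=False
}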
vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager_test.go (generated, vendored; 310 lines changed)

@@ -19,6 +19,7 @@ package status
 import (
     "fmt"
     "math/rand"
+    "reflect"
     "strconv"
     "strings"
     "testing"
@@ -48,6 +49,10 @@ import (
 // Generate new instance of test pod with the same initial value.
 func getTestPod() *v1.Pod {
     return &v1.Pod{
+        TypeMeta: metav1.TypeMeta{
+            Kind:       "Pod",
+            APIVersion: "v1",
+        },
         ObjectMeta: metav1.ObjectMeta{
             UID:  "12345678",
             Name: "foo",
@@ -75,7 +80,7 @@ func (m *manager) testSyncBatch() {
 }
 
 func newTestManager(kubeClient clientset.Interface) *manager {
-    podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), kubesecret.NewFakeManager(), kubeconfigmap.NewFakeManager())
+    podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), kubesecret.NewFakeManager(), kubeconfigmap.NewFakeManager(), podtest.NewMockCheckpointManager())
     podManager.AddPod(getTestPod())
     return NewManager(kubeClient, podManager, &statustest.FakePodDeletionSafetyProvider{}).(*manager)
 }
@@ -303,7 +308,7 @@ func TestSyncPod(t *testing.T) {
     testPod := getTestPod()
     syncer.kubeClient = fake.NewSimpleClientset(testPod)
     syncer.SetPodStatus(testPod, getRandomPodStatus())
-    verifyActions(t, syncer, []core.Action{getAction(), updateAction()})
+    verifyActions(t, syncer, []core.Action{getAction(), patchAction()})
 }
 
 func TestSyncPodChecksMismatchedUID(t *testing.T) {
@@ -357,18 +362,18 @@ func TestSyncPodNoDeadlock(t *testing.T) {
     t.Logf("Pod not deleted (success case).")
     ret = getTestPod()
     m.SetPodStatus(pod, getRandomPodStatus())
-    verifyActions(t, m, []core.Action{getAction(), updateAction()})
+    verifyActions(t, m, []core.Action{getAction(), patchAction()})
 
     t.Logf("Pod is terminated, but still running.")
-    pod.DeletionTimestamp = new(metav1.Time)
+    pod.DeletionTimestamp = &metav1.Time{Time: time.Now()}
     m.SetPodStatus(pod, getRandomPodStatus())
-    verifyActions(t, m, []core.Action{getAction(), updateAction()})
+    verifyActions(t, m, []core.Action{getAction(), patchAction()})
 
     t.Logf("Pod is terminated successfully.")
     pod.Status.ContainerStatuses[0].State.Running = nil
     pod.Status.ContainerStatuses[0].State.Terminated = &v1.ContainerStateTerminated{}
     m.SetPodStatus(pod, getRandomPodStatus())
-    verifyActions(t, m, []core.Action{getAction(), updateAction()})
+    verifyActions(t, m, []core.Action{getAction(), patchAction()})
 
     t.Logf("Error case.")
     ret = nil
@@ -392,7 +397,7 @@ func TestStaleUpdates(t *testing.T) {
     t.Logf("sync batch before syncPods pushes latest status, so we should see three statuses in the channel, but only one update")
     m.syncBatch()
     verifyUpdates(t, m, 3)
-    verifyActions(t, m, []core.Action{getAction(), updateAction()})
+    verifyActions(t, m, []core.Action{getAction(), patchAction()})
     t.Logf("Nothing left in the channel to sync")
     verifyActions(t, m, []core.Action{})
 
@@ -406,7 +411,7 @@ func TestStaleUpdates(t *testing.T) {
 
     m.SetPodStatus(pod, status)
     m.syncBatch()
-    verifyActions(t, m, []core.Action{getAction(), updateAction()})
+    verifyActions(t, m, []core.Action{getAction(), patchAction()})
 
     t.Logf("Nothing stuck in the pipe.")
     verifyUpdates(t, m, 0)
@@ -443,10 +448,27 @@ func TestStatusEquality(t *testing.T) {
         }
         normalizeStatus(&pod, &oldPodStatus)
         normalizeStatus(&pod, &podStatus)
-        if !isStatusEqual(&oldPodStatus, &podStatus) {
+        if !isPodStatusByKubeletEqual(&oldPodStatus, &podStatus) {
             t.Fatalf("Order of container statuses should not affect normalized equality.")
         }
     }
+
+    oldPodStatus := podStatus
+    podStatus.Conditions = append(podStatus.Conditions, v1.PodCondition{
+        Type:   v1.PodConditionType("www.example.com/feature"),
+        Status: v1.ConditionTrue,
+    })
+
+    oldPodStatus.Conditions = append(podStatus.Conditions, v1.PodCondition{
+        Type:   v1.PodConditionType("www.example.com/feature"),
+        Status: v1.ConditionFalse,
+    })
+
+    normalizeStatus(&pod, &oldPodStatus)
+    normalizeStatus(&pod, &podStatus)
+    if !isPodStatusByKubeletEqual(&oldPodStatus, &podStatus) {
+        t.Fatalf("Differences in pod condition not owned by kubelet should not affect normalized equality.")
+    }
 }
 
 func TestStatusNormalizationEnforcesMaxBytes(t *testing.T) {
@@ -507,7 +529,7 @@ func TestStaticPod(t *testing.T) {
     t.Logf("Should be able to get the static pod status from status manager")
     retrievedStatus := expectPodStatus(t, m, staticPod)
     normalizeStatus(staticPod, &status)
-    assert.True(t, isStatusEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus)
+    assert.True(t, isPodStatusByKubeletEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus)
 
     t.Logf("Should not sync pod in syncBatch because there is no corresponding mirror pod for the static pod.")
     m.syncBatch()
@@ -520,10 +542,10 @@ func TestStaticPod(t *testing.T) {
 
     t.Logf("Should be able to get the mirror pod status from status manager")
     retrievedStatus, _ = m.GetPodStatus(mirrorPod.UID)
-    assert.True(t, isStatusEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus)
+    assert.True(t, isPodStatusByKubeletEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus)
 
     t.Logf("Should sync pod because the corresponding mirror pod is created")
-    verifyActions(t, m, []core.Action{getAction(), updateAction()})
+    verifyActions(t, m, []core.Action{getAction(), patchAction()})
 
     t.Logf("syncBatch should not sync any pods because nothing is changed.")
     m.testSyncBatch()
@@ -741,7 +763,7 @@ func TestReconcilePodStatus(t *testing.T) {
         t.Errorf("Pod status is different, a reconciliation is needed")
     }
     syncer.syncBatch()
-    verifyActions(t, syncer, []core.Action{getAction(), updateAction()})
+    verifyActions(t, syncer, []core.Action{getAction(), patchAction()})
 }
 
 func expectPodStatus(t *testing.T, m *manager, pod *v1.Pod) v1.PodStatus {
@@ -755,18 +777,16 @@ func expectPodStatus(t *testing.T, m *manager, pod *v1.Pod) v1.PodStatus {
 func TestDeletePods(t *testing.T) {
     pod := getTestPod()
     t.Logf("Set the deletion timestamp.")
-    pod.DeletionTimestamp = new(metav1.Time)
+    pod.DeletionTimestamp = &metav1.Time{Time: time.Now()}
     client := fake.NewSimpleClientset(pod)
     m := newTestManager(client)
     m.podManager.AddPod(pod)
 
     status := getRandomPodStatus()
     now := metav1.Now()
     status.StartTime = &now
     m.SetPodStatus(pod, status)
 
     t.Logf("Expect to see a delete action.")
-    verifyActions(t, m, []core.Action{getAction(), updateAction(), deleteAction()})
+    verifyActions(t, m, []core.Action{getAction(), patchAction(), deleteAction()})
 }
 
 func TestDoNotDeleteMirrorPods(t *testing.T) {
@@ -779,7 +799,7 @@ func TestDoNotDeleteMirrorPods(t *testing.T) {
         kubetypes.ConfigMirrorAnnotationKey: "mirror",
     }
     t.Logf("Set the deletion timestamp.")
-    mirrorPod.DeletionTimestamp = new(metav1.Time)
+    mirrorPod.DeletionTimestamp = &metav1.Time{Time: time.Now()}
     client := fake.NewSimpleClientset(mirrorPod)
     m := newTestManager(client)
     m.podManager.AddPod(staticPod)
@@ -795,7 +815,68 @@ func TestDoNotDeleteMirrorPods(t *testing.T) {
     m.SetPodStatus(staticPod, status)
 
     t.Logf("Expect not to see a delete action.")
-    verifyActions(t, m, []core.Action{getAction(), updateAction()})
+    verifyActions(t, m, []core.Action{getAction(), patchAction()})
 }
 
+func TestUpdateLastTransitionTime(t *testing.T) {
+    old := metav1.Now()
+    for desc, test := range map[string]struct {
+        condition    *v1.PodCondition
+        oldCondition *v1.PodCondition
+        expectUpdate bool
+    }{
+        "should do nothing if no corresponding condition": {
+            expectUpdate: false,
+        },
+        "should update last transition time if no old condition": {
+            condition: &v1.PodCondition{
+                Type:   "test-type",
+                Status: v1.ConditionTrue,
+            },
+            oldCondition: nil,
+            expectUpdate: true,
+        },
+        "should update last transition time if condition is changed": {
+            condition: &v1.PodCondition{
+                Type:   "test-type",
+                Status: v1.ConditionTrue,
+            },
+            oldCondition: &v1.PodCondition{
+                Type:               "test-type",
+                Status:             v1.ConditionFalse,
+                LastTransitionTime: old,
+            },
+            expectUpdate: true,
+        },
+        "should keep last transition time if condition is not changed": {
+            condition: &v1.PodCondition{
+                Type:   "test-type",
+                Status: v1.ConditionFalse,
+            },
+            oldCondition: &v1.PodCondition{
+                Type:               "test-type",
+                Status:             v1.ConditionFalse,
+                LastTransitionTime: old,
+            },
+            expectUpdate: false,
+        },
+    } {
+        t.Logf("TestCase %q", desc)
+        status := &v1.PodStatus{}
+        oldStatus := &v1.PodStatus{}
+        if test.condition != nil {
+            status.Conditions = []v1.PodCondition{*test.condition}
+        }
+        if test.oldCondition != nil {
+            oldStatus.Conditions = []v1.PodCondition{*test.oldCondition}
+        }
+        updateLastTransitionTime(status, oldStatus, "test-type")
+        if test.expectUpdate {
+            assert.True(t, status.Conditions[0].LastTransitionTime.After(old.Time))
+        } else if test.condition != nil {
+            assert.Equal(t, old, status.Conditions[0].LastTransitionTime)
+        }
+    }
+}
+
 func getAction() core.GetAction {
@@ -806,6 +887,197 @@ func updateAction() core.UpdateAction {
     return core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}}
 }
 
+func patchAction() core.PatchAction {
+    return core.PatchActionImpl{ActionImpl: core.ActionImpl{Verb: "patch", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}}
+}
+
 func deleteAction() core.DeleteAction {
     return core.DeleteActionImpl{ActionImpl: core.ActionImpl{Verb: "delete", Resource: schema.GroupVersionResource{Resource: "pods"}}}
 }
+
+func TestMergePodStatus(t *testing.T) {
+    useCases := []struct {
+        desc            string
+        oldPodStatus    func(input v1.PodStatus) v1.PodStatus
+        newPodStatus    func(input v1.PodStatus) v1.PodStatus
+        expectPodStatus v1.PodStatus
+    }{
+        {
+            "no change",
+            func(input v1.PodStatus) v1.PodStatus { return input },
+            func(input v1.PodStatus) v1.PodStatus { return input },
+            getPodStatus(),
+        },
+        {
+            "readiness changes",
+            func(input v1.PodStatus) v1.PodStatus { return input },
+            func(input v1.PodStatus) v1.PodStatus {
+                input.Conditions[0].Status = v1.ConditionFalse
+                return input
+            },
+            v1.PodStatus{
+                Phase: v1.PodRunning,
+                Conditions: []v1.PodCondition{
+                    {
+                        Type:   v1.PodReady,
+                        Status: v1.ConditionFalse,
+                    },
+                    {
+                        Type:   v1.PodScheduled,
+                        Status: v1.ConditionTrue,
+                    },
+                },
+                Message: "Message",
+            },
+        },
+        {
+            "additional pod condition",
+            func(input v1.PodStatus) v1.PodStatus {
+                input.Conditions = append(input.Conditions, v1.PodCondition{
+                    Type:   v1.PodConditionType("example.com/feature"),
+                    Status: v1.ConditionTrue,
+                })
+                return input
+            },
+            func(input v1.PodStatus) v1.PodStatus { return input },
+            v1.PodStatus{
+                Phase: v1.PodRunning,
+                Conditions: []v1.PodCondition{
+                    {
+                        Type:   v1.PodReady,
+                        Status: v1.ConditionTrue,
+                    },
+                    {
+                        Type:   v1.PodScheduled,
+                        Status: v1.ConditionTrue,
+                    },
+                    {
+                        Type:   v1.PodConditionType("example.com/feature"),
+                        Status: v1.ConditionTrue,
+                    },
+                },
+                Message: "Message",
+            },
+        },
+        {
+            "additional pod condition and readiness changes",
+            func(input v1.PodStatus) v1.PodStatus {
+                input.Conditions = append(input.Conditions, v1.PodCondition{
+                    Type:   v1.PodConditionType("example.com/feature"),
+                    Status: v1.ConditionTrue,
+                })
+                return input
+            },
+            func(input v1.PodStatus) v1.PodStatus {
+                input.Conditions[0].Status = v1.ConditionFalse
+                return input
+            },
+            v1.PodStatus{
+                Phase: v1.PodRunning,
+                Conditions: []v1.PodCondition{
+                    {
+                        Type:   v1.PodReady,
+                        Status: v1.ConditionFalse,
+                    },
+                    {
+                        Type:   v1.PodScheduled,
+                        Status: v1.ConditionTrue,
+                    },
+                    {
+                        Type:   v1.PodConditionType("example.com/feature"),
+                        Status: v1.ConditionTrue,
+                    },
+                },
+                Message: "Message",
+            },
+        },
+        {
+            "additional pod condition changes",
+            func(input v1.PodStatus) v1.PodStatus {
+                input.Conditions = append(input.Conditions, v1.PodCondition{
+                    Type:   v1.PodConditionType("example.com/feature"),
+                    Status: v1.ConditionTrue,
+                })
+                return input
+            },
+            func(input v1.PodStatus) v1.PodStatus {
+                input.Conditions = append(input.Conditions, v1.PodCondition{
+                    Type:   v1.PodConditionType("example.com/feature"),
+                    Status: v1.ConditionFalse,
+                })
+                return input
+            },
+            v1.PodStatus{
+                Phase: v1.PodRunning,
+                Conditions: []v1.PodCondition{
+                    {
+                        Type:   v1.PodReady,
+                        Status: v1.ConditionTrue,
+                    },
+                    {
+                        Type:   v1.PodScheduled,
+                        Status: v1.ConditionTrue,
+                    },
+                    {
+                        Type:   v1.PodConditionType("example.com/feature"),
+                        Status: v1.ConditionTrue,
+                    },
+                },
+                Message: "Message",
+            },
+        },
+    }
+
+    for _, tc := range useCases {
+        output := mergePodStatus(tc.oldPodStatus(getPodStatus()), tc.newPodStatus(getPodStatus()))
+        if !conditionsEqual(output.Conditions, tc.expectPodStatus.Conditions) || !statusEqual(output, tc.expectPodStatus) {
+            t.Errorf("test case %q failed, expect: %+v, got %+v", tc.desc, tc.expectPodStatus, output)
+        }
+    }
+
+}
+
+func statusEqual(left, right v1.PodStatus) bool {
+    left.Conditions = nil
+    right.Conditions = nil
+    return reflect.DeepEqual(left, right)
+}
+
+func conditionsEqual(left, right []v1.PodCondition) bool {
+    if len(left) != len(right) {
+        return false
+    }
+
+    for _, l := range left {
+        found := false
+        for _, r := range right {
+            if l.Type == r.Type {
+                found = true
+                if l.Status != r.Status {
+                    return false
+                }
+            }
+        }
+        if !found {
+            return false
+        }
+    }
+    return true
+}
+
+func getPodStatus() v1.PodStatus {
+    return v1.PodStatus{
+        Phase: v1.PodRunning,
+        Conditions: []v1.PodCondition{
+            {
+                Type:   v1.PodReady,
+                Status: v1.ConditionTrue,
+            },
+            {
+                Type:   v1.PodScheduled,
+                Status: v1.ConditionTrue,
+            },
+        },
+        Message: "Message",
+    }
+}
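For completeness, the other half of the readiness-gate flow happens outside the kubelet: some controller writes its own condition into pod.status.conditions, and the kubelet (per the changes above) preserves it and factors it into "Ready". Below is a hedged sketch of such a controller-side update with client-go; the pod name, namespace and gate type are made up for illustration, and the Get/UpdateStatus signatures match the pre-context-argument client-go era of this vendor snapshot (newer client-go adds a ctx parameter).

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

const gate = "example.com/feature" // hypothetical gate type declared in spec.readinessGates

// setGateCondition flips the readiness-gate condition on a pod; the kubelet
// then merges it into the pod's Ready condition on its next status sync.
func setGateCondition(client kubernetes.Interface, namespace, name string, ready bool) error {
    pod, err := client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
    if err != nil {
        return err
    }
    status := v1.ConditionFalse
    if ready {
        status = v1.ConditionTrue
    }
    cond := v1.PodCondition{
        Type:               gate,
        Status:             status,
        LastTransitionTime: metav1.Now(),
    }
    replaced := false
    for i := range pod.Status.Conditions {
        if pod.Status.Conditions[i].Type == gate {
            pod.Status.Conditions[i] = cond
            replaced = true
        }
    }
    if !replaced {
        pod.Status.Conditions = append(pod.Status.Conditions, cond)
    }
    _, err = client.CoreV1().Pods(namespace).UpdateStatus(pod)
    return err
}

func main() {
    config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    client, err := kubernetes.NewForConfig(config)
    if err != nil {
        panic(err)
    }
    if err := setGateCondition(client, "default", "my-pod", true); err != nil {
        panic(err)
    }
    fmt.Println("gate condition updated")
}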