Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 02:43:36 +00:00.

commit 107407b44b (parent dd77e72800), committed by mergify[bot]

    rebase: update replaced k8s.io modules to v0.33.0

    Signed-off-by: Niels de Vos <ndevos@ibm.com>
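The commit message refers to the replace directives that pin the k8s.io staging modules used by the e2e vendor tree. The go.mod itself is not part of this diff; purely as an assumed illustration, bumping such replaced modules to v0.33.0 looks roughly like this:

// Hypothetical go.mod fragment, not taken from this commit.
replace (
	k8s.io/api => k8s.io/api v0.33.0
	k8s.io/apimachinery => k8s.io/apimachinery v0.33.0
	k8s.io/kubelet => k8s.io/kubelet v0.33.0
)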
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/fake_status_manager.go (generated, vendored): 98 lines, entire file removed
@@ -1,98 +0,0 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package status

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/klog/v2"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/pkg/kubelet/status/state"
)

type fakeManager struct {
	state state.State
}

func (m *fakeManager) Start() {
	klog.InfoS("Start()")
	return
}

func (m *fakeManager) GetPodStatus(uid types.UID) (v1.PodStatus, bool) {
	klog.InfoS("GetPodStatus()")
	return v1.PodStatus{}, false
}

func (m *fakeManager) SetPodStatus(pod *v1.Pod, status v1.PodStatus) {
	klog.InfoS("SetPodStatus()")
	return
}

func (m *fakeManager) SetContainerReadiness(podUID types.UID, containerID kubecontainer.ContainerID, ready bool) {
	klog.InfoS("SetContainerReadiness()")
	return
}

func (m *fakeManager) SetContainerStartup(podUID types.UID, containerID kubecontainer.ContainerID, started bool) {
	klog.InfoS("SetContainerStartup()")
	return
}

func (m *fakeManager) TerminatePod(pod *v1.Pod) {
	klog.InfoS("TerminatePod()")
	return
}

func (m *fakeManager) RemoveOrphanedStatuses(podUIDs map[types.UID]bool) {
	klog.InfoS("RemoveOrphanedStatuses()")
	return
}

func (m *fakeManager) GetContainerResourceAllocation(podUID string, containerName string) (v1.ResourceRequirements, bool) {
	klog.InfoS("GetContainerResourceAllocation()")
	return m.state.GetContainerResourceAllocation(podUID, containerName)
}

func (m *fakeManager) GetPodResizeStatus(podUID types.UID) v1.PodResizeStatus {
	return m.state.GetPodResizeStatus(string(podUID))
}

func (m *fakeManager) UpdatePodFromAllocation(pod *v1.Pod) (*v1.Pod, bool) {
	allocs := m.state.GetPodResourceAllocation()
	return updatePodFromAllocation(pod, allocs)
}

func (m *fakeManager) SetPodAllocation(pod *v1.Pod) error {
	klog.InfoS("SetPodAllocation()")
	for _, container := range pod.Spec.Containers {
		alloc := *container.Resources.DeepCopy()
		m.state.SetContainerResourceAllocation(string(pod.UID), container.Name, alloc)
	}
	return nil
}

func (m *fakeManager) SetPodResizeStatus(podUID types.UID, resizeStatus v1.PodResizeStatus) {
	m.state.SetPodResizeStatus(string(podUID), resizeStatus)
}

// NewFakeManager creates empty/fake memory manager
func NewFakeManager() Manager {
	return &fakeManager{
		state: state.NewStateMemory(),
	}
}
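For orientation only: the file above is removed by this commit, but its API is small enough to sketch. A minimal, assumed test showing how the fake status manager could have been driven (test name and pod UID are invented for illustration):

package status_test

import (
	"testing"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/kubelet/status"
)

func TestFakeManagerSketch(t *testing.T) {
	// NewFakeManager returns a Manager backed by an in-memory state store.
	m := status.NewFakeManager()
	m.Start()

	// The fake never tracks real statuses, so lookups report "not found".
	if _, ok := m.GetPodStatus(types.UID("example-pod-uid")); ok {
		t.Fatalf("expected no cached status for an unknown pod")
	}
}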
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/generate.go (generated, vendored): 127 lines changed
@@ -43,19 +43,20 @@ const (

// GenerateContainersReadyCondition returns the status of "ContainersReady" condition.
// The status of "ContainersReady" condition is true when all containers are ready.
-func GenerateContainersReadyCondition(spec *v1.PodSpec, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
+func GenerateContainersReadyCondition(pod *v1.Pod, oldPodStatus *v1.PodStatus, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
	// Find if all containers are ready or not.
	if containerStatuses == nil {
		return v1.PodCondition{
-			Type: v1.ContainersReady,
-			Status: v1.ConditionFalse,
-			Reason: UnknownContainerStatuses,
+			Type: v1.ContainersReady,
+			ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.ContainersReady),
+			Status: v1.ConditionFalse,
+			Reason: UnknownContainerStatuses,
		}
	}
	unknownContainers := []string{}
	unreadyContainers := []string{}

-	for _, container := range spec.InitContainers {
+	for _, container := range pod.Spec.InitContainers {
		if !podutil.IsRestartableInitContainer(&container) {
			continue
		}

@@ -69,7 +70,7 @@ func GenerateContainersReadyCondition(spec *v1.PodSpec, containerStatuses []v1.C
		}
	}

-	for _, container := range spec.Containers {
+	for _, container := range pod.Spec.Containers {
		if containerStatus, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok {
			if !containerStatus.Ready {
				unreadyContainers = append(unreadyContainers, container.Name)

@@ -81,12 +82,12 @@ func GenerateContainersReadyCondition(spec *v1.PodSpec, containerStatuses []v1.C

	// If all containers are known and succeeded, just return PodCompleted.
	if podPhase == v1.PodSucceeded && len(unknownContainers) == 0 {
-		return generateContainersReadyConditionForTerminalPhase(podPhase)
+		return generateContainersReadyConditionForTerminalPhase(pod, oldPodStatus, podPhase)
	}

	// If the pod phase is failed, explicitly set the ready condition to false for containers since they may be in progress of terminating.
	if podPhase == v1.PodFailed {
-		return generateContainersReadyConditionForTerminalPhase(podPhase)
+		return generateContainersReadyConditionForTerminalPhase(pod, oldPodStatus, podPhase)
	}

	// Generate message for containers in unknown condition.

@@ -100,38 +101,41 @@ func GenerateContainersReadyCondition(spec *v1.PodSpec, containerStatuses []v1.C
	unreadyMessage := strings.Join(unreadyMessages, ", ")
	if unreadyMessage != "" {
		return v1.PodCondition{
-			Type: v1.ContainersReady,
-			Status: v1.ConditionFalse,
-			Reason: ContainersNotReady,
-			Message: unreadyMessage,
+			Type: v1.ContainersReady,
+			ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.ContainersReady),
+			Status: v1.ConditionFalse,
+			Reason: ContainersNotReady,
+			Message: unreadyMessage,
		}
	}

	return v1.PodCondition{
-		Type: v1.ContainersReady,
-		Status: v1.ConditionTrue,
+		Type: v1.ContainersReady,
+		ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.ContainersReady),
+		Status: v1.ConditionTrue,
	}
}

// GeneratePodReadyCondition returns "Ready" condition of a pod.
// The status of "Ready" condition is "True", if all containers in a pod are ready
// AND all matching conditions specified in the ReadinessGates have status equal to "True".
-func GeneratePodReadyCondition(spec *v1.PodSpec, conditions []v1.PodCondition, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
-	containersReady := GenerateContainersReadyCondition(spec, containerStatuses, podPhase)
+func GeneratePodReadyCondition(pod *v1.Pod, oldPodStatus *v1.PodStatus, conditions []v1.PodCondition, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
+	containersReady := GenerateContainersReadyCondition(pod, oldPodStatus, containerStatuses, podPhase)
	// If the status of ContainersReady is not True, return the same status, reason and message as ContainersReady.
	if containersReady.Status != v1.ConditionTrue {
		return v1.PodCondition{
-			Type: v1.PodReady,
-			Status: containersReady.Status,
-			Reason: containersReady.Reason,
-			Message: containersReady.Message,
+			Type: v1.PodReady,
+			ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodReady),
+			Status: containersReady.Status,
+			Reason: containersReady.Reason,
+			Message: containersReady.Message,
		}
	}

	// Evaluate corresponding conditions specified in readiness gate
	// Generate message if any readiness gate is not satisfied.
	unreadyMessages := []string{}
-	for _, rg := range spec.ReadinessGates {
+	for _, rg := range pod.Spec.ReadinessGates {
		_, c := podutil.GetPodConditionFromList(conditions, rg.ConditionType)
		if c == nil {
			unreadyMessages = append(unreadyMessages, fmt.Sprintf("corresponding condition of pod readiness gate %q does not exist.", string(rg.ConditionType)))

@@ -144,16 +148,18 @@ func GeneratePodReadyCondition(spec *v1.PodSpec, conditions []v1.PodCondition, c
	if len(unreadyMessages) != 0 {
		unreadyMessage := strings.Join(unreadyMessages, ", ")
		return v1.PodCondition{
-			Type: v1.PodReady,
-			Status: v1.ConditionFalse,
-			Reason: ReadinessGatesNotReady,
-			Message: unreadyMessage,
+			Type: v1.PodReady,
+			ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodReady),
+			Status: v1.ConditionFalse,
+			Reason: ReadinessGatesNotReady,
+			Message: unreadyMessage,
		}
	}

	return v1.PodCondition{
-		Type: v1.PodReady,
-		Status: v1.ConditionTrue,
+		Type: v1.PodReady,
+		ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodReady),
+		Status: v1.ConditionTrue,
	}
}

@@ -172,19 +178,20 @@ func isInitContainerInitialized(initContainer *v1.Container, containerStatus *v1

// GeneratePodInitializedCondition returns initialized condition if all init containers in a pod are ready, else it
// returns an uninitialized condition.
-func GeneratePodInitializedCondition(spec *v1.PodSpec, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
+func GeneratePodInitializedCondition(pod *v1.Pod, oldPodStatus *v1.PodStatus, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
	// Find if all containers are ready or not.
-	if containerStatuses == nil && len(spec.InitContainers) > 0 {
+	if containerStatuses == nil && len(pod.Spec.InitContainers) > 0 {
		return v1.PodCondition{
-			Type: v1.PodInitialized,
-			Status: v1.ConditionFalse,
-			Reason: UnknownContainerStatuses,
+			Type: v1.PodInitialized,
+			ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodInitialized),
+			Status: v1.ConditionFalse,
+			Reason: UnknownContainerStatuses,
		}
	}

	unknownContainers := []string{}
	incompleteContainers := []string{}
-	for _, container := range spec.InitContainers {
+	for _, container := range pod.Spec.InitContainers {
		containerStatus, ok := podutil.GetContainerStatus(containerStatuses, container.Name)
		if !ok {
			unknownContainers = append(unknownContainers, container.Name)

@@ -198,9 +205,10 @@ func GeneratePodInitializedCondition(spec *v1.PodSpec, containerStatuses []v1.Co
	// If all init containers are known and succeeded, just return PodCompleted.
	if podPhase == v1.PodSucceeded && len(unknownContainers) == 0 {
		return v1.PodCondition{
-			Type: v1.PodInitialized,
-			Status: v1.ConditionTrue,
-			Reason: PodCompleted,
+			Type: v1.PodInitialized,
+			ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodInitialized),
+			Status: v1.ConditionTrue,
+			Reason: PodCompleted,
		}
	}

@@ -208,10 +216,11 @@ func GeneratePodInitializedCondition(spec *v1.PodSpec, containerStatuses []v1.Co
	// been initialized before.
	// This is needed to handle the case where the pod has been initialized but
	// the restartable init containers are restarting.
-	if kubecontainer.HasAnyRegularContainerStarted(spec, containerStatuses) {
+	if kubecontainer.HasAnyRegularContainerStarted(&pod.Spec, containerStatuses) {
		return v1.PodCondition{
-			Type: v1.PodInitialized,
-			Status: v1.ConditionTrue,
+			Type: v1.PodInitialized,
+			ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodInitialized),
+			Status: v1.ConditionTrue,
		}
	}

@@ -225,20 +234,22 @@ func GeneratePodInitializedCondition(spec *v1.PodSpec, containerStatuses []v1.Co
	unreadyMessage := strings.Join(unreadyMessages, ", ")
	if unreadyMessage != "" {
		return v1.PodCondition{
-			Type: v1.PodInitialized,
-			Status: v1.ConditionFalse,
-			Reason: ContainersNotInitialized,
-			Message: unreadyMessage,
+			Type: v1.PodInitialized,
+			ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodInitialized),
+			Status: v1.ConditionFalse,
+			Reason: ContainersNotInitialized,
+			Message: unreadyMessage,
		}
	}

	return v1.PodCondition{
-		Type: v1.PodInitialized,
-		Status: v1.ConditionTrue,
+		Type: v1.PodInitialized,
+		ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodInitialized),
+		Status: v1.ConditionTrue,
	}
}

-func GeneratePodReadyToStartContainersCondition(pod *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodCondition {
+func GeneratePodReadyToStartContainersCondition(pod *v1.Pod, oldPodStatus *v1.PodStatus, podStatus *kubecontainer.PodStatus) v1.PodCondition {
	newSandboxNeeded, _, _ := runtimeutil.PodSandboxChanged(pod, podStatus)
	// if a new sandbox does not need to be created for a pod, it indicates that
	// a sandbox for the pod with networking configured already exists.

@@ -246,20 +257,23 @@ func GeneratePodReadyToStartContainersCondition(pod *v1.Pod, podStatus *kubecont
	// fresh sandbox and configure networking for the sandbox.
	if !newSandboxNeeded {
		return v1.PodCondition{
-			Type: v1.PodReadyToStartContainers,
-			Status: v1.ConditionTrue,
+			Type: v1.PodReadyToStartContainers,
+			ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodReadyToStartContainers),
+			Status: v1.ConditionTrue,
		}
	}
	return v1.PodCondition{
-		Type: v1.PodReadyToStartContainers,
-		Status: v1.ConditionFalse,
+		Type: v1.PodReadyToStartContainers,
+		ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodReadyToStartContainers),
+		Status: v1.ConditionFalse,
	}
}

-func generateContainersReadyConditionForTerminalPhase(podPhase v1.PodPhase) v1.PodCondition {
+func generateContainersReadyConditionForTerminalPhase(pod *v1.Pod, oldPodStatus *v1.PodStatus, podPhase v1.PodPhase) v1.PodCondition {
	condition := v1.PodCondition{
-		Type: v1.ContainersReady,
-		Status: v1.ConditionFalse,
+		Type: v1.ContainersReady,
+		ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.ContainersReady),
+		Status: v1.ConditionFalse,
	}

	if podPhase == v1.PodFailed {

@@ -271,10 +285,11 @@ func generateContainersReadyConditionForTerminalPhase(podPhase v1.PodPhase) v1.P
	return condition
}

-func generatePodReadyConditionForTerminalPhase(podPhase v1.PodPhase) v1.PodCondition {
+func generatePodReadyConditionForTerminalPhase(pod *v1.Pod, oldPodStatus *v1.PodStatus, podPhase v1.PodPhase) v1.PodCondition {
	condition := v1.PodCondition{
-		Type: v1.PodReady,
-		Status: v1.ConditionFalse,
+		Type: v1.PodReady,
+		ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodReady),
+		Status: v1.ConditionFalse,
	}

	if podPhase == v1.PodFailed {
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/state/checkpoint.go (generated, vendored): 80 lines, entire file removed
@@ -1,80 +0,0 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package state

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum"
)

var _ checkpointmanager.Checkpoint = &Checkpoint{}

type PodResourceAllocationInfo struct {
	AllocationEntries map[string]map[string]v1.ResourceRequirements `json:"allocationEntries,omitempty"`
}

// Checkpoint represents a structure to store pod resource allocation checkpoint data
type Checkpoint struct {
	// Data is a serialized PodResourceAllocationInfo
	Data string `json:"data"`
	// Checksum is a checksum of Data
	Checksum checksum.Checksum `json:"checksum"`
}

// NewCheckpoint creates a new checkpoint from a list of claim info states
func NewCheckpoint(allocations *PodResourceAllocationInfo) (*Checkpoint, error) {

	serializedAllocations, err := json.Marshal(allocations)
	if err != nil {
		return nil, fmt.Errorf("failed to serialize allocations for checkpointing: %w", err)
	}

	cp := &Checkpoint{
		Data: string(serializedAllocations),
	}
	cp.Checksum = checksum.New(cp.Data)
	return cp, nil
}

func (cp *Checkpoint) MarshalCheckpoint() ([]byte, error) {
	return json.Marshal(cp)
}

// UnmarshalCheckpoint unmarshals checkpoint from JSON
func (cp *Checkpoint) UnmarshalCheckpoint(blob []byte) error {
	return json.Unmarshal(blob, cp)
}

// VerifyChecksum verifies that current checksum
// of checkpointed Data is valid
func (cp *Checkpoint) VerifyChecksum() error {
	return cp.Checksum.Verify(cp.Data)
}

// GetPodResourceAllocationInfo returns Pod Resource Allocation info states from checkpoint
func (cp *Checkpoint) GetPodResourceAllocationInfo() (*PodResourceAllocationInfo, error) {
	var data PodResourceAllocationInfo
	if err := json.Unmarshal([]byte(cp.Data), &data); err != nil {
		return nil, err
	}

	return &data, nil
}
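Again for orientation (this file is also removed by the commit): a hedged sketch of the checkpoint round-trip the type above supported, with placeholder pod and container names:

package state_test

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/kubelet/status/state"
)

func TestCheckpointRoundTripSketch(t *testing.T) {
	info := &state.PodResourceAllocationInfo{
		AllocationEntries: map[string]map[string]v1.ResourceRequirements{
			"pod-uid": {"ctr": {}},
		},
	}

	// NewCheckpoint serializes the allocations and stores a checksum over the data.
	cp, err := state.NewCheckpoint(info)
	if err != nil {
		t.Fatal(err)
	}
	if err := cp.VerifyChecksum(); err != nil {
		t.Fatalf("checksum mismatch: %v", err)
	}

	// Round-trip through the checkpointmanager wire format.
	blob, err := cp.MarshalCheckpoint()
	if err != nil {
		t.Fatal(err)
	}
	restored := &state.Checkpoint{}
	if err := restored.UnmarshalCheckpoint(blob); err != nil {
		t.Fatal(err)
	}
}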
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/state/state.go (generated, vendored): 60 lines, entire file removed
@@ -1,60 +0,0 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package state

import (
	v1 "k8s.io/api/core/v1"
)

// PodResourceAllocation type is used in tracking resources allocated to pod's containers
type PodResourceAllocation map[string]map[string]v1.ResourceRequirements

// PodResizeStatus type is used in tracking the last resize decision for pod
type PodResizeStatus map[string]v1.PodResizeStatus

// Clone returns a copy of PodResourceAllocation
func (pr PodResourceAllocation) Clone() PodResourceAllocation {
	prCopy := make(PodResourceAllocation)
	for pod := range pr {
		prCopy[pod] = make(map[string]v1.ResourceRequirements)
		for container, alloc := range pr[pod] {
			prCopy[pod][container] = *alloc.DeepCopy()
		}
	}
	return prCopy
}

// Reader interface used to read current pod resource allocation state
type Reader interface {
	GetContainerResourceAllocation(podUID string, containerName string) (v1.ResourceRequirements, bool)
	GetPodResourceAllocation() PodResourceAllocation
	GetPodResizeStatus(podUID string) v1.PodResizeStatus
}

type writer interface {
	SetContainerResourceAllocation(podUID string, containerName string, alloc v1.ResourceRequirements) error
	SetPodResourceAllocation(PodResourceAllocation) error
	SetPodResizeStatus(podUID string, resizeStatus v1.PodResizeStatus)
	Delete(podUID string, containerName string) error
	ClearState() error
}

// State interface provides methods for tracking and setting pod resource allocation
type State interface {
	Reader
	writer
}
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/state/state_checkpoint.go (generated, vendored): 200 lines, entire file removed
@@ -1,200 +0,0 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package state

import (
	"fmt"
	"path"
	"sync"

	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
)

var _ State = &stateCheckpoint{}

type stateCheckpoint struct {
	mux sync.RWMutex
	cache State
	checkpointManager checkpointmanager.CheckpointManager
	checkpointName string
}

// NewStateCheckpoint creates new State for keeping track of pod resource allocations with checkpoint backend
func NewStateCheckpoint(stateDir, checkpointName string) (State, error) {
	checkpointManager, err := checkpointmanager.NewCheckpointManager(stateDir)
	if err != nil {
		return nil, fmt.Errorf("failed to initialize checkpoint manager for pod allocation tracking: %v", err)
	}
	stateCheckpoint := &stateCheckpoint{
		cache: NewStateMemory(),
		checkpointManager: checkpointManager,
		checkpointName: checkpointName,
	}

	if err := stateCheckpoint.restoreState(); err != nil {
		//lint:ignore ST1005 user-facing error message
		return nil, fmt.Errorf("could not restore state from checkpoint: %v, please drain this node and delete pod allocation checkpoint file %q before restarting Kubelet", err, path.Join(stateDir, checkpointName))
	}
	return stateCheckpoint, nil
}

// restores state from a checkpoint and creates it if it doesn't exist
func (sc *stateCheckpoint) restoreState() error {
	sc.mux.Lock()
	defer sc.mux.Unlock()
	var err error

	checkpoint, err := NewCheckpoint(nil)
	if err != nil {
		return fmt.Errorf("failed to create new checkpoint: %w", err)
	}

	if err = sc.checkpointManager.GetCheckpoint(sc.checkpointName, checkpoint); err != nil {
		if err == errors.ErrCheckpointNotFound {
			return sc.storeState()
		}
		return err
	}
	praInfo, err := checkpoint.GetPodResourceAllocationInfo()
	if err != nil {
		return fmt.Errorf("failed to get pod resource allocation info: %w", err)
	}
	err = sc.cache.SetPodResourceAllocation(praInfo.AllocationEntries)
	if err != nil {
		return fmt.Errorf("failed to set pod resource allocation: %w", err)
	}
	klog.V(2).InfoS("State checkpoint: restored pod resource allocation state from checkpoint")
	return nil
}

// saves state to a checkpoint, caller is responsible for locking
func (sc *stateCheckpoint) storeState() error {
	podAllocation := sc.cache.GetPodResourceAllocation()

	checkpoint, err := NewCheckpoint(&PodResourceAllocationInfo{
		AllocationEntries: podAllocation,
	})
	if err != nil {
		return fmt.Errorf("failed to create checkpoint: %w", err)
	}
	err = sc.checkpointManager.CreateCheckpoint(sc.checkpointName, checkpoint)
	if err != nil {
		klog.ErrorS(err, "Failed to save pod allocation checkpoint")
		return err
	}
	return nil
}

// GetContainerResourceAllocation returns current resources allocated to a pod's container
func (sc *stateCheckpoint) GetContainerResourceAllocation(podUID string, containerName string) (v1.ResourceRequirements, bool) {
	sc.mux.RLock()
	defer sc.mux.RUnlock()
	return sc.cache.GetContainerResourceAllocation(podUID, containerName)
}

// GetPodResourceAllocation returns current pod resource allocation
func (sc *stateCheckpoint) GetPodResourceAllocation() PodResourceAllocation {
	sc.mux.RLock()
	defer sc.mux.RUnlock()
	return sc.cache.GetPodResourceAllocation()
}

// GetPodResizeStatus returns the last resize decision for a pod
func (sc *stateCheckpoint) GetPodResizeStatus(podUID string) v1.PodResizeStatus {
	sc.mux.RLock()
	defer sc.mux.RUnlock()
	return sc.cache.GetPodResizeStatus(podUID)
}

// SetContainerResourceAllocation sets resources allocated to a pod's container
func (sc *stateCheckpoint) SetContainerResourceAllocation(podUID string, containerName string, alloc v1.ResourceRequirements) error {
	sc.mux.Lock()
	defer sc.mux.Unlock()
	sc.cache.SetContainerResourceAllocation(podUID, containerName, alloc)
	return sc.storeState()
}

// SetPodResourceAllocation sets pod resource allocation
func (sc *stateCheckpoint) SetPodResourceAllocation(a PodResourceAllocation) error {
	sc.mux.Lock()
	defer sc.mux.Unlock()
	sc.cache.SetPodResourceAllocation(a)
	return sc.storeState()
}

// SetPodResizeStatus sets the last resize decision for a pod
func (sc *stateCheckpoint) SetPodResizeStatus(podUID string, resizeStatus v1.PodResizeStatus) {
	sc.mux.Lock()
	defer sc.mux.Unlock()
	sc.cache.SetPodResizeStatus(podUID, resizeStatus)
}

// Delete deletes allocations for specified pod
func (sc *stateCheckpoint) Delete(podUID string, containerName string) error {
	sc.mux.Lock()
	defer sc.mux.Unlock()
	sc.cache.Delete(podUID, containerName)
	return sc.storeState()
}

// ClearState clears the state and saves it in a checkpoint
func (sc *stateCheckpoint) ClearState() error {
	sc.mux.Lock()
	defer sc.mux.Unlock()
	sc.cache.ClearState()
	return sc.storeState()
}

type noopStateCheckpoint struct{}

// NewNoopStateCheckpoint creates a dummy state checkpoint manager
func NewNoopStateCheckpoint() State {
	return &noopStateCheckpoint{}
}

func (sc *noopStateCheckpoint) GetContainerResourceAllocation(_ string, _ string) (v1.ResourceRequirements, bool) {
	return v1.ResourceRequirements{}, false
}

func (sc *noopStateCheckpoint) GetPodResourceAllocation() PodResourceAllocation {
	return nil
}

func (sc *noopStateCheckpoint) GetPodResizeStatus(_ string) v1.PodResizeStatus {
	return ""
}

func (sc *noopStateCheckpoint) SetContainerResourceAllocation(_ string, _ string, _ v1.ResourceRequirements) error {
	return nil
}

func (sc *noopStateCheckpoint) SetPodResourceAllocation(_ PodResourceAllocation) error {
	return nil
}

func (sc *noopStateCheckpoint) SetPodResizeStatus(_ string, _ v1.PodResizeStatus) {}

func (sc *noopStateCheckpoint) Delete(_ string, _ string) error {
	return nil
}

func (sc *noopStateCheckpoint) ClearState() error {
	return nil
}
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/state/state_mem.go (generated, vendored): 128 lines, entire file removed
@@ -1,128 +0,0 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package state

import (
	"sync"

	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
)

type stateMemory struct {
	sync.RWMutex
	podAllocation PodResourceAllocation
	podResizeStatus PodResizeStatus
}

var _ State = &stateMemory{}

// NewStateMemory creates new State to track resources allocated to pods
func NewStateMemory() State {
	klog.V(2).InfoS("Initialized new in-memory state store for pod resource allocation tracking")
	return &stateMemory{
		podAllocation: PodResourceAllocation{},
		podResizeStatus: PodResizeStatus{},
	}
}

func (s *stateMemory) GetContainerResourceAllocation(podUID string, containerName string) (v1.ResourceRequirements, bool) {
	s.RLock()
	defer s.RUnlock()

	alloc, ok := s.podAllocation[podUID][containerName]
	return *alloc.DeepCopy(), ok
}

func (s *stateMemory) GetPodResourceAllocation() PodResourceAllocation {
	s.RLock()
	defer s.RUnlock()
	return s.podAllocation.Clone()
}

func (s *stateMemory) GetPodResizeStatus(podUID string) v1.PodResizeStatus {
	s.RLock()
	defer s.RUnlock()

	return s.podResizeStatus[podUID]
}

func (s *stateMemory) SetContainerResourceAllocation(podUID string, containerName string, alloc v1.ResourceRequirements) error {
	s.Lock()
	defer s.Unlock()

	if _, ok := s.podAllocation[podUID]; !ok {
		s.podAllocation[podUID] = make(map[string]v1.ResourceRequirements)
	}

	s.podAllocation[podUID][containerName] = alloc
	klog.V(3).InfoS("Updated container resource allocation", "podUID", podUID, "containerName", containerName, "alloc", alloc)
	return nil
}

func (s *stateMemory) SetPodResourceAllocation(a PodResourceAllocation) error {
	s.Lock()
	defer s.Unlock()

	s.podAllocation = a.Clone()
	klog.V(3).InfoS("Updated pod resource allocation", "allocation", a)
	return nil
}

func (s *stateMemory) SetPodResizeStatus(podUID string, resizeStatus v1.PodResizeStatus) {
	s.Lock()
	defer s.Unlock()

	if resizeStatus != "" {
		s.podResizeStatus[podUID] = resizeStatus
	} else {
		delete(s.podResizeStatus, podUID)
	}
	klog.V(3).InfoS("Updated pod resize state", "podUID", podUID, "resizeStatus", resizeStatus)
}

func (s *stateMemory) deleteContainer(podUID string, containerName string) {
	delete(s.podAllocation[podUID], containerName)
	if len(s.podAllocation[podUID]) == 0 {
		delete(s.podAllocation, podUID)
		delete(s.podResizeStatus, podUID)
	}
	klog.V(3).InfoS("Deleted pod resource allocation", "podUID", podUID, "containerName", containerName)
}

func (s *stateMemory) Delete(podUID string, containerName string) error {
	s.Lock()
	defer s.Unlock()
	if len(containerName) == 0 {
		delete(s.podAllocation, podUID)
		delete(s.podResizeStatus, podUID)
		klog.V(3).InfoS("Deleted pod resource allocation and resize state", "podUID", podUID)
		return nil
	}
	s.deleteContainer(podUID, containerName)
	return nil
}

func (s *stateMemory) ClearState() error {
	s.Lock()
	defer s.Unlock()

	s.podAllocation = make(PodResourceAllocation)
	s.podResizeStatus = make(PodResizeStatus)
	klog.V(3).InfoS("Cleared state")
	return nil
}
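A brief, assumed usage sketch of the in-memory store defined above (also removed by this commit); the pod UID, container name, and quantity are placeholders:

package state_test

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/kubelet/status/state"
)

func TestStateMemorySketch(t *testing.T) {
	s := state.NewStateMemory()

	// Record an allocation for one container, then read it back.
	alloc := v1.ResourceRequirements{
		Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
	}
	if err := s.SetContainerResourceAllocation("pod-uid", "ctr", alloc); err != nil {
		t.Fatal(err)
	}

	got, ok := s.GetContainerResourceAllocation("pod-uid", "ctr")
	if !ok {
		t.Fatalf("expected an allocation for the container")
	}
	cpu := got.Requests[v1.ResourceCPU]
	if cpu.String() != "100m" {
		t.Fatalf("unexpected CPU request: %s", cpu.String())
	}
}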
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go (generated, vendored): 246 lines changed
@@ -25,7 +25,7 @@ import (
	"sync"
	"time"

-	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp" //nolint:depguard
	clientset "k8s.io/client-go/kubernetes"

	v1 "k8s.io/api/core/v1"

@@ -40,15 +40,11 @@ import (
	"k8s.io/kubernetes/pkg/features"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/pkg/kubelet/metrics"
-	"k8s.io/kubernetes/pkg/kubelet/status/state"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
	kubeutil "k8s.io/kubernetes/pkg/kubelet/util"
	statusutil "k8s.io/kubernetes/pkg/util/pod"
)

-// podStatusManagerStateFile is the file name where status manager stores its state
-const podStatusManagerStateFile = "pod_status_manager_state"
-
// A wrapper around v1.PodStatus that includes a version to enforce that stale pod statuses are
// not sent to the API server.
type versionedPodStatus struct {

@@ -72,19 +68,32 @@ type manager struct {
	kubeClient clientset.Interface
	podManager PodManager
	// Map from pod UID to sync status of the corresponding pod.
-	podStatuses      map[types.UID]versionedPodStatus
-	podStatusesLock  sync.RWMutex
-	podStatusChannel chan struct{}
+	podStatuses         map[types.UID]versionedPodStatus
+	podResizeConditions map[types.UID]podResizeConditions
+	podStatusesLock     sync.RWMutex
+	podStatusChannel    chan struct{}
	// Map from (mirror) pod UID to latest status version successfully sent to the API server.
	// apiStatusVersions must only be accessed from the sync thread.
	apiStatusVersions map[kubetypes.MirrorPodUID]uint64
	podDeletionSafety PodDeletionSafetyProvider

	podStartupLatencyHelper PodStartupLatencyStateHelper
-	// state allows to save/restore pod resource allocation and tolerate kubelet restarts.
-	state state.State
-	// stateFileDirectory holds the directory where the state file for checkpoints is held.
-	stateFileDirectory string
}

+type podResizeConditions struct {
+	PodResizePending    *v1.PodCondition
+	PodResizeInProgress *v1.PodCondition
+}
+
+func (prc podResizeConditions) List() []*v1.PodCondition {
+	var conditions []*v1.PodCondition
+	if prc.PodResizePending != nil {
+		conditions = append(conditions, prc.PodResizePending)
+	}
+	if prc.PodResizeInProgress != nil {
+		conditions = append(conditions, prc.PodResizeInProgress)
+	}
+	return conditions
+}
+
// PodManager is the subset of methods the manager needs to observe the actual state of the kubelet.

@@ -143,42 +152,35 @@ type Manager interface {
	// the provided podUIDs.
	RemoveOrphanedStatuses(podUIDs map[types.UID]bool)

-	// GetPodResizeStatus returns cached PodStatus.Resize value
-	GetPodResizeStatus(podUID types.UID) v1.PodResizeStatus
+	// GetPodResizeConditions returns cached PodStatus Resize conditions value
+	GetPodResizeConditions(podUID types.UID) []*v1.PodCondition

-	// SetPodResizeStatus caches the last resizing decision for the pod.
-	SetPodResizeStatus(podUID types.UID, resize v1.PodResizeStatus)
+	// SetPodResizePendingCondition caches the last PodResizePending condition for the pod.
+	SetPodResizePendingCondition(podUID types.UID, reason, message string)

-	allocationManager
-}
+	// SetPodResizeInProgressCondition caches the last PodResizeInProgress condition for the pod.
+	SetPodResizeInProgressCondition(podUID types.UID, reason, message string, allowReasonToBeCleared bool)

-// TODO(tallclair): Refactor allocation state handling out of the status manager.
-type allocationManager interface {
-	// GetContainerResourceAllocation returns the checkpointed AllocatedResources value for the container
-	GetContainerResourceAllocation(podUID string, containerName string) (v1.ResourceRequirements, bool)
+	// ClearPodResizePendingCondition clears the PodResizePending condition for the pod from the cache.
+	ClearPodResizePendingCondition(podUID types.UID)

-	// UpdatePodFromAllocation overwrites the pod spec with the allocation.
-	// This function does a deep copy only if updates are needed.
-	// Returns the updated (or original) pod, and whether there was an allocation stored.
-	UpdatePodFromAllocation(pod *v1.Pod) (*v1.Pod, bool)
-
-	// SetPodAllocation checkpoints the resources allocated to a pod's containers.
-	SetPodAllocation(pod *v1.Pod) error
+	// ClearPodResizeInProgressCondition clears the PodResizeInProgress condition for the pod from the cache.
+	ClearPodResizeInProgressCondition(podUID types.UID)
}

const syncPeriod = 10 * time.Second

// NewManager returns a functional Manager.
-func NewManager(kubeClient clientset.Interface, podManager PodManager, podDeletionSafety PodDeletionSafetyProvider, podStartupLatencyHelper PodStartupLatencyStateHelper, stateFileDirectory string) Manager {
+func NewManager(kubeClient clientset.Interface, podManager PodManager, podDeletionSafety PodDeletionSafetyProvider, podStartupLatencyHelper PodStartupLatencyStateHelper) Manager {
	return &manager{
		kubeClient:              kubeClient,
		podManager:              podManager,
		podStatuses:             make(map[types.UID]versionedPodStatus),
+		podResizeConditions:     make(map[types.UID]podResizeConditions),
		podStatusChannel:        make(chan struct{}, 1),
		apiStatusVersions:       make(map[kubetypes.MirrorPodUID]uint64),
		podDeletionSafety:       podDeletionSafety,
		podStartupLatencyHelper: podStartupLatencyHelper,
-		stateFileDirectory:      stateFileDirectory,
	}
}

@@ -188,34 +190,35 @@ func NewManager(kubeClient clientset.Interface, podManager PodManager, podDeleti
	// changes will be ignored.
func isPodStatusByKubeletEqual(oldStatus, status *v1.PodStatus) bool {
	oldCopy := oldStatus.DeepCopy()

+	newConditions := make(map[v1.PodConditionType]*v1.PodCondition, len(status.Conditions))
+	oldConditions := make(map[v1.PodConditionType]*v1.PodCondition, len(oldStatus.Conditions))
	for _, c := range status.Conditions {
		// both owned and shared conditions are used for kubelet status equality
		if kubetypes.PodConditionByKubelet(c.Type) || kubetypes.PodConditionSharedByKubelet(c.Type) {
-			_, oc := podutil.GetPodCondition(oldCopy, c.Type)
-			if oc == nil || oc.Status != c.Status || oc.Message != c.Message || oc.Reason != c.Reason {
-				return false
-			}
+			newConditions[c.Type] = &c
		}
	}
+	for _, c := range oldStatus.Conditions {
+		if kubetypes.PodConditionByKubelet(c.Type) || kubetypes.PodConditionSharedByKubelet(c.Type) {
+			oldConditions[c.Type] = &c
+		}
+	}
+
+	if len(newConditions) != len(oldConditions) {
+		return false
+	}
+	for _, newCondition := range newConditions {
+		oldCondition := oldConditions[newCondition.Type]
+		if oldCondition == nil || oldCondition.Status != newCondition.Status || oldCondition.Message != newCondition.Message || oldCondition.Reason != newCondition.Reason {
+			return false
+		}
+	}

	oldCopy.Conditions = status.Conditions
	return apiequality.Semantic.DeepEqual(oldCopy, status)
}

func (m *manager) Start() {
-	// Initialize m.state to no-op state checkpoint manager
-	m.state = state.NewNoopStateCheckpoint()
-
-	// Create pod allocation checkpoint manager even if client is nil so as to allow local get/set of AllocatedResources & Resize
-	if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
-		stateImpl, err := state.NewStateCheckpoint(m.stateFileDirectory, podStatusManagerStateFile)
-		if err != nil {
-			// This is a crictical, non-recoverable failure.
-			klog.ErrorS(err, "Could not initialize pod allocation checkpoint manager, please drain node and remove policy state file")
-			panic(err)
-		}
-		m.state = stateImpl
-	}
-
	// Don't start the status manager if we don't have a client. This will happen
	// on the master, where the kubelet is responsible for bootstrapping the pods
	// of the master components.

@@ -244,72 +247,65 @@ func (m *manager) Start() {
	}, 0)
}

-// GetContainerResourceAllocation returns the last checkpointed AllocatedResources values
-// If checkpoint manager has not been initialized, it returns nil, false
-func (m *manager) GetContainerResourceAllocation(podUID string, containerName string) (v1.ResourceRequirements, bool) {
+// GetPodResizeConditions returns the last cached ResizeStatus value.
+func (m *manager) GetPodResizeConditions(podUID types.UID) []*v1.PodCondition {
	m.podStatusesLock.RLock()
	defer m.podStatusesLock.RUnlock()
-	return m.state.GetContainerResourceAllocation(podUID, containerName)
+	return m.podResizeConditions[podUID].List()
}

-// UpdatePodFromAllocation overwrites the pod spec with the allocation.
-// This function does a deep copy only if updates are needed.
-func (m *manager) UpdatePodFromAllocation(pod *v1.Pod) (*v1.Pod, bool) {
-	m.podStatusesLock.RLock()
-	defer m.podStatusesLock.RUnlock()
-	// TODO(tallclair): This clones the whole cache, but we only need 1 pod.
-	allocs := m.state.GetPodResourceAllocation()
-	return updatePodFromAllocation(pod, allocs)
-}
+// SetPodResizePendingCondition caches the last PodResizePending condition for the pod.
+func (m *manager) SetPodResizePendingCondition(podUID types.UID, reason, message string) {
+	m.podStatusesLock.Lock()
+	defer m.podStatusesLock.Unlock()

-func updatePodFromAllocation(pod *v1.Pod, allocs state.PodResourceAllocation) (*v1.Pod, bool) {
-	allocated, found := allocs[string(pod.UID)]
-	if !found {
-		return pod, false
+	m.podResizeConditions[podUID] = podResizeConditions{
+		PodResizePending:    updatedPodResizeCondition(v1.PodResizePending, m.podResizeConditions[podUID].PodResizePending, reason, message),
+		PodResizeInProgress: m.podResizeConditions[podUID].PodResizeInProgress,
	}
}

-	updated := false
-	for i, c := range pod.Spec.Containers {
-		if cAlloc, ok := allocated[c.Name]; ok {
-			if !apiequality.Semantic.DeepEqual(c.Resources, cAlloc) {
-				// Allocation differs from pod spec, update
-				if !updated {
-					// If this is the first update, copy the pod
-					pod = pod.DeepCopy()
-					updated = true
-				}
-				pod.Spec.Containers[i].Resources = cAlloc
+// SetPodResizeInProgressCondition caches the last PodResizeInProgress condition for the pod.
+func (m *manager) SetPodResizeInProgressCondition(podUID types.UID, reason, message string, allowReasonToBeCleared bool) {
+	oldConditions := m.GetPodResizeConditions(podUID)
+
+	m.podStatusesLock.Lock()
+	defer m.podStatusesLock.Unlock()
+
+	if !allowReasonToBeCleared && reason == "" && message == "" {
+		// Preserve the old reason and message if there is one.
+		for _, c := range oldConditions {
+			if c.Type == v1.PodResizeInProgress {
+				reason = c.Reason
+				message = c.Message
			}
		}
	}
-	return pod, updated
-}

-// GetPodResizeStatus returns the last cached ResizeStatus value.
-func (m *manager) GetPodResizeStatus(podUID types.UID) v1.PodResizeStatus {
-	m.podStatusesLock.RLock()
-	defer m.podStatusesLock.RUnlock()
-	return m.state.GetPodResizeStatus(string(podUID))
-}
-
-// SetPodAllocation checkpoints the resources allocated to a pod's containers
-func (m *manager) SetPodAllocation(pod *v1.Pod) error {
-	m.podStatusesLock.RLock()
-	defer m.podStatusesLock.RUnlock()
-	for _, container := range pod.Spec.Containers {
-		alloc := *container.Resources.DeepCopy()
-		if err := m.state.SetContainerResourceAllocation(string(pod.UID), container.Name, alloc); err != nil {
-			return err
-		}
+	m.podResizeConditions[podUID] = podResizeConditions{
+		PodResizeInProgress: updatedPodResizeCondition(v1.PodResizeInProgress, m.podResizeConditions[podUID].PodResizeInProgress, reason, message),
+		PodResizePending:    m.podResizeConditions[podUID].PodResizePending,
	}
-	return nil
-}

-// SetPodResizeStatus checkpoints the last resizing decision for the pod.
-func (m *manager) SetPodResizeStatus(podUID types.UID, resizeStatus v1.PodResizeStatus) {
-	m.podStatusesLock.RLock()
-	defer m.podStatusesLock.RUnlock()
-	m.state.SetPodResizeStatus(string(podUID), resizeStatus)
+// ClearPodResizePendingCondition clears the PodResizePending condition for the pod from the cache.
+func (m *manager) ClearPodResizePendingCondition(podUID types.UID) {
+	m.podStatusesLock.Lock()
+	defer m.podStatusesLock.Unlock()
+	m.podResizeConditions[podUID] = podResizeConditions{
+		PodResizePending:    nil,
+		PodResizeInProgress: m.podResizeConditions[podUID].PodResizeInProgress,
+	}
}

+// ClearPodResizeInProgressCondition clears the PodResizeInProgress condition for the pod from the cache.
+func (m *manager) ClearPodResizeInProgressCondition(podUID types.UID) {
+	m.podStatusesLock.Lock()
+	defer m.podStatusesLock.Unlock()
+	m.podResizeConditions[podUID] = podResizeConditions{
+		PodResizePending:    m.podResizeConditions[podUID].PodResizePending,
+		PodResizeInProgress: nil,
+	}
+}
+
func (m *manager) GetPodStatus(uid types.UID) (v1.PodStatus, bool) {

@@ -326,6 +322,9 @@ func (m *manager) SetPodStatus(pod *v1.Pod, status v1.PodStatus) {
	// Make sure we're caching a deep copy.
	status = *status.DeepCopy()

+	// Set the observedGeneration for this pod status.
+	status.ObservedGeneration = podutil.GetPodObservedGenerationIfEnabled(pod)
+
	// Force a status update if deletion timestamp is set. This is necessary
	// because if the pod is in the non-running state, the pod worker still
	// needs to be able to trigger an update and/or deletion.

@@ -388,9 +387,10 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai
			status.Conditions = append(status.Conditions, condition)
		}
	}

	allContainerStatuses := append(status.InitContainerStatuses, status.ContainerStatuses...)
-	updateConditionFunc(v1.PodReady, GeneratePodReadyCondition(&pod.Spec, status.Conditions, allContainerStatuses, status.Phase))
-	updateConditionFunc(v1.ContainersReady, GenerateContainersReadyCondition(&pod.Spec, allContainerStatuses, status.Phase))
+	updateConditionFunc(v1.PodReady, GeneratePodReadyCondition(pod, &oldStatus.status, status.Conditions, allContainerStatuses, status.Phase))
+	updateConditionFunc(v1.ContainersReady, GenerateContainersReadyCondition(pod, &oldStatus.status, allContainerStatuses, status.Phase))
	m.updateStatusInternal(pod, status, false, false)
}

@@ -690,6 +690,11 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp
		status.StartTime = &now
	}

+	// prevent sending unnecessary patches
+	if oldStatus.ObservedGeneration > status.ObservedGeneration {
+		status.ObservedGeneration = oldStatus.ObservedGeneration
+	}
+
	normalizeStatus(pod, &status)

	// Perform some more extensive logging of container termination state to assist in

@@ -779,7 +784,7 @@ func (m *manager) deletePodStatus(uid types.UID) {
	delete(m.podStatuses, uid)
	m.podStartupLatencyHelper.DeletePodStartupState(uid)
	if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
-		m.state.Delete(string(uid), "")
+		delete(m.podResizeConditions, uid)
	}
}

@@ -792,7 +797,7 @@ func (m *manager) RemoveOrphanedStatuses(podUIDs map[types.UID]bool) {
			klog.V(5).InfoS("Removing pod from status map.", "podUID", key)
			delete(m.podStatuses, key)
			if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
-				m.state.Delete(string(key), "")
+				delete(m.podResizeConditions, key)
			}
		}
	}

@@ -905,7 +910,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
		return
	}

-	mergedStatus := mergePodStatus(pod.Status, status.status, m.podDeletionSafety.PodCouldHaveRunningContainers(pod))
+	mergedStatus := mergePodStatus(pod, pod.Status, status.status, m.podDeletionSafety.PodCouldHaveRunningContainers(pod))

	newPod, patchBytes, unchanged, err := statusutil.PatchPodStatus(context.TODO(), m.kubeClient, pod.Namespace, pod.Name, pod.UID, pod.Status, mergedStatus)
	klog.V(3).InfoS("Patch status for pod", "pod", klog.KObj(pod), "podUID", uid, "patch", string(patchBytes))

@@ -1083,7 +1088,7 @@ func normalizeStatus(pod *v1.Pod, status *v1.PodStatus) *v1.PodStatus {
// mergePodStatus merges oldPodStatus and newPodStatus to preserve where pod conditions
// not owned by kubelet and to ensure terminal phase transition only happens after all
// running containers have terminated. This method does not modify the old status.
-func mergePodStatus(oldPodStatus, newPodStatus v1.PodStatus, couldHaveRunningContainers bool) v1.PodStatus {
+func mergePodStatus(pod *v1.Pod, oldPodStatus, newPodStatus v1.PodStatus, couldHaveRunningContainers bool) v1.PodStatus {
	podConditions := make([]v1.PodCondition, 0, len(oldPodStatus.Conditions)+len(newPodStatus.Conditions))

	for _, c := range oldPodStatus.Conditions {

@@ -1145,10 +1150,10 @@ func mergePodStatus(oldPodStatus, newPodStatus v1.PodStatus, couldHaveRunningCon
	// See https://issues.k8s.io/108594 for more details.
	if podutil.IsPodPhaseTerminal(newPodStatus.Phase) {
		if podutil.IsPodReadyConditionTrue(newPodStatus) || podutil.IsContainersReadyConditionTrue(newPodStatus) {
-			containersReadyCondition := generateContainersReadyConditionForTerminalPhase(newPodStatus.Phase)
+			containersReadyCondition := generateContainersReadyConditionForTerminalPhase(pod, &oldPodStatus, newPodStatus.Phase)
			podutil.UpdatePodCondition(&newPodStatus, &containersReadyCondition)

-			podReadyCondition := generatePodReadyConditionForTerminalPhase(newPodStatus.Phase)
+			podReadyCondition := generatePodReadyConditionForTerminalPhase(pod, &oldPodStatus, newPodStatus.Phase)
			podutil.UpdatePodCondition(&newPodStatus, &podReadyCondition)
		}
	}

@@ -1161,7 +1166,7 @@ func NeedToReconcilePodReadiness(pod *v1.Pod) bool {
	if len(pod.Spec.ReadinessGates) == 0 {
		return false
	}
-	podReadyCondition := GeneratePodReadyCondition(&pod.Spec, pod.Status.Conditions, pod.Status.ContainerStatuses, pod.Status.Phase)
+	podReadyCondition := GeneratePodReadyCondition(pod, &pod.Status, pod.Status.Conditions, pod.Status.ContainerStatuses, pod.Status.Phase)
	i, curCondition := podutil.GetPodConditionFromList(pod.Status.Conditions, v1.PodReady)
	// Only reconcile if "Ready" condition is present and Status or Message is not expected
	if i >= 0 && (curCondition.Status != podReadyCondition.Status || curCondition.Message != podReadyCondition.Message) {

@@ -1169,3 +1174,22 @@ func NeedToReconcilePodReadiness(pod *v1.Pod) bool {
	}
	return false
}
+
+func updatedPodResizeCondition(conditionType v1.PodConditionType, oldCondition *v1.PodCondition, reason, message string) *v1.PodCondition {
+	now := metav1.NewTime(time.Now())
+	var lastTransitionTime metav1.Time
+	if oldCondition == nil || oldCondition.Reason != reason {
+		lastTransitionTime = now
+	} else {
+		lastTransitionTime = oldCondition.LastTransitionTime
+	}
+
+	return &v1.PodCondition{
+		Type:               conditionType,
+		Status:             v1.ConditionTrue,
+		LastProbeTime:      now,
+		LastTransitionTime: lastTransitionTime,
+		Reason:             reason,
+		Message:            message,
+	}
+}
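To make the new surface concrete, a hedged sketch of a caller of the resize-condition methods added to the Manager interface above; the functions, reason, and message are invented for illustration and are not part of this diff:

package kubeletsketch

import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/kubelet/status"
)

// markResizeDeferred records that a resize was requested but cannot be satisfied yet.
func markResizeDeferred(statusManager status.Manager, podUID types.UID) {
	statusManager.SetPodResizePendingCondition(podUID, "Deferred", "node has insufficient allocatable CPU")
}

// markResizeStarted drops the pending condition and records that actuation is underway;
// passing empty reason/message with allowReasonToBeCleared=false preserves any prior text.
func markResizeStarted(statusManager status.Manager, podUID types.UID) {
	statusManager.ClearPodResizePendingCondition(podUID)
	statusManager.SetPodResizeInProgressCondition(podUID, "", "", false)
}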