Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 02:43:36 +00:00.
build: move e2e dependencies into e2e/go.mod
Several packages are only used while running the e2e suite. These packages are less important to update, as they cannot influence the final executable that is part of the Ceph-CSI container image. By moving these dependencies out of the main Ceph-CSI go.mod, it is easier to identify whether a reported CVE affects Ceph-CSI itself or only the testing code (as is the case for most Kubernetes CVEs).

Signed-off-by: Niels de Vos <ndevos@ibm.com>
Commit bec6090996, committed by mergify[bot]. Parent: 15da101b1b.
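To illustrate the layout this commit moves toward, here is a minimal sketch of a standalone e2e module file. The module path, Go version, and dependency versions below are illustrative assumptions, not the actual contents of the commit:

	// e2e/go.mod -- sketch only; versions and requirements are illustrative
	module github.com/ceph/ceph-csi/e2e

	go 1.22

	require (
		github.com/onsi/ginkgo/v2 v2.19.0 // test framework, e2e-only
		k8s.io/kubernetes v1.31.0         // large dependency tree, e2e-only
	)

With the split in place, a CVE report against one of these modules can be triaged by checking which go.mod requires it: if it only appears under e2e/, it cannot end up in the Ceph-CSI container image.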
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/.mockery.yaml (generated, vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
---
dir: testing
filename: "mock_{{.InterfaceName | snakecase}}.go"
boilerplate-file: ../../../hack/boilerplate/boilerplate.generatego.txt
outpkg: testing
with-expecter: true
packages:
  k8s.io/kubernetes/pkg/kubelet/status:
    interfaces:
      PodStatusProvider:
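As context for readers (not part of the diff): mockery reads this .mockery.yaml and generates a mock for the PodStatusProvider interface under testing/. The sketch below shows how such a mock is typically used in a test; the MockPodStatusProvider name and NewMockPodStatusProvider constructor are assumptions based on mockery's default naming, and the EXPECT() builder is what "with-expecter: true" enables.

	// Hypothetical test living next to the generated mock (outpkg: testing).
	// Mock type and constructor names are assumed from mockery defaults.
	package testing

	import (
		"testing"

		v1 "k8s.io/api/core/v1"
		"k8s.io/apimachinery/pkg/types"
	)

	func TestPodStatusProviderMock(t *testing.T) {
		provider := NewMockPodStatusProvider(t)

		// Expect one lookup for a known UID and return an empty status.
		uid := types.UID("pod-uid")
		provider.EXPECT().GetPodStatus(uid).Return(v1.PodStatus{}, true)

		if _, ok := provider.GetPodStatus(uid); !ok {
			t.Fatal("expected the mocked status to be found")
		}
	}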
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/fake_status_manager.go (generated, vendored, new file, 98 lines)
@@ -0,0 +1,98 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package status

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/klog/v2"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/pkg/kubelet/status/state"
)

type fakeManager struct {
	state state.State
}

func (m *fakeManager) Start() {
	klog.InfoS("Start()")
	return
}

func (m *fakeManager) GetPodStatus(uid types.UID) (v1.PodStatus, bool) {
	klog.InfoS("GetPodStatus()")
	return v1.PodStatus{}, false
}

func (m *fakeManager) SetPodStatus(pod *v1.Pod, status v1.PodStatus) {
	klog.InfoS("SetPodStatus()")
	return
}

func (m *fakeManager) SetContainerReadiness(podUID types.UID, containerID kubecontainer.ContainerID, ready bool) {
	klog.InfoS("SetContainerReadiness()")
	return
}

func (m *fakeManager) SetContainerStartup(podUID types.UID, containerID kubecontainer.ContainerID, started bool) {
	klog.InfoS("SetContainerStartup()")
	return
}

func (m *fakeManager) TerminatePod(pod *v1.Pod) {
	klog.InfoS("TerminatePod()")
	return
}

func (m *fakeManager) RemoveOrphanedStatuses(podUIDs map[types.UID]bool) {
	klog.InfoS("RemoveOrphanedStatuses()")
	return
}

func (m *fakeManager) GetContainerResourceAllocation(podUID string, containerName string) (v1.ResourceRequirements, bool) {
	klog.InfoS("GetContainerResourceAllocation()")
	return m.state.GetContainerResourceAllocation(podUID, containerName)
}

func (m *fakeManager) GetPodResizeStatus(podUID types.UID) v1.PodResizeStatus {
	return m.state.GetPodResizeStatus(string(podUID))
}

func (m *fakeManager) UpdatePodFromAllocation(pod *v1.Pod) (*v1.Pod, bool) {
	allocs := m.state.GetPodResourceAllocation()
	return updatePodFromAllocation(pod, allocs)
}

func (m *fakeManager) SetPodAllocation(pod *v1.Pod) error {
	klog.InfoS("SetPodAllocation()")
	for _, container := range pod.Spec.Containers {
		alloc := *container.Resources.DeepCopy()
		m.state.SetContainerResourceAllocation(string(pod.UID), container.Name, alloc)
	}
	return nil
}

func (m *fakeManager) SetPodResizeStatus(podUID types.UID, resizeStatus v1.PodResizeStatus) {
	m.state.SetPodResizeStatus(string(podUID), resizeStatus)
}

// NewFakeManager creates empty/fake memory manager
func NewFakeManager() Manager {
	return &fakeManager{
		state: state.NewStateMemory(),
	}
}
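A brief usage note, not part of the diff: because the fake backs only the allocation-related calls with an in-memory state store and stubs everything else, tests can drop it in wherever a Manager is required. A minimal hypothetical test:

	// Hypothetical test; NewFakeManager and GetPodStatus are taken from the file above.
	package status

	import (
		"testing"

		"k8s.io/apimachinery/pkg/types"
	)

	func TestFakeManagerReportsNoStatus(t *testing.T) {
		m := NewFakeManager()

		// The fake never tracks real pod statuses, so lookups always miss.
		if _, ok := m.GetPodStatus(types.UID("any-uid")); ok {
			t.Fatal("expected GetPodStatus to report no status")
		}
	}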
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/generate.go (generated, vendored, new file, 287 lines)
@@ -0,0 +1,287 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package status

import (
	"fmt"
	"strings"

	v1 "k8s.io/api/core/v1"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	runtimeutil "k8s.io/kubernetes/pkg/kubelet/kuberuntime/util"
)

const (
	// UnknownContainerStatuses says that all container statuses are unknown.
	UnknownContainerStatuses = "UnknownContainerStatuses"
	// PodCompleted says that all related containers have succeeded.
	PodCompleted = "PodCompleted"
	// PodFailed says that the pod has failed and as such the containers have failed.
	PodFailed = "PodFailed"
	// ContainersNotReady says that one or more containers are not ready.
	ContainersNotReady = "ContainersNotReady"
	// ContainersNotInitialized says that one or more init containers have not succeeded.
	ContainersNotInitialized = "ContainersNotInitialized"
	// ReadinessGatesNotReady says that one or more pod readiness gates are not ready.
	ReadinessGatesNotReady = "ReadinessGatesNotReady"
)

// GenerateContainersReadyCondition returns the status of "ContainersReady" condition.
// The status of "ContainersReady" condition is true when all containers are ready.
func GenerateContainersReadyCondition(spec *v1.PodSpec, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
	// Find if all containers are ready or not.
	if containerStatuses == nil {
		return v1.PodCondition{
			Type:   v1.ContainersReady,
			Status: v1.ConditionFalse,
			Reason: UnknownContainerStatuses,
		}
	}
	unknownContainers := []string{}
	unreadyContainers := []string{}

	for _, container := range spec.InitContainers {
		if !podutil.IsRestartableInitContainer(&container) {
			continue
		}

		if containerStatus, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok {
			if !containerStatus.Ready {
				unreadyContainers = append(unreadyContainers, container.Name)
			}
		} else {
			unknownContainers = append(unknownContainers, container.Name)
		}
	}

	for _, container := range spec.Containers {
		if containerStatus, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok {
			if !containerStatus.Ready {
				unreadyContainers = append(unreadyContainers, container.Name)
			}
		} else {
			unknownContainers = append(unknownContainers, container.Name)
		}
	}

	// If all containers are known and succeeded, just return PodCompleted.
	if podPhase == v1.PodSucceeded && len(unknownContainers) == 0 {
		return generateContainersReadyConditionForTerminalPhase(podPhase)
	}

	// If the pod phase is failed, explicitly set the ready condition to false for containers since they may be in progress of terminating.
	if podPhase == v1.PodFailed {
		return generateContainersReadyConditionForTerminalPhase(podPhase)
	}

	// Generate message for containers in unknown condition.
	unreadyMessages := []string{}
	if len(unknownContainers) > 0 {
		unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with unknown status: %s", unknownContainers))
	}
	if len(unreadyContainers) > 0 {
		unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with unready status: %s", unreadyContainers))
	}
	unreadyMessage := strings.Join(unreadyMessages, ", ")
	if unreadyMessage != "" {
		return v1.PodCondition{
			Type:    v1.ContainersReady,
			Status:  v1.ConditionFalse,
			Reason:  ContainersNotReady,
			Message: unreadyMessage,
		}
	}

	return v1.PodCondition{
		Type:   v1.ContainersReady,
		Status: v1.ConditionTrue,
	}
}

// GeneratePodReadyCondition returns "Ready" condition of a pod.
// The status of "Ready" condition is "True", if all containers in a pod are ready
// AND all matching conditions specified in the ReadinessGates have status equal to "True".
func GeneratePodReadyCondition(spec *v1.PodSpec, conditions []v1.PodCondition, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
	containersReady := GenerateContainersReadyCondition(spec, containerStatuses, podPhase)
	// If the status of ContainersReady is not True, return the same status, reason and message as ContainersReady.
	if containersReady.Status != v1.ConditionTrue {
		return v1.PodCondition{
			Type:    v1.PodReady,
			Status:  containersReady.Status,
			Reason:  containersReady.Reason,
			Message: containersReady.Message,
		}
	}

	// Evaluate corresponding conditions specified in readiness gate
	// Generate message if any readiness gate is not satisfied.
	unreadyMessages := []string{}
	for _, rg := range spec.ReadinessGates {
		_, c := podutil.GetPodConditionFromList(conditions, rg.ConditionType)
		if c == nil {
			unreadyMessages = append(unreadyMessages, fmt.Sprintf("corresponding condition of pod readiness gate %q does not exist.", string(rg.ConditionType)))
		} else if c.Status != v1.ConditionTrue {
			unreadyMessages = append(unreadyMessages, fmt.Sprintf("the status of pod readiness gate %q is not \"True\", but %v", string(rg.ConditionType), c.Status))
		}
	}

	// Set "Ready" condition to "False" if any readiness gate is not ready.
	if len(unreadyMessages) != 0 {
		unreadyMessage := strings.Join(unreadyMessages, ", ")
		return v1.PodCondition{
			Type:    v1.PodReady,
			Status:  v1.ConditionFalse,
			Reason:  ReadinessGatesNotReady,
			Message: unreadyMessage,
		}
	}

	return v1.PodCondition{
		Type:   v1.PodReady,
		Status: v1.ConditionTrue,
	}
}

func isInitContainerInitialized(initContainer *v1.Container, containerStatus *v1.ContainerStatus) bool {
	if podutil.IsRestartableInitContainer(initContainer) {
		if containerStatus.Started == nil || !*containerStatus.Started {
			return false
		}
	} else { // regular init container
		if !containerStatus.Ready {
			return false
		}
	}
	return true
}

// GeneratePodInitializedCondition returns initialized condition if all init containers in a pod are ready, else it
// returns an uninitialized condition.
func GeneratePodInitializedCondition(spec *v1.PodSpec, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
	// Find if all containers are ready or not.
	if containerStatuses == nil && len(spec.InitContainers) > 0 {
		return v1.PodCondition{
			Type:   v1.PodInitialized,
			Status: v1.ConditionFalse,
			Reason: UnknownContainerStatuses,
		}
	}

	unknownContainers := []string{}
	incompleteContainers := []string{}
	for _, container := range spec.InitContainers {
		containerStatus, ok := podutil.GetContainerStatus(containerStatuses, container.Name)
		if !ok {
			unknownContainers = append(unknownContainers, container.Name)
			continue
		}
		if !isInitContainerInitialized(&container, &containerStatus) {
			incompleteContainers = append(incompleteContainers, container.Name)
		}
	}

	// If all init containers are known and succeeded, just return PodCompleted.
	if podPhase == v1.PodSucceeded && len(unknownContainers) == 0 {
		return v1.PodCondition{
			Type:   v1.PodInitialized,
			Status: v1.ConditionTrue,
			Reason: PodCompleted,
		}
	}

	// If there is any regular container that has started, then the pod has
	// been initialized before.
	// This is needed to handle the case where the pod has been initialized but
	// the restartable init containers are restarting.
	if kubecontainer.HasAnyRegularContainerStarted(spec, containerStatuses) {
		return v1.PodCondition{
			Type:   v1.PodInitialized,
			Status: v1.ConditionTrue,
		}
	}

	unreadyMessages := make([]string, 0, len(unknownContainers)+len(incompleteContainers))
	if len(unknownContainers) > 0 {
		unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with unknown status: %s", unknownContainers))
	}
	if len(incompleteContainers) > 0 {
		unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with incomplete status: %s", incompleteContainers))
	}
	unreadyMessage := strings.Join(unreadyMessages, ", ")
	if unreadyMessage != "" {
		return v1.PodCondition{
			Type:    v1.PodInitialized,
			Status:  v1.ConditionFalse,
			Reason:  ContainersNotInitialized,
			Message: unreadyMessage,
		}
	}

	return v1.PodCondition{
		Type:   v1.PodInitialized,
		Status: v1.ConditionTrue,
	}
}

func GeneratePodReadyToStartContainersCondition(pod *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodCondition {
	newSandboxNeeded, _, _ := runtimeutil.PodSandboxChanged(pod, podStatus)
	// if a new sandbox does not need to be created for a pod, it indicates that
	// a sandbox for the pod with networking configured already exists.
	// Otherwise, the kubelet needs to invoke the container runtime to create a
	// fresh sandbox and configure networking for the sandbox.
	if !newSandboxNeeded {
		return v1.PodCondition{
			Type:   v1.PodReadyToStartContainers,
			Status: v1.ConditionTrue,
		}
	}
	return v1.PodCondition{
		Type:   v1.PodReadyToStartContainers,
		Status: v1.ConditionFalse,
	}
}

func generateContainersReadyConditionForTerminalPhase(podPhase v1.PodPhase) v1.PodCondition {
	condition := v1.PodCondition{
		Type:   v1.ContainersReady,
		Status: v1.ConditionFalse,
	}

	if podPhase == v1.PodFailed {
		condition.Reason = PodFailed
	} else if podPhase == v1.PodSucceeded {
		condition.Reason = PodCompleted
	}

	return condition
}

func generatePodReadyConditionForTerminalPhase(podPhase v1.PodPhase) v1.PodCondition {
	condition := v1.PodCondition{
		Type:   v1.PodReady,
		Status: v1.ConditionFalse,
	}

	if podPhase == v1.PodFailed {
		condition.Reason = PodFailed
	} else if podPhase == v1.PodSucceeded {
		condition.Reason = PodCompleted
	}

	return condition
}
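To see how these generators compose (a hypothetical caller, not part of the diff): GeneratePodReadyCondition delegates to GenerateContainersReadyCondition first, then checks every readiness gate against the pod's existing conditions, so a pod with ready containers but an unmatched gate still reports Ready=False.

	// Hypothetical caller; the pod spec and statuses are invented for illustration.
	package main

	import (
		"fmt"

		v1 "k8s.io/api/core/v1"
		"k8s.io/kubernetes/pkg/kubelet/status"
	)

	func main() {
		spec := &v1.PodSpec{
			Containers: []v1.Container{{Name: "app"}},
			ReadinessGates: []v1.PodReadinessGate{
				{ConditionType: "example.com/feature-ready"},
			},
		}
		statuses := []v1.ContainerStatus{{Name: "app", Ready: true}}

		// No pod condition matches the readiness gate yet, so the pod is not
		// Ready even though its only container is.
		cond := status.GeneratePodReadyCondition(spec, nil, statuses, v1.PodRunning)
		fmt.Println(cond.Status, cond.Reason) // False ReadinessGatesNotReady
	}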
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/state/checkpoint.go (generated, vendored, new file, 80 lines)
@@ -0,0 +1,80 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package state

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum"
)

var _ checkpointmanager.Checkpoint = &Checkpoint{}

type PodResourceAllocationInfo struct {
	AllocationEntries map[string]map[string]v1.ResourceRequirements `json:"allocationEntries,omitempty"`
}

// Checkpoint represents a structure to store pod resource allocation checkpoint data
type Checkpoint struct {
	// Data is a serialized PodResourceAllocationInfo
	Data string `json:"data"`
	// Checksum is a checksum of Data
	Checksum checksum.Checksum `json:"checksum"`
}

// NewCheckpoint creates a new checkpoint from a list of claim info states
func NewCheckpoint(allocations *PodResourceAllocationInfo) (*Checkpoint, error) {

	serializedAllocations, err := json.Marshal(allocations)
	if err != nil {
		return nil, fmt.Errorf("failed to serialize allocations for checkpointing: %w", err)
	}

	cp := &Checkpoint{
		Data: string(serializedAllocations),
	}
	cp.Checksum = checksum.New(cp.Data)
	return cp, nil
}

func (cp *Checkpoint) MarshalCheckpoint() ([]byte, error) {
	return json.Marshal(cp)
}

// UnmarshalCheckpoint unmarshals checkpoint from JSON
func (cp *Checkpoint) UnmarshalCheckpoint(blob []byte) error {
	return json.Unmarshal(blob, cp)
}

// VerifyChecksum verifies that current checksum
// of checkpointed Data is valid
func (cp *Checkpoint) VerifyChecksum() error {
	return cp.Checksum.Verify(cp.Data)
}

// GetPodResourceAllocationInfo returns Pod Resource Allocation info states from checkpoint
func (cp *Checkpoint) GetPodResourceAllocationInfo() (*PodResourceAllocationInfo, error) {
	var data PodResourceAllocationInfo
	if err := json.Unmarshal([]byte(cp.Data), &data); err != nil {
		return nil, err
	}

	return &data, nil
}
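A hypothetical round trip of the Checkpoint type above (not part of the diff), showing the serialize/verify cycle the checkpoint manager performs when persisting and restoring allocation state:

	// Hypothetical round-trip; the pod UID and container name are invented.
	package main

	import (
		"fmt"

		v1 "k8s.io/api/core/v1"
		"k8s.io/kubernetes/pkg/kubelet/status/state"
	)

	func main() {
		info := &state.PodResourceAllocationInfo{
			AllocationEntries: map[string]map[string]v1.ResourceRequirements{
				"pod-uid": {"app": {}},
			},
		}

		cp, err := state.NewCheckpoint(info)
		if err != nil {
			panic(err)
		}

		blob, _ := cp.MarshalCheckpoint() // what the checkpoint manager writes to disk

		restored := &state.Checkpoint{}
		if err := restored.UnmarshalCheckpoint(blob); err != nil {
			panic(err)
		}
		if err := restored.VerifyChecksum(); err != nil { // detects on-disk corruption
			panic(err)
		}

		got, _ := restored.GetPodResourceAllocationInfo()
		fmt.Println(len(got.AllocationEntries)) // 1
	}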
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/state/state.go (generated, vendored, new file, 60 lines)
@@ -0,0 +1,60 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package state

import (
	v1 "k8s.io/api/core/v1"
)

// PodResourceAllocation type is used in tracking resources allocated to pod's containers
type PodResourceAllocation map[string]map[string]v1.ResourceRequirements

// PodResizeStatus type is used in tracking the last resize decision for pod
type PodResizeStatus map[string]v1.PodResizeStatus

// Clone returns a copy of PodResourceAllocation
func (pr PodResourceAllocation) Clone() PodResourceAllocation {
	prCopy := make(PodResourceAllocation)
	for pod := range pr {
		prCopy[pod] = make(map[string]v1.ResourceRequirements)
		for container, alloc := range pr[pod] {
			prCopy[pod][container] = *alloc.DeepCopy()
		}
	}
	return prCopy
}

// Reader interface used to read current pod resource allocation state
type Reader interface {
	GetContainerResourceAllocation(podUID string, containerName string) (v1.ResourceRequirements, bool)
	GetPodResourceAllocation() PodResourceAllocation
	GetPodResizeStatus(podUID string) v1.PodResizeStatus
}

type writer interface {
	SetContainerResourceAllocation(podUID string, containerName string, alloc v1.ResourceRequirements) error
	SetPodResourceAllocation(PodResourceAllocation) error
	SetPodResizeStatus(podUID string, resizeStatus v1.PodResizeStatus)
	Delete(podUID string, containerName string) error
	ClearState() error
}

// State interface provides methods for tracking and setting pod resource allocation
type State interface {
	Reader
	writer
}
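Clone deep-copies each container's ResourceRequirements via DeepCopy, so callers can mutate the returned map freely. A hypothetical demonstration (not part of the diff):

	// Hypothetical sketch; the pod UID, container name, and quantities are invented.
	package main

	import (
		"fmt"

		v1 "k8s.io/api/core/v1"
		"k8s.io/apimachinery/pkg/api/resource"
		"k8s.io/kubernetes/pkg/kubelet/status/state"
	)

	func main() {
		orig := state.PodResourceAllocation{
			"pod-uid": {
				"app": v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
				},
			},
		}

		clone := orig.Clone()
		clone["pod-uid"]["app"] = v1.ResourceRequirements{} // mutate the copy only

		req := orig["pod-uid"]["app"].Requests
		fmt.Println(req.Cpu()) // still 100m: the original is untouched
	}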
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/state/state_checkpoint.go (generated, vendored, new file, 200 lines)
@@ -0,0 +1,200 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package state

import (
	"fmt"
	"path"
	"sync"

	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
)

var _ State = &stateCheckpoint{}

type stateCheckpoint struct {
	mux               sync.RWMutex
	cache             State
	checkpointManager checkpointmanager.CheckpointManager
	checkpointName    string
}

// NewStateCheckpoint creates new State for keeping track of pod resource allocations with checkpoint backend
func NewStateCheckpoint(stateDir, checkpointName string) (State, error) {
	checkpointManager, err := checkpointmanager.NewCheckpointManager(stateDir)
	if err != nil {
		return nil, fmt.Errorf("failed to initialize checkpoint manager for pod allocation tracking: %v", err)
	}
	stateCheckpoint := &stateCheckpoint{
		cache:             NewStateMemory(),
		checkpointManager: checkpointManager,
		checkpointName:    checkpointName,
	}

	if err := stateCheckpoint.restoreState(); err != nil {
		//lint:ignore ST1005 user-facing error message
		return nil, fmt.Errorf("could not restore state from checkpoint: %v, please drain this node and delete pod allocation checkpoint file %q before restarting Kubelet", err, path.Join(stateDir, checkpointName))
	}
	return stateCheckpoint, nil
}

// restores state from a checkpoint and creates it if it doesn't exist
func (sc *stateCheckpoint) restoreState() error {
	sc.mux.Lock()
	defer sc.mux.Unlock()
	var err error

	checkpoint, err := NewCheckpoint(nil)
	if err != nil {
		return fmt.Errorf("failed to create new checkpoint: %w", err)
	}

	if err = sc.checkpointManager.GetCheckpoint(sc.checkpointName, checkpoint); err != nil {
		if err == errors.ErrCheckpointNotFound {
			return sc.storeState()
		}
		return err
	}
	praInfo, err := checkpoint.GetPodResourceAllocationInfo()
	if err != nil {
		return fmt.Errorf("failed to get pod resource allocation info: %w", err)
	}
	err = sc.cache.SetPodResourceAllocation(praInfo.AllocationEntries)
	if err != nil {
		return fmt.Errorf("failed to set pod resource allocation: %w", err)
	}
	klog.V(2).InfoS("State checkpoint: restored pod resource allocation state from checkpoint")
	return nil
}

// saves state to a checkpoint, caller is responsible for locking
func (sc *stateCheckpoint) storeState() error {
	podAllocation := sc.cache.GetPodResourceAllocation()

	checkpoint, err := NewCheckpoint(&PodResourceAllocationInfo{
		AllocationEntries: podAllocation,
	})
	if err != nil {
		return fmt.Errorf("failed to create checkpoint: %w", err)
	}
	err = sc.checkpointManager.CreateCheckpoint(sc.checkpointName, checkpoint)
	if err != nil {
		klog.ErrorS(err, "Failed to save pod allocation checkpoint")
		return err
	}
	return nil
}

// GetContainerResourceAllocation returns current resources allocated to a pod's container
func (sc *stateCheckpoint) GetContainerResourceAllocation(podUID string, containerName string) (v1.ResourceRequirements, bool) {
	sc.mux.RLock()
	defer sc.mux.RUnlock()
	return sc.cache.GetContainerResourceAllocation(podUID, containerName)
}

// GetPodResourceAllocation returns current pod resource allocation
func (sc *stateCheckpoint) GetPodResourceAllocation() PodResourceAllocation {
	sc.mux.RLock()
	defer sc.mux.RUnlock()
	return sc.cache.GetPodResourceAllocation()
}

// GetPodResizeStatus returns the last resize decision for a pod
func (sc *stateCheckpoint) GetPodResizeStatus(podUID string) v1.PodResizeStatus {
	sc.mux.RLock()
	defer sc.mux.RUnlock()
	return sc.cache.GetPodResizeStatus(podUID)
}

// SetContainerResourceAllocation sets resources allocated to a pod's container
func (sc *stateCheckpoint) SetContainerResourceAllocation(podUID string, containerName string, alloc v1.ResourceRequirements) error {
	sc.mux.Lock()
	defer sc.mux.Unlock()
	sc.cache.SetContainerResourceAllocation(podUID, containerName, alloc)
	return sc.storeState()
}

// SetPodResourceAllocation sets pod resource allocation
func (sc *stateCheckpoint) SetPodResourceAllocation(a PodResourceAllocation) error {
	sc.mux.Lock()
	defer sc.mux.Unlock()
	sc.cache.SetPodResourceAllocation(a)
	return sc.storeState()
}

// SetPodResizeStatus sets the last resize decision for a pod
func (sc *stateCheckpoint) SetPodResizeStatus(podUID string, resizeStatus v1.PodResizeStatus) {
	sc.mux.Lock()
	defer sc.mux.Unlock()
	sc.cache.SetPodResizeStatus(podUID, resizeStatus)
}

// Delete deletes allocations for specified pod
func (sc *stateCheckpoint) Delete(podUID string, containerName string) error {
	sc.mux.Lock()
	defer sc.mux.Unlock()
	sc.cache.Delete(podUID, containerName)
	return sc.storeState()
}

// ClearState clears the state and saves it in a checkpoint
func (sc *stateCheckpoint) ClearState() error {
	sc.mux.Lock()
	defer sc.mux.Unlock()
	sc.cache.ClearState()
	return sc.storeState()
}

type noopStateCheckpoint struct{}

// NewNoopStateCheckpoint creates a dummy state checkpoint manager
func NewNoopStateCheckpoint() State {
	return &noopStateCheckpoint{}
}

func (sc *noopStateCheckpoint) GetContainerResourceAllocation(_ string, _ string) (v1.ResourceRequirements, bool) {
	return v1.ResourceRequirements{}, false
}

func (sc *noopStateCheckpoint) GetPodResourceAllocation() PodResourceAllocation {
	return nil
}

func (sc *noopStateCheckpoint) GetPodResizeStatus(_ string) v1.PodResizeStatus {
	return ""
}

func (sc *noopStateCheckpoint) SetContainerResourceAllocation(_ string, _ string, _ v1.ResourceRequirements) error {
	return nil
}

func (sc *noopStateCheckpoint) SetPodResourceAllocation(_ PodResourceAllocation) error {
	return nil
}

func (sc *noopStateCheckpoint) SetPodResizeStatus(_ string, _ v1.PodResizeStatus) {}

func (sc *noopStateCheckpoint) Delete(_ string, _ string) error {
	return nil
}

func (sc *noopStateCheckpoint) ClearState() error {
	return nil
}
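A hypothetical wiring example, not part of the diff: every setter on stateCheckpoint persists through storeState before returning, so allocations written here survive a kubelet restart. The directory and checkpoint name below are invented for illustration:

	// Hypothetical usage of NewStateCheckpoint; paths and names are illustrative.
	package main

	import (
		"fmt"

		v1 "k8s.io/api/core/v1"
		"k8s.io/kubernetes/pkg/kubelet/status/state"
	)

	func main() {
		st, err := state.NewStateCheckpoint("/var/lib/kubelet", "pod_status_manager_state")
		if err != nil {
			panic(err)
		}

		// Each write is checkpointed to disk before returning.
		if err := st.SetContainerResourceAllocation("pod-uid", "app", v1.ResourceRequirements{}); err != nil {
			panic(err)
		}

		// After a restart, NewStateCheckpoint would restore this entry from the
		// checkpoint file instead of starting empty.
		_, found := st.GetContainerResourceAllocation("pod-uid", "app")
		fmt.Println(found) // true
	}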
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/state/state_mem.go (generated, vendored, new file, 128 lines)
@@ -0,0 +1,128 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package state

import (
	"sync"

	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
)

type stateMemory struct {
	sync.RWMutex
	podAllocation   PodResourceAllocation
	podResizeStatus PodResizeStatus
}

var _ State = &stateMemory{}

// NewStateMemory creates new State to track resources allocated to pods
func NewStateMemory() State {
	klog.V(2).InfoS("Initialized new in-memory state store for pod resource allocation tracking")
	return &stateMemory{
		podAllocation:   PodResourceAllocation{},
		podResizeStatus: PodResizeStatus{},
	}
}

func (s *stateMemory) GetContainerResourceAllocation(podUID string, containerName string) (v1.ResourceRequirements, bool) {
	s.RLock()
	defer s.RUnlock()

	alloc, ok := s.podAllocation[podUID][containerName]
	return *alloc.DeepCopy(), ok
}

func (s *stateMemory) GetPodResourceAllocation() PodResourceAllocation {
	s.RLock()
	defer s.RUnlock()
	return s.podAllocation.Clone()
}

func (s *stateMemory) GetPodResizeStatus(podUID string) v1.PodResizeStatus {
	s.RLock()
	defer s.RUnlock()

	return s.podResizeStatus[podUID]
}

func (s *stateMemory) SetContainerResourceAllocation(podUID string, containerName string, alloc v1.ResourceRequirements) error {
	s.Lock()
	defer s.Unlock()

	if _, ok := s.podAllocation[podUID]; !ok {
		s.podAllocation[podUID] = make(map[string]v1.ResourceRequirements)
	}

	s.podAllocation[podUID][containerName] = alloc
	klog.V(3).InfoS("Updated container resource allocation", "podUID", podUID, "containerName", containerName, "alloc", alloc)
	return nil
}

func (s *stateMemory) SetPodResourceAllocation(a PodResourceAllocation) error {
	s.Lock()
	defer s.Unlock()

	s.podAllocation = a.Clone()
	klog.V(3).InfoS("Updated pod resource allocation", "allocation", a)
	return nil
}

func (s *stateMemory) SetPodResizeStatus(podUID string, resizeStatus v1.PodResizeStatus) {
	s.Lock()
	defer s.Unlock()

	if resizeStatus != "" {
		s.podResizeStatus[podUID] = resizeStatus
	} else {
		delete(s.podResizeStatus, podUID)
	}
	klog.V(3).InfoS("Updated pod resize state", "podUID", podUID, "resizeStatus", resizeStatus)
}

func (s *stateMemory) deleteContainer(podUID string, containerName string) {
	delete(s.podAllocation[podUID], containerName)
	if len(s.podAllocation[podUID]) == 0 {
		delete(s.podAllocation, podUID)
		delete(s.podResizeStatus, podUID)
	}
	klog.V(3).InfoS("Deleted pod resource allocation", "podUID", podUID, "containerName", containerName)
}

func (s *stateMemory) Delete(podUID string, containerName string) error {
	s.Lock()
	defer s.Unlock()
	if len(containerName) == 0 {
		delete(s.podAllocation, podUID)
		delete(s.podResizeStatus, podUID)
		klog.V(3).InfoS("Deleted pod resource allocation and resize state", "podUID", podUID)
		return nil
	}
	s.deleteContainer(podUID, containerName)
	return nil
}

func (s *stateMemory) ClearState() error {
	s.Lock()
	defer s.Unlock()

	s.podAllocation = make(PodResourceAllocation)
	s.podResizeStatus = make(PodResizeStatus)
	klog.V(3).InfoS("Cleared state")
	return nil
}
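One behavioral detail of the in-memory store worth calling out: SetPodResizeStatus treats an empty status as a delete rather than storing an empty string. A hypothetical sketch (not part of the diff; it assumes the v1.PodResizeStatusInProgress constant from k8s.io/api/core/v1):

	// Hypothetical sketch of the resize-status clearing behavior above.
	package main

	import (
		"fmt"

		v1 "k8s.io/api/core/v1"
		"k8s.io/kubernetes/pkg/kubelet/status/state"
	)

	func main() {
		st := state.NewStateMemory()

		st.SetPodResizeStatus("pod-uid", v1.PodResizeStatusInProgress)
		fmt.Println(st.GetPodResizeStatus("pod-uid")) // InProgress

		st.SetPodResizeStatus("pod-uid", "") // clears the entry entirely
		fmt.Println(st.GetPodResizeStatus("pod-uid") == "") // true
	}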
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/status/status_manager.go (generated, vendored, new file, 1171 lines)
File diff suppressed because it is too large.