Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 18:43:34 +00:00
rebase: bump the k8s-dependencies group in /e2e with 3 updates
Bumps the k8s-dependencies group in /e2e with 3 updates: [k8s.io/apimachinery](https://github.com/kubernetes/apimachinery), [k8s.io/cloud-provider](https://github.com/kubernetes/cloud-provider) and [k8s.io/pod-security-admission](https://github.com/kubernetes/pod-security-admission).

Updates `k8s.io/apimachinery` from 0.32.3 to 0.33.0
- [Commits](https://github.com/kubernetes/apimachinery/compare/v0.32.3...v0.33.0)

Updates `k8s.io/cloud-provider` from 0.32.3 to 0.33.0
- [Commits](https://github.com/kubernetes/cloud-provider/compare/v0.32.3...v0.33.0)

Updates `k8s.io/pod-security-admission` from 0.32.3 to 0.33.0
- [Commits](https://github.com/kubernetes/pod-security-admission/compare/v0.32.3...v0.33.0)

---
updated-dependencies:
- dependency-name: k8s.io/apimachinery
  dependency-version: 0.33.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: k8s-dependencies
- dependency-name: k8s.io/cloud-provider
  dependency-version: 0.33.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: k8s-dependencies
- dependency-name: k8s.io/pod-security-admission
  dependency-version: 0.33.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: k8s-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Committed by mergify[bot]. Parent: d52dc2c4ba. Commit: dd77e72800.
e2e/vendor/k8s.io/component-helpers/resource/helpers.go (generated, vendored): 104 changed lines
@@ -17,6 +17,8 @@ limitations under the License.
 package resource
 
 import (
+	"strings"
+
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
 )
@@ -56,14 +58,14 @@ type PodResourcesOptions struct {
 var supportedPodLevelResources = sets.New(v1.ResourceCPU, v1.ResourceMemory)
 
 func SupportedPodLevelResources() sets.Set[v1.ResourceName] {
-	return supportedPodLevelResources
+	return supportedPodLevelResources.Clone().Insert(v1.ResourceHugePagesPrefix)
 }
 
 // IsSupportedPodLevelResources checks if a given resource is supported by pod-level
 // resource management through the PodLevelResources feature. Returns true if
 // the resource is supported.
 func IsSupportedPodLevelResource(name v1.ResourceName) bool {
-	return supportedPodLevelResources.Has(name)
+	return supportedPodLevelResources.Has(name) || strings.HasPrefix(string(name), v1.ResourceHugePagesPrefix)
 }
 
 // IsPodLevelResourcesSet check if PodLevelResources pod-level resources are set.
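To see what the prefix check buys, here is an illustrative sketch (not part of the vendored diff; it assumes the updated helpers are on the import path). Hugepages resources now pass IsSupportedPodLevelResource because v1.ResourceHugePagesPrefix is the "hugepages-" name prefix:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/component-helpers/resource"
)

func main() {
	// In the base set since before this change.
	fmt.Println(resource.IsSupportedPodLevelResource(v1.ResourceCPU)) // true
	// Newly supported via the strings.HasPrefix check on "hugepages-".
	fmt.Println(resource.IsSupportedPodLevelResource(v1.ResourceName("hugepages-2Mi"))) // true
	// Still unsupported: neither CPU, memory, nor a hugepages-* resource.
	fmt.Println(resource.IsSupportedPodLevelResource(v1.ResourceStorage)) // false
}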
@@ -144,10 +146,13 @@ func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
 	reqs := reuseOrClearResourceList(opts.Reuse)
 	var containerStatuses map[string]*v1.ContainerStatus
 	if opts.UseStatusResources {
-		containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses))
+		containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)+len(pod.Status.InitContainerStatuses))
 		for i := range pod.Status.ContainerStatuses {
 			containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i]
 		}
+		for i := range pod.Status.InitContainerStatuses {
+			containerStatuses[pod.Status.InitContainerStatuses[i].Name] = &pod.Status.InitContainerStatuses[i]
+		}
 	}
 
 	for _, container := range pod.Spec.Containers {
@@ -155,11 +160,7 @@ func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
 		if opts.UseStatusResources {
 			cs, found := containerStatuses[container.Name]
 			if found && cs.Resources != nil {
-				if pod.Status.Resize == v1.PodResizeStatusInfeasible {
-					containerReqs = cs.Resources.Requests.DeepCopy()
-				} else {
-					containerReqs = max(container.Resources.Requests, cs.Resources.Requests)
-				}
+				containerReqs = determineContainerReqs(pod, &container, cs)
 			}
 		}
 
@@ -177,7 +178,6 @@ func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
 	restartableInitContainerReqs := v1.ResourceList{}
 	initContainerReqs := v1.ResourceList{}
 	// init containers define the minimum of any resource
-	// Note: In-place resize is not allowed for InitContainers, so no need to check for ResizeStatus value
 	//
 	// Let's say `InitContainerUse(i)` is the resource requirements when the i-th
 	// init container is initializing, then
@@ -186,6 +186,15 @@ func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
 	// See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#exposing-pod-resource-requirements for the detail.
 	for _, container := range pod.Spec.InitContainers {
 		containerReqs := container.Resources.Requests
+		if opts.UseStatusResources {
+			if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
+				cs, found := containerStatuses[container.Name]
+				if found && cs.Resources != nil {
+					containerReqs = determineContainerReqs(pod, &container, cs)
+				}
+			}
+		}
+
 		if len(opts.NonMissingContainerRequests) > 0 {
 			containerReqs = applyNonMissing(containerReqs, opts.NonMissingContainerRequests)
 		}
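For orientation, a minimal sketch of the exported entry point these hunks feed into (the pod literal is a made-up stand-in; real callers get pods from the API server or an informer). With UseStatusResources set, the aggregation now also consults sidecar init-container statuses:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	apiresource "k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/component-helpers/resource"
)

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name: "app",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: apiresource.MustParse("500m")},
				},
			}},
		},
	}
	reqs := resource.AggregateContainerRequests(pod, resource.PodResourcesOptions{
		UseStatusResources: true, // also consult container and init-container statuses
	})
	cpu := reqs[v1.ResourceCPU]
	fmt.Println(cpu.String()) // 500m
}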
@@ -214,6 +223,42 @@ func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
 	return reqs
 }
 
+// determineContainerReqs will return a copy of the container requests based on if resizing is feasible or not.
+func determineContainerReqs(pod *v1.Pod, container *v1.Container, cs *v1.ContainerStatus) v1.ResourceList {
+	if IsPodResizeInfeasible(pod) {
+		return max(cs.Resources.Requests, cs.AllocatedResources)
+	}
+	return max(container.Resources.Requests, cs.Resources.Requests, cs.AllocatedResources)
+}
+
+// determineContainerLimits will return a copy of the container limits based on if resizing is feasible or not.
+func determineContainerLimits(pod *v1.Pod, container *v1.Container, cs *v1.ContainerStatus) v1.ResourceList {
+	if IsPodResizeInfeasible(pod) {
+		return cs.Resources.Limits.DeepCopy()
+	}
+	return max(container.Resources.Limits, cs.Resources.Limits)
+}
+
+// IsPodResizeInfeasible returns true if the pod condition PodResizePending is set to infeasible.
+func IsPodResizeInfeasible(pod *v1.Pod) bool {
+	for _, condition := range pod.Status.Conditions {
+		if condition.Type == v1.PodResizePending {
+			return condition.Reason == v1.PodReasonInfeasible
+		}
+	}
+	return false
+}
+
+// IsPodResizeDeferred returns true if the pod condition PodResizePending is set to deferred.
+func IsPodResizeDeferred(pod *v1.Pod) bool {
+	for _, condition := range pod.Status.Conditions {
+		if condition.Type == v1.PodResizePending {
+			return condition.Reason == v1.PodReasonDeferred
+		}
+	}
+	return false
+}
+
 // applyNonMissing will return a copy of the given resource list with any missing values replaced by the nonMissing values
 func applyNonMissing(reqs v1.ResourceList, nonMissing v1.ResourceList) v1.ResourceList {
 	cp := v1.ResourceList{}
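A small sketch (not vendored code) of how the new condition-based helpers replace the old pod.Status.Resize field check: the resize outcome is now read from the PodResizePending condition and its reason.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/component-helpers/resource"
)

func main() {
	// A pod whose pending in-place resize was marked infeasible.
	pod := &v1.Pod{
		Status: v1.PodStatus{
			Conditions: []v1.PodCondition{{
				Type:   v1.PodResizePending,
				Status: v1.ConditionTrue,
				Reason: v1.PodReasonInfeasible,
			}},
		},
	}
	fmt.Println(resource.IsPodResizeInfeasible(pod)) // true
	fmt.Println(resource.IsPodResizeDeferred(pod))   // false
}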
@@ -267,10 +312,13 @@ func AggregateContainerLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
 	limits := reuseOrClearResourceList(opts.Reuse)
 	var containerStatuses map[string]*v1.ContainerStatus
 	if opts.UseStatusResources {
-		containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses))
+		containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)+len(pod.Status.InitContainerStatuses))
 		for i := range pod.Status.ContainerStatuses {
 			containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i]
 		}
+		for i := range pod.Status.InitContainerStatuses {
+			containerStatuses[pod.Status.InitContainerStatuses[i].Name] = &pod.Status.InitContainerStatuses[i]
+		}
 	}
 
 	for _, container := range pod.Spec.Containers {
@@ -278,11 +326,7 @@ func AggregateContainerLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
 		if opts.UseStatusResources {
 			cs, found := containerStatuses[container.Name]
 			if found && cs.Resources != nil {
-				if pod.Status.Resize == v1.PodResizeStatusInfeasible {
-					containerLimits = cs.Resources.Limits.DeepCopy()
-				} else {
-					containerLimits = max(container.Resources.Limits, cs.Resources.Limits)
-				}
+				containerLimits = determineContainerLimits(pod, &container, cs)
 			}
 		}
 
@@ -303,6 +347,15 @@ func AggregateContainerLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
 	// See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#exposing-pod-resource-requirements for the detail.
 	for _, container := range pod.Spec.InitContainers {
 		containerLimits := container.Resources.Limits
+		if opts.UseStatusResources {
+			if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
+				cs, found := containerStatuses[container.Name]
+				if found && cs.Resources != nil {
+					containerLimits = determineContainerLimits(pod, &container, cs)
+				}
+			}
+		}
+
 		// Is the init container marked as a restartable init container?
 		if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
 			addResourceList(limits, containerLimits)
@@ -348,23 +401,12 @@ func maxResourceList(list, newList v1.ResourceList) {
 	}
 }
 
-// max returns the result of max(a, b) for each named resource and is only used if we can't
+// max returns the result of max(a, b...) for each named resource and is only used if we can't
 // accumulate into an existing resource list
-func max(a v1.ResourceList, b v1.ResourceList) v1.ResourceList {
-	result := v1.ResourceList{}
-	for key, value := range a {
-		if other, found := b[key]; found {
-			if value.Cmp(other) <= 0 {
-				result[key] = other.DeepCopy()
-				continue
-			}
-		}
-		result[key] = value.DeepCopy()
-	}
-	for key, value := range b {
-		if _, found := result[key]; !found {
-			result[key] = value.DeepCopy()
-		}
-	}
+func max(a v1.ResourceList, b ...v1.ResourceList) v1.ResourceList {
+	result := a.DeepCopy()
+	for _, other := range b {
+		maxResourceList(result, other)
+	}
 	return result
 }
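Since max is unexported, here is a rough standalone restatement of its new variadic semantics (my own sketch, not the vendored code): per resource name, keep the largest quantity seen across all lists. determineContainerReqs relies on this for its three-way max over spec requests, status requests, and allocated resources.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// maxLists mirrors the variadic max above: fold a per-key maximum over
// any number of resource lists, starting from a copy of the first.
func maxLists(a v1.ResourceList, b ...v1.ResourceList) v1.ResourceList {
	result := a.DeepCopy()
	for _, other := range b {
		for key, value := range other {
			if cur, found := result[key]; !found || cur.Cmp(value) < 0 {
				result[key] = value.DeepCopy()
			}
		}
	}
	return result
}

func main() {
	spec := v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m")}
	status := v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m"), v1.ResourceMemory: resource.MustParse("1Gi")}
	allocated := v1.ResourceList{v1.ResourceMemory: resource.MustParse("2Gi")}
	out := maxLists(spec, status, allocated)
	cpu, mem := out[v1.ResourceCPU], out[v1.ResourceMemory]
	fmt.Println(cpu.String(), mem.String()) // 500m 2Gi
}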
e2e/vendor/k8s.io/component-helpers/scheduling/corev1/doc.go (generated, vendored): 2 changed lines
@@ -20,4 +20,4 @@ limitations under the License.
 // - Be used by a core component and another kubernetes project (cluster-autoscaler, descheduler)
 //
 // And be a scheduling feature.
-package corev1 // import "k8s.io/component-helpers/scheduling/corev1"
+package corev1