Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 18:43:34 +00:00
rebase: update K8s packages to v0.32.1
Update K8s packages in go.mod to v0.32.1

Signed-off-by: Praveen M <m.praveen@ibm.com>
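As a rough sketch of what this kind of bump looks like in go.mod (the module selection below is illustrative; only the target version v0.32.1 comes from the commit message):

// Illustrative go.mod excerpt; the exact set of modules touched by this commit is not shown here.
require (
    k8s.io/api v0.32.1
    k8s.io/apimachinery v0.32.1
    k8s.io/client-go v0.32.1
    k8s.io/kubectl v0.32.1
)

The vendored kubectl sources change accordingly, as shown in the diffs below.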
vendor/k8s.io/kubectl/pkg/scale/scale.go (generated, vendored): 25 changed lines
@@ -18,12 +18,13 @@ package scale

 import (
     "context"
+    "errors"
     "fmt"
     "strconv"
     "time"

     autoscalingv1 "k8s.io/api/autoscaling/v1"
-    "k8s.io/apimachinery/pkg/api/errors"
+    apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/apimachinery/pkg/types"
@@ -80,14 +81,14 @@ func NewRetryParams(interval, timeout time.Duration) *RetryParams {
 }

 // ScaleCondition is a closure around Scale that facilitates retries via util.wait
-func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name string, count uint, updatedResourceVersion *string, gvr schema.GroupVersionResource, dryRun bool) wait.ConditionFunc {
-    return func() (bool, error) {
+func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name string, count uint, updatedResourceVersion *string, gvr schema.GroupVersionResource, dryRun bool) wait.ConditionWithContextFunc {
+    return func(context.Context) (bool, error) {
         rv, err := r.ScaleSimple(namespace, name, precondition, count, gvr, dryRun)
         if updatedResourceVersion != nil {
             *updatedResourceVersion = rv
         }
         // Retry only on update conflicts.
-        if errors.IsConflict(err) {
+        if apierrors.IsConflict(err) {
             return false, nil
         }
         if err != nil {
@@ -171,7 +172,7 @@ func (s *genericScaler) Scale(namespace, resourceName string, newSize uint, prec
         retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
     }
     cond := ScaleCondition(s, preconditions, namespace, resourceName, newSize, nil, gvr, dryRun)
-    if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
+    if err := wait.PollUntilContextTimeout(context.Background(), retry.Interval, retry.Timeout, true, cond); err != nil {
         return err
     }
     if waitForReplicas != nil {
@@ -182,9 +183,9 @@ func (s *genericScaler) Scale(namespace, resourceName string, newSize uint, prec

 // scaleHasDesiredReplicas returns a condition that will be true if and only if the desired replica
 // count for a scale (Spec) equals its updated replicas count (Status)
-func scaleHasDesiredReplicas(sClient scaleclient.ScalesGetter, gr schema.GroupResource, resourceName string, namespace string, desiredReplicas int32) wait.ConditionFunc {
-    return func() (bool, error) {
-        actualScale, err := sClient.Scales(namespace).Get(context.TODO(), gr, resourceName, metav1.GetOptions{})
+func scaleHasDesiredReplicas(sClient scaleclient.ScalesGetter, gr schema.GroupResource, resourceName string, namespace string, desiredReplicas int32) wait.ConditionWithContextFunc {
+    return func(ctx context.Context) (bool, error) {
+        actualScale, err := sClient.Scales(namespace).Get(ctx, gr, resourceName, metav1.GetOptions{})
         if err != nil {
             return false, err
         }
@@ -203,11 +204,9 @@ func WaitForScaleHasDesiredReplicas(sClient scaleclient.ScalesGetter, gr schema.
     if waitForReplicas == nil {
         return fmt.Errorf("waitForReplicas parameter cannot be nil")
     }
-    err := wait.PollImmediate(
-        waitForReplicas.Interval,
-        waitForReplicas.Timeout,
-        scaleHasDesiredReplicas(sClient, gr, resourceName, namespace, int32(newSize)))
-    if err == wait.ErrWaitTimeout {
+    err := wait.PollUntilContextTimeout(context.Background(), waitForReplicas.Interval, waitForReplicas.Timeout, true, scaleHasDesiredReplicas(sClient, gr, resourceName, namespace, int32(newSize)))
+
+    if errors.Is(err, context.DeadlineExceeded) {
         return fmt.Errorf("timed out waiting for %q to be synced", resourceName)
     }
     return err
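Note on the scale.go changes above: the vendored kubectl helpers move from the deprecated wait.PollImmediate / wait.ConditionFunc pair to the context-aware wait.PollUntilContextTimeout / wait.ConditionWithContextFunc pair, and a timeout is now detected with errors.Is(err, context.DeadlineExceeded) instead of comparing against wait.ErrWaitTimeout. A minimal, self-contained sketch of the new polling pattern; checkReady is a hypothetical condition, standing in for ScaleCondition and scaleHasDesiredReplicas:

package main

import (
    "context"
    "errors"
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

// checkReady returns a context-aware condition that reports success
// after the third poll. Real code would inspect an API object here.
func checkReady(attempts *int) wait.ConditionWithContextFunc {
    return func(ctx context.Context) (bool, error) {
        *attempts++
        return *attempts >= 3, nil
    }
}

func main() {
    attempts := 0
    // immediate=true mirrors the old PollImmediate behaviour: the condition
    // is evaluated once before the first interval elapses.
    err := wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 5*time.Second, true, checkReady(&attempts))
    if errors.Is(err, context.DeadlineExceeded) {
        fmt.Println("timed out waiting for readiness")
        return
    }
    fmt.Println("ready after", attempts, "attempts, err:", err)
}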
vendor/k8s.io/kubectl/pkg/util/podutils/podutils.go (generated, vendored): 46 changed lines
@@ -21,6 +21,7 @@ import (

     corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/sets"
 )

 // IsPodAvailable returns true if a pod is available; false otherwise.
@@ -113,8 +114,8 @@ func (s ByLogging) Less(i, j int) bool {
         return afterOrZero(podReadyTime(s[j]), podReadyTime(s[i]))
     }
     // 5. Pods with containers with higher restart counts < lower restart counts
-    if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) {
-        return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j])
+    if res := compareMaxContainerRestarts(s[i], s[j]); res != nil {
+        return *res
     }
     // 6. older pods < newer pods < empty timestamp pods
     if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) {
@@ -161,8 +162,8 @@ func (s ActivePods) Less(i, j int) bool {
         return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j]))
     }
     // 7. Pods with containers with higher restart counts < lower restart counts
-    if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) {
-        return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j])
+    if res := compareMaxContainerRestarts(s[i], s[j]); res != nil {
+        return *res
     }
     // 8. Empty creation time pods < newer pods < older pods
     if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) {
@@ -190,12 +191,41 @@ func podReadyTime(pod *corev1.Pod) *metav1.Time {
     return &metav1.Time{}
 }

-func maxContainerRestarts(pod *corev1.Pod) int {
-    maxRestarts := 0
+func maxContainerRestarts(pod *corev1.Pod) (regularRestarts, sidecarRestarts int) {
     for _, c := range pod.Status.ContainerStatuses {
-        maxRestarts = max(maxRestarts, int(c.RestartCount))
+        regularRestarts = max(regularRestarts, int(c.RestartCount))
     }
-    return maxRestarts
+    names := sets.New[string]()
+    for _, c := range pod.Spec.InitContainers {
+        if c.RestartPolicy != nil && *c.RestartPolicy == corev1.ContainerRestartPolicyAlways {
+            names.Insert(c.Name)
+        }
+    }
+    for _, c := range pod.Status.InitContainerStatuses {
+        if names.Has(c.Name) {
+            sidecarRestarts = max(sidecarRestarts, int(c.RestartCount))
+        }
+    }
+    return
+}
+
+// We use *bool here to determine equality:
+// true: pi has a higher container restart count.
+// false: pj has a higher container restart count.
+// nil: Both have the same container restart count.
+func compareMaxContainerRestarts(pi *corev1.Pod, pj *corev1.Pod) *bool {
+    regularRestartsI, sidecarRestartsI := maxContainerRestarts(pi)
+    regularRestartsJ, sidecarRestartsJ := maxContainerRestarts(pj)
+    if regularRestartsI != regularRestartsJ {
+        res := regularRestartsI > regularRestartsJ
+        return &res
+    }
+    // If pods have the same restart count, an attempt is made to compare the restart counts of sidecar containers.
+    if sidecarRestartsI != sidecarRestartsJ {
+        res := sidecarRestartsI > sidecarRestartsJ
+        return &res
+    }
+    return nil
 }

 // ContainerType and VisitContainers are taken from
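Note on the podutils.go changes above: maxContainerRestarts now reports two counts, restarts of regular containers and restarts of sidecar containers (init containers declared with restartPolicy: Always), and the ByLogging and ActivePods orderings use the sidecar count only as a tie-breaker via compareMaxContainerRestarts. A small standalone sketch of how such a sidecar is identified; the pod object below is hypothetical and not taken from this repository:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
)

func main() {
    always := corev1.ContainerRestartPolicyAlways
    // Hypothetical pod spec: one plain init container and one sidecar-style
    // init container that keeps running alongside the main containers.
    pod := corev1.Pod{
        Spec: corev1.PodSpec{
            InitContainers: []corev1.Container{
                {Name: "setup"},                               // plain init container
                {Name: "log-shipper", RestartPolicy: &always}, // sidecar
            },
        },
    }
    for _, c := range pod.Spec.InitContainers {
        // Same test the vendored maxContainerRestarts uses to decide whose
        // restart counts feed the sidecar tally.
        isSidecar := c.RestartPolicy != nil && *c.RestartPolicy == corev1.ContainerRestartPolicyAlways
        fmt.Printf("%s: sidecar=%v\n", c.Name, isSidecar)
    }
}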