rebase: update kubernetes to v1.20.0

Updated kubernetes packages to the latest release.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
commit 83559144b1 (parent 4abe128bd8), committed by mergify[bot]
vendor/k8s.io/kubectl/pkg/scale/scale.go | 43 (generated, vendored)
@@ -27,6 +27,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/json"
 	"k8s.io/apimachinery/pkg/util/wait"
 	scaleclient "k8s.io/client-go/scale"
 )
@@ -37,10 +38,10 @@ type Scaler interface {
 	// retries in the event of resource version mismatch (if retry is not nil),
 	// and optionally waits until the status of the resource matches newSize (if wait is not nil)
 	// TODO: Make the implementation of this watch-based (#56075) once #31345 is fixed.
-	Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, wait *RetryParams, gvr schema.GroupVersionResource) error
+	Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, wait *RetryParams, gvr schema.GroupVersionResource, dryRun bool) error
 	// ScaleSimple does a simple one-shot attempt at scaling - not useful on its own, but
 	// a necessary building block for Scale
-	ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint, gvr schema.GroupVersionResource) (updatedResourceVersion string, err error)
+	ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint, gvr schema.GroupVersionResource, dryRun bool) (updatedResourceVersion string, err error)
 }
 
 // NewScaler get a scaler for a given resource
@@ -79,9 +80,9 @@ func NewRetryParams(interval, timeout time.Duration) *RetryParams {
 }
 
 // ScaleCondition is a closure around Scale that facilitates retries via util.wait
-func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name string, count uint, updatedResourceVersion *string, gvr schema.GroupVersionResource) wait.ConditionFunc {
+func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name string, count uint, updatedResourceVersion *string, gvr schema.GroupVersionResource, dryRun bool) wait.ConditionFunc {
 	return func() (bool, error) {
-		rv, err := r.ScaleSimple(namespace, name, precondition, count, gvr)
+		rv, err := r.ScaleSimple(namespace, name, precondition, count, gvr, dryRun)
 		if updatedResourceVersion != nil {
 			*updatedResourceVersion = rv
 		}
@@ -115,7 +116,7 @@ type genericScaler struct {
 var _ Scaler = &genericScaler{}
 
 // ScaleSimple updates a scale of a given resource. It returns the resourceVersion of the scale if the update was successful.
-func (s *genericScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint, gvr schema.GroupVersionResource) (updatedResourceVersion string, err error) {
+func (s *genericScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint, gvr schema.GroupVersionResource, dryRun bool) (updatedResourceVersion string, err error) {
 	if preconditions != nil {
 		scale, err := s.scaleNamespacer.Scales(namespace).Get(context.TODO(), gvr.GroupResource(), name, metav1.GetOptions{})
 		if err != nil {
@@ -125,15 +126,37 @@ func (s *genericScaler) ScaleSimple(namespace, name string, preconditions *Scale
 			return "", err
 		}
 		scale.Spec.Replicas = int32(newSize)
-		updatedScale, err := s.scaleNamespacer.Scales(namespace).Update(context.TODO(), gvr.GroupResource(), scale, metav1.UpdateOptions{})
+		updateOptions := metav1.UpdateOptions{}
+		if dryRun {
+			updateOptions.DryRun = []string{metav1.DryRunAll}
+		}
+		updatedScale, err := s.scaleNamespacer.Scales(namespace).Update(context.TODO(), gvr.GroupResource(), scale, updateOptions)
 		if err != nil {
 			return "", err
 		}
 		return updatedScale.ResourceVersion, nil
 	}
 
-	patch := []byte(fmt.Sprintf(`{"spec":{"replicas":%d}}`, newSize))
-	updatedScale, err := s.scaleNamespacer.Scales(namespace).Patch(context.TODO(), gvr, name, types.MergePatchType, patch, metav1.PatchOptions{})
+	// objectForReplicas is used for encoding scale patch
+	type objectForReplicas struct {
+		Replicas uint `json:"replicas"`
+	}
+	// objectForSpec is used for encoding scale patch
+	type objectForSpec struct {
+		Spec objectForReplicas `json:"spec"`
+	}
+	spec := objectForSpec{
+		Spec: objectForReplicas{Replicas: newSize},
+	}
+	patch, err := json.Marshal(&spec)
+	if err != nil {
+		return "", err
+	}
+	patchOptions := metav1.PatchOptions{}
+	if dryRun {
+		patchOptions.DryRun = []string{metav1.DryRunAll}
+	}
+	updatedScale, err := s.scaleNamespacer.Scales(namespace).Patch(context.TODO(), gvr, name, types.MergePatchType, patch, patchOptions)
 	if err != nil {
 		return "", err
 	}
@@ -142,12 +165,12 @@ func (s *genericScaler) ScaleSimple(namespace, name string, preconditions *Scale
 
 // Scale updates a scale of a given resource to a new size, with optional precondition check (if preconditions is not nil),
 // optional retries (if retry is not nil), and then optionally waits for the status to reach desired count.
-func (s *genericScaler) Scale(namespace, resourceName string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams, gvr schema.GroupVersionResource) error {
+func (s *genericScaler) Scale(namespace, resourceName string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams, gvr schema.GroupVersionResource, dryRun bool) error {
 	if retry == nil {
 		// make it try only once, immediately
 		retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
 	}
-	cond := ScaleCondition(s, preconditions, namespace, resourceName, newSize, nil, gvr)
+	cond := ScaleCondition(s, preconditions, namespace, resourceName, newSize, nil, gvr, dryRun)
 	if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
 		return err
 	}
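Note on the scale.go change above: every public entry point (Scale, ScaleSimple, ScaleCondition) now takes a trailing dryRun bool, and when it is true the update or patch is sent with metav1.DryRunAll so the API server validates the request without persisting it; the merge patch is also built with json.Marshal instead of fmt.Sprintf, producing the same {"spec":{"replicas":N}} body. A minimal caller sketch against the new signature follows; the scalesGetter wiring and all names here (scaleDeploymentDryRun, the namespace, the deployment name) are illustrative assumptions, not part of this commit.

// Sketch only: assumes a pre-built scaleclient.ScalesGetter (for example one
// created from a rest.Config with scale.NewForConfig in client-go).
package example

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/runtime/schema"
	scaleclient "k8s.io/client-go/scale"
	kubectlscale "k8s.io/kubectl/pkg/scale"
)

// scaleDeploymentDryRun asks the API server to validate a scale-up to five
// replicas without persisting it, using the new dryRun parameter.
func scaleDeploymentDryRun(scalesGetter scaleclient.ScalesGetter) error {
	scaler := kubectlscale.NewScaler(scalesGetter)
	gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
	retry := kubectlscale.NewRetryParams(1*time.Second, 30*time.Second)

	// dryRun=true: Scale sends DryRun=[all] on the underlying update/patch.
	if err := scaler.Scale("default", "example-deployment", 5, nil, retry, nil, gvr, true); err != nil {
		return fmt.Errorf("dry-run scale failed: %w", err)
	}
	fmt.Println("dry-run scale accepted")
	return nil
}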
vendor/k8s.io/kubectl/pkg/util/podutils/podutils.go | 188 (generated, vendored, new file)
@@ -0,0 +1,188 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package podutils
+
+import (
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/utils/integer"
+)
+
+// IsPodAvailable returns true if a pod is available; false otherwise.
+// Precondition for an available pod is that it must be ready. On top
+// of that, there are two cases when a pod can be considered available:
+// 1. minReadySeconds == 0, or
+// 2. LastTransitionTime (is set) + minReadySeconds < current time
+func IsPodAvailable(pod *corev1.Pod, minReadySeconds int32, now metav1.Time) bool {
+	if !IsPodReady(pod) {
+		return false
+	}
+
+	c := getPodReadyCondition(pod.Status)
+	minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second
+	if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) {
+		return true
+	}
+	return false
+}
+
+// IsPodReady returns true if a pod is ready; false otherwise.
+func IsPodReady(pod *corev1.Pod) bool {
+	return isPodReadyConditionTrue(pod.Status)
+}
+
+// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise.
+func isPodReadyConditionTrue(status corev1.PodStatus) bool {
+	condition := getPodReadyCondition(status)
+	return condition != nil && condition.Status == corev1.ConditionTrue
+}
+
+// GetPodReadyCondition extracts the pod ready condition from the given status and returns that.
+// Returns nil if the condition is not present.
+func getPodReadyCondition(status corev1.PodStatus) *corev1.PodCondition {
+	_, condition := getPodCondition(&status, corev1.PodReady)
+	return condition
+}
+
+// GetPodCondition extracts the provided condition from the given status and returns that.
+// Returns nil and -1 if the condition is not present, and the index of the located condition.
+func getPodCondition(status *corev1.PodStatus, conditionType corev1.PodConditionType) (int, *corev1.PodCondition) {
+	if status == nil {
+		return -1, nil
+	}
+	return getPodConditionFromList(status.Conditions, conditionType)
+}
+
+// GetPodConditionFromList extracts the provided condition from the given list of condition and
+// returns the index of the condition and the condition. Returns -1 and nil if the condition is not present.
+func getPodConditionFromList(conditions []corev1.PodCondition, conditionType corev1.PodConditionType) (int, *corev1.PodCondition) {
+	if conditions == nil {
+		return -1, nil
+	}
+	for i := range conditions {
+		if conditions[i].Type == conditionType {
+			return i, &conditions[i]
+		}
+	}
+	return -1, nil
+}
+
+// ByLogging allows custom sorting of pods so the best one can be picked for getting its logs.
+type ByLogging []*corev1.Pod
+
+func (s ByLogging) Len() int      { return len(s) }
+func (s ByLogging) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func (s ByLogging) Less(i, j int) bool {
+	// 1. assigned < unassigned
+	if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) {
+		return len(s[i].Spec.NodeName) > 0
+	}
+	// 2. PodRunning < PodUnknown < PodPending
+	m := map[corev1.PodPhase]int{corev1.PodRunning: 0, corev1.PodUnknown: 1, corev1.PodPending: 2}
+	if m[s[i].Status.Phase] != m[s[j].Status.Phase] {
+		return m[s[i].Status.Phase] < m[s[j].Status.Phase]
+	}
+	// 3. ready < not ready
+	if IsPodReady(s[i]) != IsPodReady(s[j]) {
+		return IsPodReady(s[i])
+	}
+	// TODO: take availability into account when we push minReadySeconds information from deployment into pods,
+	//       see https://github.com/kubernetes/kubernetes/issues/22065
+	// 4. Been ready for more time < less time < empty time
+	if IsPodReady(s[i]) && IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) {
+		return afterOrZero(podReadyTime(s[j]), podReadyTime(s[i]))
+	}
+	// 5. Pods with containers with higher restart counts < lower restart counts
+	if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) {
+		return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j])
+	}
+	// 6. older pods < newer pods < empty timestamp pods
+	if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) {
+		return afterOrZero(&s[j].CreationTimestamp, &s[i].CreationTimestamp)
+	}
+	return false
+}
+
+// ActivePods type allows custom sorting of pods so a controller can pick the best ones to delete.
+type ActivePods []*corev1.Pod
+
+func (s ActivePods) Len() int      { return len(s) }
+func (s ActivePods) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func (s ActivePods) Less(i, j int) bool {
+	// 1. Unassigned < assigned
+	// If only one of the pods is unassigned, the unassigned one is smaller
+	if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) {
+		return len(s[i].Spec.NodeName) == 0
+	}
+	// 2. PodPending < PodUnknown < PodRunning
+	m := map[corev1.PodPhase]int{corev1.PodPending: 0, corev1.PodUnknown: 1, corev1.PodRunning: 2}
+	if m[s[i].Status.Phase] != m[s[j].Status.Phase] {
+		return m[s[i].Status.Phase] < m[s[j].Status.Phase]
+	}
+	// 3. Not ready < ready
+	// If only one of the pods is not ready, the not ready one is smaller
+	if IsPodReady(s[i]) != IsPodReady(s[j]) {
+		return !IsPodReady(s[i])
+	}
+	// TODO: take availability into account when we push minReadySeconds information from deployment into pods,
+	//       see https://github.com/kubernetes/kubernetes/issues/22065
+	// 4. Been ready for empty time < less time < more time
+	// If both pods are ready, the latest ready one is smaller
+	if IsPodReady(s[i]) && IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) {
+		return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j]))
+	}
+	// 5. Pods with containers with higher restart counts < lower restart counts
+	if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) {
+		return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j])
+	}
+	// 6. Empty creation time pods < newer pods < older pods
+	if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) {
+		return afterOrZero(&s[i].CreationTimestamp, &s[j].CreationTimestamp)
+	}
+	return false
+}
+
+// afterOrZero checks if time t1 is after time t2; if one of them
+// is zero, the zero time is seen as after non-zero time.
+func afterOrZero(t1, t2 *metav1.Time) bool {
+	if t1.Time.IsZero() || t2.Time.IsZero() {
+		return t1.Time.IsZero()
+	}
+	return t1.After(t2.Time)
+}
+
+func podReadyTime(pod *corev1.Pod) *metav1.Time {
+	for _, c := range pod.Status.Conditions {
+		// we only care about pod ready conditions
+		if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
+			return &c.LastTransitionTime
+		}
+	}
+	return &metav1.Time{}
+}
+
+func maxContainerRestarts(pod *corev1.Pod) int {
+	maxRestarts := 0
+	for _, c := range pod.Status.ContainerStatuses {
+		maxRestarts = integer.IntMax(maxRestarts, int(c.RestartCount))
+	}
+	return maxRestarts
+}
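podutils.go above is a new vendored file; its exported surface is the IsPodAvailable/IsPodReady predicates plus the ByLogging and ActivePods sort orders. The sketch below shows how a consumer might use them; the helper names and pod data are assumptions for illustration, not part of the vendored package.

// Sketch only: assumes the caller already has a []*corev1.Pod in hand
// (for example pointers to the items of a listed PodList).
package example

import (
	"sort"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubectl/pkg/util/podutils"
)

// pickPodForLogs sorts candidates with the ByLogging order (assigned, running,
// longest-ready pods first) and returns the best one, or nil if there are none.
func pickPodForLogs(pods []*corev1.Pod) *corev1.Pod {
	if len(pods) == 0 {
		return nil
	}
	sort.Sort(podutils.ByLogging(pods))
	return pods[0]
}

// countAvailable counts pods that are Ready and have stayed Ready for at
// least minReadySeconds, mirroring the IsPodAvailable contract above.
func countAvailable(pods []*corev1.Pod, minReadySeconds int32) int {
	now := metav1.NewTime(time.Now())
	available := 0
	for _, p := range pods {
		if podutils.IsPodAvailable(p, minReadySeconds, now) {
			available++
		}
	}
	return available
}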