Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 10:33:35 +00:00
rebase: bump k8s.io/kubernetes from 1.26.2 to 1.27.2
Bumps [k8s.io/kubernetes](https://github.com/kubernetes/kubernetes) from 1.26.2 to 1.27.2.
- [Release notes](https://github.com/kubernetes/kubernetes/releases)
- [Commits](https://github.com/kubernetes/kubernetes/compare/v1.26.2...v1.27.2)

---
updated-dependencies:
- dependency-name: k8s.io/kubernetes
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
committed by mergify[bot]
parent 0e79135419
commit 07b05616a0
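For orientation, a minor-version bump like this surfaces in go.mod roughly as below (a sketch, not ceph-csi's full go.mod; projects that import k8s.io/kubernetes directly also pin its staging repos such as k8s.io/api and k8s.io/apimachinery with replace directives, omitted here):

	require (
		k8s.io/kubernetes v1.27.2 // was v1.26.2
	)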
vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go (generated, vendored): 15 lines changed
@@ -143,9 +143,8 @@ type PodControllerRefManager struct {
 // If CanAdopt() returns a non-nil error, all adoptions will fail.
 //
 // NOTE: Once CanAdopt() is called, it will not be called again by the same
-//
-//	PodControllerRefManager instance. Create a new instance if it makes
-//	sense to check CanAdopt() again (e.g. in a different sync pass).
+// PodControllerRefManager instance. Create a new instance if it makes
+// sense to check CanAdopt() again (e.g. in a different sync pass).
 func NewPodControllerRefManager(
 	podControl PodControlInterface,
 	controller metav1.Object,
@@ -284,9 +283,8 @@ type ReplicaSetControllerRefManager struct {
 // If CanAdopt() returns a non-nil error, all adoptions will fail.
 //
 // NOTE: Once CanAdopt() is called, it will not be called again by the same
-//
-//	ReplicaSetControllerRefManager instance. Create a new instance if it
-//	makes sense to check CanAdopt() again (e.g. in a different sync pass).
+// ReplicaSetControllerRefManager instance. Create a new instance if it
+// makes sense to check CanAdopt() again (e.g. in a different sync pass).
 func NewReplicaSetControllerRefManager(
 	rsControl RSControlInterface,
 	controller metav1.Object,
@@ -423,9 +421,8 @@ type ControllerRevisionControllerRefManager struct {
 // If canAdopt() returns a non-nil error, all adoptions will fail.
 //
 // NOTE: Once canAdopt() is called, it will not be called again by the same
-//
-//	ControllerRevisionControllerRefManager instance. Create a new instance if it
-//	makes sense to check canAdopt() again (e.g. in a different sync pass).
+// ControllerRevisionControllerRefManager instance. Create a new instance if it
+// makes sense to check canAdopt() again (e.g. in a different sync pass).
 func NewControllerRevisionControllerRefManager(
 	crControl ControllerRevisionControlInterface,
 	controller metav1.Object,
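The NOTE being reflowed in these hunks documents real behavior: each ref manager runs its CanAdopt()-style check at most once and caches the result. A minimal self-contained sketch of that once-only semantics (illustrative only, not the vendored implementation):

	package main

	import (
		"context"
		"fmt"
		"sync"
	)

	// onceAdopter memoizes a CanAdopt-style check: the wrapped func runs at
	// most once per instance, so a fresh instance is needed to re-check in a
	// later sync pass, exactly as the NOTE above warns.
	type onceAdopter struct {
		check func(ctx context.Context) error
		once  sync.Once
		err   error
	}

	func (a *onceAdopter) CanAdopt(ctx context.Context) error {
		a.once.Do(func() { a.err = a.check(ctx) })
		return a.err
	}

	func main() {
		calls := 0
		a := &onceAdopter{check: func(context.Context) error {
			calls++
			return nil // a non-nil error here would make all adoptions fail
		}}
		_ = a.CanAdopt(context.Background())
		_ = a.CanAdopt(context.Background()) // memoized: check not re-run
		fmt.Println("check ran", calls, "time(s)") // prints 1
	}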
vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go (generated, vendored): 18 lines changed
@@ -1039,12 +1039,12 @@ func AddOrUpdateTaintOnNode(ctx context.Context, c clientset.Interface, nodeName
 	var oldNode *v1.Node
 	// First we try getting node from the API server cache, as it's cheaper. If it fails
 	// we get it from etcd to be sure to have fresh data.
+	option := metav1.GetOptions{}
 	if firstTry {
-		oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{ResourceVersion: "0"})
+		option.ResourceVersion = "0"
 		firstTry = false
-	} else {
-		oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
 	}
+	oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, option)
 	if err != nil {
 		return err
 	}
@@ -1096,12 +1096,12 @@ func RemoveTaintOffNode(ctx context.Context, c clientset.Interface, nodeName str
 	var oldNode *v1.Node
 	// First we try getting node from the API server cache, as it's cheaper. If it fails
 	// we get it from etcd to be sure to have fresh data.
+	option := metav1.GetOptions{}
 	if firstTry {
-		oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{ResourceVersion: "0"})
+		option.ResourceVersion = "0"
 		firstTry = false
-	} else {
-		oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
 	}
+	oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, option)
 	if err != nil {
 		return err
 	}
@@ -1178,12 +1178,12 @@ func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la
 	var node *v1.Node
 	// First we try getting node from the API server cache, as it's cheaper. If it fails
 	// we get it from etcd to be sure to have fresh data.
+	option := metav1.GetOptions{}
 	if firstTry {
-		node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"})
+		option.ResourceVersion = "0"
 		firstTry = false
-	} else {
-		node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
 	}
+	node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, option)
 	if err != nil {
 		return err
 	}
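All three hunks in this file are the same refactor: build a single metav1.GetOptions value up front instead of duplicating the Get call in both branches. The underlying pattern (ResourceVersion "0" lets the apiserver answer from its watch cache; an empty GetOptions forces a fresh quorum read on retry) can be sketched as a standalone helper (hypothetical name, assuming a configured client-go clientset):

	package nodeutil

	import (
		"context"

		v1 "k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
	)

	// getNode mirrors the refactored shape above: one GetOptions value,
	// tweaked for the cheap first attempt, then reused for a single Get call.
	func getNode(ctx context.Context, c kubernetes.Interface, name string, firstTry bool) (*v1.Node, error) {
		option := metav1.GetOptions{}
		if firstTry {
			// "0" means any cached version is acceptable, so the apiserver
			// can reply from its watch cache instead of reading etcd.
			option.ResourceVersion = "0"
		}
		return c.CoreV1().Nodes().Get(ctx, name, option)
	}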
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go (generated, vendored): 43 lines changed
@@ -189,7 +189,7 @@ func MaxRevision(allRSs []*apps.ReplicaSet) int64 {
 	for _, rs := range allRSs {
 		if v, err := Revision(rs); err != nil {
 			// Skip the replica sets when it failed to parse their revision information
-			klog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
+			klog.V(4).Info("Couldn't parse revision for replica set, deployment controller will skip it when reconciling revisions", "replicaSet", klog.KObj(rs), "err", err)
 		} else if v > max {
 			max = v
 		}
@@ -203,7 +203,7 @@ func LastRevision(allRSs []*apps.ReplicaSet) int64 {
 	for _, rs := range allRSs {
 		if v, err := Revision(rs); err != nil {
 			// Skip the replica sets when it failed to parse their revision information
-			klog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
+			klog.V(4).Info("Couldn't parse revision for replica set, deployment controller will skip it when reconciling revisions", "replicaSet", klog.KObj(rs), "err", err)
 		} else if v >= max {
 			secMax = max
 			max = v
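Both hunks above swap a printf-style Infof for a structured call in which every value travels as a key/value pair, so log pipelines can filter on replicaSet or err instead of parsing a formatted sentence. A minimal sketch of the two styles side by side (standalone example, not the vendored code):

	package main

	import (
		"errors"
		"flag"

		"k8s.io/klog/v2"
	)

	func main() {
		klog.InitFlags(nil)
		flag.Set("v", "4") // enable V(4) output for the demo
		flag.Parse()
		defer klog.Flush()

		err := errors.New("unparseable revision annotation")

		// Old style: positional format verbs baked into one string.
		klog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %q.", err, "default/my-rs")

		// New style: structured key/value pairs (InfoS is the structured form).
		klog.V(4).InfoS("Couldn't parse revision for replica set",
			"replicaSet", "default/my-rs", "err", err)
	}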
@@ -229,7 +229,8 @@ func Revision(obj runtime.Object) (int64, error) {
 
 // SetNewReplicaSetAnnotations sets new replica set's annotations appropriately by updating its revision and
 // copying required deployment annotations to it; it returns true if replica set's annotation is changed.
-func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.ReplicaSet, newRevision string, exists bool, revHistoryLimitInChars int) bool {
+func SetNewReplicaSetAnnotations(ctx context.Context, deployment *apps.Deployment, newRS *apps.ReplicaSet, newRevision string, exists bool, revHistoryLimitInChars int) bool {
+	logger := klog.FromContext(ctx)
 	// First, copy deployment's annotations (except for apply and revision annotations)
 	annotationChanged := copyDeploymentAnnotationsToReplicaSet(deployment, newRS)
 	// Then, update replica set's revision annotation
@@ -244,7 +245,7 @@ func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.Replic
 	oldRevisionInt, err := strconv.ParseInt(oldRevision, 10, 64)
 	if err != nil {
 		if oldRevision != "" {
-			klog.Warningf("Updating replica set revision OldRevision not int %s", err)
+			logger.Info("Updating replica set revision OldRevision not int", "err", err)
 			return false
 		}
 		//If the RS annotation is empty then initialise it to 0
@@ -252,13 +253,13 @@ func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.Replic
 	}
 	newRevisionInt, err := strconv.ParseInt(newRevision, 10, 64)
 	if err != nil {
-		klog.Warningf("Updating replica set revision NewRevision not int %s", err)
+		logger.Info("Updating replica set revision NewRevision not int", "err", err)
 		return false
 	}
 	if oldRevisionInt < newRevisionInt {
 		newRS.Annotations[RevisionAnnotation] = newRevision
 		annotationChanged = true
-		klog.V(4).Infof("Updating replica set %q revision to %s", newRS.Name, newRevision)
+		logger.V(4).Info("Updating replica set revision", "replicaSet", klog.KObj(newRS), "newRevision", newRevision)
 	}
 	// If a revision annotation already existed and this replica set was updated with a new revision
 	// then that means we are rolling back to this replica set. We need to preserve the old revisions
@@ -280,7 +281,7 @@ func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.Replic
 				oldRevisions = append(oldRevisions[start:], oldRevision)
 				newRS.Annotations[RevisionHistoryAnnotation] = strings.Join(oldRevisions, ",")
 			} else {
-				klog.Warningf("Not appending revision due to length limit of %v reached", revHistoryLimitInChars)
+				logger.Info("Not appending revision due to revision history length limit reached", "revisionHistoryLimit", revHistoryLimitInChars)
 			}
 		}
 	}
@@ -303,7 +304,7 @@ var annotationsToSkip = map[string]bool{
 // skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key
 // TODO: How to decide which annotations should / should not be copied?
 //
-//	See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615
+// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615
 func skipCopyAnnotation(key string) bool {
 	return annotationsToSkip[key]
 }
@@ -376,22 +377,22 @@ func FindActiveOrLatest(newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet) *apps
 }
 
 // GetDesiredReplicasAnnotation returns the number of desired replicas
-func GetDesiredReplicasAnnotation(rs *apps.ReplicaSet) (int32, bool) {
-	return getIntFromAnnotation(rs, DesiredReplicasAnnotation)
+func GetDesiredReplicasAnnotation(logger klog.Logger, rs *apps.ReplicaSet) (int32, bool) {
+	return getIntFromAnnotation(logger, rs, DesiredReplicasAnnotation)
 }
 
-func getMaxReplicasAnnotation(rs *apps.ReplicaSet) (int32, bool) {
-	return getIntFromAnnotation(rs, MaxReplicasAnnotation)
+func getMaxReplicasAnnotation(logger klog.Logger, rs *apps.ReplicaSet) (int32, bool) {
+	return getIntFromAnnotation(logger, rs, MaxReplicasAnnotation)
 }
 
-func getIntFromAnnotation(rs *apps.ReplicaSet, annotationKey string) (int32, bool) {
+func getIntFromAnnotation(logger klog.Logger, rs *apps.ReplicaSet, annotationKey string) (int32, bool) {
 	annotationValue, ok := rs.Annotations[annotationKey]
 	if !ok {
 		return int32(0), false
 	}
 	intValue, err := strconv.Atoi(annotationValue)
 	if err != nil {
-		klog.V(2).Infof("Cannot convert the value %q with annotation key %q for the replica set %q", annotationValue, annotationKey, rs.Name)
+		logger.V(2).Info("Could not convert the value with annotation key for the replica set", "annotationValue", annotationValue, "annotationKey", annotationKey, "replicaSet", klog.KObj(rs))
 		return int32(0), false
 	}
 	return int32(intValue), true
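The annotation helpers now take a klog.Logger as their first parameter instead of reaching for package-level klog state, which lets callers attach names and values once. A hypothetical call site against the new signatures (the wrapper name and import alias are assumptions, not part of this diff):

	package example

	import (
		apps "k8s.io/api/apps/v1"
		"k8s.io/klog/v2"

		deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
	)

	// desiredReplicas threads the caller's logger into the reworked helper;
	// the logger usually originates from klog.FromContext(ctx) upstream.
	func desiredReplicas(logger klog.Logger, rs *apps.ReplicaSet) (int32, bool) {
		return deploymentutil.GetDesiredReplicasAnnotation(logger, rs)
	}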
@@ -466,12 +467,12 @@ func MaxSurge(deployment apps.Deployment) int32 {
 // GetProportion will estimate the proportion for the provided replica set using 1. the current size
 // of the parent deployment, 2. the replica count that needs be added on the replica sets of the
 // deployment, and 3. the total replicas added in the replica sets of the deployment so far.
-func GetProportion(rs *apps.ReplicaSet, d apps.Deployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 {
+func GetProportion(logger klog.Logger, rs *apps.ReplicaSet, d apps.Deployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 {
 	if rs == nil || *(rs.Spec.Replicas) == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded {
 		return int32(0)
 	}
 
-	rsFraction := getReplicaSetFraction(*rs, d)
+	rsFraction := getReplicaSetFraction(logger, *rs, d)
 	allowed := deploymentReplicasToAdd - deploymentReplicasAdded
 
 	if deploymentReplicasToAdd > 0 {
@@ -488,14 +489,14 @@ func GetProportion(rs *apps.ReplicaSet, d apps.Deployment, deploymentReplicasToA
 
 // getReplicaSetFraction estimates the fraction of replicas a replica set can have in
 // 1. a scaling event during a rollout or 2. when scaling a paused deployment.
-func getReplicaSetFraction(rs apps.ReplicaSet, d apps.Deployment) int32 {
+func getReplicaSetFraction(logger klog.Logger, rs apps.ReplicaSet, d apps.Deployment) int32 {
 	// If we are scaling down to zero then the fraction of this replica set is its whole size (negative)
 	if *(d.Spec.Replicas) == int32(0) {
 		return -*(rs.Spec.Replicas)
 	}
 
 	deploymentReplicas := *(d.Spec.Replicas) + MaxSurge(d)
-	annotatedReplicas, ok := getMaxReplicasAnnotation(&rs)
+	annotatedReplicas, ok := getMaxReplicasAnnotation(logger, &rs)
 	if !ok {
 		// If we cannot find the annotation then fallback to the current deployment size. Note that this
 		// will not be an accurate proportion estimation in case other replica sets have different values
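The proportion logic these signatures feed is easiest to see with numbers: a replica set is resized in the same ratio as the deployment, using the size recorded in its max-replicas annotation as the baseline. A rough standalone illustration of that ratio (a simplified sketch, not the vendored implementation, which also clamps the result by the remaining surge budget):

	package example

	import "math"

	// proportionalShare sketches the idea behind getReplicaSetFraction: size
	// the replica set by the ratio of the deployment's new effective size to
	// the size recorded in the replica set's max-replicas annotation.
	func proportionalShare(rsReplicas, annotatedMax, deploymentReplicas int32) int32 {
		// Example: rsReplicas=6, annotatedMax=10, deploymentReplicas=15
		// => round(6*15/10) - 6 = 9 - 6 = 3 extra replicas for this set.
		newSize := math.Round(float64(rsReplicas) * float64(deploymentReplicas) / float64(annotatedMax))
		return int32(newSize) - rsReplicas
	}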
@@ -734,7 +735,7 @@ var nowFn = func() time.Time { return time.Now() }
 // DeploymentTimedOut considers a deployment to have timed out once its condition that reports progress
 // is older than progressDeadlineSeconds or a Progressing condition with a TimedOutReason reason already
 // exists.
-func DeploymentTimedOut(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool {
+func DeploymentTimedOut(ctx context.Context, deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool {
 	if !HasProgressDeadline(deployment) {
 		return false
 	}
@@ -763,7 +764,8 @@ func DeploymentTimedOut(deployment *apps.Deployment, newStatus *apps.DeploymentS
 	if condition.Reason == TimedOutReason {
 		return true
 	}
 
+	logger := klog.FromContext(ctx)
 	// Look at the difference in seconds between now and the last time we reported any
 	// progress or tried to create a replica set, or resumed a paused deployment and
 	// compare against progressDeadlineSeconds.
@@ -772,7 +773,7 @@ func DeploymentTimedOut(deployment *apps.Deployment, newStatus *apps.DeploymentS
 	delta := time.Duration(*deployment.Spec.ProgressDeadlineSeconds) * time.Second
 	timedOut := from.Add(delta).Before(now)
 
-	klog.V(4).Infof("Deployment %q timed out (%t) [last progress check: %v - now: %v]", deployment.Name, timedOut, from, now)
+	logger.V(4).Info("Deployment timed out from last progress check", "deployment", klog.KObj(deployment), "timeout", timedOut, "from", from, "now", now)
 	return timedOut
 }
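DeploymentTimedOut now takes a context.Context and recovers its logger with klog.FromContext, the contextual-logging pattern this whole file is migrating toward. A minimal sketch of how caller and callee connect (illustrative names only):

	package main

	import (
		"context"

		"k8s.io/klog/v2"
	)

	// timedOut recovers whatever logger the caller attached to ctx;
	// klog.FromContext falls back to the global klog logger if none is set.
	func timedOut(ctx context.Context) bool {
		logger := klog.FromContext(ctx)
		logger.V(4).Info("Deployment timed out from last progress check", "timeout", false)
		return false
	}

	func main() {
		// The caller names the logger once; every line logged under this
		// ctx then carries the deployment-controller name automatically.
		logger := klog.Background().WithName("deployment-controller")
		ctx := klog.NewContext(context.Background(), logger)
		_ = timedOut(ctx)
	}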