Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 10:33:35 +00:00
rebase: update kubernetes to 1.28.0 in main

Update Kubernetes to 1.28.0 in the main repo.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
commit ff3e84ad67 (parent b2fdc269c3), committed by mergify[bot]
vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go (generated, vendored, 12 lines changed)
@@ -236,8 +236,8 @@ func (m *PodControllerRefManager) AdoptPod(ctx context.Context, pod *v1.Pod) err
 // ReleasePod sends a patch to free the pod from the control of the controller.
 // It returns the error if the patching fails. 404 and 422 errors are ignored.
 func (m *PodControllerRefManager) ReleasePod(ctx context.Context, pod *v1.Pod) error {
-	klog.V(2).Infof("patching pod %s_%s to remove its controllerRef to %s/%s:%s",
-		pod.Namespace, pod.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
+	logger := klog.FromContext(ctx)
+	logger.V(2).Info("Patching pod to remove its controllerRef", "pod", klog.KObj(pod), "gvk", m.controllerKind, "controller", m.Controller.GetName())
 	patchBytes, err := GenerateDeleteOwnerRefStrategicMergeBytes(pod.UID, []types.UID{m.Controller.GetUID()}, m.finalizers...)
 	if err != nil {
 		return err
@@ -361,8 +361,8 @@ func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(ctx context.Context, rs
 // ReleaseReplicaSet sends a patch to free the ReplicaSet from the control of the Deployment controller.
 // It returns the error if the patching fails. 404 and 422 errors are ignored.
 func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(ctx context.Context, replicaSet *apps.ReplicaSet) error {
-	klog.V(2).Infof("patching ReplicaSet %s_%s to remove its controllerRef to %s/%s:%s",
-		replicaSet.Namespace, replicaSet.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
+	logger := klog.FromContext(ctx)
+	logger.V(2).Info("Patching ReplicaSet to remove its controllerRef", "replicaSet", klog.KObj(replicaSet), "gvk", m.controllerKind, "controller", m.Controller.GetName())
 	patchBytes, err := GenerateDeleteOwnerRefStrategicMergeBytes(replicaSet.UID, []types.UID{m.Controller.GetUID()})
 	if err != nil {
 		return err
@@ -499,8 +499,8 @@ func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(ctx con
 // ReleaseControllerRevision sends a patch to free the ControllerRevision from the control of its controller.
 // It returns the error if the patching fails. 404 and 422 errors are ignored.
 func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(ctx context.Context, history *apps.ControllerRevision) error {
-	klog.V(2).Infof("patching ControllerRevision %s_%s to remove its controllerRef to %s/%s:%s",
-		history.Namespace, history.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
+	logger := klog.FromContext(ctx)
+	logger.V(2).Info("Patching ControllerRevision to remove its controllerRef", "controllerRevision", klog.KObj(history), "gvk", m.controllerKind, "controller", m.Controller.GetName())
 	patchBytes, err := GenerateDeleteOwnerRefStrategicMergeBytes(history.UID, []types.UID{m.Controller.GetUID()})
 	if err != nil {
 		return err
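All three hunks in this file make the same contextual-logging change: the free-form klog.V(2).Infof call is replaced by a logger obtained from the request context via klog.FromContext(ctx), emitting a fixed message plus structured key/value pairs. A minimal runnable sketch of the pattern, assuming nothing beyond klog/v2 and the core API types (handleObject is an illustrative helper, not part of the vendored code):

```go
package main

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

// handleObject is a hypothetical helper showing the migrated logging style:
// the logger travels in the context, and values are attached as key/value
// pairs instead of being formatted into the message string.
func handleObject(ctx context.Context, pod *v1.Pod) {
	logger := klog.FromContext(ctx)
	// klog.KObj renders the object as namespace/name, matching what the
	// vendored code now logs under the "pod" key.
	logger.V(2).Info("Patching pod to remove its controllerRef", "pod", klog.KObj(pod))
}

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web-0"}}
	// The caller injects a logger into the context; FromContext falls back
	// to the global klog logger when none is set.
	ctx := klog.NewContext(context.Background(), klog.Background())
	handleObject(ctx, pod)
}
```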
vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go (generated, vendored, 137 lines changed)
@@ -146,15 +146,15 @@ var ExpKeyFunc = func(obj interface{}) (string, error) {
 // types of controllers, because the keys might conflict across types.
 type ControllerExpectationsInterface interface {
 	GetExpectations(controllerKey string) (*ControlleeExpectations, bool, error)
-	SatisfiedExpectations(controllerKey string) bool
-	DeleteExpectations(controllerKey string)
-	SetExpectations(controllerKey string, add, del int) error
-	ExpectCreations(controllerKey string, adds int) error
-	ExpectDeletions(controllerKey string, dels int) error
-	CreationObserved(controllerKey string)
-	DeletionObserved(controllerKey string)
-	RaiseExpectations(controllerKey string, add, del int)
-	LowerExpectations(controllerKey string, add, del int)
+	SatisfiedExpectations(logger klog.Logger, controllerKey string) bool
+	DeleteExpectations(logger klog.Logger, controllerKey string)
+	SetExpectations(logger klog.Logger, controllerKey string, add, del int) error
+	ExpectCreations(logger klog.Logger, controllerKey string, adds int) error
+	ExpectDeletions(logger klog.Logger, controllerKey string, dels int) error
+	CreationObserved(logger klog.Logger, controllerKey string)
+	DeletionObserved(logger klog.Logger, controllerKey string)
+	RaiseExpectations(logger klog.Logger, controllerKey string, add, del int)
+	LowerExpectations(logger klog.Logger, controllerKey string, add, del int)
 }

 // ControllerExpectations is a cache mapping controllers to what they expect to see before being woken up for a sync.
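Every method of the interface except GetExpectations now takes a klog.Logger as its first parameter, so expectation bookkeeping logs through the caller's logger rather than global klog state. A hedged sketch of the new calling convention as a consumer might use it (syncReplicas and the controller key are invented for illustration):

```go
package main

import (
	"fmt"

	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/controller"
)

// syncReplicas is a hypothetical sync handler showing the 1.28 calling
// convention: the same logger is passed to every expectations call so the
// log lines from one sync share context.
func syncReplicas(logger klog.Logger, exp controller.ControllerExpectationsInterface, key string) error {
	// SatisfiedExpectations now logs through the supplied logger.
	if !exp.SatisfiedExpectations(logger, key) {
		return nil // a previous create/delete burst is still in flight
	}
	// Record that we are about to create 3 replicas; watch events later
	// lower the count via CreationObserved(logger, key).
	if err := exp.ExpectCreations(logger, key, 3); err != nil {
		return fmt.Errorf("setting expectations for %s: %w", key, err)
	}
	return nil
}

func main() {
	exp := controller.NewControllerExpectations()
	if err := syncReplicas(klog.Background(), exp, "default/my-rs"); err != nil {
		fmt.Println(err)
	}
}
```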
@@ -172,10 +172,11 @@ func (r *ControllerExpectations) GetExpectations(controllerKey string) (*Control
 }

 // DeleteExpectations deletes the expectations of the given controller from the TTLStore.
-func (r *ControllerExpectations) DeleteExpectations(controllerKey string) {
+func (r *ControllerExpectations) DeleteExpectations(logger klog.Logger, controllerKey string) {
 	if exp, exists, err := r.GetByKey(controllerKey); err == nil && exists {
 		if err := r.Delete(exp); err != nil {
-			klog.V(2).Infof("Error deleting expectations for controller %v: %v", controllerKey, err)
+			logger.V(2).Info("Error deleting expectations", "controller", controllerKey, "err", err)
 		}
 	}
 }
@@ -183,27 +184,27 @@ func (r *ControllerExpectations) DeleteExpectations(controllerKey string) {
 // SatisfiedExpectations returns true if the required adds/dels for the given controller have been observed.
 // Add/del counts are established by the controller at sync time, and updated as controllees are observed by the controller
 // manager.
-func (r *ControllerExpectations) SatisfiedExpectations(controllerKey string) bool {
+func (r *ControllerExpectations) SatisfiedExpectations(logger klog.Logger, controllerKey string) bool {
 	if exp, exists, err := r.GetExpectations(controllerKey); exists {
 		if exp.Fulfilled() {
-			klog.V(4).Infof("Controller expectations fulfilled %#v", exp)
+			logger.V(4).Info("Controller expectations fulfilled", "expectations", exp)
 			return true
 		} else if exp.isExpired() {
-			klog.V(4).Infof("Controller expectations expired %#v", exp)
+			logger.V(4).Info("Controller expectations expired", "expectations", exp)
 			return true
 		} else {
-			klog.V(4).Infof("Controller still waiting on expectations %#v", exp)
+			logger.V(4).Info("Controller still waiting on expectations", "expectations", exp)
 			return false
 		}
 	} else if err != nil {
-		klog.V(2).Infof("Error encountered while checking expectations %#v, forcing sync", err)
+		logger.V(2).Info("Error encountered while checking expectations, forcing sync", "err", err)
 	} else {
 		// When a new controller is created, it doesn't have expectations.
 		// When it doesn't see expected watch events for > TTL, the expectations expire.
 		// - In this case it wakes up, creates/deletes controllees, and sets expectations again.
 		// When it has satisfied expectations and no controllees need to be created/destroyed > TTL, the expectations expire.
 		// - In this case it continues without setting expectations till it needs to create/delete controllees.
-		klog.V(4).Infof("Controller %v either never recorded expectations, or the ttl expired.", controllerKey)
+		logger.V(4).Info("Controller either never recorded expectations, or the ttl expired", "controller", controllerKey)
 	}
 	// Trigger a sync if we either encountered and error (which shouldn't happen since we're
 	// getting from local store) or this controller hasn't established expectations.
@@ -218,46 +219,46 @@ func (exp *ControlleeExpectations) isExpired() bool {
 }

 // SetExpectations registers new expectations for the given controller. Forgets existing expectations.
-func (r *ControllerExpectations) SetExpectations(controllerKey string, add, del int) error {
+func (r *ControllerExpectations) SetExpectations(logger klog.Logger, controllerKey string, add, del int) error {
 	exp := &ControlleeExpectations{add: int64(add), del: int64(del), key: controllerKey, timestamp: clock.RealClock{}.Now()}
-	klog.V(4).Infof("Setting expectations %#v", exp)
+	logger.V(4).Info("Setting expectations", "expectations", exp)
 	return r.Add(exp)
 }

-func (r *ControllerExpectations) ExpectCreations(controllerKey string, adds int) error {
-	return r.SetExpectations(controllerKey, adds, 0)
+func (r *ControllerExpectations) ExpectCreations(logger klog.Logger, controllerKey string, adds int) error {
+	return r.SetExpectations(logger, controllerKey, adds, 0)
 }

-func (r *ControllerExpectations) ExpectDeletions(controllerKey string, dels int) error {
-	return r.SetExpectations(controllerKey, 0, dels)
+func (r *ControllerExpectations) ExpectDeletions(logger klog.Logger, controllerKey string, dels int) error {
+	return r.SetExpectations(logger, controllerKey, 0, dels)
 }

 // Decrements the expectation counts of the given controller.
-func (r *ControllerExpectations) LowerExpectations(controllerKey string, add, del int) {
+func (r *ControllerExpectations) LowerExpectations(logger klog.Logger, controllerKey string, add, del int) {
 	if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists {
 		exp.Add(int64(-add), int64(-del))
 		// The expectations might've been modified since the update on the previous line.
-		klog.V(4).Infof("Lowered expectations %#v", exp)
+		logger.V(4).Info("Lowered expectations", "expectations", exp)
 	}
 }

 // Increments the expectation counts of the given controller.
-func (r *ControllerExpectations) RaiseExpectations(controllerKey string, add, del int) {
+func (r *ControllerExpectations) RaiseExpectations(logger klog.Logger, controllerKey string, add, del int) {
 	if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists {
 		exp.Add(int64(add), int64(del))
 		// The expectations might've been modified since the update on the previous line.
-		klog.V(4).Infof("Raised expectations %#v", exp)
+		logger.V(4).Info("Raised expectations", "expectations", exp)
 	}
 }

 // CreationObserved atomically decrements the `add` expectation count of the given controller.
-func (r *ControllerExpectations) CreationObserved(controllerKey string) {
-	r.LowerExpectations(controllerKey, 1, 0)
+func (r *ControllerExpectations) CreationObserved(logger klog.Logger, controllerKey string) {
+	r.LowerExpectations(logger, controllerKey, 1, 0)
 }

 // DeletionObserved atomically decrements the `del` expectation count of the given controller.
-func (r *ControllerExpectations) DeletionObserved(controllerKey string) {
-	r.LowerExpectations(controllerKey, 0, 1)
+func (r *ControllerExpectations) DeletionObserved(logger klog.Logger, controllerKey string) {
+	r.LowerExpectations(logger, controllerKey, 0, 1)
 }

 // ControlleeExpectations track controllee creates/deletes.
@@ -287,6 +288,20 @@ func (e *ControlleeExpectations) GetExpectations() (int64, int64) {
 	return atomic.LoadInt64(&e.add), atomic.LoadInt64(&e.del)
 }

+// MarshalLog makes a thread-safe copy of the values of the expectations that
+// can be used for logging.
+func (e *ControlleeExpectations) MarshalLog() interface{} {
+	return struct {
+		add int64
+		del int64
+		key string
+	}{
+		add: atomic.LoadInt64(&e.add),
+		del: atomic.LoadInt64(&e.del),
+		key: e.key,
+	}
+}
+
 // NewControllerExpectations returns a store for ControllerExpectations.
 func NewControllerExpectations() *ControllerExpectations {
 	return &ControllerExpectations{cache.NewStore(ExpKeyFunc)}
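The new MarshalLog method makes *ControlleeExpectations satisfy logr's Marshaler interface: when the object is passed as a log value (the "expectations" key in the hunks above), the backend logs this atomic snapshot instead of reflecting over the live struct and racing on its counters. A small sketch of the same technique with a made-up Counter type:

```go
package main

import (
	"sync/atomic"

	"github.com/go-logr/logr"
	"k8s.io/klog/v2"
)

// Counter is a hypothetical type whose field is updated concurrently.
type Counter struct {
	n int64
}

// MarshalLog implements logr.Marshaler: when a *Counter appears as a log
// value, the backend calls this instead of reflecting over the struct,
// so we can take an atomic snapshot of n.
func (c *Counter) MarshalLog() interface{} {
	return struct{ N int64 }{N: atomic.LoadInt64(&c.n)}
}

// Compile-time check that *Counter implements the interface.
var _ logr.Marshaler = &Counter{}

func main() {
	c := &Counter{}
	atomic.AddInt64(&c.n, 42)
	klog.Background().Info("counter state", "counter", c)
}
```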
@@ -335,47 +350,47 @@ func (u *UIDTrackingControllerExpectations) GetUIDs(controllerKey string) sets.S
 }

 // ExpectDeletions records expectations for the given deleteKeys, against the given controller.
-func (u *UIDTrackingControllerExpectations) ExpectDeletions(rcKey string, deletedKeys []string) error {
+func (u *UIDTrackingControllerExpectations) ExpectDeletions(logger klog.Logger, rcKey string, deletedKeys []string) error {
 	expectedUIDs := sets.NewString()
 	for _, k := range deletedKeys {
 		expectedUIDs.Insert(k)
 	}
-	klog.V(4).Infof("Controller %v waiting on deletions for: %+v", rcKey, deletedKeys)
+	logger.V(4).Info("Controller waiting on deletions", "controller", rcKey, "keys", deletedKeys)
 	u.uidStoreLock.Lock()
 	defer u.uidStoreLock.Unlock()

 	if existing := u.GetUIDs(rcKey); existing != nil && existing.Len() != 0 {
-		klog.Errorf("Clobbering existing delete keys: %+v", existing)
+		logger.Error(nil, "Clobbering existing delete keys", "keys", existing)
 	}
 	if err := u.uidStore.Add(&UIDSet{expectedUIDs, rcKey}); err != nil {
 		return err
 	}
-	return u.ControllerExpectationsInterface.ExpectDeletions(rcKey, expectedUIDs.Len())
+	return u.ControllerExpectationsInterface.ExpectDeletions(logger, rcKey, expectedUIDs.Len())
 }

 // DeletionObserved records the given deleteKey as a deletion, for the given rc.
-func (u *UIDTrackingControllerExpectations) DeletionObserved(rcKey, deleteKey string) {
+func (u *UIDTrackingControllerExpectations) DeletionObserved(logger klog.Logger, rcKey, deleteKey string) {
 	u.uidStoreLock.Lock()
 	defer u.uidStoreLock.Unlock()

 	uids := u.GetUIDs(rcKey)
 	if uids != nil && uids.Has(deleteKey) {
-		klog.V(4).Infof("Controller %v received delete for pod %v", rcKey, deleteKey)
-		u.ControllerExpectationsInterface.DeletionObserved(rcKey)
+		logger.V(4).Info("Controller received delete for pod", "controller", rcKey, "key", deleteKey)
+		u.ControllerExpectationsInterface.DeletionObserved(logger, rcKey)
 		uids.Delete(deleteKey)
 	}
 }

 // DeleteExpectations deletes the UID set and invokes DeleteExpectations on the
 // underlying ControllerExpectationsInterface.
-func (u *UIDTrackingControllerExpectations) DeleteExpectations(rcKey string) {
+func (u *UIDTrackingControllerExpectations) DeleteExpectations(logger klog.Logger, rcKey string) {
 	u.uidStoreLock.Lock()
 	defer u.uidStoreLock.Unlock()

-	u.ControllerExpectationsInterface.DeleteExpectations(rcKey)
+	u.ControllerExpectationsInterface.DeleteExpectations(logger, rcKey)
 	if uidExp, exists, err := u.uidStore.GetByKey(rcKey); err == nil && exists {
 		if err := u.uidStore.Delete(uidExp); err != nil {
-			klog.V(2).Infof("Error deleting uid expectations for controller %v: %v", rcKey, err)
+			logger.V(2).Info("Error deleting uid expectations", "controller", rcKey, "err", err)
 		}
 	}
 }
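UIDTrackingControllerExpectations layers a set of concrete object keys on top of the numeric expectations, and the logger now threads through each call. A hedged usage sketch (the controller key and pod keys are illustrative):

```go
package main

import (
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/controller"
)

func main() {
	logger := klog.Background()
	// Wrap plain expectations with UID tracking, as the replica set and
	// stateful set controllers do.
	exp := controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations())

	rcKey := "default/my-rs"
	// Expect two specific pods to be deleted; the count and the key set
	// are recorded together under the controller key.
	if err := exp.ExpectDeletions(logger, rcKey, []string{"default/pod-a", "default/pod-b"}); err != nil {
		logger.Error(err, "recording delete expectations")
	}
	// A watch event for one of the tracked pods lowers both the key set
	// and the underlying `del` counter.
	exp.DeletionObserved(logger, rcKey, "default/pod-a")
	logger.Info("checked expectations", "satisfied", exp.SatisfiedExpectations(logger, rcKey))
}
```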
@@ -573,12 +588,13 @@ func (r RealPodControl) createPods(ctx context.Context, namespace string, pod *v
 		}
 		return err
 	}
+	logger := klog.FromContext(ctx)
 	accessor, err := meta.Accessor(object)
 	if err != nil {
-		klog.Errorf("parentObject does not have ObjectMeta, %v", err)
+		logger.Error(err, "parentObject does not have ObjectMeta")
 		return nil
 	}
-	klog.V(4).Infof("Controller %v created pod %v", accessor.GetName(), newPod.Name)
+	logger.V(4).Info("Controller created pod", "controller", accessor.GetName(), "pod", klog.KObj(newPod))
 	r.Recorder.Eventf(object, v1.EventTypeNormal, SuccessfulCreatePodReason, "Created pod: %v", newPod.Name)

 	return nil
@@ -589,10 +605,11 @@ func (r RealPodControl) DeletePod(ctx context.Context, namespace string, podID s
 	if err != nil {
 		return fmt.Errorf("object does not have ObjectMeta, %v", err)
 	}
-	klog.V(2).InfoS("Deleting pod", "controller", accessor.GetName(), "pod", klog.KRef(namespace, podID))
+	logger := klog.FromContext(ctx)
+	logger.V(2).Info("Deleting pod", "controller", accessor.GetName(), "pod", klog.KRef(namespace, podID))
 	if err := r.KubeClient.CoreV1().Pods(namespace).Delete(ctx, podID, metav1.DeleteOptions{}); err != nil {
 		if apierrors.IsNotFound(err) {
-			klog.V(4).Infof("pod %v/%v has already been deleted.", namespace, podID)
+			logger.V(4).Info("Pod has already been deleted.", "pod", klog.KRef(namespace, podID))
 			return err
 		}
 		r.Recorder.Eventf(object, v1.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err)
@@ -929,25 +946,49 @@ func maxContainerRestarts(pod *v1.Pod) int {
 }

 // FilterActivePods returns pods that have not terminated.
-func FilterActivePods(pods []*v1.Pod) []*v1.Pod {
+func FilterActivePods(logger klog.Logger, pods []*v1.Pod) []*v1.Pod {
 	var result []*v1.Pod
 	for _, p := range pods {
 		if IsPodActive(p) {
 			result = append(result, p)
 		} else {
-			klog.V(4).Infof("Ignoring inactive pod %v/%v in state %v, deletion time %v",
-				p.Namespace, p.Name, p.Status.Phase, p.DeletionTimestamp)
+			logger.V(4).Info("Ignoring inactive pod", "pod", klog.KObj(p), "phase", p.Status.Phase, "deletionTime", p.DeletionTimestamp)
 		}
 	}
 	return result
 }

+func FilterTerminatingPods(pods []*v1.Pod) []*v1.Pod {
+	var result []*v1.Pod
+	for _, p := range pods {
+		if IsPodTerminating(p) {
+			result = append(result, p)
+		}
+	}
+	return result
+}
+
+func CountTerminatingPods(pods []*v1.Pod) int32 {
+	numberOfTerminatingPods := 0
+	for _, p := range pods {
+		if IsPodTerminating(p) {
+			numberOfTerminatingPods += 1
+		}
+	}
+	return int32(numberOfTerminatingPods)
+}
+
 func IsPodActive(p *v1.Pod) bool {
 	return v1.PodSucceeded != p.Status.Phase &&
 		v1.PodFailed != p.Status.Phase &&
 		p.DeletionTimestamp == nil
 }

+func IsPodTerminating(p *v1.Pod) bool {
+	return !podutil.IsPodTerminal(p) &&
+		p.DeletionTimestamp != nil
+}
+
 // FilterActiveReplicaSets returns replica sets that have (or at least ought to have) pods.
 func FilterActiveReplicaSets(replicaSets []*apps.ReplicaSet) []*apps.ReplicaSet {
 	activeFilter := func(rs *apps.ReplicaSet) bool {
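Besides adding the logger parameter to FilterActivePods, this hunk introduces FilterTerminatingPods, CountTerminatingPods, and IsPodTerminating. A short sketch, with invented pod fixtures, of how the active/terminating split behaves:

```go
package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/controller"
)

func main() {
	now := metav1.NewTime(time.Now())
	pods := []*v1.Pod{
		// Running and not being deleted: active.
		{ObjectMeta: metav1.ObjectMeta{Name: "a"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
		// Running but with a deletion timestamp: terminating, not active.
		{ObjectMeta: metav1.ObjectMeta{Name: "b", DeletionTimestamp: &now}, Status: v1.PodStatus{Phase: v1.PodRunning}},
		// Succeeded: terminal, so neither active nor terminating.
		{ObjectMeta: metav1.ObjectMeta{Name: "c"}, Status: v1.PodStatus{Phase: v1.PodSucceeded}},
	}

	logger := klog.Background()
	active := controller.FilterActivePods(logger, pods) // skipped pods are logged at V(4)
	fmt.Println(len(active), controller.CountTerminatingPods(pods)) // 1 1
}
```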
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go (generated, vendored, 12 lines changed)
@@ -184,12 +184,12 @@ func SetDeploymentRevision(deployment *apps.Deployment, revision string) bool {
 }

 // MaxRevision finds the highest revision in the replica sets
-func MaxRevision(allRSs []*apps.ReplicaSet) int64 {
+func MaxRevision(logger klog.Logger, allRSs []*apps.ReplicaSet) int64 {
 	max := int64(0)
 	for _, rs := range allRSs {
 		if v, err := Revision(rs); err != nil {
 			// Skip the replica sets when it failed to parse their revision information
-			klog.V(4).Info("Couldn't parse revision for replica set, deployment controller will skip it when reconciling revisions", "replicaSet", klog.KObj(rs), "err", err)
+			logger.V(4).Info("Couldn't parse revision for replica set, deployment controller will skip it when reconciling revisions", "replicaSet", klog.KObj(rs), "err", err)
 		} else if v > max {
 			max = v
 		}
@@ -198,12 +198,12 @@ func MaxRevision(allRSs []*apps.ReplicaSet) int64 {
 }

 // LastRevision finds the second max revision number in all replica sets (the last revision)
-func LastRevision(allRSs []*apps.ReplicaSet) int64 {
+func LastRevision(logger klog.Logger, allRSs []*apps.ReplicaSet) int64 {
 	max, secMax := int64(0), int64(0)
 	for _, rs := range allRSs {
 		if v, err := Revision(rs); err != nil {
 			// Skip the replica sets when it failed to parse their revision information
-			klog.V(4).Info("Couldn't parse revision for replica set, deployment controller will skip it when reconciling revisions", "replicaSet", klog.KObj(rs), "err", err)
+			logger.V(4).Info("Couldn't parse revision for replica set, deployment controller will skip it when reconciling revisions", "replicaSet", klog.KObj(rs), "err", err)
 		} else if v >= max {
 			secMax = max
 			max = v
@@ -849,11 +849,11 @@ func WaitForObservedDeployment(getDeploymentFunc func() (*apps.Deployment, error
 // 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
 // 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1)
 func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) {
-	surge, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), int(desired), true)
+	surge, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt32(0)), int(desired), true)
 	if err != nil {
 		return 0, 0, err
 	}
-	unavailable, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), int(desired), false)
+	unavailable, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt32(0)), int(desired), false)
 	if err != nil {
 		return 0, 0, err
 	}
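Alongside the logger parameter on MaxRevision and LastRevision, this file moves from intstr.FromInt to the newer intstr.FromInt32, which takes an int32 directly and sidesteps the int-width truncation question that FromInt left open. A quick hedged illustration of the apimachinery helpers involved:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// FromInt32 takes an int32 directly, so no silent narrowing can occur;
	// FromInt took a plain int and is discouraged in its favor.
	zero := intstr.FromInt32(0)
	pct := intstr.FromString("25%")

	// GetScaledValueFromIntOrPercent resolves either form against a total:
	// 25% of 8, rounding up, is 2.
	v, err := intstr.GetScaledValueFromIntOrPercent(&pct, 8, true)
	fmt.Println(zero.IntValue(), v, err) // 0 2 <nil>
}
```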
vendor/k8s.io/kubernetes/pkg/controller/lookup_cache.go (generated, vendored, file deleted, 92 lines)
@@ -1,92 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package controller
-
-import (
-	"hash/fnv"
-	"sync"
-
-	"github.com/golang/groupcache/lru"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	hashutil "k8s.io/kubernetes/pkg/util/hash"
-)
-
-type objectWithMeta interface {
-	metav1.Object
-}
-
-// keyFunc returns the key of an object, which is used to look up in the cache for it's matching object.
-// Since we match objects by namespace and Labels/Selector, so if two objects have the same namespace and labels,
-// they will have the same key.
-func keyFunc(obj objectWithMeta) uint64 {
-	hash := fnv.New32a()
-	hashutil.DeepHashObject(hash, &equivalenceLabelObj{
-		namespace: obj.GetNamespace(),
-		labels:    obj.GetLabels(),
-	})
-	return uint64(hash.Sum32())
-}
-
-type equivalenceLabelObj struct {
-	namespace string
-	labels    map[string]string
-}
-
-// MatchingCache save label and selector matching relationship
-type MatchingCache struct {
-	mutex sync.RWMutex
-	cache *lru.Cache
-}
-
-// NewMatchingCache return a NewMatchingCache, which save label and selector matching relationship.
-func NewMatchingCache(maxCacheEntries int) *MatchingCache {
-	return &MatchingCache{
-		cache: lru.New(maxCacheEntries),
-	}
-}
-
-// Add will add matching information to the cache.
-func (c *MatchingCache) Add(labelObj objectWithMeta, selectorObj objectWithMeta) {
-	key := keyFunc(labelObj)
-	c.mutex.Lock()
-	defer c.mutex.Unlock()
-	c.cache.Add(key, selectorObj)
-}
-
-// GetMatchingObject lookup the matching object for a given object.
-// Note: the cache information may be invalid since the controller may be deleted or updated,
-// we need check in the external request to ensure the cache data is not dirty.
-func (c *MatchingCache) GetMatchingObject(labelObj objectWithMeta) (controller interface{}, exists bool) {
-	key := keyFunc(labelObj)
-	// NOTE: we use Lock() instead of RLock() here because lru's Get() method also modifies state(
-	// it need update the least recently usage information). So we can not call it concurrently.
-	c.mutex.Lock()
-	defer c.mutex.Unlock()
-	return c.cache.Get(key)
-}
-
-// Update update the cached matching information.
-func (c *MatchingCache) Update(labelObj objectWithMeta, selectorObj objectWithMeta) {
-	c.Add(labelObj, selectorObj)
-}
-
-// InvalidateAll invalidate the whole cache.
-func (c *MatchingCache) InvalidateAll() {
-	c.mutex.Lock()
-	defer c.mutex.Unlock()
-	c.cache = lru.New(c.cache.MaxEntries)
-}