rebase: update kubernetes to v1.23.0

Updating the Go dependency to the latest released Kubernetes
version, i.e. v1.23.0.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Author:    Madhu Rajanna
Date:      2021-12-08 19:20:47 +05:30
Committer: mergify[bot]
Parent:    42403e2ba7
Commit:    5762da3e91

789 changed files with 49781 additions and 11501 deletions

vendor/k8s.io/client-go/util/cert/cert.go

@@ -33,6 +33,7 @@ import (
 	"time"
 	"k8s.io/client-go/util/keyutil"
+	netutils "k8s.io/utils/net"
 )
 const duration365d = time.Hour * 24 * 365
@@ -157,7 +158,7 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a
 		BasicConstraintsValid: true,
 	}
-	if ip := net.ParseIP(host); ip != nil {
+	if ip := netutils.ParseIPSloppy(host); ip != nil {
 		template.IPAddresses = append(template.IPAddresses, ip)
 	} else {
 		template.DNSNames = append(template.DNSNames, host)
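
Note: the swap to ParseIPSloppy is a behavioral guard, not a cosmetic rename. A
minimal sketch (not part of the commit) of the difference, assuming Go 1.17+
where net.ParseIP rejects IPv4 octets with leading zeros:

package main

import (
	"fmt"
	"net"

	netutils "k8s.io/utils/net"
)

func main() {
	host := "010.010.010.010" // leading zeros, accepted by pre-1.17 parsers
	fmt.Println(net.ParseIP(host))            // <nil> on Go 1.17+
	fmt.Println(netutils.ParseIPSloppy(host)) // 10.10.10.10, legacy behavior
}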

vendor/k8s.io/client-go/util/flowcontrol/backoff.go

@@ -17,10 +17,12 @@ limitations under the License.
 package flowcontrol
 import (
+	"math/rand"
 	"sync"
 	"time"
-	"k8s.io/apimachinery/pkg/util/clock"
+	"k8s.io/utils/clock"
+	testingclock "k8s.io/utils/clock/testing"
 	"k8s.io/utils/integer"
 )
@@ -35,23 +37,43 @@ type Backoff struct {
 	defaultDuration time.Duration
 	maxDuration     time.Duration
 	perItemBackoff  map[string]*backoffEntry
+	rand            *rand.Rand
+	// maxJitterFactor adds jitter to the exponentially backed off delay.
+	// if maxJitterFactor is zero, no jitter is added to the delay in
+	// order to maintain current behavior.
+	maxJitterFactor float64
 }
-func NewFakeBackOff(initial, max time.Duration, tc *clock.FakeClock) *Backoff {
-	return &Backoff{
-		perItemBackoff:  map[string]*backoffEntry{},
-		Clock:           tc,
-		defaultDuration: initial,
-		maxDuration:     max,
-	}
+func NewFakeBackOff(initial, max time.Duration, tc *testingclock.FakeClock) *Backoff {
+	return newBackoff(tc, initial, max, 0.0)
 }
 func NewBackOff(initial, max time.Duration) *Backoff {
+	return NewBackOffWithJitter(initial, max, 0.0)
+}
+func NewFakeBackOffWithJitter(initial, max time.Duration, tc *testingclock.FakeClock, maxJitterFactor float64) *Backoff {
+	return newBackoff(tc, initial, max, maxJitterFactor)
+}
+func NewBackOffWithJitter(initial, max time.Duration, maxJitterFactor float64) *Backoff {
+	clock := clock.RealClock{}
+	return newBackoff(clock, initial, max, maxJitterFactor)
+}
+func newBackoff(clock clock.Clock, initial, max time.Duration, maxJitterFactor float64) *Backoff {
+	var random *rand.Rand
+	if maxJitterFactor > 0 {
+		random = rand.New(rand.NewSource(clock.Now().UnixNano()))
+	}
 	return &Backoff{
 		perItemBackoff:  map[string]*backoffEntry{},
-		Clock:           clock.RealClock{},
+		Clock:           clock,
 		defaultDuration: initial,
 		maxDuration:     max,
+		maxJitterFactor: maxJitterFactor,
+		rand:            random,
 	}
 }
@@ -74,8 +96,10 @@ func (p *Backoff) Next(id string, eventTime time.Time) {
 	entry, ok := p.perItemBackoff[id]
 	if !ok || hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
 		entry = p.initEntryUnsafe(id)
+		entry.backoff += p.jitter(entry.backoff)
 	} else {
-		delay := entry.backoff * 2 // exponential
+		delay := entry.backoff * 2       // exponential
+		delay += p.jitter(entry.backoff) // add some jitter to the delay
 		entry.backoff = time.Duration(integer.Int64Min(int64(delay), int64(p.maxDuration)))
 	}
 	entry.lastUpdate = p.Clock.Now()
@@ -143,6 +167,14 @@ func (p *Backoff) initEntryUnsafe(id string) *backoffEntry {
 	return entry
 }
+func (p *Backoff) jitter(delay time.Duration) time.Duration {
+	if p.rand == nil {
+		return 0
+	}
+	return time.Duration(p.rand.Float64() * p.maxJitterFactor * float64(delay))
+}
 // After 2*maxDuration we restart the backoff factor to the beginning
 func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool {
 	return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration
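
Note: a small usage sketch (not from the commit) of the new jittered
constructor; the item ID "demo-item" and the 10% jitter factor are
illustrative:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// 1s initial delay, 32s cap, up to 10% random jitter per step.
	backoff := flowcontrol.NewBackOffWithJitter(time.Second, 32*time.Second, 0.1)

	id := "demo-item"
	for i := 0; i < 4; i++ {
		backoff.Next(id, backoff.Clock.Now()) // record a failure
		fmt.Println(backoff.Get(id))          // current delay: ~1s, ~2s, ~4s, ...
	}
}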

vendor/k8s.io/client-go/util/flowcontrol/throttle.go

@@ -23,26 +23,36 @@ import (
 	"time"
 	"golang.org/x/time/rate"
+	"k8s.io/utils/clock"
 )
-type RateLimiter interface {
+type PassiveRateLimiter interface {
 	// TryAccept returns true if a token is taken immediately. Otherwise,
 	// it returns false.
 	TryAccept() bool
-	// Accept returns once a token becomes available.
-	Accept()
 	// Stop stops the rate limiter, subsequent calls to CanAccept will return false
 	Stop()
 	// QPS returns QPS of this rate limiter
 	QPS() float32
+}
+type RateLimiter interface {
+	PassiveRateLimiter
+	// Accept returns once a token becomes available.
+	Accept()
 	// Wait returns nil if a token is taken before the Context is done.
 	Wait(ctx context.Context) error
 }
-type tokenBucketRateLimiter struct {
+type tokenBucketPassiveRateLimiter struct {
 	limiter *rate.Limiter
-	clock   Clock
 	qps     float32
+	clock   clock.PassiveClock
+}
+type tokenBucketRateLimiter struct {
+	tokenBucketPassiveRateLimiter
+	clock Clock
 }
 // NewTokenBucketRateLimiter creates a rate limiter which implements a token bucket approach.
@@ -52,58 +62,73 @@ type tokenBucketRateLimiter struct {
 // The maximum number of tokens in the bucket is capped at 'burst'.
 func NewTokenBucketRateLimiter(qps float32, burst int) RateLimiter {
 	limiter := rate.NewLimiter(rate.Limit(qps), burst)
-	return newTokenBucketRateLimiter(limiter, realClock{}, qps)
+	return newTokenBucketRateLimiterWithClock(limiter, clock.RealClock{}, qps)
 }
+// NewTokenBucketPassiveRateLimiter is similar to NewTokenBucketRateLimiter except that it returns
+// a PassiveRateLimiter which does not have Accept() and Wait() methods.
+func NewTokenBucketPassiveRateLimiter(qps float32, burst int) PassiveRateLimiter {
+	limiter := rate.NewLimiter(rate.Limit(qps), burst)
+	return newTokenBucketRateLimiterWithPassiveClock(limiter, clock.RealClock{}, qps)
+}
 // An injectable, mockable clock interface.
 type Clock interface {
-	Now() time.Time
+	clock.PassiveClock
 	Sleep(time.Duration)
 }
-type realClock struct{}
-func (realClock) Now() time.Time {
-	return time.Now()
-}
-func (realClock) Sleep(d time.Duration) {
-	time.Sleep(d)
-}
+var _ Clock = (*clock.RealClock)(nil)
 // NewTokenBucketRateLimiterWithClock is identical to NewTokenBucketRateLimiter
 // but allows an injectable clock, for testing.
 func NewTokenBucketRateLimiterWithClock(qps float32, burst int, c Clock) RateLimiter {
 	limiter := rate.NewLimiter(rate.Limit(qps), burst)
-	return newTokenBucketRateLimiter(limiter, c, qps)
+	return newTokenBucketRateLimiterWithClock(limiter, c, qps)
 }
-func newTokenBucketRateLimiter(limiter *rate.Limiter, c Clock, qps float32) RateLimiter {
+// NewTokenBucketPassiveRateLimiterWithClock is similar to NewTokenBucketRateLimiterWithClock
+// except that it returns a PassiveRateLimiter which does not have Accept() and Wait() methods
+// and uses a PassiveClock.
+func NewTokenBucketPassiveRateLimiterWithClock(qps float32, burst int, c clock.PassiveClock) PassiveRateLimiter {
+	limiter := rate.NewLimiter(rate.Limit(qps), burst)
+	return newTokenBucketRateLimiterWithPassiveClock(limiter, c, qps)
+}
+func newTokenBucketRateLimiterWithClock(limiter *rate.Limiter, c Clock, qps float32) *tokenBucketRateLimiter {
 	return &tokenBucketRateLimiter{
-		limiter: limiter,
-		clock:   c,
-		qps:     qps,
+		tokenBucketPassiveRateLimiter: *newTokenBucketRateLimiterWithPassiveClock(limiter, c, qps),
+		clock:                         c,
 	}
 }
-func (t *tokenBucketRateLimiter) TryAccept() bool {
-	return t.limiter.AllowN(t.clock.Now(), 1)
+func newTokenBucketRateLimiterWithPassiveClock(limiter *rate.Limiter, c clock.PassiveClock, qps float32) *tokenBucketPassiveRateLimiter {
+	return &tokenBucketPassiveRateLimiter{
+		limiter: limiter,
+		qps:     qps,
+		clock:   c,
+	}
 }
+func (tbprl *tokenBucketPassiveRateLimiter) Stop() {
+}
+func (tbprl *tokenBucketPassiveRateLimiter) QPS() float32 {
+	return tbprl.qps
+}
+func (tbprl *tokenBucketPassiveRateLimiter) TryAccept() bool {
+	return tbprl.limiter.AllowN(tbprl.clock.Now(), 1)
+}
 // Accept will block until a token becomes available
-func (t *tokenBucketRateLimiter) Accept() {
-	now := t.clock.Now()
-	t.clock.Sleep(t.limiter.ReserveN(now, 1).DelayFrom(now))
+func (tbrl *tokenBucketRateLimiter) Accept() {
+	now := tbrl.clock.Now()
+	tbrl.clock.Sleep(tbrl.limiter.ReserveN(now, 1).DelayFrom(now))
 }
-func (t *tokenBucketRateLimiter) Stop() {
-}
-func (t *tokenBucketRateLimiter) QPS() float32 {
-	return t.qps
-}
-func (t *tokenBucketRateLimiter) Wait(ctx context.Context) error {
-	return t.limiter.Wait(ctx)
+func (tbrl *tokenBucketRateLimiter) Wait(ctx context.Context) error {
+	return tbrl.limiter.Wait(ctx)
 }
 type fakeAlwaysRateLimiter struct{}
@@ -157,3 +182,11 @@ func (t *fakeNeverRateLimiter) QPS() float32 {
 func (t *fakeNeverRateLimiter) Wait(ctx context.Context) error {
 	return errors.New("can not be accept")
 }
+var (
+	_ RateLimiter = (*tokenBucketRateLimiter)(nil)
+	_ RateLimiter = (*fakeAlwaysRateLimiter)(nil)
+	_ RateLimiter = (*fakeNeverRateLimiter)(nil)
+)
+var _ PassiveRateLimiter = (*tokenBucketPassiveRateLimiter)(nil)
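
Note: a hedged sketch (not in the commit) of the new passive variant. A
PassiveRateLimiter can only answer "may I proceed right now?" and never
sleeps, which suits callers that do their own waiting:

package main

import (
	"fmt"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// 5 tokens per second, burst of 1.
	limiter := flowcontrol.NewTokenBucketPassiveRateLimiter(5, 1)

	for i := 0; i < 3; i++ {
		// TryAccept never blocks; it reports whether a token was available.
		fmt.Println(limiter.TryAccept()) // true, then false until the bucket refills
	}
}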

vendor/k8s.io/client-go/util/retry/util.go

@@ -79,7 +79,7 @@ func OnError(backoff wait.Backoff, retriable func(error) bool, fn func() error)
 //    // if you got a conflict on the last update attempt then you need to get
 //    // the current version before making your own changes.
 //    pod, err := c.Pods("mynamespace").Get(name, metav1.GetOptions{})
-//    if err ! nil {
+//    if err != nil {
 //        return err
 //    }
 //
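
Note: the hunk above only fixes a typo inside OnError's doc comment. For
context, a brief sketch (not from the commit) of how the helper is typically
called; the transient-failure counter is a placeholder:

package main

import (
	"errors"
	"fmt"

	"k8s.io/client-go/util/retry"
)

func main() {
	attempts := 0
	err := retry.OnError(retry.DefaultBackoff,
		func(err error) bool { return true }, // placeholder: retry on any error
		func() error {
			attempts++
			if attempts < 3 {
				return errors.New("transient failure")
			}
			return nil
		})
	fmt.Println(err, attempts) // <nil> 3
}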

vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go

@@ -27,7 +27,7 @@ import (
 type RateLimiter interface {
 	// When gets an item and gets to decide how long that item should wait
 	When(item interface{}) time.Duration
-	// Forget indicates that an item is finished being retried. Doesn't matter whether its for perm failing
+	// Forget indicates that an item is finished being retried. Doesn't matter whether it's for failing
 	// or for success, we'll stop tracking it
 	Forget(item interface{})
 	// NumRequeues returns back how many failures the item has had
@@ -209,3 +209,30 @@ func (r *MaxOfRateLimiter) Forget(item interface{}) {
 		limiter.Forget(item)
 	}
 }
+// WithMaxWaitRateLimiter has a maxDelay which avoids waiting too long
+type WithMaxWaitRateLimiter struct {
+	limiter  RateLimiter
+	maxDelay time.Duration
+}
+func NewWithMaxWaitRateLimiter(limiter RateLimiter, maxDelay time.Duration) RateLimiter {
+	return &WithMaxWaitRateLimiter{limiter: limiter, maxDelay: maxDelay}
+}
+func (w WithMaxWaitRateLimiter) When(item interface{}) time.Duration {
+	delay := w.limiter.When(item)
+	if delay > w.maxDelay {
+		return w.maxDelay
+	}
+	return delay
+}
+func (w WithMaxWaitRateLimiter) Forget(item interface{}) {
+	w.limiter.Forget(item)
+}
+func (w WithMaxWaitRateLimiter) NumRequeues(item interface{}) int {
+	return w.limiter.NumRequeues(item)
+}
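
Note: a short sketch (not part of the commit) of the new wrapper capping an
exponential per-item backoff at 10s; the base limiter parameters are
illustrative:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	base := workqueue.NewItemExponentialFailureRateLimiter(time.Second, 1000*time.Second)
	limited := workqueue.NewWithMaxWaitRateLimiter(base, 10*time.Second)

	for i := 0; i < 5; i++ {
		// grows 1s, 2s, 4s, 8s, then is capped at the 10s maxDelay
		fmt.Println(limited.When("item"))
	}
}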

vendor/k8s.io/client-go/util/workqueue/delaying_queue.go

@@ -21,8 +21,8 @@ import (
 	"sync"
 	"time"
-	"k8s.io/apimachinery/pkg/util/clock"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/utils/clock"
 )
 // DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
@@ -51,11 +51,11 @@ func NewNamedDelayingQueue(name string) DelayingInterface {
 // NewDelayingQueueWithCustomClock constructs a new named workqueue
 // with ability to inject real or fake clock for testing purposes
-func NewDelayingQueueWithCustomClock(clock clock.Clock, name string) DelayingInterface {
+func NewDelayingQueueWithCustomClock(clock clock.WithTicker, name string) DelayingInterface {
 	return newDelayingQueue(clock, NewNamed(name), name)
 }
-func newDelayingQueue(clock clock.Clock, q Interface, name string) *delayingType {
+func newDelayingQueue(clock clock.WithTicker, q Interface, name string) *delayingType {
 	ret := &delayingType{
 		Interface: q,
 		clock:     clock,
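
Note: a test-oriented sketch (not from the commit) of the narrowed signature;
after this change the injected clock must satisfy clock.WithTicker, which the
fake clock from k8s.io/utils/clock/testing does:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
	testingclock "k8s.io/utils/clock/testing"
)

func main() {
	fakeClock := testingclock.NewFakeClock(time.Now())
	q := workqueue.NewDelayingQueueWithCustomClock(fakeClock, "demo")

	q.AddAfter("item", 5*time.Second)
	time.Sleep(10 * time.Millisecond) // let the waiting loop register the timer
	fakeClock.Step(6 * time.Second)   // advance virtual time past the delay

	item, _ := q.Get() // returns without real waiting
	fmt.Println(item)
	q.Done(item)
	q.ShutDown()
}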

vendor/k8s.io/client-go/util/workqueue/metrics.go

@@ -20,7 +20,7 @@ import (
 	"sync"
 	"time"
-	"k8s.io/apimachinery/pkg/util/clock"
+	"k8s.io/utils/clock"
 )
 // This file provides abstractions for setting the provider (e.g., prometheus)
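
Note: this import swap recurs throughout the rebase. A tiny sketch (not from
the commit) of the replacement library: k8s.io/utils/clock splits the old
interface into PassiveClock (Now/Since) and richer variants such as
WithTicker, all satisfied by clock.RealClock:

package main

import (
	"fmt"

	"k8s.io/utils/clock"
)

func main() {
	var c clock.PassiveClock = clock.RealClock{} // real wall-clock time
	fmt.Println(c.Now())
}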

vendor/k8s.io/client-go/util/workqueue/queue.go

@@ -20,7 +20,7 @@ import (
 	"sync"
 	"time"
-	"k8s.io/apimachinery/pkg/util/clock"
+	"k8s.io/utils/clock"
 )
 type Interface interface {
@@ -29,6 +29,7 @@ type Interface interface {
 	Get() (item interface{}, shutdown bool)
 	Done(item interface{})
 	ShutDown()
+	ShutDownWithDrain()
 	ShuttingDown() bool
 }
@@ -46,7 +47,7 @@ func NewNamed(name string) *Type {
 	)
 }
-func newQueue(c clock.Clock, metrics queueMetrics, updatePeriod time.Duration) *Type {
+func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Duration) *Type {
 	t := &Type{
 		clock: c,
 		dirty: set{},
@@ -86,11 +87,12 @@ type Type struct {
 	cond *sync.Cond
 	shuttingDown bool
+	drain        bool
 	metrics queueMetrics
 	unfinishedWorkUpdatePeriod time.Duration
-	clock                      clock.Clock
+	clock                      clock.WithTicker
 }
 type empty struct{}
@@ -110,6 +112,10 @@ func (s set) delete(item t) {
 	delete(s, item)
 }
+func (s set) len() int {
+	return len(s)
+}
 // Add marks item as needing processing.
 func (q *Type) Add(item interface{}) {
 	q.cond.L.Lock()
@@ -155,7 +161,10 @@ func (q *Type) Get() (item interface{}, shutdown bool) {
 		return nil, true
 	}
-	item, q.queue = q.queue[0], q.queue[1:]
+	item = q.queue[0]
+	// The underlying array still exists and references this object, so the object will not be garbage collected.
+	q.queue[0] = nil
+	q.queue = q.queue[1:]
 	q.metrics.get(item)
@@ -178,13 +187,71 @@ func (q *Type) Done(item interface{}) {
 	if q.dirty.has(item) {
 		q.queue = append(q.queue, item)
 		q.cond.Signal()
+	} else if q.processing.len() == 0 {
+		q.cond.Signal()
 	}
 }
-// ShutDown will cause q to ignore all new items added to it. As soon as the
-// worker goroutines have drained the existing items in the queue, they will be
-// instructed to exit.
+// ShutDown will cause q to ignore all new items added to it and
+// immediately instruct the worker goroutines to exit.
 func (q *Type) ShutDown() {
+	q.setDrain(false)
+	q.shutdown()
+}
+// ShutDownWithDrain will cause q to ignore all new items added to it. As soon
+// as the worker goroutines have "drained", i.e. finished processing and called
+// Done on all existing items in the queue, they will be instructed to exit and
+// ShutDownWithDrain will return. Hence, a strict requirement for using this is:
+// your workers must ensure that Done is called on all items in the queue once
+// the shut down has been initiated; if that is not the case, this will block
+// indefinitely. It is, however, safe to call ShutDown after having called
+// ShutDownWithDrain, as to force the queue shut down to terminate immediately
+// without waiting for the drainage.
+func (q *Type) ShutDownWithDrain() {
+	q.setDrain(true)
+	q.shutdown()
+	for q.isProcessing() && q.shouldDrain() {
+		q.waitForProcessing()
+	}
+}
+// isProcessing indicates if there are still items on the work queue being
+// processed. It's used to drain the work queue on an eventual shutdown.
+func (q *Type) isProcessing() bool {
+	q.cond.L.Lock()
+	defer q.cond.L.Unlock()
+	return q.processing.len() != 0
+}
+// waitForProcessing waits for the worker goroutines to finish processing items
+// and call Done on them.
+func (q *Type) waitForProcessing() {
+	q.cond.L.Lock()
+	defer q.cond.L.Unlock()
+	// Ensure that we do not wait on a queue which is already empty, as that
+	// could result in waiting for Done to be called on items in an empty queue
+	// which has already been shut down, which will result in waiting
+	// indefinitely.
+	if q.processing.len() == 0 {
+		return
+	}
+	q.cond.Wait()
+}
+func (q *Type) setDrain(shouldDrain bool) {
+	q.cond.L.Lock()
+	defer q.cond.L.Unlock()
+	q.drain = shouldDrain
+}
+func (q *Type) shouldDrain() bool {
+	q.cond.L.Lock()
+	defer q.cond.L.Unlock()
+	return q.drain
+}
+func (q *Type) shutdown() {
 	q.cond.L.Lock()
 	defer q.cond.L.Unlock()
 	q.shuttingDown = true
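
Note: a hedged sketch (not part of the commit) of the new drain-aware
shutdown. The worker must call Done on its in-flight item, otherwise
ShutDownWithDrain blocks; the item name and sleeps are illustrative:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.New()
	q.Add("job-1")

	go func() {
		item, _ := q.Get()
		time.Sleep(100 * time.Millisecond) // simulate work
		fmt.Println("processed", item)
		q.Done(item) // required, or ShutDownWithDrain would wait forever
	}()

	time.Sleep(10 * time.Millisecond) // let the worker pick the item up first
	q.ShutDownWithDrain()             // returns only after the worker calls Done
	fmt.Println("queue drained and shut down")
}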