Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 10:33:35 +00:00
Updated vendor files
vendor/k8s.io/client-go/util/flowcontrol/BUILD (generated, vendored): 45 lines deleted
@@ -1,45 +0,0 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_test(
    name = "go_default_test",
    srcs = [
        "backoff_test.go",
        "throttle_test.go",
    ],
    importpath = "k8s.io/client-go/util/flowcontrol",
    library = ":go_default_library",
    deps = ["//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library"],
)

go_library(
    name = "go_default_library",
    srcs = [
        "backoff.go",
        "throttle.go",
    ],
    importpath = "k8s.io/client-go/util/flowcontrol",
    deps = [
        "//vendor/github.com/juju/ratelimit:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
        "//vendor/k8s.io/client-go/util/integer:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
vendor/k8s.io/client-go/util/flowcontrol/backoff.go (generated, vendored): 149 lines deleted
@@ -1,149 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flowcontrol

import (
	"sync"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
	"k8s.io/client-go/util/integer"
)

type backoffEntry struct {
	backoff    time.Duration
	lastUpdate time.Time
}

type Backoff struct {
	sync.Mutex
	Clock           clock.Clock
	defaultDuration time.Duration
	maxDuration     time.Duration
	perItemBackoff  map[string]*backoffEntry
}

func NewFakeBackOff(initial, max time.Duration, tc *clock.FakeClock) *Backoff {
	return &Backoff{
		perItemBackoff:  map[string]*backoffEntry{},
		Clock:           tc,
		defaultDuration: initial,
		maxDuration:     max,
	}
}

func NewBackOff(initial, max time.Duration) *Backoff {
	return &Backoff{
		perItemBackoff:  map[string]*backoffEntry{},
		Clock:           clock.RealClock{},
		defaultDuration: initial,
		maxDuration:     max,
	}
}

// Get the current backoff Duration
func (p *Backoff) Get(id string) time.Duration {
	p.Lock()
	defer p.Unlock()
	var delay time.Duration
	entry, ok := p.perItemBackoff[id]
	if ok {
		delay = entry.backoff
	}
	return delay
}

// move backoff to the next mark, capping at maxDuration
func (p *Backoff) Next(id string, eventTime time.Time) {
	p.Lock()
	defer p.Unlock()
	entry, ok := p.perItemBackoff[id]
	if !ok || hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
		entry = p.initEntryUnsafe(id)
	} else {
		delay := entry.backoff * 2 // exponential
		entry.backoff = time.Duration(integer.Int64Min(int64(delay), int64(p.maxDuration)))
	}
	entry.lastUpdate = p.Clock.Now()
}

// Reset forces clearing of all backoff data for a given key.
func (p *Backoff) Reset(id string) {
	p.Lock()
	defer p.Unlock()
	delete(p.perItemBackoff, id)
}

// Returns True if the elapsed time since eventTime is smaller than the current backoff window
func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool {
	p.Lock()
	defer p.Unlock()
	entry, ok := p.perItemBackoff[id]
	if !ok {
		return false
	}
	if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
		return false
	}
	return p.Clock.Now().Sub(eventTime) < entry.backoff
}

// Returns True if time since lastUpdate is less than the current backoff window.
func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool {
	p.Lock()
	defer p.Unlock()
	entry, ok := p.perItemBackoff[id]
	if !ok {
		return false
	}
	if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
		return false
	}
	return eventTime.Sub(entry.lastUpdate) < entry.backoff
}

// Garbage collect records that have aged past maxDuration. Backoff users are expected
// to invoke this periodically.
func (p *Backoff) GC() {
	p.Lock()
	defer p.Unlock()
	now := p.Clock.Now()
	for id, entry := range p.perItemBackoff {
		if now.Sub(entry.lastUpdate) > p.maxDuration*2 {
			// GC when entry has not been updated for 2*maxDuration
			delete(p.perItemBackoff, id)
		}
	}
}

func (p *Backoff) DeleteEntry(id string) {
	p.Lock()
	defer p.Unlock()
	delete(p.perItemBackoff, id)
}

// Take a lock on *Backoff, before calling initEntryUnsafe
func (p *Backoff) initEntryUnsafe(id string) *backoffEntry {
	entry := &backoffEntry{backoff: p.defaultDuration}
	p.perItemBackoff[id] = entry
	return entry
}

// After 2*maxDuration we restart the backoff factor to the beginning
func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool {
	return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration
}
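For orientation, here is a minimal sketch of how this Backoff type is typically driven; the item key and the durations are illustrative choices, not values taken from this diff:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// Start each failing item at 1s of delay, cap per-item delay at 30s (illustrative values).
	b := flowcontrol.NewBackOff(1*time.Second, 30*time.Second)

	id := "item-x" // hypothetical per-item key; callers pick their own IDs

	for i := 0; i < 3; i++ {
		// Record a failure: initializes the delay, then doubles it, capped at maxDuration.
		b.Next(id, b.Clock.Now())
		fmt.Println("current delay:", b.Get(id)) // 1s, 2s, 4s
	}

	// Consult the window before retrying.
	if b.IsInBackOffSinceUpdate(id, b.Clock.Now()) {
		fmt.Println("still inside the backoff window")
	}

	b.Reset(id) // clear the entry once the operation succeeds
}

Note that an entry left idle for 2*maxDuration is treated as expired by hasExpired, so the next call to Next restarts it at defaultDuration.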
vendor/k8s.io/client-go/util/flowcontrol/backoff_test.go (generated, vendored): 195 lines deleted
@@ -1,195 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flowcontrol

import (
	"testing"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
)

func TestSlowBackoff(t *testing.T) {
	id := "_idSlow"
	tc := clock.NewFakeClock(time.Now())
	step := time.Second
	maxDuration := 50 * step

	b := NewFakeBackOff(step, maxDuration, tc)
	cases := []time.Duration{0, 1, 2, 4, 8, 16, 32, 50, 50, 50}
	for ix, c := range cases {
		tc.Step(step)
		w := b.Get(id)
		if w != c*step {
			t.Errorf("input: '%d': expected %s, got %s", ix, c*step, w)
		}
		b.Next(id, tc.Now())
	}

	// Now confirm that the Reset cancels backoff.
	b.Next(id, tc.Now())
	b.Reset(id)
	if b.Get(id) != 0 {
		t.Errorf("Reset didn't clear the backoff.")
	}
}

func TestBackoffReset(t *testing.T) {
	id := "_idReset"
	tc := clock.NewFakeClock(time.Now())
	step := time.Second
	maxDuration := step * 5
	b := NewFakeBackOff(step, maxDuration, tc)
	startTime := tc.Now()

	// get to backoff = maxDuration
	for i := 0; i <= int(maxDuration/step); i++ {
		tc.Step(step)
		b.Next(id, tc.Now())
	}

	// backoff should be capped at maxDuration
	if !b.IsInBackOffSince(id, tc.Now()) {
		t.Errorf("expected to be in Backoff got %s", b.Get(id))
	}

	lastUpdate := tc.Now()
	tc.Step(2*maxDuration + step) // time += 11s, 11 > 2*maxDuration
	if b.IsInBackOffSince(id, lastUpdate) {
		t.Errorf("expected to not be in Backoff after reset (start=%s, now=%s, lastUpdate=%s), got %s", startTime, tc.Now(), lastUpdate, b.Get(id))
	}
}

func TestBackoffHightWaterMark(t *testing.T) {
	id := "_idHiWaterMark"
	tc := clock.NewFakeClock(time.Now())
	step := time.Second
	maxDuration := 5 * step
	b := NewFakeBackOff(step, maxDuration, tc)

	// get to backoff = maxDuration
	for i := 0; i <= int(maxDuration/step); i++ {
		tc.Step(step)
		b.Next(id, tc.Now())
	}

	// backoff high watermark expires after 2*maxDuration
	tc.Step(maxDuration + step)
	b.Next(id, tc.Now())

	if b.Get(id) != maxDuration {
		t.Errorf("expected Backoff to stay at high watermark %s got %s", maxDuration, b.Get(id))
	}
}

func TestBackoffGC(t *testing.T) {
	id := "_idGC"
	tc := clock.NewFakeClock(time.Now())
	step := time.Second
	maxDuration := 5 * step

	b := NewFakeBackOff(step, maxDuration, tc)

	for i := 0; i <= int(maxDuration/step); i++ {
		tc.Step(step)
		b.Next(id, tc.Now())
	}
	lastUpdate := tc.Now()
	tc.Step(maxDuration + step)
	b.GC()
	_, found := b.perItemBackoff[id]
	if !found {
		t.Errorf("expected GC to skip entry, elapsed time=%s maxDuration=%s", tc.Now().Sub(lastUpdate), maxDuration)
	}

	tc.Step(maxDuration + step)
	b.GC()
	r, found := b.perItemBackoff[id]
	if found {
		t.Errorf("expected GC of entry after %s got entry %v", tc.Now().Sub(lastUpdate), r)
	}
}

func TestIsInBackOffSinceUpdate(t *testing.T) {
	id := "_idIsInBackOffSinceUpdate"
	tc := clock.NewFakeClock(time.Now())
	step := time.Second
	maxDuration := 10 * step
	b := NewFakeBackOff(step, maxDuration, tc)
	startTime := tc.Now()

	cases := []struct {
		tick      time.Duration
		inBackOff bool
		value     int
	}{
		{tick: 0, inBackOff: false, value: 0},
		{tick: 1, inBackOff: false, value: 1},
		{tick: 2, inBackOff: true, value: 2},
		{tick: 3, inBackOff: false, value: 2},
		{tick: 4, inBackOff: true, value: 4},
		{tick: 5, inBackOff: true, value: 4},
		{tick: 6, inBackOff: true, value: 4},
		{tick: 7, inBackOff: false, value: 4},
		{tick: 8, inBackOff: true, value: 8},
		{tick: 9, inBackOff: true, value: 8},
		{tick: 10, inBackOff: true, value: 8},
		{tick: 11, inBackOff: true, value: 8},
		{tick: 12, inBackOff: true, value: 8},
		{tick: 13, inBackOff: true, value: 8},
		{tick: 14, inBackOff: true, value: 8},
		{tick: 15, inBackOff: false, value: 8},
		{tick: 16, inBackOff: true, value: 10},
		{tick: 17, inBackOff: true, value: 10},
		{tick: 18, inBackOff: true, value: 10},
		{tick: 19, inBackOff: true, value: 10},
		{tick: 20, inBackOff: true, value: 10},
		{tick: 21, inBackOff: true, value: 10},
		{tick: 22, inBackOff: true, value: 10},
		{tick: 23, inBackOff: true, value: 10},
		{tick: 24, inBackOff: true, value: 10},
		{tick: 25, inBackOff: false, value: 10},
		{tick: 26, inBackOff: true, value: 10},
		{tick: 27, inBackOff: true, value: 10},
		{tick: 28, inBackOff: true, value: 10},
		{tick: 29, inBackOff: true, value: 10},
		{tick: 30, inBackOff: true, value: 10},
		{tick: 31, inBackOff: true, value: 10},
		{tick: 32, inBackOff: true, value: 10},
		{tick: 33, inBackOff: true, value: 10},
		{tick: 34, inBackOff: true, value: 10},
		{tick: 35, inBackOff: false, value: 10},
		{tick: 56, inBackOff: false, value: 0},
		{tick: 57, inBackOff: false, value: 1},
	}

	for _, c := range cases {
		tc.SetTime(startTime.Add(c.tick * step))
		if c.inBackOff != b.IsInBackOffSinceUpdate(id, tc.Now()) {
			t.Errorf("expected IsInBackOffSinceUpdate %v got %v at tick %s", c.inBackOff, b.IsInBackOffSinceUpdate(id, tc.Now()), c.tick*step)
		}

		if c.inBackOff && (time.Duration(c.value)*step != b.Get(id)) {
			t.Errorf("expected backoff value=%s got %s at tick %s", time.Duration(c.value)*step, b.Get(id), c.tick*step)
		}

		if !c.inBackOff {
			b.Next(id, tc.Now())
		}
	}
}
vendor/k8s.io/client-go/util/flowcontrol/throttle.go (generated, vendored): 148 lines deleted
@@ -1,148 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flowcontrol

import (
	"sync"

	"github.com/juju/ratelimit"
)

type RateLimiter interface {
	// TryAccept returns true if a token is taken immediately. Otherwise,
	// it returns false.
	TryAccept() bool
	// Accept returns once a token becomes available.
	Accept()
	// Stop stops the rate limiter; subsequent calls to TryAccept will return false.
	Stop()
	// Saturation returns a percentage number which describes how saturated
	// this rate limiter is.
	// Usually we use token bucket rate limiter. In that case,
	// 1.0 means no tokens are available; 0.0 means we have a full bucket of tokens to use.
	Saturation() float64
	// QPS returns QPS of this rate limiter
	QPS() float32
}

type tokenBucketRateLimiter struct {
	limiter *ratelimit.Bucket
	qps     float32
}

// NewTokenBucketRateLimiter creates a rate limiter which implements a token bucket approach.
// The rate limiter allows bursts of up to 'burst' to exceed the QPS, while still maintaining a
// smoothed qps rate of 'qps'.
// The bucket is initially filled with 'burst' tokens, and refills at a rate of 'qps'.
// The maximum number of tokens in the bucket is capped at 'burst'.
func NewTokenBucketRateLimiter(qps float32, burst int) RateLimiter {
	limiter := ratelimit.NewBucketWithRate(float64(qps), int64(burst))
	return newTokenBucketRateLimiter(limiter, qps)
}

// An injectable, mockable clock interface.
type Clock interface {
	ratelimit.Clock
}

// NewTokenBucketRateLimiterWithClock is identical to NewTokenBucketRateLimiter
// but allows an injectable clock, for testing.
func NewTokenBucketRateLimiterWithClock(qps float32, burst int, clock Clock) RateLimiter {
	limiter := ratelimit.NewBucketWithRateAndClock(float64(qps), int64(burst), clock)
	return newTokenBucketRateLimiter(limiter, qps)
}

func newTokenBucketRateLimiter(limiter *ratelimit.Bucket, qps float32) RateLimiter {
	return &tokenBucketRateLimiter{
		limiter: limiter,
		qps:     qps,
	}
}

func (t *tokenBucketRateLimiter) TryAccept() bool {
	return t.limiter.TakeAvailable(1) == 1
}

func (t *tokenBucketRateLimiter) Saturation() float64 {
	capacity := t.limiter.Capacity()
	avail := t.limiter.Available()
	return float64(capacity-avail) / float64(capacity)
}

// Accept will block until a token becomes available
func (t *tokenBucketRateLimiter) Accept() {
	t.limiter.Wait(1)
}

func (t *tokenBucketRateLimiter) Stop() {
}

func (t *tokenBucketRateLimiter) QPS() float32 {
	return t.qps
}

type fakeAlwaysRateLimiter struct{}

func NewFakeAlwaysRateLimiter() RateLimiter {
	return &fakeAlwaysRateLimiter{}
}

func (t *fakeAlwaysRateLimiter) TryAccept() bool {
	return true
}

func (t *fakeAlwaysRateLimiter) Saturation() float64 {
	return 0
}

func (t *fakeAlwaysRateLimiter) Stop() {}

func (t *fakeAlwaysRateLimiter) Accept() {}

func (t *fakeAlwaysRateLimiter) QPS() float32 {
	return 1
}

type fakeNeverRateLimiter struct {
	wg sync.WaitGroup
}

func NewFakeNeverRateLimiter() RateLimiter {
	rl := fakeNeverRateLimiter{}
	rl.wg.Add(1)
	return &rl
}

func (t *fakeNeverRateLimiter) TryAccept() bool {
	return false
}

func (t *fakeNeverRateLimiter) Saturation() float64 {
	return 1
}

func (t *fakeNeverRateLimiter) Stop() {
	t.wg.Done()
}

func (t *fakeNeverRateLimiter) Accept() {
	t.wg.Wait()
}

func (t *fakeNeverRateLimiter) QPS() float32 {
	return 1
}
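Likewise, a minimal sketch of how this RateLimiter is consumed; the qps and burst values here are illustrative, not from the diff:

package main

import (
	"fmt"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// Refill at 5 tokens per second, allow bursts of up to 10;
	// the bucket starts full, per the constructor's doc comment.
	rl := flowcontrol.NewTokenBucketRateLimiter(5, 10)

	// Non-blocking path: take a token only if one is available right now.
	if rl.TryAccept() {
		fmt.Println("request admitted immediately")
	}

	// Blocking path: wait until the juju/ratelimit bucket refills.
	rl.Accept()

	// Saturation is 1.0 when no tokens remain and 0.0 on a full bucket.
	fmt.Printf("saturation: %.2f, qps: %.0f\n", rl.Saturation(), rl.QPS())
}

TryAccept suits hot paths that should shed load rather than queue, while Accept parks the caller; note that Stop is a no-op in the token-bucket implementation shown above.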
vendor/k8s.io/client-go/util/flowcontrol/throttle_test.go (generated, vendored): 177 lines deleted
@@ -1,177 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flowcontrol

import (
	"math"
	"sync"
	"testing"
	"time"
)

func TestMultithreadedThrottling(t *testing.T) {
	// Bucket with 100QPS and no burst
	r := NewTokenBucketRateLimiter(100, 1)

	// channel to collect 100 tokens
	taken := make(chan bool, 100)

	// Set up goroutines to hammer the throttler
	startCh := make(chan bool)
	endCh := make(chan bool)
	for i := 0; i < 10; i++ {
		go func() {
			// wait for the starting signal
			<-startCh
			for {
				// get a token
				r.Accept()
				select {
				// try to add it to the taken channel
				case taken <- true:
					continue
				// if taken is full, notify and return
				default:
					endCh <- true
					return
				}
			}
		}()
	}

	// record wall time
	startTime := time.Now()
	// take the initial capacity so all tokens are the result of refill
	r.Accept()
	// start the thundering herd
	close(startCh)
	// wait for the first signal that we collected 100 tokens
	<-endCh
	// record wall time
	endTime := time.Now()

	// tolerate a 1% clock change because these things happen
	if duration := endTime.Sub(startTime); duration < (time.Second * 99 / 100) {
		// We shouldn't be able to get 100 tokens out of the bucket in less than 1 second of wall clock time, no matter what
		t.Errorf("Expected it to take at least 1 second to get 100 tokens, took %v", duration)
	} else {
		t.Logf("Took %v to get 100 tokens", duration)
	}
}

func TestBasicThrottle(t *testing.T) {
	r := NewTokenBucketRateLimiter(1, 3)
	for i := 0; i < 3; i++ {
		if !r.TryAccept() {
			t.Error("unexpected false accept")
		}
	}
	if r.TryAccept() {
		t.Error("unexpected true accept")
	}
}

func TestIncrementThrottle(t *testing.T) {
	r := NewTokenBucketRateLimiter(1, 1)
	if !r.TryAccept() {
		t.Error("unexpected false accept")
	}
	if r.TryAccept() {
		t.Error("unexpected true accept")
	}

	// Allow to refill
	time.Sleep(2 * time.Second)

	if !r.TryAccept() {
		t.Error("unexpected false accept")
	}
}

func TestThrottle(t *testing.T) {
	r := NewTokenBucketRateLimiter(10, 5)

	// Should consume 5 tokens immediately, then
	// the remaining 11 should take at least 1 second (0.1s each)
	expectedFinish := time.Now().Add(time.Second * 1)
	for i := 0; i < 16; i++ {
		r.Accept()
	}
	if time.Now().Before(expectedFinish) {
		t.Error("rate limit was not respected, finished too early")
	}
}

func TestRateLimiterSaturation(t *testing.T) {
	const e = 0.000001
	tests := []struct {
		capacity int
		take     int

		expectedSaturation float64
	}{
		{1, 1, 1},
		{10, 3, 0.3},
	}
	for i, tt := range tests {
		rl := NewTokenBucketRateLimiter(1, tt.capacity)
		for i := 0; i < tt.take; i++ {
			rl.Accept()
		}
		if math.Abs(rl.Saturation()-tt.expectedSaturation) > e {
			t.Fatalf("#%d: Saturation rate difference isn't within tolerable range\n want=%f, get=%f",
				i, tt.expectedSaturation, rl.Saturation())
		}
	}
}

func TestAlwaysFake(t *testing.T) {
	rl := NewFakeAlwaysRateLimiter()
	if !rl.TryAccept() {
		t.Error("TryAccept in AlwaysFake should return true.")
	}
	// If this blocks, the test will time out.
	rl.Accept()
}

func TestNeverFake(t *testing.T) {
	rl := NewFakeNeverRateLimiter()
	if rl.TryAccept() {
		t.Error("TryAccept in NeverFake should return false.")
	}

	finished := false
	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		rl.Accept()
		finished = true
		wg.Done()
	}()

	// Wait some time to make sure it never finished.
	time.Sleep(time.Second)
	if finished {
		t.Error("Accept should block forever in NeverFake.")
	}

	rl.Stop()
	wg.Wait()
	if !finished {
		t.Error("Stop should make Accept unblock in NeverFake.")
	}
}