Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 10:53:34 +00:00
rebase: update k8s.io packages to v0.29.0
Signed-off-by: Niels de Vos <ndevos@ibm.com>
commit f080b9e0c9 (parent 328a264202)
committed by mergify[bot]
vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go (generated, vendored): 7 changed lines
@@ -18,7 +18,6 @@ package fairqueuing
 
 import (
 	"context"
-	"time"
 
 	"k8s.io/apiserver/pkg/util/flowcontrol/debug"
 	"k8s.io/apiserver/pkg/util/flowcontrol/metrics"
@@ -117,7 +116,7 @@ type QueuingConfig struct {
 
 	// DesiredNumQueues is the number of queues that the API says
 	// should exist now. This may be non-positive, in which case
-	// QueueLengthLimit, HandSize, and RequestWaitLimit are ignored.
+	// QueueLengthLimit, and HandSize are ignored.
 	// A value of zero means to respect the ConcurrencyLimit of the DispatchingConfig.
 	// A negative value means to always dispatch immediately upon arrival
 	// (i.e., the requests are "exempt" from limitation).
@@ -129,10 +128,6 @@ type QueuingConfig struct {
 	// HandSize is a parameter of shuffle sharding. Upon arrival of a request, a queue is chosen by randomly
 	// dealing a "hand" of this many queues and then picking one of minimum length.
 	HandSize int
-
-	// RequestWaitLimit is the maximum amount of time that a request may wait in a queue.
-	// If, by the end of that time, the request has not been dispatched then it is rejected.
-	RequestWaitLimit time.Duration
 }
 
 // DispatchingConfig defines the configuration of the dispatching aspect of a QueueSet.
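Note: the visible API change in this file is the removal of QueuingConfig.RequestWaitLimit. A minimal sketch of constructing the struct after this update; the call site and the field values are illustrative, not taken from ceph-csi (the Name field is grounded in the qs.qCfg.Name uses later in this diff):

    package example

    import (
        fq "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing"
    )

    // exampleQueuingConfig shows the post-update field set. Per the comment
    // in the diff above, QueueLengthLimit and HandSize are honored only when
    // DesiredNumQueues is positive; RequestWaitLimit no longer exists here.
    func exampleQueuingConfig() fq.QueuingConfig {
        return fq.QueuingConfig{
            Name:             "example-priority-level",
            DesiredNumQueues: 64,
            QueueLengthLimit: 50,
            HandSize:         8,
        }
    }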
vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/promise.go (generated, vendored): 15 changed lines
@@ -17,12 +17,13 @@ limitations under the License.
 package promise
 
 import (
+	"context"
 	"sync"
 )
 
 // promise implements the WriteOnce interface.
 type promise struct {
-	doneCh  <-chan struct{}
+	doneCtx context.Context
 	doneVal interface{}
 	setCh   chan struct{}
 	onceler sync.Once
@@ -35,12 +36,12 @@ var _ WriteOnce = &promise{}
 //
 // If `initial` is non-nil then that value is Set at creation time.
 //
-// If a `Get` is waiting soon after `doneCh` becomes selectable (which
-// never happens for the nil channel) then `Set(doneVal)` effectively
-// happens at that time.
-func NewWriteOnce(initial interface{}, doneCh <-chan struct{}, doneVal interface{}) WriteOnce {
+// If a `Get` is waiting soon after the channel associated with the
+// `doneCtx` becomes selectable (which never happens for the nil
+// channel) then `Set(doneVal)` effectively happens at that time.
+func NewWriteOnce(initial interface{}, doneCtx context.Context, doneVal interface{}) WriteOnce {
 	p := &promise{
-		doneCh:  doneCh,
+		doneCtx: doneCtx,
 		doneVal: doneVal,
 		setCh:   make(chan struct{}),
 	}
@@ -53,7 +54,7 @@ func NewWriteOnce(initial interface{}, doneCh <-chan struct{}, doneVal interface
 func (p *promise) Get() interface{} {
 	select {
 	case <-p.setCh:
-	case <-p.doneCh:
+	case <-p.doneCtx.Done():
 		p.Set(p.doneVal)
 	}
 	return p.value
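Note: promise.NewWriteOnce now takes the context itself rather than its Done() channel. A hedged migration sketch; the caller code here is hypothetical, while the constructor, Get, and Set semantics are from the diff above:

    package main

    import (
        "context"
        "fmt"

        "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise"
    )

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        // Before this update: promise.NewWriteOnce(nil, ctx.Done(), "canceled")
        // After: pass ctx directly. If ctx ends before any Set, a waiting
        // Get effectively observes Set("canceled") instead of blocking forever.
        p := promise.NewWriteOnce(nil, ctx, "canceled")

        p.Set("done") // WriteOnce: the first Set wins, later Sets report false
        fmt.Println(p.Get())
    }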
vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go (generated, vendored): 83 changed lines
@@ -53,7 +53,7 @@ type queueSetFactory struct {
 // - whose Set method is invoked with the queueSet locked, and
 // - whose Get method is invoked with the queueSet not locked.
 // The parameters are the same as for `promise.NewWriteOnce`.
-type promiseFactory func(initial interface{}, doneCh <-chan struct{}, doneVal interface{}) promise.WriteOnce
+type promiseFactory func(initial interface{}, doneCtx context.Context, doneVal interface{}) promise.WriteOnce
 
 // promiseFactoryFactory returns the promiseFactory to use for the given queueSet
 type promiseFactoryFactory func(*queueSet) promiseFactory
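Note: the promiseFactory type changes in lockstep with promise.NewWriteOnce. As a sketch, written as if inside this vendored package (promiseFactory is unexported), the simplest conforming factory just delegates to the constructor; the production wiring may differ:

    // A minimal promiseFactory under the new context-based signature;
    // tests can substitute a fake that resolves deterministically.
    var defaultFactory promiseFactory = func(initial interface{}, doneCtx context.Context, doneVal interface{}) promise.WriteOnce {
        return promise.NewWriteOnce(initial, doneCtx, doneVal)
    }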
@@ -272,7 +272,6 @@ func (qs *queueSet) setConfiguration(ctx context.Context, qCfg fq.QueuingConfig,
 	} else {
 		qCfg.QueueLengthLimit = qs.qCfg.QueueLengthLimit
 		qCfg.HandSize = qs.qCfg.HandSize
-		qCfg.RequestWaitLimit = qs.qCfg.RequestWaitLimit
 	}
 
 	qs.qCfg = qCfg
@@ -300,9 +299,6 @@ const (
 	// Serve this one
 	decisionExecute requestDecision = iota
 
-	// Reject this one due to APF queuing considerations
-	decisionReject
-
 	// This one's context timed out / was canceled
 	decisionCancel
 )
@@ -337,11 +333,10 @@ func (qs *queueSet) StartRequest(ctx context.Context, workEstimate *fqrequest.Wo
 	// ========================================================================
 	// Step 1:
 	// 1) Start with shuffle sharding, to pick a queue.
-	// 2) Reject old requests that have been waiting too long
-	// 3) Reject current request if there is not enough concurrency shares and
+	// 2) Reject current request if there is not enough concurrency shares and
 	//    we are at max queue length
-	// 4) If not rejected, create a request and enqueue
-	req = qs.timeoutOldRequestsAndRejectOrEnqueueLocked(ctx, workEstimate, hashValue, flowDistinguisher, fsName, descr1, descr2, queueNoteFn)
+	// 3) If not rejected, create a request and enqueue
+	req = qs.shuffleShardAndRejectOrEnqueueLocked(ctx, workEstimate, hashValue, flowDistinguisher, fsName, descr1, descr2, queueNoteFn)
 	// req == nil means that the request was rejected - no remaining
 	// concurrency shares and at max queue length already
 	if req == nil {
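Note: step 1 above now names shuffle sharding explicitly. As a toy illustration only (the real dealer lives in k8s.io/apiserver/pkg/util/shufflesharding and uses a proper without-replacement dealing scheme), picking a queue means dealing a hand of indices from the flow's hash and taking the least loaded:

    package example

    // pickQueueToy deals handSize indices from a flow's hash and returns the
    // index of the shortest queue among the dealt hand. Simplified stand-in,
    // not the vendored algorithm: it assumes numQueues > 0 and
    // len(lengths) == numQueues, and it may deal the same index twice.
    func pickQueueToy(hash uint64, numQueues, handSize int, lengths []int) int {
        best := -1
        for i := 0; i < handSize; i++ {
            idx := int(hash % uint64(numQueues)) // toy dealing step
            hash /= uint64(numQueues)
            if best == -1 || lengths[idx] < lengths[best] {
                best = idx
            }
        }
        return best
    }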
@@ -422,13 +417,7 @@ func (req *request) wait() (bool, bool) {
 	}
 	req.waitStarted = true
 	switch decisionAny {
-	case decisionReject:
-		klog.V(5).Infof("QS(%s): request %#+v %#+v timed out after being enqueued\n", qs.qCfg.Name, req.descr1, req.descr2)
-		qs.totRequestsRejected++
-		qs.totRequestsTimedout++
-		metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "time-out")
-		return false, qs.isIdleLocked()
-	case decisionCancel:
+	case decisionCancel: // handle in code following this switch
 	case decisionExecute:
 		klog.V(5).Infof("QS(%s): Dispatching request %#+v %#+v from its queue", qs.qCfg.Name, req.descr1, req.descr2)
 		return true, false
@@ -438,7 +427,7 @@ func (req *request) wait() (bool, bool) {
 	}
 	// TODO(aaron-prindle) add metrics for this case
 	klog.V(5).Infof("QS(%s): Ejecting request %#+v %#+v from its queue", qs.qCfg.Name, req.descr1, req.descr2)
-	// remove the request from the queue as it has timed out
+	// remove the request from the queue as its queue wait time has exceeded
 	queue := req.queue
 	if req.removeFromQueueLocked() != nil {
 		defer qs.boundNextDispatchLocked(queue)
@@ -446,8 +435,9 @@ func (req *request) wait() (bool, bool) {
 		qs.totSeatsWaiting -= req.MaxSeats()
 		qs.totRequestsRejected++
 		qs.totRequestsCancelled++
-		metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "cancelled")
+		metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "time-out")
 		metrics.AddRequestsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -1)
+		metrics.AddSeatsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -req.MaxSeats())
 		req.NoteQueued(false)
 		qs.reqsGaugePair.RequestsWaiting.Add(-1)
 		qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting))
@@ -555,25 +545,19 @@ func (qs *queueSet) getVirtualTimeRatioLocked() float64 {
 	return math.Min(float64(seatsRequested), float64(qs.dCfg.ConcurrencyLimit)) / float64(activeQueues)
 }
 
-// timeoutOldRequestsAndRejectOrEnqueueLocked encapsulates the logic required
+// shuffleShardAndRejectOrEnqueueLocked encapsulates the logic required
 // to validate and enqueue a request for the queueSet/QueueSet:
 // 1) Start with shuffle sharding, to pick a queue.
-// 2) Reject old requests that have been waiting too long
-// 3) Reject current request if there is not enough concurrency shares and
+// 2) Reject current request if there is not enough concurrency shares and
 //    we are at max queue length
-// 4) If not rejected, create a request and enqueue
+// 3) If not rejected, create a request and enqueue
 // returns the enqueud request on a successful enqueue
 // returns nil in the case that there is no available concurrency or
 // the queuelengthlimit has been reached
-func (qs *queueSet) timeoutOldRequestsAndRejectOrEnqueueLocked(ctx context.Context, workEstimate *fqrequest.WorkEstimate, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) *request {
+func (qs *queueSet) shuffleShardAndRejectOrEnqueueLocked(ctx context.Context, workEstimate *fqrequest.WorkEstimate, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) *request {
 	// Start with the shuffle sharding, to pick a queue.
 	queueIdx := qs.shuffleShardLocked(hashValue, descr1, descr2)
 	queue := qs.queues[queueIdx]
-	// The next step is the logic to reject requests that have been waiting too long
-	qs.removeTimedOutRequestsFromQueueToBoundLocked(queue, fsName)
-	// NOTE: currently timeout is only checked for each new request. This means that there can be
-	// requests that are in the queue longer than the timeout if there are no new requests
-	// We prefer the simplicity over the promptness, at least for now.
 
 	defer qs.boundNextDispatchLocked(queue)
 
@@ -583,7 +567,7 @@ func (qs *queueSet) timeoutOldRequestsAndRejectOrEnqueueLocked(ctx context.Conte
 		fsName:            fsName,
 		flowDistinguisher: flowDistinguisher,
 		ctx:               ctx,
-		decision:          qs.promiseFactory(nil, ctx.Done(), decisionCancel),
+		decision:          qs.promiseFactory(nil, ctx, decisionCancel),
 		arrivalTime:       qs.clock.Now(),
 		arrivalR:          qs.currentR,
 		queue:             queue,
@@ -632,43 +616,6 @@ func (qs *queueSet) shuffleShardLocked(hashValue uint64, descr1, descr2 interfac
 	return bestQueueIdx
 }
 
-// removeTimedOutRequestsFromQueueToBoundLocked rejects old requests that have been enqueued
-// past the requestWaitLimit
-func (qs *queueSet) removeTimedOutRequestsFromQueueToBoundLocked(queue *queue, fsName string) {
-	timeoutCount := 0
-	disqueueSeats := 0
-	now := qs.clock.Now()
-	reqs := queue.requestsWaiting
-	// reqs are sorted oldest -> newest
-	// can short circuit loop (break) if oldest requests are not timing out
-	// as newer requests also will not have timed out
-
-	// now - requestWaitLimit = arrivalLimit
-	arrivalLimit := now.Add(-qs.qCfg.RequestWaitLimit)
-	reqs.Walk(func(req *request) bool {
-		if arrivalLimit.After(req.arrivalTime) {
-			if req.decision.Set(decisionReject) && req.removeFromQueueLocked() != nil {
-				timeoutCount++
-				disqueueSeats += req.MaxSeats()
-				req.NoteQueued(false)
-				metrics.AddRequestsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -1)
-			}
-			// we need to check if the next request has timed out.
-			return true
-		}
-		// since reqs are sorted oldest -> newest, we are done here.
-		return false
-	})
-
-	// remove timed out requests from queue
-	if timeoutCount > 0 {
-		qs.totRequestsWaiting -= timeoutCount
-		qs.totSeatsWaiting -= disqueueSeats
-		qs.reqsGaugePair.RequestsWaiting.Add(float64(-timeoutCount))
-		qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting))
-	}
-}
-
 // rejectOrEnqueueToBoundLocked rejects or enqueues the newly arrived
 // request, which has been assigned to a queue. If up against the
 // queue length limit and the concurrency limit then returns false.
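Note: with removeTimedOutRequestsFromQueueToBoundLocked deleted, a queued request's wait is bounded only by its own context; per the promiseFactory(nil, ctx, decisionCancel) wiring above, the decision promise resolves to decisionCancel when that context ends. A hedged sketch of how a caller could reimpose a per-request wait limit; the function name and the 15s value are illustrative:

    package main

    import (
        "context"
        "time"
    )

    // boundedCtx derives a context whose deadline stands in for the removed
    // RequestWaitLimit knob; pass the derived ctx into StartRequest so a
    // request that waits too long in its queue is canceled.
    func boundedCtx(parent context.Context) (context.Context, context.CancelFunc) {
        return context.WithTimeout(parent, 15*time.Second)
    }

    func main() {
        ctx, cancel := boundedCtx(context.Background())
        defer cancel()
        _ = ctx // in real code: qs.StartRequest(ctx, ...)
    }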
@@ -702,6 +649,7 @@ func (qs *queueSet) enqueueToBoundLocked(request *request) {
 	qs.totRequestsWaiting++
 	qs.totSeatsWaiting += request.MaxSeats()
 	metrics.AddRequestsInQueues(request.ctx, qs.qCfg.Name, request.fsName, 1)
+	metrics.AddSeatsInQueues(request.ctx, qs.qCfg.Name, request.fsName, request.MaxSeats())
 	request.NoteQueued(true)
 	qs.reqsGaugePair.RequestsWaiting.Add(1)
 	qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting))
@@ -722,7 +670,7 @@ func (qs *queueSet) dispatchSansQueueLocked(ctx context.Context, workEstimate *f
 		flowDistinguisher: flowDistinguisher,
 		ctx:               ctx,
 		startTime:         now,
-		decision:          qs.promiseFactory(decisionExecute, ctx.Done(), decisionCancel),
+		decision:          qs.promiseFactory(decisionExecute, ctx, decisionCancel),
 		arrivalTime:       now,
 		arrivalR:          qs.currentR,
 		descr1:            descr1,
@@ -760,6 +708,7 @@ func (qs *queueSet) dispatchLocked() bool {
 	qs.totRequestsWaiting--
 	qs.totSeatsWaiting -= request.MaxSeats()
 	metrics.AddRequestsInQueues(request.ctx, qs.qCfg.Name, request.fsName, -1)
+	metrics.AddSeatsInQueues(request.ctx, qs.qCfg.Name, request.fsName, -request.MaxSeats())
 	request.NoteQueued(false)
 	qs.reqsGaugePair.RequestsWaiting.Add(-1)
 	defer qs.boundNextDispatchLocked(queue)