Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 10:53:34 +00:00)
rebase: update kubernetes to 1.28.0 in main
Updating kubernetes to 1.28.0 in the main repo.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Commit ff3e84ad67 (parent b2fdc269c3), committed by mergify[bot].
vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go (generated, vendored; 12 lines changed)
@@ -34,7 +34,10 @@ type QueueSetFactory interface {
 // BeginConstruction does the first phase of creating a QueueSet.
 // The RatioedGaugePair observes number of requests,
 // execution covering just the regular phase.
+// The denominator for the waiting phase is
+// max(1, QueuingConfig.QueueLengthLimit) X max(1, QueuingConfig.DesiredNumQueues).
 // The RatioedGauge observes number of seats occupied through all phases of execution.
+// The denominator for all the ratioed concurrency gauges is supplied later in the DispatchingConfig.
 // The Gauge observes the seat demand (executing + queued seats).
 BeginConstruction(QueuingConfig, metrics.RatioedGaugePair, metrics.RatioedGauge, metrics.Gauge) (QueueSetCompleter, error)
 }
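For illustration only (not part of the vendored change): the waiting-phase denominator described in the comment above can be written out as a small helper. The function name and plain int parameters are assumptions made for this sketch.

// waitingDenominator mirrors the documented formula:
// max(1, QueueLengthLimit) * max(1, DesiredNumQueues).
func waitingDenominator(queueLengthLimit, desiredNumQueues int) int {
    qll := queueLengthLimit
    if qll < 1 {
        qll = 1
    }
    dnq := desiredNumQueues
    if dnq < 1 {
        dnq = 1
    }
    return qll * dnq
}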
@@ -113,8 +116,11 @@ type QueuingConfig struct {
 Name string

 // DesiredNumQueues is the number of queues that the API says
-// should exist now. This may be zero, in which case
+// should exist now. This may be non-positive, in which case
 // QueueLengthLimit, HandSize, and RequestWaitLimit are ignored.
+// A value of zero means to respect the ConcurrencyLimit of the DispatchingConfig.
+// A negative value means to always dispatch immediately upon arrival
+// (i.e., the requests are "exempt" from limitation).
 DesiredNumQueues int

 // QueueLengthLimit is the maximum number of requests that may be waiting in a given queue at a time
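A minimal standalone sketch of the sign convention spelled out above; Config and dispatchMode are stand-ins for illustration, not the real fq.QueuingConfig API.

package main

import "fmt"

// Config is a stand-in carrying just the two fields the sign convention touches.
type Config struct {
    DesiredNumQueues int
    ConcurrencyLimit int
}

// dispatchMode restates the documented semantics: negative means exempt
// (dispatch immediately on arrival), zero means no queuing but the
// concurrency limit still applies, positive means fair queuing.
func dispatchMode(c Config) string {
    switch {
    case c.DesiredNumQueues < 0:
        return "exempt: dispatch immediately upon arrival"
    case c.DesiredNumQueues == 0:
        return fmt.Sprintf("no queues: respect ConcurrencyLimit=%d", c.ConcurrencyLimit)
    default:
        return fmt.Sprintf("fair queuing across %d queues", c.DesiredNumQueues)
    }
}

func main() {
    for _, c := range []Config{{-1, 0}, {0, 10}, {64, 10}} {
        fmt.Println(dispatchMode(c))
    }
}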
@@ -133,4 +139,8 @@ type QueuingConfig struct {
 type DispatchingConfig struct {
 // ConcurrencyLimit is the maximum number of requests of this QueueSet that may be executing at a time
 ConcurrencyLimit int
+
+// ConcurrencyDenominator is used in relative metrics of concurrency.
+// It equals ConcurrencyLimit except when that is zero.
+ConcurrencyDenominator int
 }
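The new field's comment says it equals ConcurrencyLimit except when that is zero. A hedged sketch of how such a denominator might be chosen; the fallback value of 1 is an assumption for illustration, not taken from the vendored code.

// concurrencyDenominator: use the limit when it is positive, otherwise fall
// back to 1 so that ratio metrics never divide by zero.
func concurrencyDenominator(concurrencyLimit int) int {
    if concurrencyLimit > 0 {
        return concurrencyLimit
    }
    return 1 // assumed fallback; the real value comes from the caller's config
}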
vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go (generated, vendored; 89 lines changed)
@@ -24,6 +24,7 @@ import (
 "sync"
 "time"

+"k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apiserver/pkg/util/flowcontrol/debug"
 fq "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing"
 "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/eventclock"
@@ -138,6 +139,10 @@ type queueSet struct {
 // from that queue.
 totRequestsExecuting int

+// requestsExecutingSet is the set of requests executing in the real world IF
+// there are no queues; otherwise the requests are tracked in the queues.
+requestsExecutingSet sets.Set[*request]
+
 // totSeatsInUse is the number of total "seats" in use by all the
 // request(s) that are currently executing in this queueset.
 totSeatsInUse int
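The struct change above (and the matching change to queue.requestsExecuting later in this diff) replaces integer counters with generic sets. For illustration, a standalone example of the k8s.io/apimachinery/pkg/util/sets API used here; requestID is a stand-in element type, not the real *request.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/sets"
)

type requestID string

func main() {
    executing := sets.New[requestID]()    // was: requestsExecuting int
    executing = executing.Insert("req-1") // was: requestsExecuting++
    executing = executing.Insert("req-2")
    executing = executing.Delete("req-1") // was: requestsExecuting--
    fmt.Println(executing.Len())          // was: reads of the plain counter
}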
@@ -197,7 +202,7 @@ func (qsf *queueSetFactory) BeginConstruction(qCfg fq.QueuingConfig, reqsGaugePa
 // calls for one, and returns a non-nil error if the given config is
 // invalid.
 func checkConfig(qCfg fq.QueuingConfig) (*shufflesharding.Dealer, error) {
-if qCfg.DesiredNumQueues == 0 {
+if qCfg.DesiredNumQueues <= 0 {
 return nil, nil
 }
 dealer, err := shufflesharding.NewDealer(qCfg.DesiredNumQueues, qCfg.HandSize)
@@ -219,6 +224,7 @@ func (qsc *queueSetCompleter) Complete(dCfg fq.DispatchingConfig) fq.QueueSet {
 qCfg: qsc.qCfg,
 currentR: 0,
 lastRealTime: qsc.factory.clock.Now(),
+requestsExecutingSet: sets.New[*request](),
 }
 qs.promiseFactory = qsc.factory.promiseFactoryFactory(qs)
 }
@@ -230,7 +236,7 @@ func (qsc *queueSetCompleter) Complete(dCfg fq.DispatchingConfig) fq.QueueSet {
 func createQueues(n, baseIndex int) []*queue {
 fqqueues := make([]*queue, n)
 for i := 0; i < n; i++ {
-fqqueues[i] = &queue{index: baseIndex + i, requests: newRequestFIFO()}
+fqqueues[i] = &queue{index: baseIndex + i, requestsWaiting: newRequestFIFO(), requestsExecuting: sets.New[*request]()}
 }
 return fqqueues
 }
@@ -280,8 +286,8 @@ func (qs *queueSet) setConfiguration(ctx context.Context, qCfg fq.QueuingConfig,
 qll *= qCfg.DesiredNumQueues
 }
 qs.reqsGaugePair.RequestsWaiting.SetDenominator(float64(qll))
-qs.reqsGaugePair.RequestsExecuting.SetDenominator(float64(dCfg.ConcurrencyLimit))
-qs.execSeatsGauge.SetDenominator(float64(dCfg.ConcurrencyLimit))
+qs.reqsGaugePair.RequestsExecuting.SetDenominator(float64(dCfg.ConcurrencyDenominator))
+qs.execSeatsGauge.SetDenominator(float64(dCfg.ConcurrencyDenominator))

 qs.dispatchAsMuchAsPossibleLocked()
 }
@@ -504,7 +510,7 @@ func (qs *queueSet) advanceEpoch(ctx context.Context, now time.Time, incrR fqreq
 klog.InfoS("Advancing epoch", "QS", qs.qCfg.Name, "when", now.Format(nsTimeFmt), "oldR", oldR, "newR", qs.currentR, "incrR", incrR)
 success := true
 for qIdx, queue := range qs.queues {
-if queue.requests.Length() == 0 && queue.requestsExecuting == 0 {
+if queue.requestsWaiting.Length() == 0 && queue.requestsExecuting.Len() == 0 {
 // Do not just decrement, the value could be quite outdated.
 // It is safe to reset to zero in this case, because the next request
 // will overwrite the zero with `qs.currentR`.
@@ -517,7 +523,7 @@ func (qs *queueSet) advanceEpoch(ctx context.Context, now time.Time, incrR fqreq
 klog.ErrorS(errors.New("queue::nextDispatchR underflow"), "Underflow", "QS", qs.qCfg.Name, "queue", qIdx, "oldNextDispatchR", oldNextDispatchR, "newNextDispatchR", queue.nextDispatchR, "incrR", incrR)
 success = false
 }
-queue.requests.Walk(func(req *request) bool {
+queue.requestsWaiting.Walk(func(req *request) bool {
 oldArrivalR := req.arrivalR
 req.arrivalR -= rDecrement
 if req.arrivalR > oldArrivalR {
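The loop above subtracts rDecrement from each waiting request's arrivalR and treats an increase as underflow, because the R values are unsigned fixed-point (SeatSeconds). A minimal illustration with a plain uint64 standing in for SeatSeconds.

// decrementWithUnderflowCheck subtracts dec from r and reports whether the
// unsigned subtraction wrapped around zero (the "underflow" case logged above).
func decrementWithUnderflowCheck(r, dec uint64) (uint64, bool) {
    old := r
    r -= dec
    return r, r > old
}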
@@ -538,8 +544,8 @@ func (qs *queueSet) getVirtualTimeRatioLocked() float64 {
 for _, queue := range qs.queues {
 // here we want the sum of the maximum width of the requests in this queue since our
 // goal is to find the maximum rate at which the queue could work.
-seatsRequested += (queue.seatsInUse + queue.requests.QueueSum().MaxSeatsSum)
-if queue.requests.Length() > 0 || queue.requestsExecuting > 0 {
+seatsRequested += (queue.seatsInUse + queue.requestsWaiting.QueueSum().MaxSeatsSum)
+if queue.requestsWaiting.Length() > 0 || queue.requestsExecuting.Len() > 0 {
 activeQueues++
 }
 }
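A sketch of the accumulation shown in this hunk: each queue contributes its seats in use plus the max-seats sum of its waiting requests, and counts as active if it has any waiting or executing work. queueStat is a stand-in for the real per-queue bookkeeping.

type queueStat struct {
    SeatsInUse  int // seats held by requests executing from this queue
    MaxSeatsSum int // sum of max seats over requests still waiting
    Waiting     int // number of waiting requests
    Executing   int // number of executing requests
}

func seatsRequestedAndActive(queues []queueStat) (seatsRequested, activeQueues int) {
    for _, q := range queues {
        seatsRequested += q.SeatsInUse + q.MaxSeatsSum
        if q.Waiting > 0 || q.Executing > 0 {
            activeQueues++
        }
    }
    return seatsRequested, activeQueues
}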
@@ -589,7 +595,7 @@ func (qs *queueSet) timeoutOldRequestsAndRejectOrEnqueueLocked(ctx context.Conte
 if ok := qs.rejectOrEnqueueToBoundLocked(req); !ok {
 return nil
 }
-metrics.ObserveQueueLength(ctx, qs.qCfg.Name, fsName, queue.requests.Length())
+metrics.ObserveQueueLength(ctx, qs.qCfg.Name, fsName, queue.requestsWaiting.Length())
 return req
 }

@@ -608,7 +614,7 @@ func (qs *queueSet) shuffleShardLocked(hashValue uint64, descr1, descr2 interfac
 for i := 0; i < handSize; i++ {
 queueIdx := hand[(offset+i)%handSize]
 queue := qs.queues[queueIdx]
-queueSum := queue.requests.QueueSum()
+queueSum := queue.requestsWaiting.QueueSum()

 // this is the total amount of work in seat-seconds for requests
 // waiting in this queue, we will select the queue with the minimum.
@@ -621,7 +627,7 @@ func (qs *queueSet) shuffleShardLocked(hashValue uint64, descr1, descr2 interfac
 }
 if klogV := klog.V(6); klogV.Enabled() {
 chosenQueue := qs.queues[bestQueueIdx]
-klogV.Infof("QS(%s) at t=%s R=%v: For request %#+v %#+v chose queue %d, with sum: %#v & %d seats in use & nextDispatchR=%v", qs.qCfg.Name, qs.clock.Now().Format(nsTimeFmt), qs.currentR, descr1, descr2, bestQueueIdx, chosenQueue.requests.QueueSum(), chosenQueue.seatsInUse, chosenQueue.nextDispatchR)
+klogV.Infof("QS(%s) at t=%s R=%v: For request %#+v %#+v chose queue %d, with sum: %#v & %d seats in use & nextDispatchR=%v", qs.qCfg.Name, qs.clock.Now().Format(nsTimeFmt), qs.currentR, descr1, descr2, bestQueueIdx, chosenQueue.requestsWaiting.QueueSum(), chosenQueue.seatsInUse, chosenQueue.nextDispatchR)
 }
 return bestQueueIdx
 }
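The shuffle-sharding selection above boils down to picking, from the dealt hand of candidate queues, the one with the least queued work. A simplified standalone version of that idea; workByQueue stands in for the real seat-seconds QueueSum bookkeeping, and a non-empty hand is assumed.

// bestOfHand returns the candidate queue index whose queued work is smallest,
// mirroring the minimum-sum choice made in shuffleShardLocked.
func bestOfHand(hand []int, workByQueue []float64) int {
    best := hand[0]
    for _, idx := range hand[1:] {
        if workByQueue[idx] < workByQueue[best] {
            best = idx
        }
    }
    return best
}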
@@ -632,7 +638,7 @@ func (qs *queueSet) removeTimedOutRequestsFromQueueToBoundLocked(queue *queue, f
 timeoutCount := 0
 disqueueSeats := 0
 now := qs.clock.Now()
-reqs := queue.requests
+reqs := queue.requestsWaiting
 // reqs are sorted oldest -> newest
 // can short circuit loop (break) if oldest requests are not timing out
 // as newer requests also will not have timed out
@@ -669,7 +675,7 @@ func (qs *queueSet) removeTimedOutRequestsFromQueueToBoundLocked(queue *queue, f
 // Otherwise enqueues and returns true.
 func (qs *queueSet) rejectOrEnqueueToBoundLocked(request *request) bool {
 queue := request.queue
-curQueueLength := queue.requests.Length()
+curQueueLength := queue.requestsWaiting.Length()
 // rejects the newly arrived request if resource criteria not met
 if qs.totSeatsInUse >= qs.dCfg.ConcurrencyLimit &&
 curQueueLength >= qs.qCfg.QueueLengthLimit {
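Restated as a standalone predicate, the rejection rule in this hunk: a newly arrived request is rejected only when the queueset is already at (or over) its concurrency limit and the chosen queue is at (or over) its length limit. Parameter names are illustrative.

func shouldReject(totSeatsInUse, concurrencyLimit, curQueueLength, queueLengthLimit int) bool {
    return totSeatsInUse >= concurrencyLimit && curQueueLength >= queueLengthLimit
}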
@@ -684,7 +690,7 @@ func (qs *queueSet) rejectOrEnqueueToBoundLocked(request *request) bool {
 func (qs *queueSet) enqueueToBoundLocked(request *request) {
 queue := request.queue
 now := qs.clock.Now()
-if queue.requests.Length() == 0 && queue.requestsExecuting == 0 {
+if queue.requestsWaiting.Length() == 0 && queue.requestsExecuting.Len() == 0 {
 // the queue’s start R is set to the virtual time.
 queue.nextDispatchR = qs.currentR
 klogV := klog.V(6)
@@ -692,7 +698,7 @@ func (qs *queueSet) enqueueToBoundLocked(request *request) {
 klogV.Infof("QS(%s) at t=%s R=%v: initialized queue %d start R due to request %#+v %#+v", qs.qCfg.Name, now.Format(nsTimeFmt), queue.nextDispatchR, queue.index, request.descr1, request.descr2)
 }
 }
-request.removeFromQueueLocked = queue.requests.Enqueue(request)
+request.removeFromQueueLocked = queue.requestsWaiting.Enqueue(request)
 qs.totRequestsWaiting++
 qs.totSeatsWaiting += request.MaxSeats()
 metrics.AddRequestsInQueues(request.ctx, qs.qCfg.Name, request.fsName, 1)
@@ -725,8 +731,9 @@ func (qs *queueSet) dispatchSansQueueLocked(ctx context.Context, workEstimate *f
 }
 qs.totRequestsExecuting++
 qs.totSeatsInUse += req.MaxSeats()
+qs.requestsExecutingSet = qs.requestsExecutingSet.Insert(req)
 metrics.AddRequestsExecuting(ctx, qs.qCfg.Name, fsName, 1)
-metrics.AddRequestConcurrencyInUse(qs.qCfg.Name, fsName, req.MaxSeats())
+metrics.AddSeatConcurrencyInUse(qs.qCfg.Name, fsName, req.MaxSeats())
 qs.reqsGaugePair.RequestsExecuting.Add(1)
 qs.execSeatsGauge.Add(float64(req.MaxSeats()))
 qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting))
@@ -768,10 +775,10 @@ func (qs *queueSet) dispatchLocked() bool {
 // problem because other overhead is also included.
 qs.totRequestsExecuting++
 qs.totSeatsInUse += request.MaxSeats()
-queue.requestsExecuting++
+queue.requestsExecuting = queue.requestsExecuting.Insert(request)
 queue.seatsInUse += request.MaxSeats()
 metrics.AddRequestsExecuting(request.ctx, qs.qCfg.Name, request.fsName, 1)
-metrics.AddRequestConcurrencyInUse(qs.qCfg.Name, request.fsName, request.MaxSeats())
+metrics.AddSeatConcurrencyInUse(qs.qCfg.Name, request.fsName, request.MaxSeats())
 qs.reqsGaugePair.RequestsExecuting.Add(1)
 qs.execSeatsGauge.Add(float64(request.MaxSeats()))
 qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting))
@@ -779,7 +786,7 @@ func (qs *queueSet) dispatchLocked() bool {
 if klogV.Enabled() {
 klogV.Infof("QS(%s) at t=%s R=%v: dispatching request %#+v %#+v work %v from queue %d with start R %v, queue will have %d waiting & %d requests occupying %d seats, set will have %d seats occupied",
 qs.qCfg.Name, request.startTime.Format(nsTimeFmt), qs.currentR, request.descr1, request.descr2,
-request.workEstimate, queue.index, queue.nextDispatchR, queue.requests.Length(), queue.requestsExecuting, queue.seatsInUse, qs.totSeatsInUse)
+request.workEstimate, queue.index, queue.nextDispatchR, queue.requestsWaiting.Length(), queue.requestsExecuting.Len(), queue.seatsInUse, qs.totSeatsInUse)
 }
 // When a request is dequeued for service -> qs.virtualStart += G * width
 if request.totalWork() > rDecrement/100 { // A single increment should never be so big
@@ -796,6 +803,9 @@ func (qs *queueSet) dispatchLocked() bool {
 // otherwise it returns false.
 func (qs *queueSet) canAccommodateSeatsLocked(seats int) bool {
 switch {
+case qs.qCfg.DesiredNumQueues < 0:
+// This is code for exemption from limitation
+return true
 case seats > qs.dCfg.ConcurrencyLimit:
 // we have picked the queue with the minimum virtual finish time, but
 // the number of seats this request asks for exceeds the concurrency limit.
@@ -831,7 +841,7 @@ func (qs *queueSet) findDispatchQueueToBoundLocked() (*queue, *request) {
 for range qs.queues {
 qs.robinIndex = (qs.robinIndex + 1) % nq
 queue := qs.queues[qs.robinIndex]
-oldestWaiting, _ := queue.requests.Peek()
+oldestWaiting, _ := queue.requestsWaiting.Peek()
 if oldestWaiting != nil {
 sMin = ssMin(sMin, queue.nextDispatchR)
 sMax = ssMax(sMax, queue.nextDispatchR)
@@ -848,7 +858,7 @@ func (qs *queueSet) findDispatchQueueToBoundLocked() (*queue, *request) {
 }
 }

-oldestReqFromMinQueue, _ := minQueue.requests.Peek()
+oldestReqFromMinQueue, _ := minQueue.requestsWaiting.Peek()
 if oldestReqFromMinQueue == nil {
 // This cannot happen
 klog.ErrorS(errors.New("selected queue is empty"), "Impossible", "queueSet", qs.qCfg.Name)
@@ -935,7 +945,7 @@ func (qs *queueSet) finishRequestLocked(r *request) {
 defer qs.removeQueueIfEmptyLocked(r)

 qs.totSeatsInUse -= r.MaxSeats()
-metrics.AddRequestConcurrencyInUse(qs.qCfg.Name, r.fsName, -r.MaxSeats())
+metrics.AddSeatConcurrencyInUse(qs.qCfg.Name, r.fsName, -r.MaxSeats())
 qs.execSeatsGauge.Add(-float64(r.MaxSeats()))
 qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting))
 if r.queue != nil {
@@ -952,7 +962,7 @@ func (qs *queueSet) finishRequestLocked(r *request) {
 } else if r.queue != nil {
 klogV.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished all use of %d seats, adjusted queue %d start R to %v due to service time %.9fs, queue will have %d requests with %#v waiting & %d requests occupying %d seats",
 qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, r.descr1, r.descr2, r.workEstimate.MaxSeats(), r.queue.index,
-r.queue.nextDispatchR, actualServiceDuration.Seconds(), r.queue.requests.Length(), r.queue.requests.QueueSum(), r.queue.requestsExecuting, r.queue.seatsInUse)
+r.queue.nextDispatchR, actualServiceDuration.Seconds(), r.queue.requestsWaiting.Length(), r.queue.requestsWaiting.QueueSum(), r.queue.requestsExecuting.Len(), r.queue.seatsInUse)
 } else {
 klogV.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished all use of %d seats, qs will have %d requests occupying %d seats", qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, r.descr1, r.descr2, r.workEstimate.InitialSeats, qs.totRequestsExecuting, qs.totSeatsInUse)
 }
@@ -964,7 +974,7 @@ func (qs *queueSet) finishRequestLocked(r *request) {
 } else if r.queue != nil {
 klogV.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished main use of %d seats but lingering on %d seats for %v seconds, adjusted queue %d start R to %v due to service time %.9fs, queue will have %d requests with %#v waiting & %d requests occupying %d seats",
 qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, r.descr1, r.descr2, r.workEstimate.InitialSeats, r.workEstimate.FinalSeats, additionalLatency.Seconds(), r.queue.index,
-r.queue.nextDispatchR, actualServiceDuration.Seconds(), r.queue.requests.Length(), r.queue.requests.QueueSum(), r.queue.requestsExecuting, r.queue.seatsInUse)
+r.queue.nextDispatchR, actualServiceDuration.Seconds(), r.queue.requestsWaiting.Length(), r.queue.requestsWaiting.QueueSum(), r.queue.requestsExecuting.Len(), r.queue.seatsInUse)
 } else {
 klogV.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished main use of %d seats but lingering on %d seats for %v seconds, qs will have %d requests occupying %d seats", qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, r.descr1, r.descr2, r.workEstimate.InitialSeats, r.workEstimate.FinalSeats, additionalLatency.Seconds(), qs.totRequestsExecuting, qs.totSeatsInUse)
 }
@@ -981,7 +991,7 @@ func (qs *queueSet) finishRequestLocked(r *request) {
 } else if r.queue != nil {
 klogV.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished lingering on %d seats, queue %d will have %d requests with %#v waiting & %d requests occupying %d seats",
 qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, r.descr1, r.descr2, r.workEstimate.FinalSeats, r.queue.index,
-r.queue.requests.Length(), r.queue.requests.QueueSum(), r.queue.requestsExecuting, r.queue.seatsInUse)
+r.queue.requestsWaiting.Length(), r.queue.requestsWaiting.QueueSum(), r.queue.requestsExecuting.Len(), r.queue.seatsInUse)
 } else {
 klogV.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished lingering on %d seats, qs will have %d requests occupying %d seats", qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, r.descr1, r.descr2, r.workEstimate.FinalSeats, qs.totRequestsExecuting, qs.totSeatsInUse)
 }
@@ -991,12 +1001,14 @@ func (qs *queueSet) finishRequestLocked(r *request) {

 if r.queue != nil {
 // request has finished, remove from requests executing
-r.queue.requestsExecuting--
+r.queue.requestsExecuting = r.queue.requestsExecuting.Delete(r)

 // When a request finishes being served, and the actual service time was S,
 // the queue’s start R is decremented by (G - S)*width.
 r.queue.nextDispatchR -= fqrequest.SeatsTimesDuration(float64(r.InitialSeats()), qs.estimatedServiceDuration-actualServiceDuration)
 qs.boundNextDispatchLocked(r.queue)
+} else {
+qs.requestsExecutingSet = qs.requestsExecutingSet.Delete(r)
 }
 }

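A worked restatement of the comment above: when a request finishes after actual service time S, the queue's start R is pulled back by (G - S) * width, where G is the estimated service duration and width is the request's initial seats. Plain float64 seconds stand in for the real SeatSeconds fixed-point type.

// adjustNextDispatchR returns the queue's new start R after a request of the
// given seat width finishes: nextDispatchR - width*(G - S).
func adjustNextDispatchR(nextDispatchR, initialSeats, estimatedG, actualS float64) float64 {
    return nextDispatchR - initialSeats*(estimatedG-actualS)
}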
@@ -1008,7 +1020,7 @@ func (qs *queueSet) finishRequestLocked(r *request) {
 // The following hack addresses the first side of that inequity,
 // by insisting that dispatch in the virtual world not precede arrival.
 func (qs *queueSet) boundNextDispatchLocked(queue *queue) {
-oldestReqFromMinQueue, _ := queue.requests.Peek()
+oldestReqFromMinQueue, _ := queue.requestsWaiting.Peek()
 if oldestReqFromMinQueue == nil {
 return
 }
@@ -1029,8 +1041,8 @@ func (qs *queueSet) removeQueueIfEmptyLocked(r *request) {
 // If there are more queues than desired and this one has no
 // requests then remove it
 if len(qs.queues) > qs.qCfg.DesiredNumQueues &&
-r.queue.requests.Length() == 0 &&
-r.queue.requestsExecuting == 0 {
+r.queue.requestsWaiting.Length() == 0 &&
+r.queue.requestsExecuting.Len() == 0 {
 qs.queues = removeQueueAndUpdateIndexes(qs.queues, r.queue.index)

 // decrement here to maintain the invariant that (qs.robinIndex+1) % numQueues
@@ -1055,15 +1067,16 @@ func (qs *queueSet) Dump(includeRequestDetails bool) debug.QueueSetDump {
 qs.lock.Lock()
 defer qs.lock.Unlock()
 d := debug.QueueSetDump{
-Queues: make([]debug.QueueDump, len(qs.queues)),
-Waiting: qs.totRequestsWaiting,
-Executing: qs.totRequestsExecuting,
-SeatsInUse: qs.totSeatsInUse,
-SeatsWaiting: qs.totSeatsWaiting,
-Dispatched: qs.totRequestsDispatched,
-Rejected: qs.totRequestsRejected,
-Timedout: qs.totRequestsTimedout,
-Cancelled: qs.totRequestsCancelled,
+Queues: make([]debug.QueueDump, len(qs.queues)),
+QueuelessExecutingRequests: SetMapReduce(dumpRequest(includeRequestDetails), append1[debug.RequestDump])(qs.requestsExecutingSet),
+Waiting: qs.totRequestsWaiting,
+Executing: qs.totRequestsExecuting,
+SeatsInUse: qs.totSeatsInUse,
+SeatsWaiting: qs.totSeatsWaiting,
+Dispatched: qs.totRequestsDispatched,
+Rejected: qs.totRequestsRejected,
+Timedout: qs.totRequestsTimedout,
+Cancelled: qs.totRequestsCancelled,
 }
 for i, q := range qs.queues {
 d.Queues[i] = q.dumpLocked(includeRequestDetails)
vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/types.go (generated, vendored; 85 lines changed)
@@ -20,6 +20,7 @@ import (
 "context"
 "time"

+"k8s.io/apimachinery/pkg/util/sets"
 genericrequest "k8s.io/apiserver/pkg/endpoints/request"
 "k8s.io/apiserver/pkg/util/flowcontrol/debug"
 fq "k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing"
@@ -90,15 +91,15 @@ type completedWorkEstimate struct {
 // queue is a sequence of requests that have arrived but not yet finished
 // execution in both the real and virtual worlds.
 type queue struct {
-// The requests not yet executing in the real world are stored in a FIFO list.
-requests fifo
+// The requestsWaiting not yet executing in the real world are stored in a FIFO list.
+requestsWaiting fifo

 // nextDispatchR is the R progress meter reading at
 // which the next request will be dispatched in the virtual world.
 nextDispatchR fcrequest.SeatSeconds

-// requestsExecuting is the count in the real world.
-requestsExecuting int
+// requestsExecuting is the set of requests executing in the real world.
+requestsExecuting sets.Set[*request]

 // index is the position of this queue among those in its queueSet.
 index int
@@ -145,28 +146,14 @@ func (qs *queueSet) computeFinalWork(we *fcrequest.WorkEstimate) fcrequest.SeatS
 }

 func (q *queue) dumpLocked(includeDetails bool) debug.QueueDump {
-digest := make([]debug.RequestDump, q.requests.Length())
-i := 0
-q.requests.Walk(func(r *request) bool {
-// dump requests.
-digest[i].MatchedFlowSchema = r.fsName
-digest[i].FlowDistinguisher = r.flowDistinguisher
-digest[i].ArriveTime = r.arrivalTime
-digest[i].StartTime = r.startTime
-digest[i].WorkEstimate = r.workEstimate.WorkEstimate
-if includeDetails {
-userInfo, _ := genericrequest.UserFrom(r.ctx)
-digest[i].UserName = userInfo.GetName()
-requestInfo, ok := genericrequest.RequestInfoFrom(r.ctx)
-if ok {
-digest[i].RequestInfo = *requestInfo
-}
-}
-i++
+waitingDigest := make([]debug.RequestDump, 0, q.requestsWaiting.Length())
+q.requestsWaiting.Walk(func(r *request) bool {
+waitingDigest = append(waitingDigest, dumpRequest(includeDetails)(r))
 return true
 })
+executingDigest := SetMapReduce(dumpRequest(includeDetails), append1[debug.RequestDump])(q.requestsExecuting)

-sum := q.requests.QueueSum()
+sum := q.requestsWaiting.QueueSum()
 queueSum := debug.QueueSum{
 InitialSeatsSum: sum.InitialSeatsSum,
 MaxSeatsSum: sum.MaxSeatsSum,
@@ -175,9 +162,57 @@ func (q *queue) dumpLocked(includeDetails bool) debug.QueueDump {

 return debug.QueueDump{
 NextDispatchR: q.nextDispatchR.String(),
-Requests: digest,
-ExecutingRequests: q.requestsExecuting,
+Requests: waitingDigest,
+RequestsExecuting: executingDigest,
+ExecutingRequests: q.requestsExecuting.Len(),
 SeatsInUse: q.seatsInUse,
 QueueSum: queueSum,
 }
 }
+
+func dumpRequest(includeDetails bool) func(*request) debug.RequestDump {
+return func(r *request) debug.RequestDump {
+ans := debug.RequestDump{
+MatchedFlowSchema: r.fsName,
+FlowDistinguisher: r.flowDistinguisher,
+ArriveTime: r.arrivalTime,
+StartTime: r.startTime,
+WorkEstimate: r.workEstimate.WorkEstimate,
+}
+if includeDetails {
+userInfo, _ := genericrequest.UserFrom(r.ctx)
+ans.UserName = userInfo.GetName()
+requestInfo, ok := genericrequest.RequestInfoFrom(r.ctx)
+if ok {
+ans.RequestInfo = *requestInfo
+}
+}
+return ans
+}
+}
+
+// SetMapReduce is map-reduce starting from a set type in the sets package.
+func SetMapReduce[Elt comparable, Result, Accumulator any](mapFn func(Elt) Result, reduceFn func(Accumulator, Result) Accumulator) func(map[Elt]sets.Empty) Accumulator {
+return func(set map[Elt]sets.Empty) Accumulator {
+var ans Accumulator
+for elt := range set {
+ans = reduceFn(ans, mapFn(elt))
+}
+return ans
+}
+}
+
+// SliceMapReduce is map-reduce starting from a slice.
+func SliceMapReduce[Elt, Result, Accumulator any](mapFn func(Elt) Result, reduceFn func(Accumulator, Result) Accumulator) func([]Elt) Accumulator {
+return func(slice []Elt) Accumulator {
+var ans Accumulator
+for _, elt := range slice {
+ans = reduceFn(ans, mapFn(elt))
+}
+return ans
+}
+}
+
+func or(x, y bool) bool { return x || y }
+
+func append1[Elt any](slice []Elt, next Elt) []Elt { return append(slice, next) }
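For illustration, how the generic helpers introduced above compose: SetMapReduce maps every element of a set and folds the results, and with append1 as the reducer it simply collects the mapped values into a slice, which is how the dump code builds executingDigest. A standalone example with plain string elements instead of *request; SetMapReduce and append1 are copied verbatim from the vendored code above.

package main

import (
    "fmt"
    "strings"

    "k8s.io/apimachinery/pkg/util/sets"
)

// SetMapReduce is map-reduce starting from a set type in the sets package.
func SetMapReduce[Elt comparable, Result, Accumulator any](mapFn func(Elt) Result, reduceFn func(Accumulator, Result) Accumulator) func(map[Elt]sets.Empty) Accumulator {
    return func(set map[Elt]sets.Empty) Accumulator {
        var ans Accumulator
        for elt := range set {
            ans = reduceFn(ans, mapFn(elt))
        }
        return ans
    }
}

func append1[Elt any](slice []Elt, next Elt) []Elt { return append(slice, next) }

func main() {
    executing := sets.New("get /healthz", "list /pods")
    upper := SetMapReduce(strings.ToUpper, append1[string])(executing)
    fmt.Println(upper) // order is not deterministic, as with any map walk
}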