Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 02:33:34 +00:00

commit f080b9e0c9 (parent 328a264202), committed by mergify[bot]

    rebase: update k8s.io packages to v0.29.0

    Signed-off-by: Niels de Vos <ndevos@ibm.com>
vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go (generated, vendored): 2 lines changed
@@ -19,7 +19,7 @@ package apihelpers
 import (
     "sort"
 
-    flowcontrol "k8s.io/api/flowcontrol/v1beta3"
+    flowcontrol "k8s.io/api/flowcontrol/v1"
 )
 
 // SetFlowSchemaCondition sets conditions.
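Everything below is the same mechanical migration: the vendored API Priority and Fairness code moves from the flowcontrol v1beta3 API group to v1, which went GA in Kubernetes 1.29. For orientation, a minimal sketch of what a caller of the v1 group looks like; the clientset here is an assumed, already-constructed client-go clientset, not something from this repository.

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listFlowSchemas lists FlowSchemas through the v1 typed client that the
// vendored packages now use (FlowcontrolV1 instead of FlowcontrolV1beta3).
func listFlowSchemas(ctx context.Context, clientset kubernetes.Interface) error {
	fsList, err := clientset.FlowcontrolV1().FlowSchemas().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, fs := range fsList.Items {
		fmt.Println(fs.Name, fs.Spec.MatchingPrecedence)
	}
	return nil
}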
vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go (generated, vendored): 58 lines changed
@@ -50,10 +50,10 @@ import (
     "k8s.io/klog/v2"
     "k8s.io/utils/clock"
 
-    flowcontrol "k8s.io/api/flowcontrol/v1beta3"
-    flowcontrolapplyconfiguration "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3"
-    flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3"
-    flowcontrollister "k8s.io/client-go/listers/flowcontrol/v1beta3"
+    flowcontrol "k8s.io/api/flowcontrol/v1"
+    flowcontrolapplyconfiguration "k8s.io/client-go/applyconfigurations/flowcontrol/v1"
+    flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1"
+    flowcontrollister "k8s.io/client-go/listers/flowcontrol/v1"
 )
 
 const timeFmt = "2006-01-02T15:04:05.999"
@@ -143,16 +143,13 @@ type configController struct {
     fsLister flowcontrollister.FlowSchemaLister
     fsInformerSynced cache.InformerSynced
 
-    flowcontrolClient flowcontrolclient.FlowcontrolV1beta3Interface
+    flowcontrolClient flowcontrolclient.FlowcontrolV1Interface
 
     // serverConcurrencyLimit is the limit on the server's total
     // number of non-exempt requests being served at once. This comes
     // from server configuration.
     serverConcurrencyLimit int
 
-    // requestWaitLimit comes from server configuration.
-    requestWaitLimit time.Duration
-
     // watchTracker implements the necessary WatchTracker interface.
     WatchTracker
 
@@ -263,9 +260,15 @@ type seatDemandStats struct {
 }
 
 func (stats *seatDemandStats) update(obs fq.IntegratorResults) {
-    stats.highWatermark = obs.Max
+    if obs.Duration <= 0 {
+        return
+    }
+    if math.IsNaN(obs.Deviation) {
+        obs.Deviation = 0
+    }
     stats.avg = obs.Average
     stats.stdDev = obs.Deviation
+    stats.highWatermark = obs.Max
     envelope := obs.Average + obs.Deviation
     stats.smoothed = math.Max(envelope, seatDemandSmoothingCoefficient*stats.smoothed+(1-seatDemandSmoothingCoefficient)*envelope)
 }
@@ -281,19 +284,18 @@ func newTestableController(config TestableConfig) *configController {
         asFieldManager: config.AsFieldManager,
         foundToDangling: config.FoundToDangling,
         serverConcurrencyLimit: config.ServerConcurrencyLimit,
-        requestWaitLimit: config.RequestWaitLimit,
         flowcontrolClient: config.FlowcontrolClient,
         priorityLevelStates: make(map[string]*priorityLevelState),
         WatchTracker: NewWatchTracker(),
         MaxSeatsTracker: NewMaxSeatsTracker(),
     }
-    klog.V(2).Infof("NewTestableController %q with serverConcurrencyLimit=%d, requestWaitLimit=%s, name=%s, asFieldManager=%q", cfgCtlr.name, cfgCtlr.serverConcurrencyLimit, cfgCtlr.requestWaitLimit, cfgCtlr.name, cfgCtlr.asFieldManager)
+    klog.V(2).Infof("NewTestableController %q with serverConcurrencyLimit=%d, name=%s, asFieldManager=%q", cfgCtlr.name, cfgCtlr.serverConcurrencyLimit, cfgCtlr.name, cfgCtlr.asFieldManager)
     // Start with longish delay because conflicts will be between
     // different processes, so take some time to go away.
     cfgCtlr.configQueue = workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 8*time.Hour), "priority_and_fairness_config_queue")
     // ensure the data structure reflects the mandatory config
     cfgCtlr.lockAndDigestConfigObjects(nil, nil)
-    fci := config.InformerFactory.Flowcontrol().V1beta3()
+    fci := config.InformerFactory.Flowcontrol().V1()
     pli := fci.PriorityLevelConfigurations()
     fsi := fci.FlowSchemas()
     cfgCtlr.plLister = pli.Lister()
@@ -427,7 +429,7 @@ func (cfgCtlr *configController) updateBorrowingLocked(setCompleters bool, plSta
     plState := plStates[plName]
     if setCompleters {
         qsCompleter, err := queueSetCompleterForPL(cfgCtlr.queueSetFactory, plState.queues,
-            plState.pl, cfgCtlr.requestWaitLimit, plState.reqsGaugePair, plState.execSeatsObs,
+            plState.pl, plState.reqsGaugePair, plState.execSeatsObs,
             metrics.NewUnionGauge(plState.seatDemandIntegrator, plState.seatDemandRatioedGauge))
         if err != nil {
             klog.ErrorS(err, "Inconceivable! Configuration error in existing priority level", "pl", plState.pl)
@@ -651,10 +653,10 @@ func (cfgCtlr *configController) lockAndDigestConfigObjects(newPLs []*flowcontro
 
     // Supply missing mandatory PriorityLevelConfiguration objects
     if !meal.haveExemptPL {
-        meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationExempt, cfgCtlr.requestWaitLimit)
+        meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationExempt)
     }
     if !meal.haveCatchAllPL {
-        meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationCatchAll, cfgCtlr.requestWaitLimit)
+        meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationCatchAll)
     }
 
     meal.finishQueueSetReconfigsLocked()
@@ -686,7 +688,7 @@ func (meal *cfgMeal) digestNewPLsLocked(newPLs []*flowcontrol.PriorityLevelConfi
         }
     }
     qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, state.queues,
-        pl, meal.cfgCtlr.requestWaitLimit, state.reqsGaugePair, state.execSeatsObs,
+        pl, state.reqsGaugePair, state.execSeatsObs,
         metrics.NewUnionGauge(state.seatDemandIntegrator, state.seatDemandRatioedGauge))
     if err != nil {
         klog.Warningf("Ignoring PriorityLevelConfiguration object %s because its spec (%s) is broken: %s", pl.Name, fcfmt.Fmt(pl.Spec), err)
@@ -700,7 +702,7 @@ func (meal *cfgMeal) digestNewPLsLocked(newPLs []*flowcontrol.PriorityLevelConfi
         state.quiescing = false
     }
     nominalConcurrencyShares, _, _ := plSpecCommons(state.pl)
-    meal.shareSum += float64(nominalConcurrencyShares)
+    meal.shareSum += float64(*nominalConcurrencyShares)
     meal.haveExemptPL = meal.haveExemptPL || pl.Name == flowcontrol.PriorityLevelConfigurationNameExempt
     meal.haveCatchAllPL = meal.haveCatchAllPL || pl.Name == flowcontrol.PriorityLevelConfigurationNameCatchAll
 }
@@ -792,7 +794,7 @@ func (meal *cfgMeal) processOldPLsLocked() {
     }
     var err error
     plState.qsCompleter, err = queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, plState.queues,
-        plState.pl, meal.cfgCtlr.requestWaitLimit, plState.reqsGaugePair, plState.execSeatsObs,
+        plState.pl, plState.reqsGaugePair, plState.execSeatsObs,
         metrics.NewUnionGauge(plState.seatDemandIntegrator, plState.seatDemandRatioedGauge))
     if err != nil {
         // This can not happen because queueSetCompleterForPL already approved this config
@@ -805,7 +807,7 @@ func (meal *cfgMeal) processOldPLsLocked() {
     // allocation determined by all the share values in the
     // regular way.
     nominalConcurrencyShares, _, _ := plSpecCommons(plState.pl)
-    meal.shareSum += float64(nominalConcurrencyShares)
+    meal.shareSum += float64(*nominalConcurrencyShares)
     meal.haveExemptPL = meal.haveExemptPL || plName == flowcontrol.PriorityLevelConfigurationNameExempt
     meal.haveCatchAllPL = meal.haveCatchAllPL || plName == flowcontrol.PriorityLevelConfigurationNameCatchAll
     meal.newPLStates[plName] = plState
@@ -821,7 +823,7 @@ func (meal *cfgMeal) finishQueueSetReconfigsLocked() {
     // The use of math.Ceil here means that the results might sum
     // to a little more than serverConcurrencyLimit but the
     // difference will be negligible.
-    concurrencyLimit := int(math.Ceil(float64(meal.cfgCtlr.serverConcurrencyLimit) * float64(nominalConcurrencyShares) / meal.shareSum))
+    concurrencyLimit := int(math.Ceil(float64(meal.cfgCtlr.serverConcurrencyLimit) * float64(*nominalConcurrencyShares) / meal.shareSum))
     var lendableCL, borrowingCL int
     if lendablePercent != nil {
         lendableCL = int(math.Round(float64(concurrencyLimit) * float64(*lendablePercent) / 100))
@@ -874,7 +876,7 @@ func (meal *cfgMeal) finishQueueSetReconfigsLocked() {
 // queueSetCompleterForPL returns an appropriate QueueSetCompleter for the
 // given priority level configuration. Returns nil and an error if the given
 // object is malformed in a way that is a problem for this package.
-func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration, reqsIntPair metrics.RatioedGaugePair, execSeatsObs metrics.RatioedGauge, seatDemandGauge metrics.Gauge) (fq.QueueSetCompleter, error) {
+func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flowcontrol.PriorityLevelConfiguration, reqsIntPair metrics.RatioedGaugePair, execSeatsObs metrics.RatioedGauge, seatDemandGauge metrics.Gauge) (fq.QueueSetCompleter, error) {
     if (pl.Spec.Type == flowcontrol.PriorityLevelEnablementLimited) != (pl.Spec.Limited != nil) {
         return nil, errors.New("broken union structure at the top, for Limited")
     }
@@ -896,7 +898,6 @@ func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flow
             DesiredNumQueues: int(qcAPI.Queues),
             QueueLengthLimit: int(qcAPI.QueueLengthLimit),
             HandSize: int(qcAPI.HandSize),
-            RequestWaitLimit: requestWaitLimit,
         }
     }
 } else {
@@ -950,16 +951,15 @@ func (meal *cfgMeal) presyncFlowSchemaStatus(fs *flowcontrol.FlowSchema, isDangl
 
 // imaginePL adds a priority level based on one of the mandatory ones
 // that does not actually exist (right now) as a real API object.
-func (meal *cfgMeal) imaginePL(proto *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration) {
+func (meal *cfgMeal) imaginePL(proto *flowcontrol.PriorityLevelConfiguration) {
     klog.V(3).Infof("No %s PriorityLevelConfiguration found, imagining one", proto.Name)
     labelValues := []string{proto.Name}
     reqsGaugePair := metrics.RatioedGaugeVecPhasedElementPair(meal.cfgCtlr.reqsGaugeVec, 1, 1, labelValues)
     execSeatsObs := meal.cfgCtlr.execSeatsGaugeVec.NewForLabelValuesSafe(0, 1, labelValues)
     seatDemandIntegrator := fq.NewNamedIntegrator(meal.cfgCtlr.clock, proto.Name)
     seatDemandRatioedGauge := metrics.ApiserverSeatDemands.NewForLabelValuesSafe(0, 1, []string{proto.Name})
-    qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, nil, proto,
-        requestWaitLimit, reqsGaugePair, execSeatsObs,
-        metrics.NewUnionGauge(seatDemandIntegrator, seatDemandRatioedGauge))
+    qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, nil, proto, reqsGaugePair,
+        execSeatsObs, metrics.NewUnionGauge(seatDemandIntegrator, seatDemandRatioedGauge))
     if err != nil {
         // This can not happen because proto is one of the mandatory
         // objects and these are not erroneous
@@ -974,7 +974,7 @@ func (meal *cfgMeal) imaginePL(proto *flowcontrol.PriorityLevelConfiguration, re
         seatDemandRatioedGauge: seatDemandRatioedGauge,
     }
     nominalConcurrencyShares, _, _ := plSpecCommons(proto)
-    meal.shareSum += float64(nominalConcurrencyShares)
+    meal.shareSum += float64(*nominalConcurrencyShares)
 }
 
 // startRequest classifies and, if appropriate, enqueues the request.
@@ -1112,7 +1112,7 @@ func relDiff(x, y float64) float64 {
 }
 
 // plSpecCommons returns the (NominalConcurrencyShares, LendablePercent, BorrowingLimitPercent) of the given priority level config
-func plSpecCommons(pl *flowcontrol.PriorityLevelConfiguration) (int32, *int32, *int32) {
+func plSpecCommons(pl *flowcontrol.PriorityLevelConfiguration) (*int32, *int32, *int32) {
     if limiter := pl.Spec.Limited; limiter != nil {
         return limiter.NominalConcurrencyShares, limiter.LendablePercent, limiter.BorrowingLimitPercent
     }
@@ -1121,5 +1121,5 @@ func plSpecCommons(pl *flowcontrol.PriorityLevelConfiguration) (int32, *int32, *
     if limiter.NominalConcurrencyShares != nil {
         nominalConcurrencyShares = *limiter.NominalConcurrencyShares
     }
-    return nominalConcurrencyShares, limiter.LendablePercent, nil
+    return &nominalConcurrencyShares, limiter.LendablePercent, nil
 }
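The *int32 churn around plSpecCommons comes from the v1 API shape: NominalConcurrencyShares on both the Limited and the Exempt configuration is a pointer there, so the helper now hands the pointer through and callers dereference it, as the diff does with float64(*nominalConcurrencyShares). A simplified, stand-alone sketch of that resolution; the defaultShares fallback is an assumption for the illustration, not the controller's actual default.

package example

import (
	flowcontrolv1 "k8s.io/api/flowcontrol/v1"
)

// nominalShares resolves the nominal concurrency shares of a priority level,
// dereferencing the *int32 fields used by the flowcontrol v1 types.
func nominalShares(pl *flowcontrolv1.PriorityLevelConfiguration, defaultShares int32) int32 {
	if limited := pl.Spec.Limited; limited != nil && limited.NominalConcurrencyShares != nil {
		return *limited.NominalConcurrencyShares
	}
	if exempt := pl.Spec.Exempt; exempt != nil && exempt.NominalConcurrencyShares != nil {
		return *exempt.NominalConcurrencyShares
	}
	return defaultShares // assumed fallback when neither field is set
}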
vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go (generated, vendored): 13 lines changed
@@ -33,8 +33,8 @@ import (
     "k8s.io/klog/v2"
     "k8s.io/utils/clock"
 
-    flowcontrol "k8s.io/api/flowcontrol/v1beta3"
-    flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3"
+    flowcontrol "k8s.io/api/flowcontrol/v1"
+    flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1"
 )
 
 // ConfigConsumerAsFieldManager is how the config consuminng
@@ -88,9 +88,8 @@ type Interface interface {
 // New creates a new instance to implement API priority and fairness
 func New(
     informerFactory kubeinformers.SharedInformerFactory,
-    flowcontrolClient flowcontrolclient.FlowcontrolV1beta3Interface,
+    flowcontrolClient flowcontrolclient.FlowcontrolV1Interface,
     serverConcurrencyLimit int,
-    requestWaitLimit time.Duration,
 ) Interface {
     clk := eventclock.Real{}
     return NewTestable(TestableConfig{
@@ -101,7 +100,6 @@ func New(
         InformerFactory: informerFactory,
         FlowcontrolClient: flowcontrolClient,
         ServerConcurrencyLimit: serverConcurrencyLimit,
-        RequestWaitLimit: requestWaitLimit,
         ReqsGaugeVec: metrics.PriorityLevelConcurrencyGaugeVec,
         ExecSeatsGaugeVec: metrics.PriorityLevelExecutionSeatsGaugeVec,
         QueueSetFactory: fqs.NewQueueSetFactory(clk),
@@ -134,14 +132,11 @@ type TestableConfig struct {
     InformerFactory kubeinformers.SharedInformerFactory
 
     // FlowcontrolClient to use for manipulating config objects
-    FlowcontrolClient flowcontrolclient.FlowcontrolV1beta3Interface
+    FlowcontrolClient flowcontrolclient.FlowcontrolV1Interface
 
     // ServerConcurrencyLimit for the controller to enforce
     ServerConcurrencyLimit int
 
-    // RequestWaitLimit configured on the server
-    RequestWaitLimit time.Duration
-
     // GaugeVec for metrics about requests, broken down by phase and priority_level
     ReqsGaugeVec metrics.RatioedGaugeVec
 
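The caller-visible effect of this file is the narrower constructor: New no longer takes a requestWaitLimit and expects the v1 typed client. A sketch of the post-rebase call site, with utilflowcontrol as an assumed import alias for k8s.io/apiserver/pkg/util/flowcontrol:

package example

import (
	utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol"
	kubeinformers "k8s.io/client-go/informers"
	flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1"
)

// buildAPF wires up API Priority and Fairness after the rebase: the
// requestWaitLimit time.Duration argument is gone and the client is the
// FlowcontrolV1 typed client.
func buildAPF(
	informers kubeinformers.SharedInformerFactory,
	fc flowcontrolclient.FlowcontrolV1Interface,
	serverConcurrencyLimit int,
) utilflowcontrol.Interface {
	return utilflowcontrol.New(informers, fc, serverConcurrencyLimit)
}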
vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go (generated, vendored): 7 lines changed
@@ -18,7 +18,6 @@ package fairqueuing
 
 import (
     "context"
-    "time"
 
     "k8s.io/apiserver/pkg/util/flowcontrol/debug"
     "k8s.io/apiserver/pkg/util/flowcontrol/metrics"
@@ -117,7 +116,7 @@ type QueuingConfig struct {
 
     // DesiredNumQueues is the number of queues that the API says
     // should exist now. This may be non-positive, in which case
-    // QueueLengthLimit, HandSize, and RequestWaitLimit are ignored.
+    // QueueLengthLimit, and HandSize are ignored.
     // A value of zero means to respect the ConcurrencyLimit of the DispatchingConfig.
     // A negative value means to always dispatch immediately upon arrival
     // (i.e., the requests are "exempt" from limitation).
@@ -129,10 +128,6 @@ type QueuingConfig struct {
     // HandSize is a parameter of shuffle sharding. Upon arrival of a request, a queue is chosen by randomly
     // dealing a "hand" of this many queues and then picking one of minimum length.
     HandSize int
-
-    // RequestWaitLimit is the maximum amount of time that a request may wait in a queue.
-    // If, by the end of that time, the request has not been dispatched then it is rejected.
-    RequestWaitLimit time.Duration
 }
 
 // DispatchingConfig defines the configuration of the dispatching aspect of a QueueSet.
vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/promise.go (generated, vendored): 15 lines changed
@@ -17,12 +17,13 @@ limitations under the License.
 package promise
 
 import (
+    "context"
     "sync"
 )
 
 // promise implements the WriteOnce interface.
 type promise struct {
-    doneCh <-chan struct{}
+    doneCtx context.Context
     doneVal interface{}
     setCh chan struct{}
     onceler sync.Once
@@ -35,12 +36,12 @@ var _ WriteOnce = &promise{}
 //
 // If `initial` is non-nil then that value is Set at creation time.
 //
-// If a `Get` is waiting soon after `doneCh` becomes selectable (which
-// never happens for the nil channel) then `Set(doneVal)` effectively
-// happens at that time.
-func NewWriteOnce(initial interface{}, doneCh <-chan struct{}, doneVal interface{}) WriteOnce {
+// If a `Get` is waiting soon after the channel associated with the
+// `doneCtx` becomes selectable (which never happens for the nil
+// channel) then `Set(doneVal)` effectively happens at that time.
+func NewWriteOnce(initial interface{}, doneCtx context.Context, doneVal interface{}) WriteOnce {
     p := &promise{
-        doneCh: doneCh,
+        doneCtx: doneCtx,
         doneVal: doneVal,
         setCh: make(chan struct{}),
     }
@@ -53,7 +54,7 @@ func NewWriteOnce(initial interface{}, doneCh <-chan struct{}, doneVal interface
 func (p *promise) Get() interface{} {
     select {
     case <-p.setCh:
-    case <-p.doneCh:
+    case <-p.doneCtx.Done():
         p.Set(p.doneVal)
     }
     return p.value
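A small usage sketch of the new WriteOnce contract: the promise is now handed a context instead of a bare done channel, and Get settles to doneVal once that context ends if nothing called Set first. The string values stand in for the queueset's requestDecision constants and are not part of the vendored code.

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	// No initial value; if ctx ends before anyone calls Set, the promise
	// settles to "cancelled" (the doneVal).
	p := promise.NewWriteOnce(nil, ctx, "cancelled")

	go func() {
		time.Sleep(10 * time.Millisecond)
		p.Set("execute") // first writer wins; Set returns false if already settled
	}()

	fmt.Println(p.Get()) // "execute" here; "cancelled" if the Set lost the race
}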
vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go (generated, vendored): 83 lines changed
@@ -53,7 +53,7 @@ type queueSetFactory struct {
 // - whose Set method is invoked with the queueSet locked, and
 // - whose Get method is invoked with the queueSet not locked.
 // The parameters are the same as for `promise.NewWriteOnce`.
-type promiseFactory func(initial interface{}, doneCh <-chan struct{}, doneVal interface{}) promise.WriteOnce
+type promiseFactory func(initial interface{}, doneCtx context.Context, doneVal interface{}) promise.WriteOnce
 
 // promiseFactoryFactory returns the promiseFactory to use for the given queueSet
 type promiseFactoryFactory func(*queueSet) promiseFactory
@@ -272,7 +272,6 @@ func (qs *queueSet) setConfiguration(ctx context.Context, qCfg fq.QueuingConfig,
     } else {
         qCfg.QueueLengthLimit = qs.qCfg.QueueLengthLimit
         qCfg.HandSize = qs.qCfg.HandSize
-        qCfg.RequestWaitLimit = qs.qCfg.RequestWaitLimit
     }
 
     qs.qCfg = qCfg
@@ -300,9 +299,6 @@ const (
     // Serve this one
     decisionExecute requestDecision = iota
 
-    // Reject this one due to APF queuing considerations
-    decisionReject
-
     // This one's context timed out / was canceled
     decisionCancel
 )
@@ -337,11 +333,10 @@ func (qs *queueSet) StartRequest(ctx context.Context, workEstimate *fqrequest.Wo
     // ========================================================================
     // Step 1:
     // 1) Start with shuffle sharding, to pick a queue.
-    // 2) Reject old requests that have been waiting too long
-    // 3) Reject current request if there is not enough concurrency shares and
+    // 2) Reject current request if there is not enough concurrency shares and
     //    we are at max queue length
-    // 4) If not rejected, create a request and enqueue
-    req = qs.timeoutOldRequestsAndRejectOrEnqueueLocked(ctx, workEstimate, hashValue, flowDistinguisher, fsName, descr1, descr2, queueNoteFn)
+    // 3) If not rejected, create a request and enqueue
+    req = qs.shuffleShardAndRejectOrEnqueueLocked(ctx, workEstimate, hashValue, flowDistinguisher, fsName, descr1, descr2, queueNoteFn)
     // req == nil means that the request was rejected - no remaining
     // concurrency shares and at max queue length already
     if req == nil {
@@ -422,13 +417,7 @@ func (req *request) wait() (bool, bool) {
     }
     req.waitStarted = true
     switch decisionAny {
-    case decisionReject:
-        klog.V(5).Infof("QS(%s): request %#+v %#+v timed out after being enqueued\n", qs.qCfg.Name, req.descr1, req.descr2)
-        qs.totRequestsRejected++
-        qs.totRequestsTimedout++
-        metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "time-out")
-        return false, qs.isIdleLocked()
-    case decisionCancel:
+    case decisionCancel: // handle in code following this switch
     case decisionExecute:
         klog.V(5).Infof("QS(%s): Dispatching request %#+v %#+v from its queue", qs.qCfg.Name, req.descr1, req.descr2)
         return true, false
@@ -438,7 +427,7 @@ func (req *request) wait() (bool, bool) {
     }
     // TODO(aaron-prindle) add metrics for this case
     klog.V(5).Infof("QS(%s): Ejecting request %#+v %#+v from its queue", qs.qCfg.Name, req.descr1, req.descr2)
-    // remove the request from the queue as it has timed out
+    // remove the request from the queue as its queue wait time has exceeded
     queue := req.queue
     if req.removeFromQueueLocked() != nil {
         defer qs.boundNextDispatchLocked(queue)
@@ -446,8 +435,9 @@ func (req *request) wait() (bool, bool) {
         qs.totSeatsWaiting -= req.MaxSeats()
         qs.totRequestsRejected++
         qs.totRequestsCancelled++
-        metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "cancelled")
+        metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "time-out")
         metrics.AddRequestsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -1)
+        metrics.AddSeatsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -req.MaxSeats())
         req.NoteQueued(false)
         qs.reqsGaugePair.RequestsWaiting.Add(-1)
         qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting))
@@ -555,25 +545,19 @@ func (qs *queueSet) getVirtualTimeRatioLocked() float64 {
     return math.Min(float64(seatsRequested), float64(qs.dCfg.ConcurrencyLimit)) / float64(activeQueues)
 }
 
-// timeoutOldRequestsAndRejectOrEnqueueLocked encapsulates the logic required
+// shuffleShardAndRejectOrEnqueueLocked encapsulates the logic required
 // to validate and enqueue a request for the queueSet/QueueSet:
 // 1) Start with shuffle sharding, to pick a queue.
-// 2) Reject old requests that have been waiting too long
-// 3) Reject current request if there is not enough concurrency shares and
+// 2) Reject current request if there is not enough concurrency shares and
 //    we are at max queue length
-// 4) If not rejected, create a request and enqueue
+// 3) If not rejected, create a request and enqueue
 // returns the enqueud request on a successful enqueue
 // returns nil in the case that there is no available concurrency or
 // the queuelengthlimit has been reached
-func (qs *queueSet) timeoutOldRequestsAndRejectOrEnqueueLocked(ctx context.Context, workEstimate *fqrequest.WorkEstimate, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) *request {
+func (qs *queueSet) shuffleShardAndRejectOrEnqueueLocked(ctx context.Context, workEstimate *fqrequest.WorkEstimate, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) *request {
     // Start with the shuffle sharding, to pick a queue.
     queueIdx := qs.shuffleShardLocked(hashValue, descr1, descr2)
     queue := qs.queues[queueIdx]
-    // The next step is the logic to reject requests that have been waiting too long
-    qs.removeTimedOutRequestsFromQueueToBoundLocked(queue, fsName)
-    // NOTE: currently timeout is only checked for each new request. This means that there can be
-    // requests that are in the queue longer than the timeout if there are no new requests
-    // We prefer the simplicity over the promptness, at least for now.
 
     defer qs.boundNextDispatchLocked(queue)
 
@@ -583,7 +567,7 @@ func (qs *queueSet) timeoutOldRequestsAndRejectOrEnqueueLocked(ctx context.Conte
         fsName: fsName,
         flowDistinguisher: flowDistinguisher,
         ctx: ctx,
-        decision: qs.promiseFactory(nil, ctx.Done(), decisionCancel),
+        decision: qs.promiseFactory(nil, ctx, decisionCancel),
         arrivalTime: qs.clock.Now(),
         arrivalR: qs.currentR,
         queue: queue,
@@ -632,43 +616,6 @@ func (qs *queueSet) shuffleShardLocked(hashValue uint64, descr1, descr2 interfac
     return bestQueueIdx
 }
 
-// removeTimedOutRequestsFromQueueToBoundLocked rejects old requests that have been enqueued
-// past the requestWaitLimit
-func (qs *queueSet) removeTimedOutRequestsFromQueueToBoundLocked(queue *queue, fsName string) {
-    timeoutCount := 0
-    disqueueSeats := 0
-    now := qs.clock.Now()
-    reqs := queue.requestsWaiting
-    // reqs are sorted oldest -> newest
-    // can short circuit loop (break) if oldest requests are not timing out
-    // as newer requests also will not have timed out
-
-    // now - requestWaitLimit = arrivalLimit
-    arrivalLimit := now.Add(-qs.qCfg.RequestWaitLimit)
-    reqs.Walk(func(req *request) bool {
-        if arrivalLimit.After(req.arrivalTime) {
-            if req.decision.Set(decisionReject) && req.removeFromQueueLocked() != nil {
-                timeoutCount++
-                disqueueSeats += req.MaxSeats()
-                req.NoteQueued(false)
-                metrics.AddRequestsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -1)
-            }
-            // we need to check if the next request has timed out.
-            return true
-        }
-        // since reqs are sorted oldest -> newest, we are done here.
-        return false
-    })
-
-    // remove timed out requests from queue
-    if timeoutCount > 0 {
-        qs.totRequestsWaiting -= timeoutCount
-        qs.totSeatsWaiting -= disqueueSeats
-        qs.reqsGaugePair.RequestsWaiting.Add(float64(-timeoutCount))
-        qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting))
-    }
-}
-
 // rejectOrEnqueueToBoundLocked rejects or enqueues the newly arrived
 // request, which has been assigned to a queue. If up against the
 // queue length limit and the concurrency limit then returns false.
@@ -702,6 +649,7 @@ func (qs *queueSet) enqueueToBoundLocked(request *request) {
     qs.totRequestsWaiting++
     qs.totSeatsWaiting += request.MaxSeats()
     metrics.AddRequestsInQueues(request.ctx, qs.qCfg.Name, request.fsName, 1)
+    metrics.AddSeatsInQueues(request.ctx, qs.qCfg.Name, request.fsName, request.MaxSeats())
     request.NoteQueued(true)
     qs.reqsGaugePair.RequestsWaiting.Add(1)
     qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting))
@@ -722,7 +670,7 @@ func (qs *queueSet) dispatchSansQueueLocked(ctx context.Context, workEstimate *f
         flowDistinguisher: flowDistinguisher,
         ctx: ctx,
         startTime: now,
-        decision: qs.promiseFactory(decisionExecute, ctx.Done(), decisionCancel),
+        decision: qs.promiseFactory(decisionExecute, ctx, decisionCancel),
         arrivalTime: now,
         arrivalR: qs.currentR,
         descr1: descr1,
@@ -760,6 +708,7 @@ func (qs *queueSet) dispatchLocked() bool {
     qs.totRequestsWaiting--
     qs.totSeatsWaiting -= request.MaxSeats()
     metrics.AddRequestsInQueues(request.ctx, qs.qCfg.Name, request.fsName, -1)
+    metrics.AddSeatsInQueues(request.ctx, qs.qCfg.Name, request.fsName, -request.MaxSeats())
     request.NoteQueued(false)
     qs.reqsGaugePair.RequestsWaiting.Add(-1)
     defer qs.boundNextDispatchLocked(queue)
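With decisionReject and the requestWaitLimit sweep gone, the only thing that evicts a queued request is its own context, surfaced through the promise's doneCtx as decisionCancel. A stand-alone sketch (standard library only, nothing from this diff) of the pattern a server relies on instead: bound queue wait by putting a deadline on the per-request context.

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Stand-in for the per-request context handed to the queueing layer.
	reqCtx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	dispatched := make(chan struct{}) // pretend this is closed when a seat frees up

	select {
	case <-dispatched:
		fmt.Println("dispatched")
	case <-reqCtx.Done():
		// This is the path the promise's doneCtx now triggers (decisionCancel).
		fmt.Println("abandoned while queued:", reqCtx.Err())
	}
}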
vendor/k8s.io/apiserver/pkg/util/flowcontrol/format/formatting.go (generated, vendored): 14 lines changed
@@ -21,7 +21,7 @@ import (
     "encoding/json"
     "fmt"
 
-    flowcontrol "k8s.io/api/flowcontrol/v1beta3"
+    flowcontrol "k8s.io/api/flowcontrol/v1"
     "k8s.io/apiserver/pkg/authentication/user"
     "k8s.io/apiserver/pkg/endpoints/request"
 )
@@ -93,7 +93,7 @@ func FmtPriorityLevelConfiguration(pl *flowcontrol.PriorityLevelConfiguration) s
         return "nil"
     }
     var buf bytes.Buffer
-    buf.WriteString(fmt.Sprintf("&flowcontrolv1beta3.PriorityLevelConfiguration{ObjectMeta: %#+v, Spec: ",
+    buf.WriteString(fmt.Sprintf("&flowcontrolv1.PriorityLevelConfiguration{ObjectMeta: %#+v, Spec: ",
         pl.ObjectMeta))
     BufferPriorityLevelConfigurationSpec(&buf, &pl.Spec)
     buf.WriteString(fmt.Sprintf(", Status: %#+v}", pl.Status))
@@ -111,7 +111,7 @@ func FmtPriorityLevelConfigurationSpec(plSpec *flowcontrol.PriorityLevelConfigur
 // BufferPriorityLevelConfigurationSpec writes a golang source
 // expression for the given value to the given buffer
 func BufferPriorityLevelConfigurationSpec(buf *bytes.Buffer, plSpec *flowcontrol.PriorityLevelConfigurationSpec) {
-    buf.WriteString(fmt.Sprintf("flowcontrolv1beta3.PriorityLevelConfigurationSpec{Type: %#v", plSpec.Type))
+    buf.WriteString(fmt.Sprintf("flowcontrolv1.PriorityLevelConfigurationSpec{Type: %#v", plSpec.Type))
     if plSpec.Limited != nil {
         buf.WriteString(fmt.Sprintf(", Limited: &flowcontrol.LimitedPriorityLevelConfiguration{NominalConcurrencyShares:%d, LimitResponse:flowcontrol.LimitResponse{Type:%#v", plSpec.Limited.NominalConcurrencyShares, plSpec.Limited.LimitResponse.Type))
         if plSpec.Limited.LimitResponse.Queuing != nil {
@@ -128,7 +128,7 @@ func FmtFlowSchema(fs *flowcontrol.FlowSchema) string {
         return "nil"
     }
     var buf bytes.Buffer
-    buf.WriteString(fmt.Sprintf("&flowcontrolv1beta3.FlowSchema{ObjectMeta: %#+v, Spec: ",
+    buf.WriteString(fmt.Sprintf("&flowcontrolv1.FlowSchema{ObjectMeta: %#+v, Spec: ",
         fs.ObjectMeta))
     BufferFlowSchemaSpec(&buf, &fs.Spec)
     buf.WriteString(fmt.Sprintf(", Status: %#+v}", fs.Status))
@@ -146,7 +146,7 @@ func FmtFlowSchemaSpec(fsSpec *flowcontrol.FlowSchemaSpec) string {
 // BufferFlowSchemaSpec writes a golang source expression for the
 // given value to the given buffer
 func BufferFlowSchemaSpec(buf *bytes.Buffer, fsSpec *flowcontrol.FlowSchemaSpec) {
-    buf.WriteString(fmt.Sprintf("flowcontrolv1beta3.FlowSchemaSpec{PriorityLevelConfiguration: %#+v, MatchingPrecedence: %d, DistinguisherMethod: ",
+    buf.WriteString(fmt.Sprintf("flowcontrolv1.FlowSchemaSpec{PriorityLevelConfiguration: %#+v, MatchingPrecedence: %d, DistinguisherMethod: ",
         fsSpec.PriorityLevelConfiguration,
         fsSpec.MatchingPrecedence))
     if fsSpec.DistinguisherMethod == nil {
@@ -166,7 +166,7 @@ func BufferFlowSchemaSpec(buf *bytes.Buffer, fsSpec *flowcontrol.FlowSchemaSpec)
 
 // FmtPolicyRulesWithSubjects produces a golang source expression of the value.
 func FmtPolicyRulesWithSubjects(rule flowcontrol.PolicyRulesWithSubjects) string {
-    return "flowcontrolv1beta3.PolicyRulesWithSubjects" + FmtPolicyRulesWithSubjectsSlim(rule)
+    return "flowcontrolv1.PolicyRulesWithSubjects" + FmtPolicyRulesWithSubjectsSlim(rule)
 }
 
 // FmtPolicyRulesWithSubjectsSlim produces a golang source expression
@@ -182,7 +182,7 @@ func FmtPolicyRulesWithSubjectsSlim(rule flowcontrol.PolicyRulesWithSubjects) st
 // expression for the given value to the given buffer but excludes the
 // leading type name
 func BufferFmtPolicyRulesWithSubjectsSlim(buf *bytes.Buffer, rule flowcontrol.PolicyRulesWithSubjects) {
-    buf.WriteString("{Subjects: []flowcontrolv1beta3.Subject{")
+    buf.WriteString("{Subjects: []flowcontrolv1.Subject{")
     for jdx, subj := range rule.Subjects {
         if jdx > 0 {
             buf.WriteString(", ")
vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go (generated, vendored): 16 lines changed
@@ -210,6 +210,16 @@ var (
         },
         []string{priorityLevel, flowSchema},
     )
+    apiserverCurrentInqueueSeats = compbasemetrics.NewGaugeVec(
+        &compbasemetrics.GaugeOpts{
+            Namespace: namespace,
+            Subsystem: subsystem,
+            Name: "current_inqueue_seats",
+            Help: "Number of seats currently pending in queues of the API Priority and Fairness subsystem",
+            StabilityLevel: compbasemetrics.ALPHA,
+        },
+        []string{priorityLevel, flowSchema},
+    )
     apiserverRequestQueueLength = compbasemetrics.NewHistogramVec(
         &compbasemetrics.HistogramOpts{
             Namespace: namespace,
@@ -455,6 +465,7 @@ var (
         apiserverNextSBounds,
         apiserverNextDiscountedSBounds,
         apiserverCurrentInqueueRequests,
+        apiserverCurrentInqueueSeats,
         apiserverRequestQueueLength,
         apiserverRequestConcurrencyLimit,
         apiserverRequestConcurrencyInUse,
@@ -518,6 +529,11 @@ func AddRequestsInQueues(ctx context.Context, priorityLevel, flowSchema string,
     apiserverCurrentInqueueRequests.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta))
 }
 
+// AddSeatsInQueues adds the given delta to the gauge of the # of seats in the queues of the specified flowSchema and priorityLevel
+func AddSeatsInQueues(ctx context.Context, priorityLevel, flowSchema string, delta int) {
+    apiserverCurrentInqueueSeats.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta))
+}
+
 // AddRequestsExecuting adds the given delta to the gauge of executing requests of the given flowSchema and priorityLevel
 func AddRequestsExecuting(ctx context.Context, priorityLevel, flowSchema string, delta int) {
     apiserverCurrentExecutingRequests.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta))
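The new gauge is kept in step with the existing request-count gauge: every place the queueset diff adds or removes a queued request also adds or removes that request's MaxSeats worth of seats. A condensed sketch of that pairing using the two helpers above; the wrapper functions and their parameters are illustrative, not vendored code.

package example

import (
	"context"

	"k8s.io/apiserver/pkg/util/flowcontrol/metrics"
)

// noteEnqueued records one more waiting request and its seats.
func noteEnqueued(ctx context.Context, plName, fsName string, maxSeats int) {
	metrics.AddRequestsInQueues(ctx, plName, fsName, 1)
	metrics.AddSeatsInQueues(ctx, plName, fsName, maxSeats)
}

// noteDequeued undoes the same accounting when the request is dispatched or abandoned.
func noteDequeued(ctx context.Context, plName, fsName string, maxSeats int) {
	metrics.AddRequestsInQueues(ctx, plName, fsName, -1)
	metrics.AddSeatsInQueues(ctx, plName, fsName, -maxSeats)
}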
vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go (generated, vendored): 8 lines changed
@@ -117,8 +117,7 @@ func (e *listWorkEstimator) estimate(r *http.Request, flowSchemaName, priorityLe
     }
 
     limit := numStored
-    if utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) && listOptions.Limit > 0 &&
-        listOptions.Limit < numStored {
+    if listOptions.Limit > 0 && listOptions.Limit < numStored {
         limit = listOptions.Limit
     }
 
@@ -165,15 +164,14 @@ func key(requestInfo *apirequest.RequestInfo) string {
 func shouldListFromStorage(query url.Values, opts *metav1.ListOptions) bool {
     resourceVersion := opts.ResourceVersion
     match := opts.ResourceVersionMatch
-    pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)
     consistentListFromCacheEnabled := utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache)
 
     // Serve consistent reads from storage if ConsistentListFromCache is disabled
     consistentReadFromStorage := resourceVersion == "" && !consistentListFromCacheEnabled
     // Watch cache doesn't support continuations, so serve them from etcd.
-    hasContinuation := pagingEnabled && len(opts.Continue) > 0
+    hasContinuation := len(opts.Continue) > 0
     // Serve paginated requests about revision "0" from watch cache to avoid overwhelming etcd.
-    hasLimit := pagingEnabled && opts.Limit > 0 && resourceVersion != "0"
+    hasLimit := opts.Limit > 0 && resourceVersion != "0"
     // Watch cache only supports ResourceVersionMatchNotOlderThan (default).
     unsupportedMatch := match != "" && match != metav1.ResourceVersionMatchNotOlderThan
 
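Both edits in this file assume APIListChunking is always on (the gate is GA in 1.29), so the pagingEnabled guards simply disappear. A simplified, stand-alone restatement of the resulting decision; the final OR is an assumption about how the flags combine, since the hunk above does not show the return statement.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// shouldListFromEtcd is a simplified sketch of shouldListFromStorage after the
// APIListChunking checks were dropped: continuations, limited lists at a
// non-"0" resourceVersion, unsupported match semantics, and (optionally)
// consistent reads all bypass the watch cache.
func shouldListFromEtcd(opts metav1.ListOptions, consistentListFromCache bool) bool {
	resourceVersion := opts.ResourceVersion
	consistentReadFromStorage := resourceVersion == "" && !consistentListFromCache
	hasContinuation := len(opts.Continue) > 0
	hasLimit := opts.Limit > 0 && resourceVersion != "0"
	unsupportedMatch := opts.ResourceVersionMatch != "" && opts.ResourceVersionMatch != metav1.ResourceVersionMatchNotOlderThan
	// The combination below is an assumption; the diff shows only the flag computations.
	return consistentReadFromStorage || hasContinuation || hasLimit || unsupportedMatch
}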
vendor/k8s.io/apiserver/pkg/util/flowcontrol/rule.go (generated, vendored): 2 lines changed
@@ -19,7 +19,7 @@ package flowcontrol
 import (
     "strings"
 
-    flowcontrol "k8s.io/api/flowcontrol/v1beta3"
+    flowcontrol "k8s.io/api/flowcontrol/v1"
     "k8s.io/apiserver/pkg/authentication/serviceaccount"
     "k8s.io/apiserver/pkg/authentication/user"
     "k8s.io/apiserver/pkg/endpoints/request"
vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go (generated, vendored): 4 lines changed
@@ -18,9 +18,9 @@ package webhook
 
 import (
     "fmt"
-    "io/ioutil"
     "net"
     "net/http"
+    "os"
     "strconv"
     "strings"
     "time"
@@ -233,7 +233,7 @@ func restConfigFromKubeconfig(configAuthInfo *clientcmdapi.AuthInfo) (*rest.Conf
         config.BearerToken = configAuthInfo.Token
         config.BearerTokenFile = configAuthInfo.TokenFile
     } else if len(configAuthInfo.TokenFile) > 0 {
-        tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile)
+        tokenBytes, err := os.ReadFile(configAuthInfo.TokenFile)
         if err != nil {
             return nil, err
         }
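Unrelated to flowcontrol, this last hunk just retires the deprecated io/ioutil helper: os.ReadFile has been the drop-in replacement since Go 1.16, with the same ([]byte, error) shape. A trivial stand-alone example with an assumed path:

package main

import (
	"fmt"
	"os"
)

func main() {
	tokenBytes, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token") // example path
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("read %d bytes\n", len(tokenBytes))
}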