mirror of https://github.com/ceph/ceph-csi.git
rebase: update kubernetes to latest

Update the Kubernetes release to the latest in the main go.mod.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
committed by mergify[bot]
parent 63c4c05b35
commit 5a66991bb3
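For orientation, a rebase like this one bumps the k8s.io module lines in the repository's main go.mod and then re-vendors (go mod tidy, go mod vendor), which is what produces the vendored diffs below. The following go.mod excerpt is only an illustrative sketch; the module versions are placeholders, and the real pins for this commit live in the repository's go.mod.

    // go.mod (illustrative only; v0.xx.y stands in for whatever release the rebase pins)
    require (
        k8s.io/api          v0.xx.y
        k8s.io/apimachinery v0.xx.y
        k8s.io/apiserver    v0.xx.y
        k8s.io/client-go    v0.xx.y
    )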
vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go (generated, vendored): 99 changed lines
@@ -135,7 +135,7 @@ type configController struct {
 
     // configQueue holds `(interface{})(0)` when the configuration
     // objects need to be reprocessed.
-    configQueue workqueue.RateLimitingInterface
+    configQueue workqueue.TypedRateLimitingInterface[int]
 
     plLister         flowcontrollister.PriorityLevelConfigurationLister
     plInformerSynced cache.InformerSynced
@@ -204,7 +204,7 @@ type priorityLevelState struct {
     // reached through this pointer is mutable.
     pl *flowcontrol.PriorityLevelConfiguration
 
-    // qsCompleter holds the QueueSetCompleter derived from `config`
+    // qsCompleter holds the QueueSetCompleter derived from `pl`
     // and `queues`.
     qsCompleter fq.QueueSetCompleter
 
@@ -255,12 +255,12 @@ type priorityLevelState struct {
 type seatDemandStats struct {
     avg           float64
     stdDev        float64
-    highWatermark float64
+    highWatermark int
     smoothed      float64
 }
 
 func (stats *seatDemandStats) update(obs fq.IntegratorResults) {
-    stats.highWatermark = obs.Max
+    stats.highWatermark = int(math.Round(obs.Max))
     if obs.Duration <= 0 {
         return
     }
@@ -292,7 +292,10 @@ func newTestableController(config TestableConfig) *configController {
     klog.V(2).Infof("NewTestableController %q with serverConcurrencyLimit=%d, name=%s, asFieldManager=%q", cfgCtlr.name, cfgCtlr.serverConcurrencyLimit, cfgCtlr.name, cfgCtlr.asFieldManager)
     // Start with longish delay because conflicts will be between
     // different processes, so take some time to go away.
-    cfgCtlr.configQueue = workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 8*time.Hour), "priority_and_fairness_config_queue")
+    cfgCtlr.configQueue = workqueue.NewTypedRateLimitingQueueWithConfig(
+        workqueue.NewTypedItemExponentialFailureRateLimiter[int](200*time.Millisecond, 8*time.Hour),
+        workqueue.TypedRateLimitingQueueConfig[int]{Name: "priority_and_fairness_config_queue"},
+    )
     // ensure the data structure reflects the mandatory config
     cfgCtlr.lockAndDigestConfigObjects(nil, nil)
     fci := config.InformerFactory.Flowcontrol().V1()
@@ -395,38 +398,63 @@ func (cfgCtlr *configController) updateBorrowing() {
 
 func (cfgCtlr *configController) updateBorrowingLocked(setCompleters bool, plStates map[string]*priorityLevelState) {
     items := make([]allocProblemItem, 0, len(plStates))
-    plNames := make([]string, 0, len(plStates))
+    nonExemptPLNames := make([]string, 0, len(plStates))
+    idxOfNonExempt := map[string]int{} // items index of non-exempt classes
+    cclOfExempt := map[string]int{}    // minCurrentCL of exempt classes
+    var minCLSum, minCurrentCLSum int  // sums over non-exempt classes
+    remainingServerCL := cfgCtlr.nominalCLSum
     for plName, plState := range plStates {
         obs := plState.seatDemandIntegrator.Reset()
         plState.seatDemandStats.update(obs)
-        // Lower bound on this priority level's adjusted concurreny limit is the lesser of:
-        // - its seat demamd high watermark over the last adjustment period, and
-        // - its configured concurrency limit.
-        // BUT: we do not want this to be lower than the lower bound from configuration.
-        // See KEP-1040 for a more detailed explanation.
-        minCurrentCL := math.Max(float64(plState.minCL), math.Min(float64(plState.nominalCL), plState.seatDemandStats.highWatermark))
-        plNames = append(plNames, plName)
-        items = append(items, allocProblemItem{
-            lowerBound: minCurrentCL,
-            upperBound: float64(plState.maxCL),
-            target:     math.Max(minCurrentCL, plState.seatDemandStats.smoothed),
-        })
+        var minCurrentCL int
+        if plState.pl.Spec.Type == flowcontrol.PriorityLevelEnablementExempt {
+            minCurrentCL = max(plState.minCL, plState.seatDemandStats.highWatermark)
+            cclOfExempt[plName] = minCurrentCL
+            remainingServerCL -= minCurrentCL
+        } else {
+            // Lower bound on this priority level's adjusted concurreny limit is the lesser of:
+            // - its seat demamd high watermark over the last adjustment period, and
+            // - its configured concurrency limit.
+            // BUT: we do not want this to be lower than the lower bound from configuration.
+            // See KEP-1040 for a more detailed explanation.
+            minCurrentCL = max(plState.minCL, min(plState.nominalCL, plState.seatDemandStats.highWatermark))
+            idxOfNonExempt[plName] = len(items)
+            nonExemptPLNames = append(nonExemptPLNames, plName)
+            items = append(items, allocProblemItem{
+                lowerBound: float64(minCurrentCL),
+                upperBound: float64(plState.maxCL),
+                target:     math.Max(float64(minCurrentCL), plState.seatDemandStats.smoothed),
+            })
+            minCLSum += plState.minCL
+            minCurrentCLSum += minCurrentCL
+        }
     }
     if len(items) == 0 && cfgCtlr.nominalCLSum > 0 {
         klog.ErrorS(nil, "Impossible: no priority levels", "plStates", cfgCtlr.priorityLevelStates)
         return
     }
-    allocs, fairFrac, err := computeConcurrencyAllocation(cfgCtlr.nominalCLSum, items)
-    if err != nil {
-        klog.ErrorS(err, "Unable to derive new concurrency limits", "plNames", plNames, "items", items)
-        allocs = make([]float64, len(items))
-        for idx, plName := range plNames {
-            plState := plStates[plName]
-            allocs[idx] = float64(plState.currentCL)
-        }
-    }
-    for idx, plName := range plNames {
-        plState := plStates[plName]
+    var allocs []float64
+    var shareFrac, fairFrac float64
+    var err error
+    if remainingServerCL <= minCLSum {
+        metrics.SetFairFrac(0)
+    } else if remainingServerCL <= minCurrentCLSum {
+        shareFrac = float64(remainingServerCL-minCLSum) / float64(minCurrentCLSum-minCLSum)
+        metrics.SetFairFrac(0)
+    } else {
+        allocs, fairFrac, err = computeConcurrencyAllocation(cfgCtlr.nominalCLSum, items)
+        if err != nil {
+            klog.ErrorS(err, "Unable to derive new concurrency limits", "plNames", nonExemptPLNames, "items", items)
+            allocs = make([]float64, len(items))
+            for idx, plName := range nonExemptPLNames {
+                plState := plStates[plName]
+                allocs[idx] = float64(plState.currentCL)
+            }
+        }
+        metrics.SetFairFrac(float64(fairFrac))
+    }
+    for plName, plState := range plStates {
+        idx, isNonExempt := idxOfNonExempt[plName]
         if setCompleters {
             qsCompleter, err := queueSetCompleterForPL(cfgCtlr.queueSetFactory, plState.queues,
                 plState.pl, plState.reqsGaugePair, plState.execSeatsObs,
@@ -437,10 +465,20 @@ func (cfgCtlr *configController) updateBorrowingLocked(setCompleters bool, plStates map[string]*priorityLevelState) {
             }
             plState.qsCompleter = qsCompleter
         }
-        currentCL := int(math.Round(float64(allocs[idx])))
+        var currentCL int
+        if !isNonExempt {
+            currentCL = cclOfExempt[plName]
+        } else if remainingServerCL <= minCLSum {
+            currentCL = plState.minCL
+        } else if remainingServerCL <= minCurrentCLSum {
+            minCurrentCL := max(plState.minCL, min(plState.nominalCL, plState.seatDemandStats.highWatermark))
+            currentCL = plState.minCL + int(math.Round(float64(minCurrentCL-plState.minCL)*shareFrac))
+        } else {
+            currentCL = int(math.Round(float64(allocs[idx])))
+        }
         relChange := relDiff(float64(currentCL), float64(plState.currentCL))
         plState.currentCL = currentCL
-        metrics.NotePriorityLevelConcurrencyAdjustment(plState.pl.Name, plState.seatDemandStats.highWatermark, plState.seatDemandStats.avg, plState.seatDemandStats.stdDev, plState.seatDemandStats.smoothed, float64(items[idx].target), currentCL)
+        metrics.NotePriorityLevelConcurrencyAdjustment(plState.pl.Name, float64(plState.seatDemandStats.highWatermark), plState.seatDemandStats.avg, plState.seatDemandStats.stdDev, plState.seatDemandStats.smoothed, float64(items[idx].target), currentCL)
         logLevel := klog.Level(4)
         if relChange >= 0.05 {
             logLevel = 2
@@ -455,7 +493,6 @@ func (cfgCtlr *configController) updateBorrowingLocked(setCompleters bool, plStates map[string]*priorityLevelState) {
         klog.V(logLevel).InfoS("Update CurrentCL", "plName", plName, "seatDemandHighWatermark", plState.seatDemandStats.highWatermark, "seatDemandAvg", plState.seatDemandStats.avg, "seatDemandStdev", plState.seatDemandStats.stdDev, "seatDemandSmoothed", plState.seatDemandStats.smoothed, "fairFrac", fairFrac, "currentCL", currentCL, "concurrencyDenominator", concurrencyDenominator, "backstop", err != nil)
         plState.queues = plState.qsCompleter.Complete(fq.DispatchingConfig{ConcurrencyLimit: currentCL, ConcurrencyDenominator: concurrencyDenominator})
     }
-    metrics.SetFairFrac(float64(fairFrac))
 }
 
 // runWorker is the logic of the one and only worker goroutine. We
@@ -474,7 +511,7 @@ func (cfgCtlr *configController) processNextWorkItem() bool {
         return false
     }
 
-    func(obj interface{}) {
+    func(obj int) {
         defer cfgCtlr.configQueue.Done(obj)
         specificDelay, err := cfgCtlr.syncOne()
         switch {
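The first hunks above migrate the controller from the untyped workqueue API to the generic, typed one. Below is a minimal, self-contained sketch of that consumer pattern; it is not ceph-csi or Kubernetes code, and the queue name and item value are arbitrary, but the constructors are the same ones used in the diff.

    package main

    import (
        "fmt"
        "time"

        "k8s.io/client-go/util/workqueue"
    )

    func main() {
        // Typed queue of int items, mirroring the configQueue change above:
        // the same exponential-failure rate limiter, but no interface{} boxing.
        q := workqueue.NewTypedRateLimitingQueueWithConfig(
            workqueue.NewTypedItemExponentialFailureRateLimiter[int](200*time.Millisecond, 8*time.Hour),
            workqueue.TypedRateLimitingQueueConfig[int]{Name: "example_config_queue"},
        )

        q.Add(0) // enqueue the single "reprocess configuration" token

        item, shutdown := q.Get() // item is already an int; no type assertion needed
        if !shutdown {
            fmt.Println("processing item", item)
            q.Done(item)
        }
        q.ShutDown()
    }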
vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go (generated, vendored): 13 changed lines
@@ -25,6 +25,8 @@ import (
     "k8s.io/apimachinery/pkg/runtime/schema"
     apirequest "k8s.io/apiserver/pkg/endpoints/request"
     "k8s.io/apiserver/pkg/features"
+    "k8s.io/apiserver/pkg/storage"
+    etcdfeature "k8s.io/apiserver/pkg/storage/feature"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     "k8s.io/klog/v2"
 )
@@ -165,15 +167,16 @@ func shouldListFromStorage(query url.Values, opts *metav1.ListOptions) bool {
     resourceVersion := opts.ResourceVersion
     match := opts.ResourceVersionMatch
     consistentListFromCacheEnabled := utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache)
+    requestWatchProgressSupported := etcdfeature.DefaultFeatureSupportChecker.Supports(storage.RequestWatchProgress)
 
     // Serve consistent reads from storage if ConsistentListFromCache is disabled
-    consistentReadFromStorage := resourceVersion == "" && !consistentListFromCacheEnabled
+    consistentReadFromStorage := resourceVersion == "" && !(consistentListFromCacheEnabled && requestWatchProgressSupported)
     // Watch cache doesn't support continuations, so serve them from etcd.
     hasContinuation := len(opts.Continue) > 0
-    // Serve paginated requests about revision "0" from watch cache to avoid overwhelming etcd.
-    hasLimit := opts.Limit > 0 && resourceVersion != "0"
     // Watch cache only supports ResourceVersionMatchNotOlderThan (default).
-    unsupportedMatch := match != "" && match != metav1.ResourceVersionMatchNotOlderThan
+    // see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-get-and-list
+    isLegacyExactMatch := opts.Limit > 0 && match == "" && len(resourceVersion) > 0 && resourceVersion != "0"
+    unsupportedMatch := match != "" && match != metav1.ResourceVersionMatchNotOlderThan || isLegacyExactMatch
 
-    return consistentReadFromStorage || hasContinuation || hasLimit || unsupportedMatch
+    return consistentReadFromStorage || hasContinuation || unsupportedMatch
 }
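Condensed, the new routing decision in shouldListFromStorage amounts to the predicate below. This is a readability rephrasing with plain parameters, not code from the repository; the string "NotOlderThan" stands in for metav1.ResourceVersionMatchNotOlderThan.

    // listFromStorage reports whether a LIST request should bypass the watch
    // cache and be served from etcd, given the inputs computed in the vendored
    // function above.
    func listFromStorage(resourceVersion, match, continueToken string, limit int64,
        consistentListFromCacheEnabled, requestWatchProgressSupported bool) bool {
        // Consistent reads (unset resourceVersion) go to storage unless the cache
        // can serve them, which now also requires etcd watch-progress support.
        consistentReadFromStorage := resourceVersion == "" &&
            !(consistentListFromCacheEnabled && requestWatchProgressSupported)
        // Continuations are not supported by the watch cache.
        hasContinuation := continueToken != ""
        // A limit with a specific non-zero resourceVersion and no explicit match
        // defaults to exact semantics, which only etcd can provide.
        isLegacyExactMatch := limit > 0 && match == "" && resourceVersion != "" && resourceVersion != "0"
        unsupportedMatch := (match != "" && match != "NotOlderThan") || isLegacyExactMatch
        return consistentReadFromStorage || hasContinuation || unsupportedMatch
    }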