Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 18:43:34 +00:00)
rebase: update kubernetes to 1.28.0 in main

updating kubernetes to 1.28.0 in the main repo.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Committed by: mergify[bot]
Parent: b2fdc269c3
Commit: ff3e84ad67
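For orientation, a hedged sketch of the dependency bump behind this commit. The module list below is illustrative rather than copied from ceph-csi's go.mod, and the versions only assume the usual Kubernetes staging scheme, in which release 1.28.0 publishes its staging repos as v0.28.0; running "go mod tidy" followed by "go mod vendor" then regenerates the vendor/ tree whose diff follows.

// go.mod fragment (sketch only, assumed module set)
require (
    k8s.io/api v0.28.0
    k8s.io/apimachinery v0.28.0
    k8s.io/apiserver v0.28.0
    k8s.io/client-go v0.28.0
)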
vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go (generated, vendored): 161 lines changed
@@ -104,7 +104,7 @@ type Config struct {
 
 	Codec runtime.Codec
 
-	Clock clock.Clock
+	Clock clock.WithTicker
 }
 
 type watchersMap map[int]*cacheWatcher
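The Config.Clock change above widens the required clock from clock.Clock to clock.WithTicker. Callers that pass clock.RealClock from k8s.io/utils/clock keep compiling, since RealClock already provides NewTicker. A minimal compile-time check, as a hedged illustration (not part of the vendored code):

package main

import "k8s.io/utils/clock"

// Compile-time assertions: RealClock satisfies both the old field type
// (clock.Clock) and the new, wider one (clock.WithTicker).
var (
    _ clock.Clock      = clock.RealClock{}
    _ clock.WithTicker = clock.RealClock{}
)

func main() {}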
@@ -184,7 +184,6 @@ func (i *indexedWatchers) terminateAll(groupResource schema.GroupResource, done
 // second in a bucket, and pop up them once at the timeout. To be more specific,
 // if you set fire time at X, you can get the bookmark within (X-1,X+1) period.
 type watcherBookmarkTimeBuckets struct {
-	lock sync.Mutex
 	// the key of watcherBuckets is the number of seconds since createTime
 	watchersBuckets map[int64][]*cacheWatcher
 	createTime      time.Time
@@ -205,7 +204,7 @@ func newTimeBucketWatchers(clock clock.Clock, bookmarkFrequency time.Duration) *
 
 // adds a watcher to the bucket, if the deadline is before the start, it will be
 // added to the first one.
-func (t *watcherBookmarkTimeBuckets) addWatcher(w *cacheWatcher) bool {
+func (t *watcherBookmarkTimeBuckets) addWatcherThreadUnsafe(w *cacheWatcher) bool {
 	// note that the returned time can be before t.createTime,
 	// especially in cases when the nextBookmarkTime method
 	// give us the zero value of type Time
@@ -215,8 +214,6 @@ func (t *watcherBookmarkTimeBuckets) addWatcher(w *cacheWatcher) bool {
 		return false
 	}
 	bucketID := int64(nextTime.Sub(t.createTime) / time.Second)
-	t.lock.Lock()
-	defer t.lock.Unlock()
 	if bucketID < t.startBucketID {
 		bucketID = t.startBucketID
 	}
@@ -225,12 +222,10 @@ func (t *watcherBookmarkTimeBuckets) addWatcher(w *cacheWatcher) bool {
 	return true
 }
 
-func (t *watcherBookmarkTimeBuckets) popExpiredWatchers() [][]*cacheWatcher {
+func (t *watcherBookmarkTimeBuckets) popExpiredWatchersThreadUnsafe() [][]*cacheWatcher {
 	currentBucketID := int64(t.clock.Since(t.createTime) / time.Second)
 	// There should be one or two elements in almost all cases
 	expiredWatchers := make([][]*cacheWatcher, 0, 2)
-	t.lock.Lock()
-	defer t.lock.Unlock()
 	for ; t.startBucketID <= currentBucketID; t.startBucketID++ {
 		if watchers, ok := t.watchersBuckets[t.startBucketID]; ok {
 			delete(t.watchersBuckets, t.startBucketID)
@@ -328,11 +323,16 @@ type Cacher struct {
 	// dispatching that event to avoid race with closing channels in watchers.
 	watchersToStop []*cacheWatcher
 	// Maintain a timeout queue to send the bookmark event before the watcher times out.
+	// Note that this field when accessed MUST be protected by the Cacher.lock.
 	bookmarkWatchers *watcherBookmarkTimeBuckets
 	// expiredBookmarkWatchers is a list of watchers that were expired and need to be schedule for a next bookmark event
 	expiredBookmarkWatchers []*cacheWatcher
 }
 
+func (c *Cacher) RequestWatchProgress(ctx context.Context) error {
+	return c.storage.RequestWatchProgress(ctx)
+}
+
 // NewCacherFromConfig creates a new Cacher responsible for servicing WATCH and LIST requests from
 // its internal cache and updating its cache in the background based on the
 // given configuration.
@@ -401,10 +401,10 @@ func NewCacherFromConfig(config Config) (*Cacher, error) {
 		// so that future reuse does not get a spurious timeout.
 		<-cacher.timer.C
 	}
-
+	progressRequester := newConditionalProgressRequester(config.Storage.RequestWatchProgress, config.Clock)
 	watchCache := newWatchCache(
-		config.KeyFunc, cacher.processEvent, config.GetAttrsFunc, config.Versioner, config.Indexers, config.Clock, config.GroupResource)
-	listerWatcher := NewCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc)
+		config.KeyFunc, cacher.processEvent, config.GetAttrsFunc, config.Versioner, config.Indexers, config.Clock, config.GroupResource, progressRequester)
+	listerWatcher := NewListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc)
 	reflectorName := "storage/cacher.go:" + config.ResourcePrefix
 
 	reflector := cache.NewNamedReflector(reflectorName, listerWatcher, obj, watchCache, 0)
@@ -423,6 +423,7 @@ func NewCacherFromConfig(config Config) (*Cacher, error) {
 	cacher.reflector = reflector
 
 	go cacher.dispatchEvents()
+	go progressRequester.Run(stopCh)
 
 	cacher.stopWg.Add(1)
 	go func() {
@@ -592,6 +593,18 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions
 		identifier,
 	)
 
+	// note that c.waitUntilWatchCacheFreshAndForceAllEvents must be called without
+	// the c.watchCache.RLock held otherwise we are at risk of a deadlock
+	// mainly because c.watchCache.processEvent method won't be able to make progress
+	//
+	// moreover even though the c.waitUntilWatchCacheFreshAndForceAllEvents acquires a lock
+	// it is safe to release the lock after the method finishes because we don't require
+	// any atomicity between the call to the method and further calls that actually get the events.
+	forceAllEvents, err := c.waitUntilWatchCacheFreshAndForceAllEvents(ctx, requestedWatchRV, opts)
+	if err != nil {
+		return newErrWatcher(err), nil
+	}
+
 	// We explicitly use thread unsafe version and do locking ourself to ensure that
 	// no new events will be processed in the meantime. The watchCache will be unlocked
 	// on return from this function.
@@ -599,10 +612,7 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions
 	// underlying watchCache is calling processEvent under its lock.
 	c.watchCache.RLock()
 	defer c.watchCache.RUnlock()
-	forceAllEvents, err := c.waitUntilWatchCacheFreshAndForceAllEvents(ctx, requestedWatchRV, opts)
-	if err != nil {
-		return newErrWatcher(err), nil
-	}
+
 	startWatchRV := startWatchResourceVersionFn()
 	var cacheInterval *watchCacheInterval
 	if forceAllEvents {
@@ -638,7 +648,7 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions
 
 		// Add it to the queue only when the client support watch bookmarks.
 		if watcher.allowWatchBookmarks {
-			c.bookmarkWatchers.addWatcher(watcher)
+			c.bookmarkWatchers.addWatcherThreadUnsafe(watcher)
 		}
 		c.watcherIdx++
 	}()
@@ -716,17 +726,18 @@ func shouldDelegateList(opts storage.ListOptions) bool {
 	pred := opts.Predicate
 	match := opts.ResourceVersionMatch
 	pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)
+	consistentListFromCacheEnabled := utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache)
+
+	// Serve consistent reads from storage if ConsistentListFromCache is disabled
+	consistentReadFromStorage := resourceVersion == "" && !consistentListFromCacheEnabled
+	// Watch cache doesn't support continuations, so serve them from etcd.
 	hasContinuation := pagingEnabled && len(pred.Continue) > 0
+	// Serve paginated requests about revision "0" from watch cache to avoid overwhelming etcd.
 	hasLimit := pagingEnabled && pred.Limit > 0 && resourceVersion != "0"
+	// Watch cache only supports ResourceVersionMatchNotOlderThan (default).
 	unsupportedMatch := match != "" && match != metav1.ResourceVersionMatchNotOlderThan
 
-	// If resourceVersion is not specified, serve it from underlying
-	// storage (for backward compatibility). If a continuation is
-	// requested, serve it from the underlying storage as well.
-	// Limits are only sent to storage when resourceVersion is non-zero
-	// since the watch cache isn't able to perform continuations, and
-	// limits are ignored when resource version is zero
-	return resourceVersion == "" || hasContinuation || hasLimit || unsupportedMatch
+	return consistentReadFromStorage || hasContinuation || hasLimit || unsupportedMatch
 }
 
 func (c *Cacher) listItems(ctx context.Context, listRV uint64, key string, pred storage.SelectionPredicate, recursive bool) ([]interface{}, uint64, string, error) {
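A hedged illustration of the behavioural shift in the shouldDelegateList hunk above: once the ConsistentListFromCache gate is on, an unversioned list is no longer forced to etcd, while continuations, limits on non-zero resource versions, and unsupported match semantics still delegate. The helper below is a self-contained sketch that mirrors the new decision with the feature gates and predicate collapsed into plain values; it is not the vendored function itself.

package main

import "fmt"

// delegateList mirrors the new shouldDelegateList decision (sketch only).
func delegateList(resourceVersion, match, continueToken string, limit int64, consistentListFromCache bool) bool {
    consistentReadFromStorage := resourceVersion == "" && !consistentListFromCache
    hasContinuation := continueToken != ""
    hasLimit := limit > 0 && resourceVersion != "0"
    unsupportedMatch := match != "" && match != "NotOlderThan"
    return consistentReadFromStorage || hasContinuation || hasLimit || unsupportedMatch
}

func main() {
    // An unversioned list with no limit or continuation:
    fmt.Println(delegateList("", "", "", 0, false)) // true: gate off, delegate to etcd
    fmt.Println(delegateList("", "", "", 0, true))  // false: gate on, serve from the watch cache
}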
@@ -752,19 +763,21 @@ func (c *Cacher) GetList(ctx context.Context, key string, opts storage.ListOptio
 		return c.storage.GetList(ctx, key, opts, listObj)
 	}
 
 	// If resourceVersion is specified, serve it from cache.
 	// It's guaranteed that the returned value is at least that
 	// fresh as the given resourceVersion.
 	listRV, err := c.versioner.ParseResourceVersion(resourceVersion)
 	if err != nil {
 		return err
 	}
 
 	if listRV == 0 && !c.ready.check() {
 		// If Cacher is not yet initialized and we don't require any specific
 		// minimal resource version, simply forward the request to storage.
 		return c.storage.GetList(ctx, key, opts, listObj)
 	}
+	if listRV == 0 && utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) {
+		listRV, err = c.getCurrentResourceVersionFromStorage(ctx)
+		if err != nil {
+			return err
+		}
+	}
 
 	ctx, span := tracing.Start(ctx, "cacher list",
 		attribute.String("audit-id", audit.GetAuditIDTruncated(ctx)),
@@ -795,24 +808,30 @@ func (c *Cacher) GetList(ctx context.Context, key string, opts storage.ListOptio
 		return err
 	}
 	span.AddEvent("Listed items from cache", attribute.Int("count", len(objs)))
-	if len(objs) > listVal.Cap() && pred.Label.Empty() && pred.Field.Empty() {
-		// Resize the slice appropriately, since we already know that none
-		// of the elements will be filtered out.
-		listVal.Set(reflect.MakeSlice(reflect.SliceOf(c.objectType.Elem()), 0, len(objs)))
-		span.AddEvent("Resized result")
-	}
+	// store pointer of eligible objects,
+	// Why not directly put object in the items of listObj?
+	// the elements in ListObject are Struct type, making slice will bring excessive memory consumption.
+	// so we try to delay this action as much as possible
+	var selectedObjects []runtime.Object
 	for _, obj := range objs {
 		elem, ok := obj.(*storeElement)
 		if !ok {
 			return fmt.Errorf("non *storeElement returned from storage: %v", obj)
 		}
 		if filter(elem.Key, elem.Labels, elem.Fields) {
-			listVal.Set(reflect.Append(listVal, reflect.ValueOf(elem.Object).Elem()))
+			selectedObjects = append(selectedObjects, elem.Object)
 		}
 	}
-	if listVal.IsNil() {
+	if len(selectedObjects) == 0 {
 		// Ensure that we never return a nil Items pointer in the result for consistency.
 		listVal.Set(reflect.MakeSlice(listVal.Type(), 0, 0))
+	} else {
+		// Resize the slice appropriately, since we already know that size of result set
+		listVal.Set(reflect.MakeSlice(listVal.Type(), len(selectedObjects), len(selectedObjects)))
+		span.AddEvent("Resized result")
+		for i, o := range selectedObjects {
+			listVal.Index(i).Set(reflect.ValueOf(o).Elem())
+		}
 	}
 	span.AddEvent("Filtered items", attribute.Int("count", listVal.Len()))
 	if c.versioner != nil {
@@ -911,9 +930,25 @@ func (c *Cacher) dispatchEvents() {
 			bookmarkTimer.Reset(wait.Jitter(time.Second, 0.25))
 			// Never send a bookmark event if we did not see an event here, this is fine
 			// because we don't provide any guarantees on sending bookmarks.
+			//
+			// Just pop closed watchers and requeue others if needed.
+			//
+			// TODO(#115478): rework the following logic
+			// in a way that would allow more
+			// efficient cleanup of closed watchers
 			if lastProcessedResourceVersion == 0 {
-				// pop expired watchers in case there has been no update
-				c.bookmarkWatchers.popExpiredWatchers()
+				func() {
+					c.Lock()
+					defer c.Unlock()
+					for _, watchers := range c.bookmarkWatchers.popExpiredWatchersThreadUnsafe() {
+						for _, watcher := range watchers {
+							if watcher.stopped {
+								continue
+							}
+							c.bookmarkWatchers.addWatcherThreadUnsafe(watcher)
+						}
+					}
+				}()
 				continue
 			}
 			bookmarkEvent := &watchCacheEvent{
@@ -1035,7 +1070,7 @@ func (c *Cacher) dispatchEvent(event *watchCacheEvent) {
 func (c *Cacher) startDispatchingBookmarkEventsLocked() {
 	// Pop already expired watchers. However, explicitly ignore stopped ones,
 	// as we don't delete watcher from bookmarkWatchers when it is stopped.
-	for _, watchers := range c.bookmarkWatchers.popExpiredWatchers() {
+	for _, watchers := range c.bookmarkWatchers.popExpiredWatchersThreadUnsafe() {
 		for _, watcher := range watchers {
 			// c.Lock() is held here.
 			// watcher.stopThreadUnsafe() is protected by c.Lock()
@@ -1140,7 +1175,7 @@ func (c *Cacher) finishDispatching() {
 			continue
 		}
 		// requeue the watcher for the next bookmark if needed.
-		c.bookmarkWatchers.addWatcher(watcher)
+		c.bookmarkWatchers.addWatcherThreadUnsafe(watcher)
 	}
 	c.expiredBookmarkWatchers = c.expiredBookmarkWatchers[:0]
 }
@@ -1309,54 +1344,6 @@ func (c *Cacher) waitUntilWatchCacheFreshAndForceAllEvents(ctx context.Context,
 	return false, nil
 }
 
-// cacherListerWatcher opaques storage.Interface to expose cache.ListerWatcher.
-type cacherListerWatcher struct {
-	storage        storage.Interface
-	resourcePrefix string
-	newListFunc    func() runtime.Object
-}
-
-// NewCacherListerWatcher returns a storage.Interface backed ListerWatcher.
-func NewCacherListerWatcher(storage storage.Interface, resourcePrefix string, newListFunc func() runtime.Object) cache.ListerWatcher {
-	return &cacherListerWatcher{
-		storage:        storage,
-		resourcePrefix: resourcePrefix,
-		newListFunc:    newListFunc,
-	}
-}
-
-// Implements cache.ListerWatcher interface.
-func (lw *cacherListerWatcher) List(options metav1.ListOptions) (runtime.Object, error) {
-	list := lw.newListFunc()
-	pred := storage.SelectionPredicate{
-		Label:    labels.Everything(),
-		Field:    fields.Everything(),
-		Limit:    options.Limit,
-		Continue: options.Continue,
-	}
-
-	storageOpts := storage.ListOptions{
-		ResourceVersionMatch: options.ResourceVersionMatch,
-		Predicate:            pred,
-		Recursive:            true,
-	}
-	if err := lw.storage.GetList(context.TODO(), lw.resourcePrefix, storageOpts, list); err != nil {
-		return nil, err
-	}
-	return list, nil
-}
-
-// Implements cache.ListerWatcher interface.
-func (lw *cacherListerWatcher) Watch(options metav1.ListOptions) (watch.Interface, error) {
-	opts := storage.ListOptions{
-		ResourceVersion: options.ResourceVersion,
-		Predicate:       storage.Everything,
-		Recursive:       true,
-		ProgressNotify:  true,
-	}
-	return lw.storage.Watch(context.TODO(), lw.resourcePrefix, opts)
-}
-
 // errWatcher implements watch.Interface to return a single error
 type errWatcher struct {
 	result chan watch.Event
vendor/k8s.io/apiserver/pkg/storage/cacher/caching_object.go (generated, vendored): 4 lines changed
@@ -148,6 +148,10 @@ func (o *cachingObject) CacheEncode(id runtime.Identifier, encode func(runtime.O
 		if result.err != nil {
 			return result.err
 		}
+		if b, support := w.(runtime.Splice); support {
+			b.Splice(result.raw)
+			return nil
+		}
 		_, err := w.Write(result.raw)
 		return err
 	}
vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go (generated, vendored, new file): 77 lines
@@ -0,0 +1,77 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cacher

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/apiserver/pkg/storage"
	"k8s.io/client-go/tools/cache"
)

// listerWatcher opaques storage.Interface to expose cache.ListerWatcher.
type listerWatcher struct {
	storage        storage.Interface
	resourcePrefix string
	newListFunc    func() runtime.Object
}

// NewListerWatcher returns a storage.Interface backed ListerWatcher.
func NewListerWatcher(storage storage.Interface, resourcePrefix string, newListFunc func() runtime.Object) cache.ListerWatcher {
	return &listerWatcher{
		storage:        storage,
		resourcePrefix: resourcePrefix,
		newListFunc:    newListFunc,
	}
}

// Implements cache.ListerWatcher interface.
func (lw *listerWatcher) List(options metav1.ListOptions) (runtime.Object, error) {
	list := lw.newListFunc()
	pred := storage.SelectionPredicate{
		Label:    labels.Everything(),
		Field:    fields.Everything(),
		Limit:    options.Limit,
		Continue: options.Continue,
	}

	storageOpts := storage.ListOptions{
		ResourceVersionMatch: options.ResourceVersionMatch,
		Predicate:            pred,
		Recursive:            true,
	}
	if err := lw.storage.GetList(context.TODO(), lw.resourcePrefix, storageOpts, list); err != nil {
		return nil, err
	}
	return list, nil
}

// Implements cache.ListerWatcher interface.
func (lw *listerWatcher) Watch(options metav1.ListOptions) (watch.Interface, error) {
	opts := storage.ListOptions{
		ResourceVersion: options.ResourceVersion,
		Predicate:       storage.Everything,
		Recursive:       true,
		ProgressNotify:  true,
	}
	return lw.storage.Watch(context.TODO(), lw.resourcePrefix, opts)
}
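A hedged sketch of how the new constructor is consumed; the wiring mirrors the NewCacherFromConfig hunk in cacher.go above, and config, obj, watchCache and stopCh are assumed to be the surrounding variables from that function rather than new API.

// Sketch only: the ListerWatcher feeds a client-go reflector that keeps the
// watch cache in sync with the underlying storage.Interface.
listerWatcher := NewListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc)
reflectorName := "storage/cacher.go:" + config.ResourcePrefix
reflector := cache.NewNamedReflector(reflectorName, listerWatcher, obj, watchCache, 0)
go reflector.Run(stopCh)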
vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go (generated, vendored): 34 lines changed
@@ -30,8 +30,10 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/apiserver/pkg/features"
 	"k8s.io/apiserver/pkg/storage"
 	"k8s.io/apiserver/pkg/storage/cacher/metrics"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/component-base/tracing"
 	"k8s.io/klog/v2"
@@ -196,6 +198,10 @@ type watchCache struct {
 
 	// For testing cache interval invalidation.
 	indexValidator indexValidator
+
+	// Requests progress notification if there are requests waiting for watch
+	// to be fresh
+	waitingUntilFresh *conditionalProgressRequester
 }
 
 func newWatchCache(
@@ -204,8 +210,9 @@ func newWatchCache(
 	getAttrsFunc func(runtime.Object) (labels.Set, fields.Set, error),
 	versioner storage.Versioner,
 	indexers *cache.Indexers,
-	clock clock.Clock,
-	groupResource schema.GroupResource) *watchCache {
+	clock clock.WithTicker,
+	groupResource schema.GroupResource,
+	progressRequester *conditionalProgressRequester) *watchCache {
 	wc := &watchCache{
 		capacity: defaultLowerBoundCapacity,
 		keyFunc:  keyFunc,
@@ -222,6 +229,7 @@ func newWatchCache(
 		clock:             clock,
 		versioner:         versioner,
 		groupResource:     groupResource,
+		waitingUntilFresh: progressRequester,
 	}
 	metrics.WatchCacheCapacity.WithLabelValues(groupResource.String()).Set(float64(wc.capacity))
 	wc.cond = sync.NewCond(wc.RLocker())
@@ -305,7 +313,7 @@ func (w *watchCache) processEvent(event watch.Event, resourceVersion uint64, upd
 
 	if err := func() error {
 		// TODO: We should consider moving this lock below after the watchCacheEvent
-		// is created. In such situation, the only problematic scenario is Replace(
+		// is created. In such situation, the only problematic scenario is Replace()
 		// happening after getting object from store and before acquiring a lock.
 		// Maybe introduce another lock for this purpose.
 		w.Lock()
@@ -406,6 +414,7 @@ func (w *watchCache) UpdateResourceVersion(resourceVersion string) {
 		w.Lock()
 		defer w.Unlock()
 		w.resourceVersion = rv
+		w.cond.Broadcast()
 	}()
 
 	// Avoid calling event handler under lock.
@@ -484,7 +493,14 @@ func (s sortableStoreElements) Swap(i, j int) {
 // WaitUntilFreshAndList returns list of pointers to `storeElement` objects along
 // with their ResourceVersion and the name of the index, if any, that was used.
 func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion uint64, matchValues []storage.MatchValue) ([]interface{}, uint64, string, error) {
-	err := w.waitUntilFreshAndBlock(ctx, resourceVersion)
+	var err error
+	if utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) && w.notFresh(resourceVersion) {
+		w.waitingUntilFresh.Add()
+		err = w.waitUntilFreshAndBlock(ctx, resourceVersion)
+		w.waitingUntilFresh.Remove()
+	} else {
+		err = w.waitUntilFreshAndBlock(ctx, resourceVersion)
+	}
 	defer w.RUnlock()
 	if err != nil {
 		return nil, 0, "", err
@@ -507,6 +523,12 @@ func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion
 	return result, rv, index, err
 }
 
+func (w *watchCache) notFresh(resourceVersion uint64) bool {
+	w.RLock()
+	defer w.RUnlock()
+	return resourceVersion > w.resourceVersion
+}
+
 // WaitUntilFreshAndGet returns a pointers to <storeElement> object.
 func (w *watchCache) WaitUntilFreshAndGet(ctx context.Context, resourceVersion uint64, key string) (interface{}, bool, uint64, error) {
 	err := w.waitUntilFreshAndBlock(ctx, resourceVersion)
@@ -608,8 +630,8 @@ func (w *watchCache) Resync() error {
 }
 
 func (w *watchCache) currentCapacity() int {
-	w.Lock()
-	defer w.Unlock()
+	w.RLock()
+	defer w.RUnlock()
 	return w.capacity
 }
 
vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go (generated, vendored, new file): 121 lines
@@ -0,0 +1,121 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cacher

import (
	"context"
	"sync"
	"time"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"

	"k8s.io/klog/v2"
	"k8s.io/utils/clock"
)

const (
	// progressRequestPeriod determines period of requesting progress
	// from etcd when there is a request waiting for watch cache to be fresh.
	progressRequestPeriod = 100 * time.Millisecond
)

func newConditionalProgressRequester(requestWatchProgress WatchProgressRequester, clock TickerFactory) *conditionalProgressRequester {
	pr := &conditionalProgressRequester{
		clock:                clock,
		requestWatchProgress: requestWatchProgress,
	}
	pr.cond = sync.NewCond(pr.mux.RLocker())
	return pr
}

type WatchProgressRequester func(ctx context.Context) error

type TickerFactory interface {
	NewTicker(time.Duration) clock.Ticker
}

// conditionalProgressRequester will request progress notification if there
// is a request waiting for watch cache to be fresh.
type conditionalProgressRequester struct {
	clock                TickerFactory
	requestWatchProgress WatchProgressRequester

	mux     sync.RWMutex
	cond    *sync.Cond
	waiting int
	stopped bool
}

func (pr *conditionalProgressRequester) Run(stopCh <-chan struct{}) {
	ctx := wait.ContextForChannel(stopCh)
	go func() {
		defer utilruntime.HandleCrash()
		<-stopCh
		pr.mux.Lock()
		defer pr.mux.Unlock()
		pr.stopped = true
		pr.cond.Signal()
	}()
	ticker := pr.clock.NewTicker(progressRequestPeriod)
	defer ticker.Stop()
	for {
		stopped := func() bool {
			pr.mux.RLock()
			defer pr.mux.RUnlock()
			for pr.waiting == 0 && !pr.stopped {
				pr.cond.Wait()
			}
			return pr.stopped
		}()
		if stopped {
			return
		}

		select {
		case <-ticker.C():
			shouldRequest := func() bool {
				pr.mux.RLock()
				defer pr.mux.RUnlock()
				return pr.waiting > 0 && !pr.stopped
			}()
			if !shouldRequest {
				continue
			}
			err := pr.requestWatchProgress(ctx)
			if err != nil {
				klog.V(4).InfoS("Error requesting bookmark", "err", err)
			}
		case <-stopCh:
			return
		}
	}
}

func (pr *conditionalProgressRequester) Add() {
	pr.mux.Lock()
	defer pr.mux.Unlock()
	pr.waiting += 1
	pr.cond.Signal()
}

func (pr *conditionalProgressRequester) Remove() {
	pr.mux.Lock()
	defer pr.mux.Unlock()
	pr.waiting -= 1
	pr.cond.Signal()
}
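A hedged usage sketch for the type above, written as if inside the cacher package: Run polls storage.RequestWatchProgress once per progressRequestPeriod only while at least one reader has registered via Add, so readers bracket their blocking wait with Add and Remove, exactly as WaitUntilFreshAndList does in the watch_cache.go hunk above. The helper name and the waitFresh callback are hypothetical stand-ins, not vendored API.

// Sketch only: bracket a blocking freshness wait so the background loop
// knows someone is waiting and keeps asking etcd for progress events.
func waitFreshWithProgress(ctx context.Context, pr *conditionalProgressRequester, waitFresh func(context.Context) error) error {
	pr.Add()
	defer pr.Remove()
	return waitFresh(ctx)
}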