Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 18:53:35 +00:00

rebase: update K8s packages to v0.32.1

Update K8s packages in go.mod to v0.32.1

Signed-off-by: Praveen M <m.praveen@ibm.com>
vendor/k8s.io/apiserver/pkg/storage/cacher/cache_watcher.go (generated, vendored): 11 lines changed
@@ -454,6 +454,13 @@ func (c *cacheWatcher) processInterval(ctx context.Context, cacheInterval *watch
 	const initProcessThreshold = 500 * time.Millisecond
 	startTime := time.Now()
 
+	// cacheInterval may be created from a version being more fresh than requested
+	// (e.g. for NotOlderThan semantic). In such a case, we need to prevent watch event
+	// with lower resourceVersion from being delivered to ensure watch contract.
+	if cacheInterval.resourceVersion > resourceVersion {
+		resourceVersion = cacheInterval.resourceVersion
+	}
+
 	initEventCount := 0
 	for {
 		event, err := cacheInterval.Next()
@@ -503,6 +510,10 @@ func (c *cacheWatcher) processInterval(ctx context.Context, cacheInterval *watch
 		klog.V(2).Infof("processing %d initEvents of %s (%s) took %v", initEventCount, c.groupResource, c.identifier, processingTime)
 	}
 
+	// send bookmark after sending all events in cacheInterval for watchlist request
+	if cacheInterval.initialEventsEndBookmark != nil {
+		c.sendWatchCacheEvent(cacheInterval.initialEventsEndBookmark)
+	}
 	c.process(ctx, resourceVersion)
 }
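The first hunk above closes a watch-contract gap: a cacheInterval may be built from a snapshot fresher than the caller's requested resourceVersion (NotOlderThan semantics), and without clamping, events older than that snapshot could be delivered. A minimal stand-alone sketch of the clamping rule, with plain integers in place of the vendored types:

package main

import "fmt"

// effectiveRV mirrors the clamping added in processInterval: if the interval
// was constructed from a fresher snapshot than the client requested, raise
// the effective resourceVersion so no older event is delivered. Names here
// are illustrative, not the vendored identifiers.
func effectiveRV(requestedRV, intervalRV uint64) uint64 {
	if intervalRV > requestedRV {
		return intervalRV
	}
	return requestedRV
}

func main() {
	fmt.Println(effectiveRV(100, 250)) // 250: interval snapshot is fresher, clamp up
	fmt.Println(effectiveRV(300, 250)) // 300: requested floor already higher
}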
vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go (generated, vendored): 42 lines changed
@@ -492,7 +492,7 @@ func (c *Cacher) Create(ctx context.Context, key string, obj, out runtime.Object
 // Delete implements storage.Interface.
 func (c *Cacher) Delete(
 	ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions,
-	validateDeletion storage.ValidateObjectFunc, _ runtime.Object) error {
+	validateDeletion storage.ValidateObjectFunc, _ runtime.Object, opts storage.DeleteOptions) error {
 	// Ignore the suggestion and try to pass down the current version of the object
 	// read from cache.
 	if elem, exists, err := c.watchCache.GetByKey(key); err != nil {
@@ -501,10 +501,10 @@ func (c *Cacher) Delete(
 		// DeepCopy the object since we modify resource version when serializing the
 		// current object.
 		currObj := elem.(*storeElement).Object.DeepCopyObject()
-		return c.storage.Delete(ctx, key, out, preconditions, validateDeletion, currObj)
+		return c.storage.Delete(ctx, key, out, preconditions, validateDeletion, currObj, opts)
 	}
 	// If we couldn't get the object, fallback to no-suggestion.
-	return c.storage.Delete(ctx, key, out, preconditions, validateDeletion, nil)
+	return c.storage.Delete(ctx, key, out, preconditions, validateDeletion, nil, opts)
 }
 
 type namespacedName struct {
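The two hunks above track the upstream storage.Interface change in v0.32: Delete gained a storage.DeleteOptions parameter, and the cacher simply forwards it, whether or not it can supply a cached object as a deletion suggestion. A minimal sketch of that delegation pattern; all types below are stand-ins for illustration (including the deleteOptions field, which is hypothetical), not the real storage.Interface:

package main

import (
	"context"
	"fmt"
)

// deleteOptions stands in for storage.DeleteOptions; the field is hypothetical.
type deleteOptions struct{ ignoreStoreReadError bool }

type backend interface {
	delete(ctx context.Context, key string, suggestion any, opts deleteOptions) error
}

type cachingFront struct {
	cache   map[string]any
	storage backend
}

func (c *cachingFront) delete(ctx context.Context, key string, opts deleteOptions) error {
	if curr, ok := c.cache[key]; ok {
		// Pass the cached object down as a suggestion; opts travel unchanged.
		return c.storage.delete(ctx, key, curr, opts)
	}
	// No cached copy: fall back to no-suggestion, still forwarding opts.
	return c.storage.delete(ctx, key, nil, opts)
}

type printBackend struct{}

func (printBackend) delete(_ context.Context, key string, suggestion any, opts deleteOptions) error {
	fmt.Println("delete", key, "suggestion:", suggestion, "opts:", opts)
	return nil
}

func main() {
	f := &cachingFront{cache: map[string]any{"/pods/a": "podA"}, storage: printBackend{}}
	_ = f.delete(context.Background(), "/pods/a", deleteOptions{})
	_ = f.delete(context.Background(), "/pods/b", deleteOptions{ignoreStoreReadError: true})
}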
@@ -653,6 +653,8 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions
 		return newErrWatcher(err), nil
 	}
 
+	c.setInitialEventsEndBookmarkIfRequested(cacheInterval, opts, c.watchCache.resourceVersion)
+
 	addedWatcher := false
 	func() {
 		c.Lock()
@@ -693,9 +695,15 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions
 
 // Get implements storage.Interface.
 func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, objPtr runtime.Object) error {
+	ctx, span := tracing.Start(ctx, "cacher.Get",
+		attribute.String("audit-id", audit.GetAuditIDTruncated(ctx)),
+		attribute.String("key", key),
+		attribute.String("resource-version", opts.ResourceVersion))
+	defer span.End(500 * time.Millisecond)
 	if opts.ResourceVersion == "" {
 		// If resourceVersion is not specified, serve it from underlying
 		// storage (for backward compatibility).
+		span.AddEvent("About to Get from underlying storage")
 		return c.storage.Get(ctx, key, opts, objPtr)
 	}
 
@@ -703,6 +711,7 @@ func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, o
 	if !c.ready.check() {
 		// If Cache is not initialized, delegate Get requests to storage
 		// as described in https://kep.k8s.io/4568
+		span.AddEvent("About to Get from underlying storage - cache not initialized")
 		return c.storage.Get(ctx, key, opts, objPtr)
 	}
 }
@@ -722,6 +731,7 @@ func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, o
 	if getRV == 0 && !c.ready.check() {
 		// If Cacher is not yet initialized and we don't require any specific
 		// minimal resource version, simply forward the request to storage.
+		span.AddEvent("About to Get from underlying storage - cache not initialized and no resourceVersion set")
 		return c.storage.Get(ctx, key, opts, objPtr)
 	}
 	if err := c.ready.wait(ctx); err != nil {
@@ -734,6 +744,7 @@ func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, o
 		return err
 	}
 
+	span.AddEvent("About to fetch object from cache")
 	obj, exists, readResourceVersion, err := c.watchCache.WaitUntilFreshAndGet(ctx, getRV, key)
 	if err != nil {
 		return err
@@ -856,7 +867,7 @@ func (c *Cacher) GetList(ctx context.Context, key string, opts storage.ListOptio
 		}
 	}
 
-	ctx, span := tracing.Start(ctx, "cacher list",
+	ctx, span := tracing.Start(ctx, "cacher.GetList",
 		attribute.String("audit-id", audit.GetAuditIDTruncated(ctx)),
 		attribute.Stringer("type", c.groupResource))
 	defer span.End(500 * time.Millisecond)
@@ -1119,6 +1130,9 @@ func (c *Cacher) dispatchEvent(event *watchCacheEvent) {
 	// Since add() can block, we explicitly add when cacher is unlocked.
 	// Dispatching event in nonblocking way first, which make faster watchers
 	// not be blocked by slower ones.
+	//
+	// Note: if we ever decide to cache the serialization of bookmark events,
+	// we will also need to modify the watchEncoder encoder
 	if event.Type == watch.Bookmark {
 		for _, watcher := range c.watchersBuffer {
 			watcher.nonblockingAdd(event)
@@ -1439,6 +1453,26 @@ func (c *Cacher) Wait(ctx context.Context) error {
 	return c.ready.wait(ctx)
 }
 
+// setInitialEventsEndBookmarkIfRequested sets initialEventsEndBookmark field in watchCacheInterval for watchlist request
+func (c *Cacher) setInitialEventsEndBookmarkIfRequested(cacheInterval *watchCacheInterval, opts storage.ListOptions, currentResourceVersion uint64) {
+	if opts.SendInitialEvents != nil && *opts.SendInitialEvents && opts.Predicate.AllowWatchBookmarks {
+		// We don't need to set the InitialEventsAnnotation for this bookmark event,
+		// because this will be automatically set during event conversion in cacheWatcher.convertToWatchEvent method
+		initialEventsEndBookmark := &watchCacheEvent{
+			Type:            watch.Bookmark,
+			Object:          c.newFunc(),
+			ResourceVersion: currentResourceVersion,
+		}
+
+		if err := c.versioner.UpdateObject(initialEventsEndBookmark.Object, initialEventsEndBookmark.ResourceVersion); err != nil {
+			klog.Errorf("failure to set resourceVersion to %d on initialEventsEndBookmark event %+v for watchlist request and wait for bookmark trigger to send", initialEventsEndBookmark.ResourceVersion, initialEventsEndBookmark.Object)
+			initialEventsEndBookmark = nil
+		}
+
+		cacheInterval.initialEventsEndBookmark = initialEventsEndBookmark
+	}
+}
+
 // errWatcher implements watch.Interface to return a single error
 type errWatcher struct {
 	result chan watch.Event
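The new setInitialEventsEndBookmarkIfRequested helper and the cache_watcher.go hunk earlier are two halves of the same watchlist feature: the Cacher attaches a bookmark event stamped with the cache's current resourceVersion to the interval, and the watcher emits it only after every initial event has been sent. A simplified sketch of that flow, with stand-in types for watchCacheEvent and watchCacheInterval:

package main

import "fmt"

type event struct {
	typ string
	rv  uint64
}

type interval struct {
	events                   []event
	initialEventsEndBookmark *event // nil unless a watchlist bookmark was requested
}

// drain sends all initial events, then the bookmark, mirroring the ordering
// enforced in processInterval.
func drain(ci *interval, send func(event)) {
	for _, e := range ci.events {
		send(e)
	}
	if ci.initialEventsEndBookmark != nil {
		send(*ci.initialEventsEndBookmark) // signals "initial list complete"
	}
}

func main() {
	ci := &interval{
		events:                   []event{{"ADDED", 10}, {"ADDED", 12}},
		initialEventsEndBookmark: &event{"BOOKMARK", 15},
	}
	drain(ci, func(e event) { fmt.Println(e.typ, e.rv) })
}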
vendor/k8s.io/apiserver/pkg/storage/cacher/store.go (generated, vendored, new file): 141 lines
@@ -0,0 +1,141 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cacher

import (
	"fmt"

	"github.com/google/btree"

	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/features"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/client-go/tools/cache"
)

const (
	// btreeDegree defines the degree of btree storage.
	// Decided based on the benchmark results (below).
	// Selected the lowest degree from three options with best runtime (16,32,128).
	// │ 2 │ 4 │ 8 │ 16 │ 32 │ 64 │ 128 │
	// │ sec/op │ sec/op vs base │ sec/op vs base │ sec/op vs base │ sec/op vs base │ sec/op vs base │ sec/op vs base │
	// StoreCreateList/RV=NotOlderThan-24 473.0µ ± 11% 430.1µ ± 9% -9.08% (p=0.005 n=10) 427.9µ ± 6% -9.54% (p=0.002 n=10) 403.9µ ± 8% -14.62% (p=0.000 n=10) 401.0µ ± 4% -15.22% (p=0.000 n=10) 408.0µ ± 4% -13.75% (p=0.000 n=10) 385.9µ ± 4% -18.42% (p=0.000 n=10)
	// StoreCreateList/RV=ExactMatch-24 604.7µ ± 4% 596.7µ ± 8% ~ (p=0.529 n=10) 604.6µ ± 4% ~ (p=0.971 n=10) 601.1µ ± 4% ~ (p=0.853 n=10) 611.0µ ± 6% ~ (p=0.105 n=10) 598.2µ ± 5% ~ (p=0.579 n=10) 608.2µ ± 3% ~ (p=0.796 n=10)
	// StoreList/List=All/Paginate=False/RV=Empty-24 729.1µ ± 5% 692.9µ ± 3% -4.96% (p=0.002 n=10) 693.7µ ± 3% -4.86% (p=0.000 n=10) 688.3µ ± 1% -5.59% (p=0.000 n=10) 690.4µ ± 5% -5.31% (p=0.002 n=10) 689.7µ ± 2% -5.40% (p=0.000 n=10) 687.8µ ± 3% -5.67% (p=0.000 n=10)
	// StoreList/List=All/Paginate=True/RV=Empty-24 19.51m ± 2% 19.84m ± 2% ~ (p=0.105 n=10) 19.89m ± 3% ~ (p=0.190 n=10) 19.64m ± 4% ~ (p=0.853 n=10) 19.34m ± 4% ~ (p=0.481 n=10) 20.22m ± 4% +3.66% (p=0.007 n=10) 19.58m ± 4% ~ (p=0.912 n=10)
	// StoreList/List=Namespace/Paginate=False/RV=Empty-24 1.672m ± 4% 1.635m ± 2% ~ (p=0.247 n=10) 1.673m ± 5% ~ (p=0.631 n=10) 1.657m ± 2% ~ (p=0.971 n=10) 1.656m ± 4% ~ (p=0.739 n=10) 1.678m ± 2% ~ (p=0.631 n=10) 1.718m ± 8% ~ (p=0.105 n=10)
	// geomean 1.467m 1.420m -3.24% 1.430m -2.58% 1.403m -4.38% 1.402m -4.46% 1.417m -3.44% 1.403m -4.41%
	//
	// │ 2 │ 4 │ 8 │ 16 │ 32 │ 64 │ 128 │
	// │ B/op │ B/op vs base │ B/op vs base │ B/op vs base │ B/op vs base │ B/op vs base │ B/op vs base │
	// StoreCreateList/RV=NotOlderThan-24 98.58Ki ± 11% 101.33Ki ± 13% ~ (p=0.280 n=10) 99.80Ki ± 26% ~ (p=0.353 n=10) 109.63Ki ± 9% ~ (p=0.075 n=10) 112.56Ki ± 6% +14.18% (p=0.007 n=10) 114.41Ki ± 10% +16.05% (p=0.003 n=10) 115.06Ki ± 12% +16.72% (p=0.011 n=10)
	// StoreCreateList/RV=ExactMatch-24 117.1Ki ± 0% 117.5Ki ± 0% ~ (p=0.218 n=10) 116.9Ki ± 0% ~ (p=0.052 n=10) 117.3Ki ± 0% ~ (p=0.353 n=10) 116.9Ki ± 0% ~ (p=0.075 n=10) 117.0Ki ± 0% ~ (p=0.436 n=10) 117.0Ki ± 0% ~ (p=0.280 n=10)
	// StoreList/List=All/Paginate=False/RV=Empty-24 6.023Mi ± 0% 6.024Mi ± 0% +0.01% (p=0.037 n=10) 6.024Mi ± 0% ~ (p=0.493 n=10) 6.024Mi ± 0% +0.01% (p=0.035 n=10) 6.024Mi ± 0% ~ (p=0.247 n=10) 6.024Mi ± 0% ~ (p=0.247 n=10) 6.024Mi ± 0% ~ (p=0.315 n=10)
	// StoreList/List=All/Paginate=True/RV=Empty-24 64.22Mi ± 0% 64.21Mi ± 0% ~ (p=0.075 n=10) 64.23Mi ± 0% ~ (p=0.280 n=10) 64.21Mi ± 0% -0.02% (p=0.002 n=10) 64.22Mi ± 0% ~ (p=0.579 n=10) 64.22Mi ± 0% ~ (p=0.971 n=10) 64.22Mi ± 0% ~ (p=1.000 n=10)
	// StoreList/List=Namespace/Paginate=False/RV=Empty-24 8.177Mi ± 0% 8.178Mi ± 0% ~ (p=0.579 n=10) 8.177Mi ± 0% ~ (p=0.971 n=10) 8.179Mi ± 0% ~ (p=0.579 n=10) 8.178Mi ± 0% ~ (p=0.739 n=10) 8.179Mi ± 0% ~ (p=0.315 n=10) 8.176Mi ± 0% ~ (p=0.247 n=10)
	// geomean 2.034Mi 2.047Mi +0.61% 2.039Mi +0.22% 2.079Mi +2.19% 2.088Mi +2.66% 2.095Mi +3.01% 2.098Mi +3.12%
	//
	// │ 2 │ 4 │ 8 │ 16 │ 32 │ 64 │ 128 │
	// │ allocs/op │ allocs/op vs base │ allocs/op vs base │ allocs/op vs base │ allocs/op vs base │ allocs/op vs base │ allocs/op vs base │
	// StoreCreateList/RV=NotOlderThan-24 560.0 ± 0% 558.0 ± 0% -0.36% (p=0.000 n=10) 557.0 ± 0% -0.54% (p=0.000 n=10) 558.0 ± 0% -0.36% (p=0.000 n=10) 557.0 ± 0% -0.54% (p=0.000 n=10) 557.0 ± 0% -0.54% (p=0.000 n=10) 557.0 ± 0% -0.54% (p=0.000 n=10)
	// StoreCreateList/RV=ExactMatch-24 871.0 ± 0% 870.0 ± 0% -0.11% (p=0.038 n=10) 870.0 ± 0% -0.11% (p=0.004 n=10) 870.0 ± 0% -0.11% (p=0.005 n=10) 869.0 ± 0% -0.23% (p=0.000 n=10) 870.0 ± 0% -0.11% (p=0.001 n=10) 870.0 ± 0% -0.11% (p=0.000 n=10)
	// StoreList/List=All/Paginate=False/RV=Empty-24 351.0 ± 3% 358.0 ± 1% +1.99% (p=0.034 n=10) 352.5 ± 3% ~ (p=0.589 n=10) 358.5 ± 1% +2.14% (p=0.022 n=10) 356.5 ± 3% ~ (p=0.208 n=10) 355.0 ± 3% ~ (p=0.224 n=10) 355.0 ± 3% ~ (p=0.183 n=10)
	// StoreList/List=All/Paginate=True/RV=Empty-24 494.4k ± 0% 494.4k ± 0% ~ (p=0.424 n=10) 494.6k ± 0% +0.06% (p=0.000 n=10) 492.7k ± 0% -0.34% (p=0.000 n=10) 494.5k ± 0% +0.02% (p=0.009 n=10) 493.0k ± 0% -0.28% (p=0.000 n=10) 494.4k ± 0% ~ (p=0.424 n=10)
	// StoreList/List=Namespace/Paginate=False/RV=Empty-24 32.43k ± 0% 32.44k ± 0% ~ (p=0.579 n=10) 32.43k ± 0% ~ (p=0.971 n=10) 32.45k ± 0% ~ (p=0.517 n=10) 32.44k ± 0% ~ (p=0.670 n=10) 32.46k ± 0% ~ (p=0.256 n=10) 32.41k ± 0% ~ (p=0.247 n=10)
	// geomean 4.872k 4.887k +0.31% 4.870k -0.03% 4.885k +0.28% 4.880k +0.17% 4.875k +0.06% 4.876k +0.08%
	btreeDegree = 16
)

type storeIndexer interface {
	Add(obj interface{}) error
	Update(obj interface{}) error
	Delete(obj interface{}) error
	List() []interface{}
	ListKeys() []string
	Get(obj interface{}) (item interface{}, exists bool, err error)
	GetByKey(key string) (item interface{}, exists bool, err error)
	Replace([]interface{}, string) error
	ByIndex(indexName, indexedValue string) ([]interface{}, error)
}

type orderedLister interface {
	ListPrefix(prefix, continueKey string, limit int) (items []interface{}, hasMore bool)
}

func newStoreIndexer(indexers *cache.Indexers) storeIndexer {
	if utilfeature.DefaultFeatureGate.Enabled(features.BtreeWatchCache) {
		return newThreadedBtreeStoreIndexer(storeElementIndexers(indexers), btreeDegree)
	}
	return cache.NewIndexer(storeElementKey, storeElementIndexers(indexers))
}

// Computing a key of an object is generally non-trivial (it performs
// e.g. validation underneath). Similarly computing object fields and
// labels. To avoid computing them multiple times (to serve the event
// in different List/Watch requests), in the underlying store we are
// keeping structs (key, object, labels, fields).
type storeElement struct {
	Key    string
	Object runtime.Object
	Labels labels.Set
	Fields fields.Set
}

func (t *storeElement) Less(than btree.Item) bool {
	return t.Key < than.(*storeElement).Key
}

var _ btree.Item = (*storeElement)(nil)

func storeElementKey(obj interface{}) (string, error) {
	elem, ok := obj.(*storeElement)
	if !ok {
		return "", fmt.Errorf("not a storeElement: %v", obj)
	}
	return elem.Key, nil
}

func storeElementObject(obj interface{}) (runtime.Object, error) {
	elem, ok := obj.(*storeElement)
	if !ok {
		return nil, fmt.Errorf("not a storeElement: %v", obj)
	}
	return elem.Object, nil
}

func storeElementIndexFunc(objIndexFunc cache.IndexFunc) cache.IndexFunc {
	return func(obj interface{}) (strings []string, e error) {
		seo, err := storeElementObject(obj)
		if err != nil {
			return nil, err
		}
		return objIndexFunc(seo)
	}
}

func storeElementIndexers(indexers *cache.Indexers) cache.Indexers {
	if indexers == nil {
		return cache.Indexers{}
	}
	ret := cache.Indexers{}
	for indexName, indexFunc := range *indexers {
		ret[indexName] = storeElementIndexFunc(indexFunc)
	}
	return ret
}
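store.go consolidates the storeElement helpers previously defined in watch_cache.go. The storeElementIndexFunc adapter exists because user-supplied index functions operate on bare objects, while the store holds *storeElement wrappers that cache the precomputed key, labels, and fields. A minimal sketch of the adapter pattern with simplified stand-in types:

package main

import "fmt"

type indexFunc func(obj any) ([]string, error)

type element struct {
	Key    string
	Object any
}

// wrap adapts an object-level index function so it can be registered against
// a store whose items are *element wrappers.
func wrap(f indexFunc) indexFunc {
	return func(obj any) ([]string, error) {
		elem, ok := obj.(*element)
		if !ok {
			return nil, fmt.Errorf("not an element: %v", obj)
		}
		return f(elem.Object) // index the unwrapped object
	}
}

func main() {
	byNamespace := func(obj any) ([]string, error) {
		return []string{obj.(map[string]string)["namespace"]}, nil
	}
	wrapped := wrap(byNamespace)
	vals, _ := wrapped(&element{Key: "/pods/ns1/a", Object: map[string]string{"namespace": "ns1"}})
	fmt.Println(vals) // [ns1]
}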
vendor/k8s.io/apiserver/pkg/storage/cacher/store_btree.go (generated, vendored, new file): 393 lines
@@ -0,0 +1,393 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cacher

import (
	"fmt"
	"math"
	"strings"
	"sync"

	"github.com/google/btree"
	"k8s.io/client-go/tools/cache"
)

// newThreadedBtreeStoreIndexer returns a storage for cacher by adding locking over the two data structures:
// * btree based storage for efficient LIST operation on prefix
// * map based indexer for retrieving values by index.
// This separation is used to allow independent snapshotting those two storages in the future.
// Intention is to utilize btree for its cheap snapshots that don't require locking if don't mutate data.
func newThreadedBtreeStoreIndexer(indexers cache.Indexers, degree int) *threadedStoreIndexer {
	return &threadedStoreIndexer{
		store:   newBtreeStore(degree),
		indexer: newIndexer(indexers),
	}
}

type threadedStoreIndexer struct {
	lock    sync.RWMutex
	store   btreeStore
	indexer indexer
}

func (si *threadedStoreIndexer) Add(obj interface{}) error {
	return si.addOrUpdate(obj)
}

func (si *threadedStoreIndexer) Update(obj interface{}) error {
	return si.addOrUpdate(obj)
}

func (si *threadedStoreIndexer) addOrUpdate(obj interface{}) error {
	if obj == nil {
		return fmt.Errorf("obj cannot be nil")
	}
	newElem, ok := obj.(*storeElement)
	if !ok {
		return fmt.Errorf("obj not a storeElement: %#v", obj)
	}
	si.lock.Lock()
	defer si.lock.Unlock()
	oldElem := si.store.addOrUpdateElem(newElem)
	return si.indexer.updateElem(newElem.Key, oldElem, newElem)
}

func (si *threadedStoreIndexer) Delete(obj interface{}) error {
	storeElem, ok := obj.(*storeElement)
	if !ok {
		return fmt.Errorf("obj not a storeElement: %#v", obj)
	}
	si.lock.Lock()
	defer si.lock.Unlock()
	oldObj := si.store.deleteElem(storeElem)
	if oldObj == nil {
		return nil
	}
	return si.indexer.updateElem(storeElem.Key, oldObj.(*storeElement), nil)
}

func (si *threadedStoreIndexer) List() []interface{} {
	si.lock.RLock()
	defer si.lock.RUnlock()
	return si.store.List()
}

func (si *threadedStoreIndexer) ListPrefix(prefix, continueKey string, limit int) ([]interface{}, bool) {
	si.lock.RLock()
	defer si.lock.RUnlock()
	return si.store.ListPrefix(prefix, continueKey, limit)
}

func (si *threadedStoreIndexer) ListKeys() []string {
	si.lock.RLock()
	defer si.lock.RUnlock()
	return si.store.ListKeys()
}

func (si *threadedStoreIndexer) Get(obj interface{}) (item interface{}, exists bool, err error) {
	si.lock.RLock()
	defer si.lock.RUnlock()
	return si.store.Get(obj)
}

func (si *threadedStoreIndexer) GetByKey(key string) (item interface{}, exists bool, err error) {
	si.lock.RLock()
	defer si.lock.RUnlock()
	return si.store.GetByKey(key)
}

func (si *threadedStoreIndexer) Replace(objs []interface{}, resourceVersion string) error {
	si.lock.Lock()
	defer si.lock.Unlock()
	err := si.store.Replace(objs, resourceVersion)
	if err != nil {
		return err
	}
	return si.indexer.Replace(objs, resourceVersion)
}

func (si *threadedStoreIndexer) ByIndex(indexName, indexValue string) ([]interface{}, error) {
	si.lock.RLock()
	defer si.lock.RUnlock()
	return si.indexer.ByIndex(indexName, indexValue)
}

func newBtreeStore(degree int) btreeStore {
	return btreeStore{
		tree: btree.New(degree),
	}
}

type btreeStore struct {
	tree *btree.BTree
}

func (s *btreeStore) Add(obj interface{}) error {
	if obj == nil {
		return fmt.Errorf("obj cannot be nil")
	}
	storeElem, ok := obj.(*storeElement)
	if !ok {
		return fmt.Errorf("obj not a storeElement: %#v", obj)
	}
	s.addOrUpdateElem(storeElem)
	return nil
}

func (s *btreeStore) Update(obj interface{}) error {
	if obj == nil {
		return fmt.Errorf("obj cannot be nil")
	}
	storeElem, ok := obj.(*storeElement)
	if !ok {
		return fmt.Errorf("obj not a storeElement: %#v", obj)
	}
	s.addOrUpdateElem(storeElem)
	return nil
}

func (s *btreeStore) Delete(obj interface{}) error {
	if obj == nil {
		return fmt.Errorf("obj cannot be nil")
	}
	storeElem, ok := obj.(*storeElement)
	if !ok {
		return fmt.Errorf("obj not a storeElement: %#v", obj)
	}
	s.deleteElem(storeElem)
	return nil
}

func (s *btreeStore) deleteElem(storeElem *storeElement) interface{} {
	return s.tree.Delete(storeElem)
}

func (s *btreeStore) List() []interface{} {
	items := make([]interface{}, 0, s.tree.Len())
	s.tree.Ascend(func(i btree.Item) bool {
		items = append(items, i.(interface{}))
		return true
	})
	return items
}

func (s *btreeStore) ListKeys() []string {
	items := make([]string, 0, s.tree.Len())
	s.tree.Ascend(func(i btree.Item) bool {
		items = append(items, i.(*storeElement).Key)
		return true
	})
	return items
}

func (s *btreeStore) Get(obj interface{}) (item interface{}, exists bool, err error) {
	storeElem, ok := obj.(*storeElement)
	if !ok {
		return nil, false, fmt.Errorf("obj is not a storeElement")
	}
	item = s.tree.Get(storeElem)
	if item == nil {
		return nil, false, nil
	}
	return item, true, nil
}

func (s *btreeStore) GetByKey(key string) (item interface{}, exists bool, err error) {
	return s.getByKey(key)
}

func (s *btreeStore) Replace(objs []interface{}, _ string) error {
	s.tree.Clear(false)
	for _, obj := range objs {
		storeElem, ok := obj.(*storeElement)
		if !ok {
			return fmt.Errorf("obj not a storeElement: %#v", obj)
		}
		s.addOrUpdateElem(storeElem)
	}
	return nil
}

// addOrUpdateLocked assumes a lock is held and is used for Add
// and Update operations.
func (s *btreeStore) addOrUpdateElem(storeElem *storeElement) *storeElement {
	oldObj := s.tree.ReplaceOrInsert(storeElem)
	if oldObj == nil {
		return nil
	}
	return oldObj.(*storeElement)
}

func (s *btreeStore) getByKey(key string) (item interface{}, exists bool, err error) {
	keyElement := &storeElement{Key: key}
	item = s.tree.Get(keyElement)
	return item, item != nil, nil
}

func (s *btreeStore) ListPrefix(prefix, continueKey string, limit int) ([]interface{}, bool) {
	if limit < 0 {
		return nil, false
	}
	if continueKey == "" {
		continueKey = prefix
	}
	var result []interface{}
	var hasMore bool
	if limit == 0 {
		limit = math.MaxInt
	}
	s.tree.AscendGreaterOrEqual(&storeElement{Key: continueKey}, func(i btree.Item) bool {
		elementKey := i.(*storeElement).Key
		if !strings.HasPrefix(elementKey, prefix) {
			return false
		}
		// TODO: Might be worth to lookup one more item to provide more accurate HasMore.
		if len(result) >= limit {
			hasMore = true
			return false
		}
		result = append(result, i.(interface{}))
		return true
	})
	return result, hasMore
}

func (s *btreeStore) Count(prefix, continueKey string) (count int) {
	if continueKey == "" {
		continueKey = prefix
	}
	s.tree.AscendGreaterOrEqual(&storeElement{Key: continueKey}, func(i btree.Item) bool {
		elementKey := i.(*storeElement).Key
		if !strings.HasPrefix(elementKey, prefix) {
			return false
		}
		count++
		return true
	})
	return count
}

// newIndexer returns an indexer similar to storeIndex from client-go/tools/cache.
// TODO: Unify the indexer code with client-go/cache package.
// Major differences is type of values stored and their mutability:
// * Indexer in client-go stores object keys, that are not mutable.
// * Indexer in cacher stores whole objects, which is mutable.
// Indexer in client-go uses keys as it is used in conjunction with map[key]value
// allowing for fast value retrieval, while btree used in cacher would provide additional overhead.
// Difference in mutability of stored values is used for optimizing some operations in client-go Indexer.
func newIndexer(indexers cache.Indexers) indexer {
	return indexer{
		indices:  map[string]map[string]map[string]*storeElement{},
		indexers: indexers,
	}
}

type indexer struct {
	indices  map[string]map[string]map[string]*storeElement
	indexers cache.Indexers
}

func (i *indexer) ByIndex(indexName, indexValue string) ([]interface{}, error) {
	indexFunc := i.indexers[indexName]
	if indexFunc == nil {
		return nil, fmt.Errorf("index with name %s does not exist", indexName)
	}
	index := i.indices[indexName]
	set := index[indexValue]
	list := make([]interface{}, 0, len(set))
	for _, obj := range set {
		list = append(list, obj)
	}
	return list, nil
}

func (i *indexer) Replace(objs []interface{}, resourceVersion string) error {
	i.indices = map[string]map[string]map[string]*storeElement{}
	for _, obj := range objs {
		storeElem, ok := obj.(*storeElement)
		if !ok {
			return fmt.Errorf("obj not a storeElement: %#v", obj)
		}
		err := i.updateElem(storeElem.Key, nil, storeElem)
		if err != nil {
			return err
		}
	}
	return nil
}

func (i *indexer) updateElem(key string, oldObj, newObj *storeElement) (err error) {
	var oldIndexValues, indexValues []string
	for name, indexFunc := range i.indexers {
		if oldObj != nil {
			oldIndexValues, err = indexFunc(oldObj)
		} else {
			oldIndexValues = oldIndexValues[:0]
		}
		if err != nil {
			return fmt.Errorf("unable to calculate an index entry for key %q on index %q: %w", key, name, err)
		}
		if newObj != nil {
			indexValues, err = indexFunc(newObj)
		} else {
			indexValues = indexValues[:0]
		}
		if err != nil {
			return fmt.Errorf("unable to calculate an index entry for key %q on index %q: %w", key, name, err)
		}
		index := i.indices[name]
		if index == nil {
			index = map[string]map[string]*storeElement{}
			i.indices[name] = index
		}
		if len(indexValues) == 1 && len(oldIndexValues) == 1 && indexValues[0] == oldIndexValues[0] {
			// We optimize for the most common case where indexFunc returns a single value which has not been changed
			i.add(key, indexValues[0], newObj, index)
			continue
		}
		for _, value := range oldIndexValues {
			i.delete(key, value, index)
		}
		for _, value := range indexValues {
			i.add(key, value, newObj, index)
		}
	}
	return nil
}

func (i *indexer) add(key, value string, obj *storeElement, index map[string]map[string]*storeElement) {
	set := index[value]
	if set == nil {
		set = map[string]*storeElement{}
		index[value] = set
	}
	set[key] = obj
}

func (i *indexer) delete(key, value string, index map[string]map[string]*storeElement) {
	set := index[value]
	if set == nil {
		return
	}
	delete(set, key)
	// If we don't delete the set when zero, indices with high cardinality
	// short lived resources can cause memory to increase over time from
	// unused empty sets. See `kubernetes/kubernetes/issues/84959`.
	if len(set) == 0 {
		delete(index, value)
	}
}
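btreeStore.ListPrefix is the heart of the new orderedLister contract: walk keys in order starting at continueKey (or the prefix itself on the first page), stop at the first key that leaves the prefix, and report hasMore once the limit is hit. A stand-alone sketch of the same contract, using a sorted slice in place of the btree's AscendGreaterOrEqual:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// listPrefix emulates the pagination behavior: limit 0 means "no limit",
// and hasMore is reported when the limit cuts the result short.
func listPrefix(keys []string, prefix, continueKey string, limit int) ([]string, bool) {
	if continueKey == "" {
		continueKey = prefix
	}
	start := sort.SearchStrings(keys, continueKey) // first key >= continueKey
	var out []string
	hasMore := false
	for _, k := range keys[start:] {
		if !strings.HasPrefix(k, prefix) {
			break // keys are ordered, so the prefix range has ended
		}
		if limit > 0 && len(out) >= limit {
			hasMore = true
			break
		}
		out = append(out, k)
	}
	return out, hasMore
}

func main() {
	keys := []string{"/pods/ns1/a", "/pods/ns1/b", "/pods/ns1/c", "/pods/ns2/a"}
	page1, more := listPrefix(keys, "/pods/ns1/", "", 2)
	fmt.Println(page1, more) // [/pods/ns1/a /pods/ns1/b] true
	page2, more := listPrefix(keys, "/pods/ns1/", "/pods/ns1/c", 2)
	fmt.Println(page2, more) // [/pods/ns1/c] false
}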
vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go (generated, vendored): 115 lines changed
@@ -83,55 +83,6 @@ type watchCacheEvent struct {
 	RecordTime time.Time
 }
 
-// Computing a key of an object is generally non-trivial (it performs
-// e.g. validation underneath). Similarly computing object fields and
-// labels. To avoid computing them multiple times (to serve the event
-// in different List/Watch requests), in the underlying store we are
-// keeping structs (key, object, labels, fields).
-type storeElement struct {
-	Key    string
-	Object runtime.Object
-	Labels labels.Set
-	Fields fields.Set
-}
-
-func storeElementKey(obj interface{}) (string, error) {
-	elem, ok := obj.(*storeElement)
-	if !ok {
-		return "", fmt.Errorf("not a storeElement: %v", obj)
-	}
-	return elem.Key, nil
-}
-
-func storeElementObject(obj interface{}) (runtime.Object, error) {
-	elem, ok := obj.(*storeElement)
-	if !ok {
-		return nil, fmt.Errorf("not a storeElement: %v", obj)
-	}
-	return elem.Object, nil
-}
-
-func storeElementIndexFunc(objIndexFunc cache.IndexFunc) cache.IndexFunc {
-	return func(obj interface{}) (strings []string, e error) {
-		seo, err := storeElementObject(obj)
-		if err != nil {
-			return nil, err
-		}
-		return objIndexFunc(seo)
-	}
-}
-
-func storeElementIndexers(indexers *cache.Indexers) cache.Indexers {
-	if indexers == nil {
-		return cache.Indexers{}
-	}
-	ret := cache.Indexers{}
-	for indexName, indexFunc := range *indexers {
-		ret[indexName] = storeElementIndexFunc(indexFunc)
-	}
-	return ret
-}
-
 // watchCache implements a Store interface.
 // However, it depends on the elements implementing runtime.Object interface.
 //
@@ -173,7 +124,7 @@ type watchCache struct {
 	// history" i.e. from the moment just after the newest cached watched event.
 	// It is necessary to effectively allow clients to start watching at now.
 	// NOTE: We assume that <store> is thread-safe.
-	store cache.Indexer
+	store storeIndexer
 
 	// ResourceVersion up to which the watchCache is propagated.
 	resourceVersion uint64
@@ -223,7 +174,7 @@ func newWatchCache(
 		upperBoundCapacity:  defaultUpperBoundCapacity,
 		startIndex:          0,
 		endIndex:            0,
-		store:               cache.NewIndexer(storeElementKey, storeElementIndexers(indexers)),
+		store:               newStoreIndexer(indexers),
 		resourceVersion:     0,
 		listResourceVersion: 0,
 		eventHandler:        eventHandler,
@@ -501,29 +452,7 @@ func (s sortableStoreElements) Swap(i, j int) {
 
 // WaitUntilFreshAndList returns list of pointers to `storeElement` objects along
 // with their ResourceVersion and the name of the index, if any, that was used.
-func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion uint64, key string, matchValues []storage.MatchValue) ([]interface{}, uint64, string, error) {
-	items, rv, index, err := w.waitUntilFreshAndListItems(ctx, resourceVersion, key, matchValues)
-	if err != nil {
-		return nil, 0, "", err
-	}
-
-	var result []interface{}
-	for _, item := range items {
-		elem, ok := item.(*storeElement)
-		if !ok {
-			return nil, 0, "", fmt.Errorf("non *storeElement returned from storage: %v", item)
-		}
-		if !hasPathPrefix(elem.Key, key) {
-			continue
-		}
-		result = append(result, item)
-	}
-
-	sort.Sort(sortableStoreElements(result))
-	return result, rv, index, nil
-}
-
-func (w *watchCache) waitUntilFreshAndListItems(ctx context.Context, resourceVersion uint64, key string, matchValues []storage.MatchValue) (result []interface{}, rv uint64, index string, err error) {
+func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion uint64, key string, matchValues []storage.MatchValue) (result []interface{}, rv uint64, index string, err error) {
 	requestWatchProgressSupported := etcdfeature.DefaultFeatureSupportChecker.Supports(storage.RequestWatchProgress)
 	if utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) && requestWatchProgressSupported && w.notFresh(resourceVersion) {
 		w.waitingUntilFresh.Add()
@@ -537,21 +466,46 @@ func (w *watchCache) waitUntilFreshAndListItems(ctx context.Context, resourceVer
 	if err != nil {
 		return result, rv, index, err
 	}
 
-	result, rv, index, err = func() ([]interface{}, uint64, string, error) {
+	var prefixFilteredAndOrdered bool
+	result, rv, index, prefixFilteredAndOrdered, err = func() ([]interface{}, uint64, string, bool, error) {
 		// This isn't the place where we do "final filtering" - only some "prefiltering" is happening here. So the only
 		// requirement here is to NOT miss anything that should be returned. We can return as many non-matching items as we
 		// want - they will be filtered out later. The fact that we return less things is only further performance improvement.
 		// TODO: if multiple indexes match, return the one with the fewest items, so as to do as much filtering as possible.
 		for _, matchValue := range matchValues {
 			if result, err := w.store.ByIndex(matchValue.IndexName, matchValue.Value); err == nil {
-				return result, w.resourceVersion, matchValue.IndexName, nil
+				return result, w.resourceVersion, matchValue.IndexName, false, nil
 			}
 		}
-		return w.store.List(), w.resourceVersion, "", nil
+		if store, ok := w.store.(orderedLister); ok {
+			result, _ := store.ListPrefix(key, "", 0)
+			return result, w.resourceVersion, "", true, nil
+		}
+		return w.store.List(), w.resourceVersion, "", false, nil
 	}()
+	if !prefixFilteredAndOrdered {
+		result, err = filterPrefixAndOrder(key, result)
+		if err != nil {
+			return nil, 0, "", err
+		}
+	}
+	return result, w.resourceVersion, index, nil
+}
 
-	return result, rv, index, err
+func filterPrefixAndOrder(prefix string, items []interface{}) ([]interface{}, error) {
+	var result []interface{}
+	for _, item := range items {
+		elem, ok := item.(*storeElement)
+		if !ok {
+			return nil, fmt.Errorf("non *storeElement returned from storage: %v", item)
+		}
+		if !hasPathPrefix(elem.Key, prefix) {
+			continue
+		}
+		result = append(result, item)
+	}
+	sort.Sort(sortableStoreElements(result))
+	return result, nil
+}
 
 func (w *watchCache) notFresh(resourceVersion uint64) bool {
@@ -739,6 +693,7 @@ func (w *watchCache) isIndexValidLocked(index int) bool {
 // be called under the watchCache lock.
 func (w *watchCache) getAllEventsSinceLocked(resourceVersion uint64, key string, opts storage.ListOptions) (*watchCacheInterval, error) {
 	_, matchesSingle := opts.Predicate.MatchesSingle()
 	matchesSingle = matchesSingle && !opts.Recursive
 	if opts.SendInitialEvents != nil && *opts.SendInitialEvents {
 		return w.getIntervalFromStoreLocked(key, matchesSingle)
 	}
@@ -788,7 +743,7 @@ func (w *watchCache) getAllEventsSinceLocked(resourceVersion uint64, key string,
 	indexerFunc := func(i int) *watchCacheEvent {
 		return w.cache[i%w.capacity]
 	}
-	ci := newCacheInterval(w.startIndex+first, w.endIndex, indexerFunc, w.indexValidator, w.RWMutex.RLocker())
+	ci := newCacheInterval(w.startIndex+first, w.endIndex, indexerFunc, w.indexValidator, resourceVersion, w.RWMutex.RLocker())
 	return ci, nil
 }
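The WaitUntilFreshAndList rewrite above avoids redundant work: when the store is an orderedLister, the btree already returns prefix-filtered, ordered items, so the new prefixFilteredAndOrdered flag skips the fallback pass; otherwise filterPrefixAndOrder does the filtering and sorting. A sketch of that fallback with plain string keys standing in for *storeElement (strings.HasPrefix stands in for hasPathPrefix):

package main

import (
	"fmt"
	"sort"
	"strings"
)

// filterPrefixAndOrder drops items outside the key prefix, then sorts, just
// like the fallback path taken when no ordered lister is available.
func filterPrefixAndOrder(prefix string, items []string) []string {
	var result []string
	for _, key := range items {
		if !strings.HasPrefix(key, prefix) {
			continue // prefiltering may hand over non-matching items; drop them here
		}
		result = append(result, key)
	}
	sort.Strings(result)
	return result
}

func main() {
	items := []string{"/pods/ns1/b", "/pods/ns2/a", "/pods/ns1/a"}
	fmt.Println(filterPrefixAndOrder("/pods/ns1/", items)) // [/pods/ns1/a /pods/ns1/b]
}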
vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go (generated, vendored): 30 lines changed
@@ -25,7 +25,6 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/watch"
-	"k8s.io/client-go/tools/cache"
 )
 
 // watchCacheInterval serves as an abstraction over a source
@@ -92,26 +91,34 @@ type watchCacheInterval struct {
 	// lock on each invocation of Next().
 	buffer *watchCacheIntervalBuffer
 
+	// resourceVersion is the resourceVersion from which
+	// the interval was constructed.
+	resourceVersion uint64
+
 	// lock effectively protects access to the underlying source
 	// of events through - indexer and indexValidator.
 	//
 	// Given that indexer and indexValidator only read state, if
 	// possible, Locker obtained through RLocker() is provided.
 	lock sync.Locker
+
+	// initialEventsEndBookmark will be sent after sending all events in cacheInterval
+	initialEventsEndBookmark *watchCacheEvent
 }
 
 type attrFunc func(runtime.Object) (labels.Set, fields.Set, error)
 type indexerFunc func(int) *watchCacheEvent
 type indexValidator func(int) bool
 
-func newCacheInterval(startIndex, endIndex int, indexer indexerFunc, indexValidator indexValidator, locker sync.Locker) *watchCacheInterval {
+func newCacheInterval(startIndex, endIndex int, indexer indexerFunc, indexValidator indexValidator, resourceVersion uint64, locker sync.Locker) *watchCacheInterval {
 	return &watchCacheInterval{
-		startIndex:     startIndex,
-		endIndex:       endIndex,
-		indexer:        indexer,
-		indexValidator: indexValidator,
-		buffer:         &watchCacheIntervalBuffer{buffer: make([]*watchCacheEvent, bufferSize)},
-		lock:           locker,
+		startIndex:      startIndex,
+		endIndex:        endIndex,
+		indexer:         indexer,
+		indexValidator:  indexValidator,
+		buffer:          &watchCacheIntervalBuffer{buffer: make([]*watchCacheEvent, bufferSize)},
+		resourceVersion: resourceVersion,
+		lock:            locker,
 	}
 }
@@ -133,7 +140,7 @@ func (s sortableWatchCacheEvents) Swap(i, j int) {
 // returned by Next() need to be events from a List() done on the underlying store of
 // the watch cache.
 // The items returned in the interval will be sorted by Key.
-func newCacheIntervalFromStore(resourceVersion uint64, store cache.Indexer, getAttrsFunc attrFunc, key string, matchesSingle bool) (*watchCacheInterval, error) {
+func newCacheIntervalFromStore(resourceVersion uint64, store storeIndexer, getAttrsFunc attrFunc, key string, matchesSingle bool) (*watchCacheInterval, error) {
 	buffer := &watchCacheIntervalBuffer{}
 	var allItems []interface{}
 
@@ -173,8 +180,9 @@ func newCacheIntervalFromStore(resourceVersion uint64, store cache.Indexer, getA
 	ci := &watchCacheInterval{
 		startIndex: 0,
 		// Simulate that we already have all the events we're looking for.
-		endIndex: 0,
-		buffer:   buffer,
+		endIndex:        0,
+		buffer:          buffer,
+		resourceVersion: resourceVersion,
 	}
 
 	return ci, nil