Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 10:33:35 +00:00
rebase: update kubernetes to 1.26.1
update kubernetes and its dependencies to v1.26.1

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Committed by: mergify[bot]
Parent: e9e33fb851
Commit: 9c8de9471e
2 changes: vendor/k8s.io/client-go/tools/cache/expiration_cache.go (generated, vendored)
@@ -20,7 +20,6 @@ import (
    "sync"
    "time"

    "k8s.io/klog/v2"
    "k8s.io/utils/clock"
)

@@ -100,7 +99,6 @@ func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
        return nil, false
    }
    if c.expirationPolicy.IsExpired(timestampedItem) {
        klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.Obj)
        c.cacheStorage.Delete(key)
        return nil, false
    }
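The file above backs the exported TTL store constructor in the same package. As a hedged illustration (not part of this commit), a caller exercises the expiry path that getOrExpire implements roughly as follows; the string key function and the 30-second TTL are made up for the example:

package ttldemo

import (
    "fmt"
    "time"

    "k8s.io/client-go/tools/cache"
)

func demo() {
    // NewTTLStore is backed by ExpirationCache; reads go through getOrExpire,
    // so an entry stops being returned once its TTL has elapsed.
    store := cache.NewTTLStore(func(obj interface{}) (string, error) {
        return obj.(string), nil
    }, 30*time.Second)

    _ = store.Add("token-a")

    item, exists, _ := store.GetByKey("token-a")
    fmt.Println(item, exists) // "token-a true" until the TTL expires
}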
20 changes: vendor/k8s.io/client-go/tools/cache/listers.go (generated, vendored)
@@ -53,24 +53,8 @@ func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {

// ListAllByNamespace used to list items belongs to namespace from Indexer.
func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error {
    selectAll := selector.Empty()
    if namespace == metav1.NamespaceAll {
        for _, m := range indexer.List() {
            if selectAll {
                // Avoid computing labels of the objects to speed up common flows
                // of listing all objects.
                appendFn(m)
                continue
            }
            metadata, err := meta.Accessor(m)
            if err != nil {
                return err
            }
            if selector.Matches(labels.Set(metadata.GetLabels())) {
                appendFn(m)
            }
        }
        return nil
        return ListAll(indexer, selector, appendFn)
    }

    items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace})
@@ -89,6 +73,8 @@ func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selec
        }
        return nil
    }

    selectAll := selector.Empty()
    for _, m := range items {
        if selectAll {
            // Avoid computing labels of the objects to speed up common flows
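ListAllByNamespace is an exported helper, so the intent of the hunk above (the metav1.NamespaceAll case now delegates to ListAll instead of duplicating its loop) is easiest to see from a caller. A minimal sketch, assuming an already-populated indexer from some informer; the wrapper name listInNamespace is illustrative:

package listersdemo

import (
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/client-go/tools/cache"
)

// listInNamespace collects every cached object for the given namespace.
// Passing metav1.NamespaceAll ("") now takes the same code path as
// cache.ListAll rather than a separate hand-rolled loop.
func listInNamespace(indexer cache.Indexer, namespace string) ([]interface{}, error) {
    var out []interface{}
    err := cache.ListAllByNamespace(indexer, namespace, labels.Everything(), func(obj interface{}) {
        out = append(out, obj)
    })
    return out, err
}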
163 changes: vendor/k8s.io/client-go/tools/cache/shared_informer.go (generated, vendored)
@@ -135,7 +135,9 @@ type SharedInformer interface {
    // AddEventHandler adds an event handler to the shared informer using the shared informer's resync
    // period. Events to a single handler are delivered sequentially, but there is no coordination
    // between different handlers.
    AddEventHandler(handler ResourceEventHandler)
    // It returns a registration handle for the handler that can be used to remove
    // the handler again.
    AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error)
    // AddEventHandlerWithResyncPeriod adds an event handler to the
    // shared informer with the requested resync period; zero means
    // this handler does not care about resyncs. The resync operation
@@ -150,7 +152,13 @@ type SharedInformer interface {
    // between any two resyncs may be longer than the nominal period
    // because the implementation takes time to do work and there may
    // be competing load and scheduling noise.
    AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration)
    // It returns a registration handle for the handler that can be used to remove
    // the handler again and an error if the handler cannot be added.
    AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) (ResourceEventHandlerRegistration, error)
    // RemoveEventHandler removes a formerly added event handler given by
    // its registration handle.
    // This function is guaranteed to be idempotent, and thread-safe.
    RemoveEventHandler(handle ResourceEventHandlerRegistration) error
    // GetStore returns the informer's local cache as a Store.
    GetStore() Store
    // GetController is deprecated, it does nothing useful
@@ -195,8 +203,18 @@ type SharedInformer interface {
    // transform before mutating it at all and returning the copy to prevent
    // data races.
    SetTransform(handler TransformFunc) error

    // IsStopped reports whether the informer has already been stopped.
    // Adding event handlers to already stopped informers is not possible.
    // An informer already stopped will never be started again.
    IsStopped() bool
}

// Opaque interface representing the registration of ResourceEventHandler for
// a SharedInformer. Must be supplied back to the same SharedInformer's
// `RemoveEventHandler` to unregister the handlers.
type ResourceEventHandlerRegistration interface{}

// SharedIndexInformer provides add and get Indexers ability based on SharedInformer.
type SharedIndexInformer interface {
    SharedInformer
@@ -492,8 +510,8 @@ func (s *sharedIndexInformer) GetController() Controller {
    return &dummyController{informer: s}
}

func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) {
    s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod)
func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error) {
    return s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod)
}

func determineResyncPeriod(desired, check time.Duration) time.Duration {
@@ -513,13 +531,12 @@ func determineResyncPeriod(desired, check time.Duration) time.Duration {

const minimumResyncPeriod = 1 * time.Second

func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) {
func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) (ResourceEventHandlerRegistration, error) {
    s.startedLock.Lock()
    defer s.startedLock.Unlock()

    if s.stopped {
        klog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler)
        return
        return nil, fmt.Errorf("handler %v was not added to shared informer because it has stopped already", handler)
    }

    if resyncPeriod > 0 {
@@ -545,8 +562,7 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv
    listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize)

    if !s.started {
        s.processor.addListener(listener)
        return
        return s.processor.addListener(listener), nil
    }

    // in order to safely join, we have to
@@ -557,10 +573,11 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv
    s.blockDeltas.Lock()
    defer s.blockDeltas.Unlock()

    s.processor.addListener(listener)
    handle := s.processor.addListener(listener)
    for _, item := range s.indexer.List() {
        listener.add(addNotification{newObj: item})
    }
    return handle, nil
}

func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
@@ -610,6 +627,26 @@ func (s *sharedIndexInformer) OnDelete(old interface{}) {
    s.processor.distribute(deleteNotification{oldObj: old}, false)
}

// IsStopped reports whether the informer has already been stopped
func (s *sharedIndexInformer) IsStopped() bool {
    s.startedLock.Lock()
    defer s.startedLock.Unlock()
    return s.stopped
}

func (s *sharedIndexInformer) RemoveEventHandler(handle ResourceEventHandlerRegistration) error {
    s.startedLock.Lock()
    defer s.startedLock.Unlock()

    // in order to safely remove, we have to
    // 1. stop sending add/update/delete notifications
    // 2. remove and stop listener
    // 3. unblock
    s.blockDeltas.Lock()
    defer s.blockDeltas.Unlock()
    return s.processor.removeListener(handle)
}

// sharedProcessor has a collection of processorListener and can
// distribute a notification object to its listeners. There are two
// kinds of distribute operations. The sync distributions go to a
@@ -619,39 +656,85 @@ func (s *sharedIndexInformer) OnDelete(old interface{}) {
type sharedProcessor struct {
    listenersStarted bool
    listenersLock sync.RWMutex
    listeners []*processorListener
    syncingListeners []*processorListener
    clock clock.Clock
    wg wait.Group
    // Map from listeners to whether or not they are currently syncing
    listeners map[*processorListener]bool
    clock clock.Clock
    wg wait.Group
}

func (p *sharedProcessor) addListener(listener *processorListener) {
func (p *sharedProcessor) getListener(registration ResourceEventHandlerRegistration) *processorListener {
    p.listenersLock.RLock()
    defer p.listenersLock.RUnlock()

    if p.listeners == nil {
        return nil
    }

    if result, ok := registration.(*processorListener); ok {
        if _, exists := p.listeners[result]; exists {
            return result
        }
    }

    return nil
}

func (p *sharedProcessor) addListener(listener *processorListener) ResourceEventHandlerRegistration {
    p.listenersLock.Lock()
    defer p.listenersLock.Unlock()

    p.addListenerLocked(listener)
    if p.listeners == nil {
        p.listeners = make(map[*processorListener]bool)
    }

    p.listeners[listener] = true

    if p.listenersStarted {
        p.wg.Start(listener.run)
        p.wg.Start(listener.pop)
    }

    return listener
}

func (p *sharedProcessor) addListenerLocked(listener *processorListener) {
    p.listeners = append(p.listeners, listener)
    p.syncingListeners = append(p.syncingListeners, listener)
func (p *sharedProcessor) removeListener(handle ResourceEventHandlerRegistration) error {
    p.listenersLock.Lock()
    defer p.listenersLock.Unlock()

    listener, ok := handle.(*processorListener)
    if !ok {
        return fmt.Errorf("invalid key type %t", handle)
    } else if p.listeners == nil {
        // No listeners are registered, do nothing
        return nil
    } else if _, exists := p.listeners[listener]; !exists {
        // Listener is not registered, just do nothing
        return nil
    }

    delete(p.listeners, listener)

    if p.listenersStarted {
        close(listener.addCh)
    }

    return nil
}

func (p *sharedProcessor) distribute(obj interface{}, sync bool) {
    p.listenersLock.RLock()
    defer p.listenersLock.RUnlock()

    if sync {
        for _, listener := range p.syncingListeners {
    for listener, isSyncing := range p.listeners {
        switch {
        case !sync:
            // non-sync messages are delivered to every listener
            listener.add(obj)
        }
    } else {
        for _, listener := range p.listeners {
        case isSyncing:
            // sync messages are delivered to every syncing listener
            listener.add(obj)
        default:
            // skipping a sync obj for a non-syncing listener
        }
    }
}
@@ -660,18 +743,27 @@ func (p *sharedProcessor) run(stopCh <-chan struct{}) {
    func() {
        p.listenersLock.RLock()
        defer p.listenersLock.RUnlock()
        for _, listener := range p.listeners {
        for listener := range p.listeners {
            p.wg.Start(listener.run)
            p.wg.Start(listener.pop)
        }
        p.listenersStarted = true
    }()
    <-stopCh
    p.listenersLock.RLock()
    defer p.listenersLock.RUnlock()
    for _, listener := range p.listeners {

    p.listenersLock.Lock()
    defer p.listenersLock.Unlock()
    for listener := range p.listeners {
        close(listener.addCh) // Tell .pop() to stop. .pop() will tell .run() to stop
    }

    // Wipe out list of listeners since they are now closed
    // (processorListener cannot be re-used)
    p.listeners = nil

    // Reset to false since no listeners are running
    p.listenersStarted = false

    p.wg.Wait() // Wait for all .pop() and .run() to stop
}

@@ -681,16 +773,16 @@ func (p *sharedProcessor) shouldResync() bool {
    p.listenersLock.Lock()
    defer p.listenersLock.Unlock()

    p.syncingListeners = []*processorListener{}

    resyncNeeded := false
    now := p.clock.Now()
    for _, listener := range p.listeners {
    for listener := range p.listeners {
        // need to loop through all the listeners to see if they need to resync so we can prepare any
        // listeners that are going to be resyncing.
        if listener.shouldResync(now) {
        shouldResync := listener.shouldResync(now)
        p.listeners[listener] = shouldResync

        if shouldResync {
            resyncNeeded = true
            p.syncingListeners = append(p.syncingListeners, listener)
            listener.determineNextResync(now)
        }
    }
@@ -701,8 +793,9 @@ func (p *sharedProcessor) resyncCheckPeriodChanged(resyncCheckPeriod time.Durati
    p.listenersLock.RLock()
    defer p.listenersLock.RUnlock()

    for _, listener := range p.listeners {
        resyncPeriod := determineResyncPeriod(listener.requestedResyncPeriod, resyncCheckPeriod)
    for listener := range p.listeners {
        resyncPeriod := determineResyncPeriod(
            listener.requestedResyncPeriod, resyncCheckPeriod)
        listener.setResyncPeriod(resyncPeriod)
    }
}
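The signature changes above are the user-visible part of this file: AddEventHandler and AddEventHandlerWithResyncPeriod now return a ResourceEventHandlerRegistration and an error, and RemoveEventHandler/IsStopped are new. A minimal consumer-side sketch, assuming a configured clientset; it is illustrative only and not taken from ceph-csi:

package informerdemo

import (
    "fmt"
    "time"

    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"
)

func watchPods(clientset kubernetes.Interface, stopCh <-chan struct{}) error {
    factory := informers.NewSharedInformerFactory(clientset, 10*time.Minute)
    podInformer := factory.Core().V1().Pods().Informer()

    // With client-go v0.26 the call returns a registration handle and an error
    // (older releases returned nothing).
    reg, err := podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: func(obj interface{}) { fmt.Println("pod added") },
    })
    if err != nil {
        return err
    }

    factory.Start(stopCh)
    factory.WaitForCacheSync(stopCh)

    // The handle is what RemoveEventHandler expects; removal is idempotent.
    return podInformer.RemoveEventHandler(reg)
}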
303 changes: vendor/k8s.io/client-go/tools/cache/thread_safe_store.go (generated, vendored)
@@ -59,15 +59,159 @@ type ThreadSafeStore interface {
    Resync() error
}

// storeIndex implements the indexing functionality for Store interface
type storeIndex struct {
    // indexers maps a name to an IndexFunc
    indexers Indexers
    // indices maps a name to an Index
    indices Indices
}

func (i *storeIndex) reset() {
    i.indices = Indices{}
}

func (i *storeIndex) getKeysFromIndex(indexName string, obj interface{}) (sets.String, error) {
    indexFunc := i.indexers[indexName]
    if indexFunc == nil {
        return nil, fmt.Errorf("Index with name %s does not exist", indexName)
    }

    indexedValues, err := indexFunc(obj)
    if err != nil {
        return nil, err
    }
    index := i.indices[indexName]

    var storeKeySet sets.String
    if len(indexedValues) == 1 {
        // In majority of cases, there is exactly one value matching.
        // Optimize the most common path - deduping is not needed here.
        storeKeySet = index[indexedValues[0]]
    } else {
        // Need to de-dupe the return list.
        // Since multiple keys are allowed, this can happen.
        storeKeySet = sets.String{}
        for _, indexedValue := range indexedValues {
            for key := range index[indexedValue] {
                storeKeySet.Insert(key)
            }
        }
    }

    return storeKeySet, nil
}

func (i *storeIndex) getKeysByIndex(indexName, indexedValue string) (sets.String, error) {
    indexFunc := i.indexers[indexName]
    if indexFunc == nil {
        return nil, fmt.Errorf("Index with name %s does not exist", indexName)
    }

    index := i.indices[indexName]
    return index[indexedValue], nil
}

func (i *storeIndex) getIndexValues(indexName string) []string {
    index := i.indices[indexName]
    names := make([]string, 0, len(index))
    for key := range index {
        names = append(names, key)
    }
    return names
}

func (i *storeIndex) addIndexers(newIndexers Indexers) error {
    oldKeys := sets.StringKeySet(i.indexers)
    newKeys := sets.StringKeySet(newIndexers)

    if oldKeys.HasAny(newKeys.List()...) {
        return fmt.Errorf("indexer conflict: %v", oldKeys.Intersection(newKeys))
    }

    for k, v := range newIndexers {
        i.indexers[k] = v
    }
    return nil
}

// updateIndices modifies the objects location in the managed indexes:
// - for create you must provide only the newObj
// - for update you must provide both the oldObj and the newObj
// - for delete you must provide only the oldObj
// updateIndices must be called from a function that already has a lock on the cache
func (i *storeIndex) updateIndices(oldObj interface{}, newObj interface{}, key string) {
    var oldIndexValues, indexValues []string
    var err error
    for name, indexFunc := range i.indexers {
        if oldObj != nil {
            oldIndexValues, err = indexFunc(oldObj)
        } else {
            oldIndexValues = oldIndexValues[:0]
        }
        if err != nil {
            panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
        }

        if newObj != nil {
            indexValues, err = indexFunc(newObj)
        } else {
            indexValues = indexValues[:0]
        }
        if err != nil {
            panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
        }

        index := i.indices[name]
        if index == nil {
            index = Index{}
            i.indices[name] = index
        }

        if len(indexValues) == 1 && len(oldIndexValues) == 1 && indexValues[0] == oldIndexValues[0] {
            // We optimize for the most common case where indexFunc returns a single value which has not been changed
            continue
        }

        for _, value := range oldIndexValues {
            i.deleteKeyFromIndex(key, value, index)
        }
        for _, value := range indexValues {
            i.addKeyToIndex(key, value, index)
        }
    }
}

func (i *storeIndex) addKeyToIndex(key, indexValue string, index Index) {
    set := index[indexValue]
    if set == nil {
        set = sets.String{}
        index[indexValue] = set
    }
    set.Insert(key)
}

func (i *storeIndex) deleteKeyFromIndex(key, indexValue string, index Index) {
    set := index[indexValue]
    if set == nil {
        return
    }
    set.Delete(key)
    // If we don't delete the set when zero, indices with high cardinality
    // short lived resources can cause memory to increase over time from
    // unused empty sets. See `kubernetes/kubernetes/issues/84959`.
    if len(set) == 0 {
        delete(index, indexValue)
    }
}

// threadSafeMap implements ThreadSafeStore
type threadSafeMap struct {
    lock sync.RWMutex
    items map[string]interface{}

    // indexers maps a name to an IndexFunc
    indexers Indexers
    // indices maps a name to an Index
    indices Indices
    // index implements the indexing functionality
    index *storeIndex
}

func (c *threadSafeMap) Add(key string, obj interface{}) {
@@ -79,14 +223,14 @@ func (c *threadSafeMap) Update(key string, obj interface{}) {
    defer c.lock.Unlock()
    oldObject := c.items[key]
    c.items[key] = obj
    c.updateIndices(oldObject, obj, key)
    c.index.updateIndices(oldObject, obj, key)
}

func (c *threadSafeMap) Delete(key string) {
    c.lock.Lock()
    defer c.lock.Unlock()
    if obj, exists := c.items[key]; exists {
        c.updateIndices(obj, nil, key)
        c.index.updateIndices(obj, nil, key)
        delete(c.items, key)
    }
}
@@ -126,9 +270,9 @@ func (c *threadSafeMap) Replace(items map[string]interface{}, resourceVersion st
    c.items = items

    // rebuild any index
    c.indices = Indices{}
    c.index.reset()
    for key, item := range c.items {
        c.updateIndices(nil, item, key)
        c.index.updateIndices(nil, item, key)
    }
}

@@ -138,32 +282,10 @@ func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{},
    c.lock.RLock()
    defer c.lock.RUnlock()

    indexFunc := c.indexers[indexName]
    if indexFunc == nil {
        return nil, fmt.Errorf("Index with name %s does not exist", indexName)
    }

    indexedValues, err := indexFunc(obj)
    storeKeySet, err := c.index.getKeysFromIndex(indexName, obj)
    if err != nil {
        return nil, err
    }
    index := c.indices[indexName]

    var storeKeySet sets.String
    if len(indexedValues) == 1 {
        // In majority of cases, there is exactly one value matching.
        // Optimize the most common path - deduping is not needed here.
        storeKeySet = index[indexedValues[0]]
    } else {
        // Need to de-dupe the return list.
        // Since multiple keys are allowed, this can happen.
        storeKeySet = sets.String{}
        for _, indexedValue := range indexedValues {
            for key := range index[indexedValue] {
                storeKeySet.Insert(key)
            }
        }
    }

    list := make([]interface{}, 0, storeKeySet.Len())
    for storeKey := range storeKeySet {
@@ -177,14 +299,10 @@ func (c *threadSafeMap) ByIndex(indexName, indexedValue string) ([]interface{},
    c.lock.RLock()
    defer c.lock.RUnlock()

    indexFunc := c.indexers[indexName]
    if indexFunc == nil {
        return nil, fmt.Errorf("Index with name %s does not exist", indexName)
    set, err := c.index.getKeysByIndex(indexName, indexedValue)
    if err != nil {
        return nil, err
    }

    index := c.indices[indexName]

    set := index[indexedValue]
    list := make([]interface{}, 0, set.Len())
    for key := range set {
        list = append(list, c.items[key])
@@ -199,14 +317,10 @@ func (c *threadSafeMap) IndexKeys(indexName, indexedValue string) ([]string, err
    c.lock.RLock()
    defer c.lock.RUnlock()

    indexFunc := c.indexers[indexName]
    if indexFunc == nil {
        return nil, fmt.Errorf("Index with name %s does not exist", indexName)
    set, err := c.index.getKeysByIndex(indexName, indexedValue)
    if err != nil {
        return nil, err
    }

    index := c.indices[indexName]

    set := index[indexedValue]
    return set.List(), nil
}

@@ -214,16 +328,11 @@ func (c *threadSafeMap) ListIndexFuncValues(indexName string) []string {
    c.lock.RLock()
    defer c.lock.RUnlock()

    index := c.indices[indexName]
    names := make([]string, 0, len(index))
    for key := range index {
        names = append(names, key)
    }
    return names
    return c.index.getIndexValues(indexName)
}

func (c *threadSafeMap) GetIndexers() Indexers {
    return c.indexers
    return c.index.indexers
}

func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error {
@@ -234,87 +343,7 @@ func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error {
        return fmt.Errorf("cannot add indexers to running index")
    }

    oldKeys := sets.StringKeySet(c.indexers)
    newKeys := sets.StringKeySet(newIndexers)

    if oldKeys.HasAny(newKeys.List()...) {
        return fmt.Errorf("indexer conflict: %v", oldKeys.Intersection(newKeys))
    }

    for k, v := range newIndexers {
        c.indexers[k] = v
    }
    return nil
}

// updateIndices modifies the objects location in the managed indexes:
// - for create you must provide only the newObj
// - for update you must provide both the oldObj and the newObj
// - for delete you must provide only the oldObj
// updateIndices must be called from a function that already has a lock on the cache
func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) {
    var oldIndexValues, indexValues []string
    var err error
    for name, indexFunc := range c.indexers {
        if oldObj != nil {
            oldIndexValues, err = indexFunc(oldObj)
        } else {
            oldIndexValues = oldIndexValues[:0]
        }
        if err != nil {
            panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
        }

        if newObj != nil {
            indexValues, err = indexFunc(newObj)
        } else {
            indexValues = indexValues[:0]
        }
        if err != nil {
            panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
        }

        index := c.indices[name]
        if index == nil {
            index = Index{}
            c.indices[name] = index
        }

        if len(indexValues) == 1 && len(oldIndexValues) == 1 && indexValues[0] == oldIndexValues[0] {
            // We optimize for the most common case where indexFunc returns a single value which has not been changed
            continue
        }

        for _, value := range oldIndexValues {
            c.deleteKeyFromIndex(key, value, index)
        }
        for _, value := range indexValues {
            c.addKeyToIndex(key, value, index)
        }
    }
}

func (c *threadSafeMap) addKeyToIndex(key, indexValue string, index Index) {
    set := index[indexValue]
    if set == nil {
        set = sets.String{}
        index[indexValue] = set
    }
    set.Insert(key)
}

func (c *threadSafeMap) deleteKeyFromIndex(key, indexValue string, index Index) {
    set := index[indexValue]
    if set == nil {
        return
    }
    set.Delete(key)
    // If we don't delete the set when zero, indices with high cardinality
    // short lived resources can cause memory to increase over time from
    // unused empty sets. See `kubernetes/kubernetes/issues/84959`.
    if len(set) == 0 {
        delete(index, indexValue)
    }
    return c.index.addIndexers(newIndexers)
}

func (c *threadSafeMap) Resync() error {
@@ -325,8 +354,10 @@ func (c *threadSafeMap) Resync() error {
// NewThreadSafeStore creates a new instance of ThreadSafeStore.
func NewThreadSafeStore(indexers Indexers, indices Indices) ThreadSafeStore {
    return &threadSafeMap{
        items: map[string]interface{}{},
        indexers: indexers,
        indices: indices,
        items: map[string]interface{}{},
        index: &storeIndex{
            indexers: indexers,
            indices: indices,
        },
    }
}
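This file is internal restructuring: the index bookkeeping moves from threadSafeMap into the new storeIndex type, while the exported surface stays the same. A minimal sketch of that surface, with a made-up "byColor" index, showing what the refactor has to keep working:

package storedemo

import (
    "fmt"

    "k8s.io/client-go/tools/cache"
)

type widget struct {
    Name  string
    Color string
}

func demo() error {
    // NewThreadSafeStore still takes Indexers and Indices; only its internals changed.
    store := cache.NewThreadSafeStore(cache.Indexers{
        "byColor": func(obj interface{}) ([]string, error) {
            return []string{obj.(*widget).Color}, nil
        },
    }, cache.Indices{})

    store.Add("w1", &widget{Name: "w1", Color: "red"})
    store.Add("w2", &widget{Name: "w2", Color: "blue"})

    // ByIndex and IndexKeys now route through storeIndex but behave as before.
    reds, err := store.ByIndex("byColor", "red")
    if err != nil {
        return err
    }
    fmt.Printf("%d red widgets\n", len(reds))
    return nil
}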