mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 10:33:35 +00:00
vendor update for E2E framework
Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>

vendor/k8s.io/client-go/tools/cache/controller.go (generated, vendored, new file)
@@ -0,0 +1,380 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
    "sync"
    "time"

    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/util/clock"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/wait"
)

// Config contains all the settings for a Controller.
type Config struct {
    // The queue for your objects - has to be a DeltaFIFO due to
    // assumptions in the implementation. Your Process() function
    // should accept the output of this Queue's Pop() method.
    Queue

    // Something that can list and watch your objects.
    ListerWatcher

    // Something that can process your objects.
    Process ProcessFunc

    // The type of your objects.
    ObjectType runtime.Object

    // Reprocess everything at least this often.
    // Note that if it takes longer for you to clear the queue than this
    // period, you will end up processing items in the order determined
    // by FIFO.Replace(). Currently, this is random. If this is a
    // problem, we can change that replacement policy to append new
    // things to the end of the queue instead of replacing the entire
    // queue.
    FullResyncPeriod time.Duration

    // ShouldResync, if specified, is invoked when the controller's reflector determines the next
    // periodic sync should occur. If this returns true, it means the reflector should proceed with
    // the resync.
    ShouldResync ShouldResyncFunc

    // If true, when Process() returns an error, re-enqueue the object.
    // TODO: add interface to let you inject a delay/backoff or drop
    // the object completely if desired. Pass the object in
    // question to this interface as a parameter.
    RetryOnError bool
}

// ShouldResyncFunc is a type of function that indicates if a reflector should perform a
// resync or not. It can be used by a shared informer to support multiple event handlers with custom
// resync periods.
type ShouldResyncFunc func() bool

// ProcessFunc processes a single object.
type ProcessFunc func(obj interface{}) error

// controller is a generic controller framework.
type controller struct {
    config         Config
    reflector      *Reflector
    reflectorMutex sync.RWMutex
    clock          clock.Clock
}

// Controller is the interface implemented by the controllers returned by New.
type Controller interface {
    Run(stopCh <-chan struct{})
    HasSynced() bool
    LastSyncResourceVersion() string
}

// New makes a new Controller from the given Config.
func New(c *Config) Controller {
    ctlr := &controller{
        config: *c,
        clock:  &clock.RealClock{},
    }
    return ctlr
}

// Run begins processing items, and will continue until a value is sent down stopCh.
// It's an error to call Run more than once.
// Run blocks; call via go.
func (c *controller) Run(stopCh <-chan struct{}) {
    defer utilruntime.HandleCrash()
    go func() {
        <-stopCh
        c.config.Queue.Close()
    }()
    r := NewReflector(
        c.config.ListerWatcher,
        c.config.ObjectType,
        c.config.Queue,
        c.config.FullResyncPeriod,
    )
    r.ShouldResync = c.config.ShouldResync
    r.clock = c.clock

    c.reflectorMutex.Lock()
    c.reflector = r
    c.reflectorMutex.Unlock()

    var wg wait.Group
    defer wg.Wait()

    wg.StartWithChannel(stopCh, r.Run)

    wait.Until(c.processLoop, time.Second, stopCh)
}
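
// Editorial example (a sketch, not part of the vendored file): driving a
// Controller by hand. lw is a hypothetical ListerWatcher, and the v1 import
// ("k8s.io/api/core/v1") is assumed; in practice most callers use the
// NewInformer helper further down instead of building a Config themselves.
//
//     fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil)
//     cfg := &Config{
//         Queue:         fifo,
//         ListerWatcher: lw,
//         ObjectType:    &v1.Pod{},
//         Process: func(obj interface{}) error {
//             for _, d := range obj.(Deltas) {
//                 _ = d // handle each delta, oldest first
//             }
//             return nil
//         },
//     }
//     stopCh := make(chan struct{})
//     go New(cfg).Run(stopCh) // Run blocks, so start it on its own goroutine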

// HasSynced returns true once this controller has completed an initial resource listing.
func (c *controller) HasSynced() bool {
    return c.config.Queue.HasSynced()
}

// LastSyncResourceVersion reports the resource version observed by the last relist.
func (c *controller) LastSyncResourceVersion() string {
    if c.reflector == nil {
        return ""
    }
    return c.reflector.LastSyncResourceVersion()
}

// processLoop drains the work queue.
// TODO: Consider doing the processing in parallel. This will require a little thought
// to make sure that we don't end up processing the same object multiple times
// concurrently.
//
// TODO: Plumb through the stopCh here (and down to the queue) so that this can
// actually exit when the controller is stopped. Or just give up on this stuff
// ever being stoppable. Converting this whole package to use Context would
// also be helpful.
func (c *controller) processLoop() {
    for {
        obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process))
        if err != nil {
            if err == FIFOClosedError {
                return
            }
            if c.config.RetryOnError {
                // This is the safe way to re-enqueue.
                c.config.Queue.AddIfNotPresent(obj)
            }
        }
    }
}

// ResourceEventHandler can handle notifications for events that happen to a
// resource. The events are informational only, so you can't return an
// error.
//  * OnAdd is called when an object is added.
//  * OnUpdate is called when an object is modified. Note that oldObj is the
//    last known state of the object; it is possible that several changes
//    were combined together, so you can't use this to see every single
//    change. OnUpdate is also called when a re-list happens, and it will
//    get called even if nothing changed. This is useful for periodically
//    evaluating or syncing something.
//  * OnDelete will get the final state of the item if it is known, otherwise
//    it will get an object of type DeletedFinalStateUnknown. This can
//    happen if the watch is closed and misses the delete event and we don't
//    notice the deletion until the subsequent re-list.
type ResourceEventHandler interface {
    OnAdd(obj interface{})
    OnUpdate(oldObj, newObj interface{})
    OnDelete(obj interface{})
}

// ResourceEventHandlerFuncs is an adaptor to let you easily specify as many or
// as few of the notification functions as you want while still implementing
// ResourceEventHandler.
type ResourceEventHandlerFuncs struct {
    AddFunc    func(obj interface{})
    UpdateFunc func(oldObj, newObj interface{})
    DeleteFunc func(obj interface{})
}

// OnAdd calls AddFunc if it's not nil.
func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) {
    if r.AddFunc != nil {
        r.AddFunc(obj)
    }
}

// OnUpdate calls UpdateFunc if it's not nil.
func (r ResourceEventHandlerFuncs) OnUpdate(oldObj, newObj interface{}) {
    if r.UpdateFunc != nil {
        r.UpdateFunc(oldObj, newObj)
    }
}

// OnDelete calls DeleteFunc if it's not nil.
func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {
    if r.DeleteFunc != nil {
        r.DeleteFunc(obj)
    }
}
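
// Editorial example (a sketch, not part of the vendored file): only the
// callbacks you care about need to be set; nil callbacks are simply skipped.
// Assumes "fmt" is imported where this runs.
//
//     handler := ResourceEventHandlerFuncs{
//         AddFunc: func(obj interface{}) {
//             if key, err := MetaNamespaceKeyFunc(obj); err == nil {
//                 fmt.Printf("added: %s\n", key)
//             }
//         },
//         // UpdateFunc and DeleteFunc are nil, so those events are ignored.
//     }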

// FilteringResourceEventHandler applies the provided filter to all events coming
// in, ensuring the appropriate nested handler method is invoked. An object
// that starts passing the filter after an update is considered an add, and an
// object that stops passing the filter after an update is considered a delete.
type FilteringResourceEventHandler struct {
    FilterFunc func(obj interface{}) bool
    Handler    ResourceEventHandler
}

// OnAdd calls the nested handler only if the filter succeeds.
func (r FilteringResourceEventHandler) OnAdd(obj interface{}) {
    if !r.FilterFunc(obj) {
        return
    }
    r.Handler.OnAdd(obj)
}

// OnUpdate ensures the proper handler is called depending on whether the filter matches.
func (r FilteringResourceEventHandler) OnUpdate(oldObj, newObj interface{}) {
    newer := r.FilterFunc(newObj)
    older := r.FilterFunc(oldObj)
    switch {
    case newer && older:
        r.Handler.OnUpdate(oldObj, newObj)
    case newer && !older:
        r.Handler.OnAdd(newObj)
    case !newer && older:
        r.Handler.OnDelete(oldObj)
    default:
        // do nothing
    }
}

// OnDelete calls the nested handler only if the filter succeeds.
func (r FilteringResourceEventHandler) OnDelete(obj interface{}) {
    if !r.FilterFunc(obj) {
        return
    }
    r.Handler.OnDelete(obj)
}
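
// Editorial example (a sketch, not part of the vendored file): wrapping a
// handler so it only sees objects carrying a hypothetical label. Assumes
// "k8s.io/api/core/v1" is imported; note the type assertion deliberately
// fails (filter returns false) for DeletedFinalStateUnknown tombstones.
//
//     filtered := FilteringResourceEventHandler{
//         FilterFunc: func(obj interface{}) bool {
//             pod, ok := obj.(*v1.Pod)
//             return ok && pod.Labels["watched"] == "true"
//         },
//         Handler: ResourceEventHandlerFuncs{
//             AddFunc:    func(obj interface{}) { /* object started matching */ },
//             DeleteFunc: func(obj interface{}) { /* object stopped matching or was deleted */ },
//         },
//     }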

// DeletionHandlingMetaNamespaceKeyFunc checks for
// DeletedFinalStateUnknown objects before calling
// MetaNamespaceKeyFunc.
func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
    if d, ok := obj.(DeletedFinalStateUnknown); ok {
        return d.Key, nil
    }
    return MetaNamespaceKeyFunc(obj)
}

// NewInformer returns a Store and a controller for populating the store
// while also providing event notifications. You should only use the returned
// Store for Get/List operations; Add/Modify/Deletes will cause the event
// notifications to be faulty.
//
// Parameters:
//  * lw is list and watch functions for the source of the resource you want to
//    be informed of.
//  * objType is an object of the type that you expect to receive.
//  * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
//    calls, even if nothing changed). Otherwise, re-list will be delayed as
//    long as possible (until the upstream source closes the watch or times out,
//    or you stop the controller).
//  * h is the object you want notifications sent to.
//
func NewInformer(
    lw ListerWatcher,
    objType runtime.Object,
    resyncPeriod time.Duration,
    h ResourceEventHandler,
) (Store, Controller) {
    // This will hold the client state, as we know it.
    clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)

    return clientState, newInformer(lw, objType, resyncPeriod, h, clientState)
}
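
// Editorial example (a sketch, not part of the vendored file): wiring
// NewInformer to watch Pods. clientset is a hypothetical
// kubernetes.Interface; assumed imports: "k8s.io/api/core/v1",
// "k8s.io/apimachinery/pkg/fields".
//
//     lw := NewListWatchFromClient(
//         clientset.CoreV1().RESTClient(), "pods", v1.NamespaceAll, fields.Everything())
//     store, ctl := NewInformer(lw, &v1.Pod{}, 30*time.Second,
//         ResourceEventHandlerFuncs{
//             AddFunc:    func(obj interface{}) { /* react to new Pods */ },
//             DeleteFunc: func(obj interface{}) { /* react to deleted Pods */ },
//         })
//     stopCh := make(chan struct{})
//     go ctl.Run(stopCh)
//     // Use store only for Get/List, e.g. store.ListKeys().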

// NewIndexerInformer returns an Indexer and a controller for populating the index
// while also providing event notifications. You should only use the returned
// Indexer for Get/List operations; Add/Modify/Deletes will cause the event
// notifications to be faulty.
//
// Parameters:
//  * lw is list and watch functions for the source of the resource you want to
//    be informed of.
//  * objType is an object of the type that you expect to receive.
//  * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
//    calls, even if nothing changed). Otherwise, re-list will be delayed as
//    long as possible (until the upstream source closes the watch or times out,
//    or you stop the controller).
//  * h is the object you want notifications sent to.
//  * indexers is the indexer for the received object type.
//
func NewIndexerInformer(
    lw ListerWatcher,
    objType runtime.Object,
    resyncPeriod time.Duration,
    h ResourceEventHandler,
    indexers Indexers,
) (Indexer, Controller) {
    // This will hold the client state, as we know it.
    clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)

    return clientState, newInformer(lw, objType, resyncPeriod, h, clientState)
}
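
// Editorial example (a sketch, not part of the vendored file): the same
// wiring with a namespace index, so objects can be listed per namespace
// without a full scan. lw is a hypothetical ListerWatcher as in the sketch
// above; v1 ("k8s.io/api/core/v1") is an assumed import.
//
//     indexer, ctl := NewIndexerInformer(lw, &v1.Pod{}, 0,
//         ResourceEventHandlerFuncs{},
//         Indexers{NamespaceIndex: MetaNamespaceIndexFunc})
//     stopCh := make(chan struct{})
//     go ctl.Run(stopCh)
//     // Once ctl.HasSynced() returns true:
//     podsInDefault, _ := indexer.ByIndex(NamespaceIndex, "default")
//     _ = podsInDefault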

// newInformer returns a controller for populating the store while also
// providing event notifications.
//
// Parameters:
//  * lw is list and watch functions for the source of the resource you want to
//    be informed of.
//  * objType is an object of the type that you expect to receive.
//  * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
//    calls, even if nothing changed). Otherwise, re-list will be delayed as
//    long as possible (until the upstream source closes the watch or times out,
//    or you stop the controller).
//  * h is the object you want notifications sent to.
//  * clientState is the store you want to populate.
//
func newInformer(
    lw ListerWatcher,
    objType runtime.Object,
    resyncPeriod time.Duration,
    h ResourceEventHandler,
    clientState Store,
) Controller {
    // This will hold incoming changes. Note how we pass clientState in as a
    // KeyLister, that way resync operations will result in the correct set
    // of update/delete deltas.
    fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, clientState)

    cfg := &Config{
        Queue:            fifo,
        ListerWatcher:    lw,
        ObjectType:       objType,
        FullResyncPeriod: resyncPeriod,
        RetryOnError:     false,

        Process: func(obj interface{}) error {
            // from oldest to newest
            for _, d := range obj.(Deltas) {
                switch d.Type {
                case Sync, Added, Updated:
                    if old, exists, err := clientState.Get(d.Object); err == nil && exists {
                        if err := clientState.Update(d.Object); err != nil {
                            return err
                        }
                        h.OnUpdate(old, d.Object)
                    } else {
                        if err := clientState.Add(d.Object); err != nil {
                            return err
                        }
                        h.OnAdd(d.Object)
                    }
                case Deleted:
                    if err := clientState.Delete(d.Object); err != nil {
                        return err
                    }
                    h.OnDelete(d.Object)
                }
            }
            return nil
        },
    }
    return New(cfg)
}

vendor/k8s.io/client-go/tools/cache/delta_fifo.go (generated, vendored, new file)
@@ -0,0 +1,655 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
    "errors"
    "fmt"
    "sync"

    "k8s.io/apimachinery/pkg/util/sets"

    "k8s.io/klog"
)

// NewDeltaFIFO returns a Store which can be used to process changes to items.
//
// keyFunc is used to figure out what key an object should have. (It's
// exposed in the returned DeltaFIFO's KeyOf() method, with bonus features.)
//
// 'keyLister' is expected to return a list of keys that the consumer of
// this queue "knows about". It is used to decide which items are missing
// when Replace() is called; 'Deleted' deltas are produced for these items.
// It may be nil if you don't need to detect all deletions.
// TODO: consider merging keyLister with this object, tracking a list of
// "known" keys when Pop() is called. Have to think about how that
// affects error retrying.
//
// NOTE: It is possible to misuse this and cause a race when using an
// external known object source.
// Whether there is a potential race depends on how the consumer
// modifies knownObjects. In Pop(), the process function is called under
// lock, so it is safe to update data structures in it that need to be
// in sync with the queue (e.g. knownObjects).
//
// Example:
// In case of sharedIndexInformer being a consumer
// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/
// src/k8s.io/client-go/tools/cache/shared_informer.go#L192),
// there is no race as knownObjects (s.indexer) is modified safely
// under DeltaFIFO's lock. The only exceptions are GetStore() and
// GetIndexer() methods, which expose ways to modify the underlying
// storage. Currently these two methods are used for creating Lister
// and internal tests.
//
// Also see the comment on DeltaFIFO.
func NewDeltaFIFO(keyFunc KeyFunc, knownObjects KeyListerGetter) *DeltaFIFO {
    f := &DeltaFIFO{
        items:        map[string]Deltas{},
        queue:        []string{},
        keyFunc:      keyFunc,
        knownObjects: knownObjects,
    }
    f.cond.L = &f.lock
    return f
}
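
// Editorial example (a sketch, not part of the vendored file): constructing
// a DeltaFIFO whose deletion detection is driven by a separate "known
// objects" store, mirroring what newInformer in controller.go above does:
//
//     knownObjects := NewStore(MetaNamespaceKeyFunc) // a Store implements KeyListerGetter
//     f := NewDeltaFIFO(MetaNamespaceKeyFunc, knownObjects)
//     // A later f.Replace(listedObjects, rv) will emit Deleted deltas (with
//     // DeletedFinalStateUnknown objects) for keys that are in knownObjects
//     // but missing from listedObjects.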

// DeltaFIFO is like FIFO, but allows you to process deletes.
//
// DeltaFIFO is a producer-consumer queue, where a Reflector is
// intended to be the producer, and the consumer is whatever calls
// the Pop() method.
//
// DeltaFIFO solves this use case:
//  * You want to process every object change (delta) at most once.
//  * When you process an object, you want to see everything
//    that's happened to it since you last processed it.
//  * You want to process the deletion of objects.
//  * You might want to periodically reprocess objects.
//
// DeltaFIFO's Pop(), Get(), and GetByKey() methods return
// interface{} to satisfy the Store/Queue interfaces, but it
// will always return an object of type Deltas.
//
// A note on threading: If you call Pop() in parallel from multiple
// threads, you could end up with multiple threads processing slightly
// different versions of the same object.
//
// A note on the KeyLister used by the DeltaFIFO: Its main purpose is
// to list keys that are "known", for the purpose of figuring out which
// items have been deleted when Replace() or Delete() are called. The deleted
// object will be included in the DeletedFinalStateUnknown markers. These objects
// could be stale.
type DeltaFIFO struct {
    // lock/cond protects access to 'items' and 'queue'.
    lock sync.RWMutex
    cond sync.Cond

    // We depend on the property that items in the set are in
    // the queue and vice versa, and that all Deltas in this
    // map have at least one Delta.
    items map[string]Deltas
    queue []string

    // populated is true if the first batch of items inserted by Replace() has been populated
    // or Delete/Add/Update was called first.
    populated bool
    // initialPopulationCount is the number of items inserted by the first call of Replace()
    initialPopulationCount int

    // keyFunc is used to make the key used for queued item
    // insertion and retrieval, and should be deterministic.
    keyFunc KeyFunc

    // knownObjects lists keys that are "known", for the
    // purpose of figuring out which items have been deleted
    // when Replace() or Delete() is called.
    knownObjects KeyListerGetter

    // Indication the queue is closed.
    // Used to indicate a queue is closed so a control loop can exit when a queue is empty.
    // Currently, not used to gate any of CRUD operations.
    closed     bool
    closedLock sync.Mutex
}

var (
    _ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue
)

var (
    // ErrZeroLengthDeltasObject is returned in a KeyError if a Deltas
    // object with zero length is encountered (should be impossible,
    // but included for completeness).
    ErrZeroLengthDeltasObject = errors.New("0 length Deltas object; can't get key")
)

// Close the queue.
func (f *DeltaFIFO) Close() {
    f.closedLock.Lock()
    defer f.closedLock.Unlock()
    f.closed = true
    f.cond.Broadcast()
}

// KeyOf exposes f's keyFunc, but also detects the key of a Deltas object or
// DeletedFinalStateUnknown objects.
func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) {
    if d, ok := obj.(Deltas); ok {
        if len(d) == 0 {
            return "", KeyError{obj, ErrZeroLengthDeltasObject}
        }
        obj = d.Newest().Object
    }
    if d, ok := obj.(DeletedFinalStateUnknown); ok {
        return d.Key, nil
    }
    return f.keyFunc(obj)
}

// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent was called first,
// or if the first batch of items inserted by Replace() has been popped.
func (f *DeltaFIFO) HasSynced() bool {
    f.lock.Lock()
    defer f.lock.Unlock()
    return f.populated && f.initialPopulationCount == 0
}

// Add inserts an item, and puts it in the queue. The item is only enqueued
// if it doesn't already exist in the set.
func (f *DeltaFIFO) Add(obj interface{}) error {
    f.lock.Lock()
    defer f.lock.Unlock()
    f.populated = true
    return f.queueActionLocked(Added, obj)
}

// Update is just like Add, but makes an Updated Delta.
func (f *DeltaFIFO) Update(obj interface{}) error {
    f.lock.Lock()
    defer f.lock.Unlock()
    f.populated = true
    return f.queueActionLocked(Updated, obj)
}

// Delete is just like Add, but makes a Deleted Delta. If the item does not
// already exist, it will be ignored. (It may have already been deleted by a
// Replace (re-list), for example.)
func (f *DeltaFIFO) Delete(obj interface{}) error {
    id, err := f.KeyOf(obj)
    if err != nil {
        return KeyError{obj, err}
    }
    f.lock.Lock()
    defer f.lock.Unlock()
    f.populated = true
    if f.knownObjects == nil {
        if _, exists := f.items[id]; !exists {
            // Presumably, this was deleted when a relist happened.
            // Don't provide a second report of the same deletion.
            return nil
        }
    } else {
        // We only want to skip the "deletion" action if the object doesn't
        // exist in knownObjects and it doesn't have a corresponding item in items.
        // Note that even if there is a "deletion" action in items, we can ignore it,
        // because it will be deduped automatically in "queueActionLocked".
        _, exists, err := f.knownObjects.GetByKey(id)
        _, itemsExist := f.items[id]
        if err == nil && !exists && !itemsExist {
            // Presumably, this was deleted when a relist happened.
            // Don't provide a second report of the same deletion.
            return nil
        }
    }

    return f.queueActionLocked(Deleted, obj)
}

// AddIfNotPresent inserts an item, and puts it in the queue. If the item is already
// present in the set, it is neither enqueued nor added to the set.
//
// This is useful in a single producer/consumer scenario so that the consumer can
// safely retry items without contending with the producer and potentially enqueueing
// stale items.
//
// Important: obj must be a Deltas (the output of the Pop() function). Yes, this is
// different from the Add/Update/Delete functions.
func (f *DeltaFIFO) AddIfNotPresent(obj interface{}) error {
    deltas, ok := obj.(Deltas)
    if !ok {
        return fmt.Errorf("object must be of type deltas, but got: %#v", obj)
    }
    id, err := f.KeyOf(deltas.Newest().Object)
    if err != nil {
        return KeyError{obj, err}
    }
    f.lock.Lock()
    defer f.lock.Unlock()
    f.addIfNotPresent(id, deltas)
    return nil
}

// addIfNotPresent inserts deltas under id if it does not exist, and assumes the caller
// already holds the fifo lock.
func (f *DeltaFIFO) addIfNotPresent(id string, deltas Deltas) {
    f.populated = true
    if _, exists := f.items[id]; exists {
        return
    }

    f.queue = append(f.queue, id)
    f.items[id] = deltas
    f.cond.Broadcast()
}

// re-listing and watching can deliver the same update multiple times in any
// order. This will combine the most recent two deltas if they are the same.
func dedupDeltas(deltas Deltas) Deltas {
    n := len(deltas)
    if n < 2 {
        return deltas
    }
    a := &deltas[n-1]
    b := &deltas[n-2]
    if out := isDup(a, b); out != nil {
        d := append(Deltas{}, deltas[:n-2]...)
        return append(d, *out)
    }
    return deltas
}

// If a & b represent the same event, returns the delta that ought to be kept.
// Otherwise, returns nil.
// TODO: is there anything other than deletions that need deduping?
func isDup(a, b *Delta) *Delta {
    if out := isDeletionDup(a, b); out != nil {
        return out
    }
    // TODO: Detect other duplicate situations? Are there any?
    return nil
}

// keep the one with the most information if both are deletions.
func isDeletionDup(a, b *Delta) *Delta {
    if b.Type != Deleted || a.Type != Deleted {
        return nil
    }
    // Do more sophisticated checks, or is this sufficient?
    if _, ok := b.Object.(DeletedFinalStateUnknown); ok {
        return a
    }
    return b
}

// willObjectBeDeletedLocked returns true only if the last delta for the
// given object is Delete. Caller must lock first.
func (f *DeltaFIFO) willObjectBeDeletedLocked(id string) bool {
    deltas := f.items[id]
    return len(deltas) > 0 && deltas[len(deltas)-1].Type == Deleted
}

// queueActionLocked appends to the delta list for the object.
// Caller must lock first.
func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error {
    id, err := f.KeyOf(obj)
    if err != nil {
        return KeyError{obj, err}
    }

    // If object is supposed to be deleted (last event is Deleted),
    // then we should ignore Sync events, because it would result in
    // recreation of this object.
    if actionType == Sync && f.willObjectBeDeletedLocked(id) {
        return nil
    }

    newDeltas := append(f.items[id], Delta{actionType, obj})
    newDeltas = dedupDeltas(newDeltas)

    if len(newDeltas) > 0 {
        if _, exists := f.items[id]; !exists {
            f.queue = append(f.queue, id)
        }
        f.items[id] = newDeltas
        f.cond.Broadcast()
    } else {
        // We need to remove this from our map (extra items in the queue are
        // ignored if they are not in the map).
        delete(f.items, id)
    }
    return nil
}

// List returns a list of all the items; it returns the object
// from the most recent Delta.
// You should treat the items returned inside the deltas as immutable.
func (f *DeltaFIFO) List() []interface{} {
    f.lock.RLock()
    defer f.lock.RUnlock()
    return f.listLocked()
}

func (f *DeltaFIFO) listLocked() []interface{} {
    list := make([]interface{}, 0, len(f.items))
    for _, item := range f.items {
        list = append(list, item.Newest().Object)
    }
    return list
}

// ListKeys returns a list of all the keys of the objects currently
// in the FIFO.
func (f *DeltaFIFO) ListKeys() []string {
    f.lock.RLock()
    defer f.lock.RUnlock()
    list := make([]string, 0, len(f.items))
    for key := range f.items {
        list = append(list, key)
    }
    return list
}

// Get returns the complete list of deltas for the requested item,
// or sets exists=false.
// You should treat the items returned inside the deltas as immutable.
func (f *DeltaFIFO) Get(obj interface{}) (item interface{}, exists bool, err error) {
    key, err := f.KeyOf(obj)
    if err != nil {
        return nil, false, KeyError{obj, err}
    }
    return f.GetByKey(key)
}

// GetByKey returns the complete list of deltas for the requested item,
// setting exists=false if that list is empty.
// You should treat the items returned inside the deltas as immutable.
func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
    f.lock.RLock()
    defer f.lock.RUnlock()
    d, exists := f.items[key]
    if exists {
        // Copy item's slice so operations on this slice
        // won't interfere with the object we return.
        d = copyDeltas(d)
    }
    return d, exists, nil
}

// IsClosed checks if the queue is closed.
func (f *DeltaFIFO) IsClosed() bool {
    f.closedLock.Lock()
    defer f.closedLock.Unlock()
    return f.closed
}

// Pop blocks until an item is added to the queue, and then returns it. If
// multiple items are ready, they are returned in the order in which they were
// added/updated. The item is removed from the queue (and the store) before it
// is returned, so if you don't successfully process it, you need to add it back
// with AddIfNotPresent().
// The process function is called under lock, so it is safe to update data structures
// in it that need to be in sync with the queue (e.g. knownKeys). The PopProcessFunc
// may return an instance of ErrRequeue with a nested error to indicate the current
// item should be requeued (equivalent to calling AddIfNotPresent under the lock).
//
// Pop returns a 'Deltas', which has a complete list of all the things
// that happened to the object (deltas) while it was sitting in the queue.
func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
    f.lock.Lock()
    defer f.lock.Unlock()
    for {
        for len(f.queue) == 0 {
            // When the queue is empty, invocation of Pop() is blocked until a new item is enqueued.
            // When Close() is called, f.closed is set and the condition is broadcast,
            // which causes this loop to continue and return from the Pop().
            if f.IsClosed() {
                return nil, FIFOClosedError
            }

            f.cond.Wait()
        }
        id := f.queue[0]
        f.queue = f.queue[1:]
        if f.initialPopulationCount > 0 {
            f.initialPopulationCount--
        }
        item, ok := f.items[id]
        if !ok {
            // Item may have been deleted subsequently.
            continue
        }
        delete(f.items, id)
        err := process(item)
        if e, ok := err.(ErrRequeue); ok {
            f.addIfNotPresent(id, item)
            err = e.Err
        }
        // Don't need to copyDeltas here, because we're transferring
        // ownership to the caller.
        return item, err
    }
}
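
// Editorial example (a sketch, not part of the vendored file): a minimal
// consumer loop, assuming f was built with NewDeltaFIFO and another
// goroutine (e.g. a Reflector) is feeding it. Pop hands the consumer a
// Deltas slice, oldest first. Assumes "fmt" is imported where this runs.
//
//     for {
//         _, err := f.Pop(func(obj interface{}) error {
//             for _, d := range obj.(Deltas) {
//                 fmt.Printf("%s: %v\n", d.Type, d.Object)
//             }
//             // Returning ErrRequeue{Err: nil} here would re-enqueue the
//             // item under the same lock instead of dropping it.
//             return nil
//         })
//         if err == FIFOClosedError {
//             return
//         }
//     }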

// Replace will delete the contents of 'f', using instead the given map.
// 'f' takes ownership of the map, you should not reference the map again
// after calling this function. f's queue is reset, too; upon return, it
// will contain the items in the map, in no particular order.
func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
    f.lock.Lock()
    defer f.lock.Unlock()
    keys := make(sets.String, len(list))

    for _, item := range list {
        key, err := f.KeyOf(item)
        if err != nil {
            return KeyError{item, err}
        }
        keys.Insert(key)
        if err := f.queueActionLocked(Sync, item); err != nil {
            return fmt.Errorf("couldn't enqueue object: %v", err)
        }
    }

    if f.knownObjects == nil {
        // Do deletion detection against our own list.
        queuedDeletions := 0
        for k, oldItem := range f.items {
            if keys.Has(k) {
                continue
            }
            var deletedObj interface{}
            if n := oldItem.Newest(); n != nil {
                deletedObj = n.Object
            }
            queuedDeletions++
            if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
                return err
            }
        }

        if !f.populated {
            f.populated = true
            // While there shouldn't be any queued deletions in the initial
            // population of the queue, it's better to be on the safe side.
            f.initialPopulationCount = len(list) + queuedDeletions
        }

        return nil
    }

    // Detect deletions not already in the queue.
    knownKeys := f.knownObjects.ListKeys()
    queuedDeletions := 0
    for _, k := range knownKeys {
        if keys.Has(k) {
            continue
        }

        deletedObj, exists, err := f.knownObjects.GetByKey(k)
        if err != nil {
            deletedObj = nil
            klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k)
        } else if !exists {
            deletedObj = nil
            klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k)
        }
        queuedDeletions++
        if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
            return err
        }
    }

    if !f.populated {
        f.populated = true
        f.initialPopulationCount = len(list) + queuedDeletions
    }

    return nil
}

// Resync will send a sync event for each item.
func (f *DeltaFIFO) Resync() error {
    f.lock.Lock()
    defer f.lock.Unlock()

    if f.knownObjects == nil {
        return nil
    }

    keys := f.knownObjects.ListKeys()
    for _, k := range keys {
        if err := f.syncKeyLocked(k); err != nil {
            return err
        }
    }
    return nil
}

func (f *DeltaFIFO) syncKey(key string) error {
    f.lock.Lock()
    defer f.lock.Unlock()

    return f.syncKeyLocked(key)
}

func (f *DeltaFIFO) syncKeyLocked(key string) error {
    obj, exists, err := f.knownObjects.GetByKey(key)
    if err != nil {
        klog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key)
        return nil
    } else if !exists {
        klog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key)
        return nil
    }

    // If we are doing Resync() and there is already an event queued for that object,
    // we ignore the Resync for it. This is to avoid the race, in which the resync
    // comes with the previous value of object (since queueing an event for the object
    // doesn't trigger changing the underlying store <knownObjects>).
    id, err := f.KeyOf(obj)
    if err != nil {
        return KeyError{obj, err}
    }
    if len(f.items[id]) > 0 {
        return nil
    }

    if err := f.queueActionLocked(Sync, obj); err != nil {
        return fmt.Errorf("couldn't queue object: %v", err)
    }
    return nil
}

// A KeyListerGetter is anything that knows how to list its keys and look up by key.
type KeyListerGetter interface {
    KeyLister
    KeyGetter
}

// A KeyLister is anything that knows how to list its keys.
type KeyLister interface {
    ListKeys() []string
}

// A KeyGetter is anything that knows how to get the value stored under a given key.
type KeyGetter interface {
    GetByKey(key string) (interface{}, bool, error)
}

// DeltaType is the type of a change (addition, deletion, etc).
type DeltaType string

const (
    Added   DeltaType = "Added"
    Updated DeltaType = "Updated"
    Deleted DeltaType = "Deleted"
    // The other types are obvious. You'll get Sync deltas when:
    //  * A watch expires/errors out and a new list/watch cycle is started.
    //  * You've turned on periodic syncs.
    // (Anything that triggers DeltaFIFO's Replace() method.)
    Sync DeltaType = "Sync"
)

// Delta is the type stored by a DeltaFIFO. It tells you what change
// happened, and the object's state after* that change.
//
// [*] Unless the change is a deletion, and then you'll get the final
// state of the object before it was deleted.
type Delta struct {
    Type   DeltaType
    Object interface{}
}

// Deltas is a list of one or more 'Delta's to an individual object.
// The oldest delta is at index 0, the newest delta is the last one.
type Deltas []Delta

// Oldest is a convenience function that returns the oldest delta, or
// nil if there are no deltas.
func (d Deltas) Oldest() *Delta {
    if len(d) > 0 {
        return &d[0]
    }
    return nil
}

// Newest is a convenience function that returns the newest delta, or
// nil if there are no deltas.
func (d Deltas) Newest() *Delta {
    if n := len(d); n > 0 {
        return &d[n-1]
    }
    return nil
}

// copyDeltas returns a shallow copy of d; that is, it copies the slice but not
// the objects in the slice. This allows Get/List to return an object that we
// know won't be clobbered by subsequent modifications.
func copyDeltas(d Deltas) Deltas {
    d2 := make(Deltas, len(d))
    copy(d2, d)
    return d2
}

// DeletedFinalStateUnknown is placed into a DeltaFIFO in the case where
// an object was deleted but the watch deletion event was missed. In this
// case we don't know the final "resting" state of the object, so there's
// a chance the included `Obj` is stale.
type DeletedFinalStateUnknown struct {
    Key string
    Obj interface{}
}

vendor/k8s.io/client-go/tools/cache/doc.go (generated, vendored, new file)
@@ -0,0 +1,24 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package cache is a client-side caching mechanism. It is useful for
// reducing the number of server calls you'd otherwise need to make.
// Reflector watches a server and updates a Store. Two stores are provided;
// one that simply caches objects (for example, to allow a scheduler to
// list currently available nodes), and one that additionally acts as
// a FIFO queue (for example, to allow a scheduler to process incoming
// pods).
package cache // import "k8s.io/client-go/tools/cache"

vendor/k8s.io/client-go/tools/cache/expiration_cache.go (generated, vendored, new file)
@@ -0,0 +1,208 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
    "sync"
    "time"

    "k8s.io/apimachinery/pkg/util/clock"
    "k8s.io/klog"
)

// ExpirationCache implements the Store interface.
// 1. All entries are automatically time stamped on insert
//    a. The key is computed based off the original item/keyFunc
//    b. The value inserted under that key is the timestamped item
// 2. Expiration happens lazily on read based on the expiration policy
//    a. No item can be inserted into the store while we're expiring
//       *any* item in the cache.
// 3. Time-stamps are stripped off unexpired entries before return
// Note that the ExpirationCache is inherently slower than a normal
// threadSafeStore because it takes a write lock every time it checks if
// an item has expired.
type ExpirationCache struct {
    cacheStorage     ThreadSafeStore
    keyFunc          KeyFunc
    clock            clock.Clock
    expirationPolicy ExpirationPolicy
    // expirationLock is a write lock used to guarantee that we don't clobber
    // newly inserted objects because of a stale expiration timestamp comparison.
    expirationLock sync.Mutex
}

// ExpirationPolicy dictates when an object expires. Currently only abstracted out
// so unittests don't rely on the system clock.
type ExpirationPolicy interface {
    IsExpired(obj *timestampedEntry) bool
}

// TTLPolicy implements a ttl based ExpirationPolicy.
type TTLPolicy struct {
    // >0: Expire entries with an age > ttl
    // <=0: Don't expire any entry
    Ttl time.Duration

    // Clock used to calculate ttl expiration
    Clock clock.Clock
}

// IsExpired returns true if the given object is older than the ttl, or it can't
// determine its age.
func (p *TTLPolicy) IsExpired(obj *timestampedEntry) bool {
    return p.Ttl > 0 && p.Clock.Since(obj.timestamp) > p.Ttl
}

// timestampedEntry is the only type allowed in an ExpirationCache.
type timestampedEntry struct {
    obj       interface{}
    timestamp time.Time
}

// getTimestampedEntry returns the timestampedEntry stored under the given key.
func (c *ExpirationCache) getTimestampedEntry(key string) (*timestampedEntry, bool) {
    item, _ := c.cacheStorage.Get(key)
    if tsEntry, ok := item.(*timestampedEntry); ok {
        return tsEntry, true
    }
    return nil, false
}

// getOrExpire retrieves the object from the timestampedEntry if and only if it hasn't
// already expired. It holds a write lock across deletion.
func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
    // Prevent all inserts from the time we deem an item as "expired" to when we
    // delete it, so an un-expired item doesn't sneak in under the same key, just
    // before the Delete.
    c.expirationLock.Lock()
    defer c.expirationLock.Unlock()
    timestampedItem, exists := c.getTimestampedEntry(key)
    if !exists {
        return nil, false
    }
    if c.expirationPolicy.IsExpired(timestampedItem) {
        klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj)
        c.cacheStorage.Delete(key)
        return nil, false
    }
    return timestampedItem.obj, true
}

// GetByKey returns the item stored under the key, or sets exists=false.
func (c *ExpirationCache) GetByKey(key string) (interface{}, bool, error) {
    obj, exists := c.getOrExpire(key)
    return obj, exists, nil
}

// Get returns unexpired items. It purges the cache of expired items in the
// process.
func (c *ExpirationCache) Get(obj interface{}) (interface{}, bool, error) {
    key, err := c.keyFunc(obj)
    if err != nil {
        return nil, false, KeyError{obj, err}
    }
    obj, exists := c.getOrExpire(key)
    return obj, exists, nil
}

// List retrieves a list of unexpired items. It purges the cache of expired
// items in the process.
func (c *ExpirationCache) List() []interface{} {
    items := c.cacheStorage.List()

    list := make([]interface{}, 0, len(items))
    for _, item := range items {
        obj := item.(*timestampedEntry).obj
        if key, err := c.keyFunc(obj); err != nil {
            list = append(list, obj)
        } else if obj, exists := c.getOrExpire(key); exists {
            list = append(list, obj)
        }
    }
    return list
}

// ListKeys returns a list of all keys in the expiration cache.
func (c *ExpirationCache) ListKeys() []string {
    return c.cacheStorage.ListKeys()
}

// Add timestamps an item and inserts it into the cache, overwriting entries
// that might exist under the same key.
func (c *ExpirationCache) Add(obj interface{}) error {
    key, err := c.keyFunc(obj)
    if err != nil {
        return KeyError{obj, err}
    }
    c.expirationLock.Lock()
    defer c.expirationLock.Unlock()

    c.cacheStorage.Add(key, &timestampedEntry{obj, c.clock.Now()})
    return nil
}

// Update has not been implemented yet for lack of a use case, so this method
// simply calls `Add`. This effectively refreshes the timestamp.
func (c *ExpirationCache) Update(obj interface{}) error {
    return c.Add(obj)
}

// Delete removes an item from the cache.
func (c *ExpirationCache) Delete(obj interface{}) error {
    key, err := c.keyFunc(obj)
    if err != nil {
        return KeyError{obj, err}
    }
    c.expirationLock.Lock()
    defer c.expirationLock.Unlock()
    c.cacheStorage.Delete(key)
    return nil
}

// Replace will convert all items in the given list to timestampedEntries
// before attempting the replace operation. The replace operation will
// delete the contents of the ExpirationCache `c`.
func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) error {
    items := make(map[string]interface{}, len(list))
    ts := c.clock.Now()
    for _, item := range list {
        key, err := c.keyFunc(item)
        if err != nil {
            return KeyError{item, err}
        }
        items[key] = &timestampedEntry{item, ts}
    }
    c.expirationLock.Lock()
    defer c.expirationLock.Unlock()
    c.cacheStorage.Replace(items, resourceVersion)
    return nil
}

// Resync will touch all objects to put them into the processing queue.
func (c *ExpirationCache) Resync() error {
    return c.cacheStorage.Resync()
}

// NewTTLStore creates and returns an ExpirationCache with a TTLPolicy.
func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store {
    return &ExpirationCache{
        cacheStorage:     NewThreadSafeStore(Indexers{}, Indices{}),
        keyFunc:          keyFunc,
        clock:            clock.RealClock{},
        expirationPolicy: &TTLPolicy{ttl, clock.RealClock{}},
    }
}
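
// Editorial example (a sketch, not part of the vendored file): entries in a
// TTL store silently vanish on read once they outlive the TTL. pod is a
// hypothetical keyed object (e.g. a *v1.Pod).
//
//     store := NewTTLStore(MetaNamespaceKeyFunc, 30*time.Second)
//     _ = store.Add(pod)
//     obj, exists, _ := store.Get(pod)  // hit while the entry is fresh
//     // ... more than 30s later ...
//     obj, exists, _ = store.Get(pod)   // exists == false; expired lazily on read
//     _, _ = obj, exists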

vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go (generated, vendored, new file)
@@ -0,0 +1,54 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
    "k8s.io/apimachinery/pkg/util/clock"
    "k8s.io/apimachinery/pkg/util/sets"
)

// fakeThreadSafeMap wraps a ThreadSafeStore and reports every deleted key on
// the deletedKeys channel, so tests can observe expiry-driven deletions.
type fakeThreadSafeMap struct {
    ThreadSafeStore
    deletedKeys chan<- string
}

func (c *fakeThreadSafeMap) Delete(key string) {
    if c.deletedKeys != nil {
        c.ThreadSafeStore.Delete(key)
        c.deletedKeys <- key
    }
}

// FakeExpirationPolicy is an ExpirationPolicy for tests: every key is
// considered expired except those in NeverExpire.
type FakeExpirationPolicy struct {
    NeverExpire     sets.String
    RetrieveKeyFunc KeyFunc
}

// IsExpired reports whether obj's key falls outside the NeverExpire set.
func (p *FakeExpirationPolicy) IsExpired(obj *timestampedEntry) bool {
    key, _ := p.RetrieveKeyFunc(obj)
    return !p.NeverExpire.Has(key)
}

// NewFakeExpirationStore returns an ExpirationCache that uses the supplied
// policy and clock, backed by a store that reports deletions on deletedKeys.
func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock clock.Clock) Store {
    cacheStorage := NewThreadSafeStore(Indexers{}, Indices{})
    return &ExpirationCache{
        cacheStorage:     &fakeThreadSafeMap{cacheStorage, deletedKeys},
        keyFunc:          keyFunc,
        clock:            cacheClock,
        expirationPolicy: expirationPolicy,
    }
}
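
// Editorial example (a test-style sketch, not part of the vendored file):
// using a fake clock so expiry is deterministic. pod is a hypothetical keyed
// object; the clock import is "k8s.io/apimachinery/pkg/util/clock".
//
//     fakeClock := clock.NewFakeClock(time.Now())
//     deleted := make(chan string, 1) // buffered: Delete sends the key here
//     store := NewFakeExpirationStore(MetaNamespaceKeyFunc, deleted,
//         &TTLPolicy{Ttl: time.Minute, Clock: fakeClock}, fakeClock)
//     _ = store.Add(pod)
//     fakeClock.Step(2 * time.Minute)  // jump past the TTL
//     _, exists, _ := store.Get(pod)   // exists == false; key arrives on deleted
//     _ = exists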

vendor/k8s.io/client-go/tools/cache/fake_custom_store.go (generated, vendored, new file)
@@ -0,0 +1,102 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

// FakeCustomStore lets you define custom functions for store operations.
type FakeCustomStore struct {
    AddFunc      func(obj interface{}) error
    UpdateFunc   func(obj interface{}) error
    DeleteFunc   func(obj interface{}) error
    ListFunc     func() []interface{}
    ListKeysFunc func() []string
    GetFunc      func(obj interface{}) (item interface{}, exists bool, err error)
    GetByKeyFunc func(key string) (item interface{}, exists bool, err error)
    ReplaceFunc  func(list []interface{}, resourceVersion string) error
    ResyncFunc   func() error
}

// Add calls the custom Add function if defined.
func (f *FakeCustomStore) Add(obj interface{}) error {
    if f.AddFunc != nil {
        return f.AddFunc(obj)
    }
    return nil
}

// Update calls the custom Update function if defined.
func (f *FakeCustomStore) Update(obj interface{}) error {
    if f.UpdateFunc != nil {
        return f.UpdateFunc(obj)
    }
    return nil
}

// Delete calls the custom Delete function if defined.
func (f *FakeCustomStore) Delete(obj interface{}) error {
    if f.DeleteFunc != nil {
        return f.DeleteFunc(obj)
    }
    return nil
}

// List calls the custom List function if defined.
func (f *FakeCustomStore) List() []interface{} {
    if f.ListFunc != nil {
        return f.ListFunc()
    }
    return nil
}

// ListKeys calls the custom ListKeys function if defined.
func (f *FakeCustomStore) ListKeys() []string {
    if f.ListKeysFunc != nil {
        return f.ListKeysFunc()
    }
    return nil
}

// Get calls the custom Get function if defined.
func (f *FakeCustomStore) Get(obj interface{}) (item interface{}, exists bool, err error) {
    if f.GetFunc != nil {
        return f.GetFunc(obj)
    }
    return nil, false, nil
}

// GetByKey calls the custom GetByKey function if defined.
func (f *FakeCustomStore) GetByKey(key string) (item interface{}, exists bool, err error) {
    if f.GetByKeyFunc != nil {
        return f.GetByKeyFunc(key)
    }
    return nil, false, nil
}

// Replace calls the custom Replace function if defined.
func (f *FakeCustomStore) Replace(list []interface{}, resourceVersion string) error {
    if f.ReplaceFunc != nil {
        return f.ReplaceFunc(list, resourceVersion)
    }
    return nil
}

// Resync calls the custom Resync function if defined.
func (f *FakeCustomStore) Resync() error {
    if f.ResyncFunc != nil {
        return f.ResyncFunc()
    }
    return nil
}
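
// Editorial example (a test-style sketch, not part of the vendored file):
// only the operations under test need stubbing; everything else is a no-op.
//
//     var added []interface{}
//     store := &FakeCustomStore{
//         AddFunc: func(obj interface{}) error {
//             added = append(added, obj)
//             return nil
//         },
//     }
//     _ = store.Add("some object") // recorded by AddFunc
//     _ = store.Delete("ignored")  // DeleteFunc is nil, so this is a no-op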

vendor/k8s.io/client-go/tools/cache/fifo.go (generated, vendored, new file)
@@ -0,0 +1,358 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
    "errors"
    "sync"

    "k8s.io/apimachinery/pkg/util/sets"
)

// PopProcessFunc is passed to Pop() method of Queue interface.
// It is supposed to process the element popped from the queue.
type PopProcessFunc func(interface{}) error

// ErrRequeue may be returned by a PopProcessFunc to safely requeue
// the current item. The value of Err will be returned from Pop.
type ErrRequeue struct {
    // Err is returned by the Pop function
    Err error
}

// FIFOClosedError is returned by Pop when called on a closed queue.
var FIFOClosedError error = errors.New("DeltaFIFO: manipulating with closed queue")

func (e ErrRequeue) Error() string {
    if e.Err == nil {
        return "the popped item should be requeued without returning an error"
    }
    return e.Err.Error()
}

// Queue is exactly like a Store, but has a Pop() method too.
type Queue interface {
    Store

    // Pop blocks until it has something to process.
    // It returns the object that was processed and the result of processing.
    // The PopProcessFunc may return an ErrRequeue{...} to indicate the item
    // should be requeued before releasing the lock on the queue.
    Pop(PopProcessFunc) (interface{}, error)

    // AddIfNotPresent adds a value previously
    // returned by Pop back into the queue as long
    // as nothing else (presumably more recent)
    // has since been added.
    AddIfNotPresent(interface{}) error

    // HasSynced returns true if the first batch of items has been popped
    HasSynced() bool

    // Close the queue
    Close()
}

// Pop is a helper function for popping from Queue.
// WARNING: Do NOT use this function in non-test code to avoid races
// unless you really really really really know what you are doing.
func Pop(queue Queue) interface{} {
    var result interface{}
    queue.Pop(func(obj interface{}) error {
        result = obj
        return nil
    })
    return result
}
|
||||
|
||||
// FIFO receives adds and updates from a Reflector, and puts them in a queue for
|
||||
// FIFO order processing. If multiple adds/updates of a single item happen while
|
||||
// an item is in the queue before it has been processed, it will only be
|
||||
// processed once, and when it is processed, the most recent version will be
|
||||
// processed. This can't be done with a channel.
|
||||
//
|
||||
// FIFO solves this use case:
|
||||
// * You want to process every object (exactly) once.
|
||||
// * You want to process the most recent version of the object when you process it.
|
||||
// * You do not want to process deleted objects, they should be removed from the queue.
|
||||
// * You do not want to periodically reprocess objects.
|
||||
// Compare with DeltaFIFO for other use cases.
|
||||
type FIFO struct {
|
||||
lock sync.RWMutex
|
||||
cond sync.Cond
|
||||
// We depend on the property that items in the set are in the queue and vice versa.
|
||||
items map[string]interface{}
|
||||
queue []string
|
||||
|
||||
// populated is true if the first batch of items inserted by Replace() has been populated
|
||||
// or Delete/Add/Update was called first.
|
||||
populated bool
|
||||
// initialPopulationCount is the number of items inserted by the first call of Replace()
|
||||
initialPopulationCount int
|
||||
|
||||
// keyFunc is used to make the key used for queued item insertion and retrieval, and
|
||||
// should be deterministic.
|
||||
keyFunc KeyFunc
|
||||
|
||||
// Indication the queue is closed.
|
||||
// Used to indicate a queue is closed so a control loop can exit when a queue is empty.
|
||||
// Currently, not used to gate any of CRED operations.
|
||||
closed bool
|
||||
closedLock sync.Mutex
|
||||
}
|
||||
|
||||
var (
|
||||
_ = Queue(&FIFO{}) // FIFO is a Queue
|
||||
)
|
||||
|
||||
// Close the queue.
|
||||
func (f *FIFO) Close() {
|
||||
f.closedLock.Lock()
|
||||
defer f.closedLock.Unlock()
|
||||
f.closed = true
|
||||
f.cond.Broadcast()
|
||||
}
|
||||
|
||||
// Return true if an Add/Update/Delete/AddIfNotPresent are called first,
|
||||
// or an Update called first but the first batch of items inserted by Replace() has been popped
|
||||
func (f *FIFO) HasSynced() bool {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
return f.populated && f.initialPopulationCount == 0
|
||||
}
|
||||
|
||||
// Add inserts an item, and puts it in the queue. The item is only enqueued
|
||||
// if it doesn't already exist in the set.
|
||||
func (f *FIFO) Add(obj interface{}) error {
|
||||
id, err := f.keyFunc(obj)
|
||||
if err != nil {
|
||||
return KeyError{obj, err}
|
||||
}
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
f.populated = true
|
||||
if _, exists := f.items[id]; !exists {
|
||||
f.queue = append(f.queue, id)
|
||||
}
|
||||
f.items[id] = obj
|
||||
f.cond.Broadcast()
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddIfNotPresent inserts an item, and puts it in the queue. If the item is already
|
||||
// present in the set, it is neither enqueued nor added to the set.
|
||||
//
|
||||
// This is useful in a single producer/consumer scenario so that the consumer can
|
||||
// safely retry items without contending with the producer and potentially enqueueing
|
||||
// stale items.
|
||||
func (f *FIFO) AddIfNotPresent(obj interface{}) error {
|
||||
id, err := f.keyFunc(obj)
|
||||
if err != nil {
|
||||
return KeyError{obj, err}
|
||||
}
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
f.addIfNotPresent(id, obj)
|
||||
return nil
|
||||
}
|
||||
|
||||
// addIfNotPresent assumes the fifo lock is already held and adds the provided
|
||||
// item to the queue under id if it does not already exist.
|
||||
func (f *FIFO) addIfNotPresent(id string, obj interface{}) {
|
||||
f.populated = true
|
||||
if _, exists := f.items[id]; exists {
|
||||
return
|
||||
}
|
||||
|
||||
f.queue = append(f.queue, id)
|
||||
f.items[id] = obj
|
||||
f.cond.Broadcast()
|
||||
}
|
||||
|
||||
// Update is the same as Add in this implementation.
|
||||
func (f *FIFO) Update(obj interface{}) error {
|
||||
return f.Add(obj)
|
||||
}
|
||||
|
||||
// Delete removes an item. It doesn't add it to the queue, because
|
||||
// this implementation assumes the consumer only cares about the objects,
|
||||
// not the order in which they were created/added.
|
||||
func (f *FIFO) Delete(obj interface{}) error {
|
||||
id, err := f.keyFunc(obj)
|
||||
if err != nil {
|
||||
return KeyError{obj, err}
|
||||
}
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
f.populated = true
|
||||
delete(f.items, id)
|
||||
return err
|
||||
}
|
||||
|
||||
// List returns a list of all the items.
|
||||
func (f *FIFO) List() []interface{} {
|
||||
f.lock.RLock()
|
||||
defer f.lock.RUnlock()
|
||||
list := make([]interface{}, 0, len(f.items))
|
||||
for _, item := range f.items {
|
||||
list = append(list, item)
|
||||
}
|
||||
return list
|
||||
}
|
||||
|
||||
// ListKeys returns a list of all the keys of the objects currently
|
||||
// in the FIFO.
|
||||
func (f *FIFO) ListKeys() []string {
|
||||
f.lock.RLock()
|
||||
defer f.lock.RUnlock()
|
||||
list := make([]string, 0, len(f.items))
|
||||
for key := range f.items {
|
||||
list = append(list, key)
|
||||
}
|
||||
return list
|
||||
}
|
||||
|
||||
// Get returns the requested item, or sets exists=false.
|
||||
func (f *FIFO) Get(obj interface{}) (item interface{}, exists bool, err error) {
|
||||
key, err := f.keyFunc(obj)
|
||||
if err != nil {
|
||||
return nil, false, KeyError{obj, err}
|
||||
}
|
||||
return f.GetByKey(key)
|
||||
}
|
||||
|
||||
// GetByKey returns the requested item, or sets exists=false.
|
||||
func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
|
||||
f.lock.RLock()
|
||||
defer f.lock.RUnlock()
|
||||
item, exists = f.items[key]
|
||||
return item, exists, nil
|
||||
}
|
||||
|
||||
// Checks if the queue is closed
|
||||
func (f *FIFO) IsClosed() bool {
|
||||
f.closedLock.Lock()
|
||||
defer f.closedLock.Unlock()
|
||||
if f.closed {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Pop waits until an item is ready and processes it. If multiple items are
|
||||
// ready, they are returned in the order in which they were added/updated.
|
||||
// The item is removed from the queue (and the store) before it is processed,
|
||||
// so if you don't successfully process it, it should be added back with
|
||||
// AddIfNotPresent(). process function is called under lock, so it is safe
|
||||
// update data structures in it that need to be in sync with the queue.
|
||||
func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
for {
|
||||
for len(f.queue) == 0 {
|
||||
// When the queue is empty, invocation of Pop() is blocked until new item is enqueued.
|
||||
// When Close() is called, the f.closed is set and the condition is broadcasted.
|
||||
// Which causes this loop to continue and return from the Pop().
|
||||
if f.IsClosed() {
|
||||
return nil, FIFOClosedError
|
||||
}
|
||||
|
||||
f.cond.Wait()
|
||||
}
|
||||
id := f.queue[0]
|
||||
f.queue = f.queue[1:]
|
||||
if f.initialPopulationCount > 0 {
|
||||
f.initialPopulationCount--
|
||||
}
|
||||
item, ok := f.items[id]
|
||||
if !ok {
|
||||
// Item may have been deleted subsequently.
|
||||
continue
|
||||
}
|
||||
delete(f.items, id)
|
||||
err := process(item)
|
||||
if e, ok := err.(ErrRequeue); ok {
|
||||
f.addIfNotPresent(id, item)
|
||||
err = e.Err
|
||||
}
|
||||
return item, err
|
||||
}
|
||||
}
|
||||
|
||||
// Replace will delete the contents of 'f', using instead the given map.
|
||||
// 'f' takes ownership of the map, you should not reference the map again
|
||||
// after calling this function. f's queue is reset, too; upon return, it
|
||||
// will contain the items in the map, in no particular order.
|
||||
func (f *FIFO) Replace(list []interface{}, resourceVersion string) error {
|
||||
items := make(map[string]interface{}, len(list))
|
||||
for _, item := range list {
|
||||
key, err := f.keyFunc(item)
|
||||
if err != nil {
|
||||
return KeyError{item, err}
|
||||
}
|
||||
items[key] = item
|
||||
}
|
||||
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
if !f.populated {
|
||||
f.populated = true
|
||||
f.initialPopulationCount = len(items)
|
||||
}
|
||||
|
||||
f.items = items
|
||||
f.queue = f.queue[:0]
|
||||
for id := range items {
|
||||
f.queue = append(f.queue, id)
|
||||
}
|
||||
if len(f.queue) > 0 {
|
||||
f.cond.Broadcast()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resync will touch all objects to put them into the processing queue
|
||||
func (f *FIFO) Resync() error {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
inQueue := sets.NewString()
|
||||
for _, id := range f.queue {
|
||||
inQueue.Insert(id)
|
||||
}
|
||||
for id := range f.items {
|
||||
if !inQueue.Has(id) {
|
||||
f.queue = append(f.queue, id)
|
||||
}
|
||||
}
|
||||
if len(f.queue) > 0 {
|
||||
f.cond.Broadcast()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewFIFO returns a Store which can be used to queue up items to
|
||||
// process.
|
||||
func NewFIFO(keyFunc KeyFunc) *FIFO {
|
||||
f := &FIFO{
|
||||
items: map[string]interface{}{},
|
||||
queue: []string{},
|
||||
keyFunc: keyFunc,
|
||||
}
|
||||
f.cond.L = &f.lock
|
||||
return f
|
||||
}
|
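// Example (illustrative sketch, not part of the upstream file): driving a FIFO
// keyed by MetaNamespaceKeyFunc; "pod" stands in for any object the key
// function understands. Pop hands the most recent version of each queued
// object to the process function exactly once:
//
//	fifo := NewFIFO(MetaNamespaceKeyFunc)
//	_ = fifo.Add(pod)
//	item, err := fifo.Pop(func(obj interface{}) error {
//		// process obj; return ErrRequeue{Err: err} to requeue it safely
//		return nil
//	})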
323
vendor/k8s.io/client-go/tools/cache/heap.go
generated
vendored
Normal file
@ -0,0 +1,323 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file implements a heap data structure.

package cache

import (
	"container/heap"
	"fmt"
	"sync"
)

const (
	closedMsg = "heap is closed"
)

// LessFunc compares two objects and returns true if the first one should go
// in front of the second one in the heap.
type LessFunc func(interface{}, interface{}) bool

type heapItem struct {
	obj   interface{} // The object which is stored in the heap.
	index int         // The index of the object's key in the Heap.queue.
}

type itemKeyValue struct {
	key string
	obj interface{}
}

// heapData is an internal struct that implements the standard heap interface
// and keeps the data stored in the heap.
type heapData struct {
	// items is a map from key of the objects to the objects and their index.
	// We depend on the property that items in the map are in the queue and vice versa.
	items map[string]*heapItem
	// queue implements a heap data structure and keeps the order of elements
	// according to the heap invariant. The queue keeps the keys of objects stored
	// in "items".
	queue []string

	// keyFunc is used to make the key used for queued item insertion and retrieval, and
	// should be deterministic.
	keyFunc KeyFunc
	// lessFunc is used to compare two objects in the heap.
	lessFunc LessFunc
}

var (
	_ = heap.Interface(&heapData{}) // heapData is a standard heap
)

// Less compares two objects and returns true if the first one should go
// in front of the second one in the heap.
func (h *heapData) Less(i, j int) bool {
	if i > len(h.queue) || j > len(h.queue) {
		return false
	}
	itemi, ok := h.items[h.queue[i]]
	if !ok {
		return false
	}
	itemj, ok := h.items[h.queue[j]]
	if !ok {
		return false
	}
	return h.lessFunc(itemi.obj, itemj.obj)
}

// Len returns the number of items in the Heap.
func (h *heapData) Len() int { return len(h.queue) }

// Swap implements swapping of two elements in the heap. This is a part of standard
// heap interface and should never be called directly.
func (h *heapData) Swap(i, j int) {
	h.queue[i], h.queue[j] = h.queue[j], h.queue[i]
	item := h.items[h.queue[i]]
	item.index = i
	item = h.items[h.queue[j]]
	item.index = j
}

// Push is supposed to be called by heap.Push only.
func (h *heapData) Push(kv interface{}) {
	keyValue := kv.(*itemKeyValue)
	n := len(h.queue)
	h.items[keyValue.key] = &heapItem{keyValue.obj, n}
	h.queue = append(h.queue, keyValue.key)
}

// Pop is supposed to be called by heap.Pop only.
func (h *heapData) Pop() interface{} {
	key := h.queue[len(h.queue)-1]
	h.queue = h.queue[0 : len(h.queue)-1]
	item, ok := h.items[key]
	if !ok {
		// This is an error
		return nil
	}
	delete(h.items, key)
	return item.obj
}

// Heap is a thread-safe producer/consumer queue that implements a heap data structure.
// It can be used to implement priority queues and similar data structures.
type Heap struct {
	lock sync.RWMutex
	cond sync.Cond

	// data stores objects and has a queue that keeps their ordering according
	// to the heap invariant.
	data *heapData

	// closed indicates that the queue is closed.
	// It is mainly used to let Pop() exit its control loop while waiting for an item.
	closed bool
}

// Close the Heap and signals condition variables that may be waiting to pop
// items from the heap.
func (h *Heap) Close() {
	h.lock.Lock()
	defer h.lock.Unlock()
	h.closed = true
	h.cond.Broadcast()
}

// Add inserts an item, and puts it in the queue. The item is updated if it
// already exists.
func (h *Heap) Add(obj interface{}) error {
	key, err := h.data.keyFunc(obj)
	if err != nil {
		return KeyError{obj, err}
	}
	h.lock.Lock()
	defer h.lock.Unlock()
	if h.closed {
		return fmt.Errorf(closedMsg)
	}
	if _, exists := h.data.items[key]; exists {
		h.data.items[key].obj = obj
		heap.Fix(h.data, h.data.items[key].index)
	} else {
		h.addIfNotPresentLocked(key, obj)
	}
	h.cond.Broadcast()
	return nil
}

// BulkAdd adds all the items in the list to the queue and then signals the condition
// variable. It is useful when the caller would like to add all of the items
// to the queue before the consumer starts processing them.
func (h *Heap) BulkAdd(list []interface{}) error {
	h.lock.Lock()
	defer h.lock.Unlock()
	if h.closed {
		return fmt.Errorf(closedMsg)
	}
	for _, obj := range list {
		key, err := h.data.keyFunc(obj)
		if err != nil {
			return KeyError{obj, err}
		}
		if _, exists := h.data.items[key]; exists {
			h.data.items[key].obj = obj
			heap.Fix(h.data, h.data.items[key].index)
		} else {
			h.addIfNotPresentLocked(key, obj)
		}
	}
	h.cond.Broadcast()
	return nil
}

// AddIfNotPresent inserts an item, and puts it in the queue. If an item with
// the key is present in the map, no change is made to the item.
//
// This is useful in a single producer/consumer scenario so that the consumer can
// safely retry items without contending with the producer and potentially enqueueing
// stale items.
func (h *Heap) AddIfNotPresent(obj interface{}) error {
	id, err := h.data.keyFunc(obj)
	if err != nil {
		return KeyError{obj, err}
	}
	h.lock.Lock()
	defer h.lock.Unlock()
	if h.closed {
		return fmt.Errorf(closedMsg)
	}
	h.addIfNotPresentLocked(id, obj)
	h.cond.Broadcast()
	return nil
}

// addIfNotPresentLocked assumes the lock is already held and adds the provided
// item to the queue if it does not already exist.
func (h *Heap) addIfNotPresentLocked(key string, obj interface{}) {
	if _, exists := h.data.items[key]; exists {
		return
	}
	heap.Push(h.data, &itemKeyValue{key, obj})
}

// Update is the same as Add in this implementation. When the item does not
// exist, it is added.
func (h *Heap) Update(obj interface{}) error {
	return h.Add(obj)
}

// Delete removes an item.
func (h *Heap) Delete(obj interface{}) error {
	key, err := h.data.keyFunc(obj)
	if err != nil {
		return KeyError{obj, err}
	}
	h.lock.Lock()
	defer h.lock.Unlock()
	if item, ok := h.data.items[key]; ok {
		heap.Remove(h.data, item.index)
		return nil
	}
	return fmt.Errorf("object not found")
}

// Pop waits until an item is ready. If multiple items are
// ready, they are returned in the order given by Heap.data.lessFunc.
func (h *Heap) Pop() (interface{}, error) {
	h.lock.Lock()
	defer h.lock.Unlock()
	for len(h.data.queue) == 0 {
		// When the queue is empty, invocation of Pop() is blocked until new item is enqueued.
		// When Close() is called, the h.closed is set and the condition is broadcast,
		// which causes this loop to continue and return from the Pop().
		if h.closed {
			return nil, fmt.Errorf("heap is closed")
		}
		h.cond.Wait()
	}
	obj := heap.Pop(h.data)
	if obj != nil {
		return obj, nil
	}
	return nil, fmt.Errorf("object was removed from heap data")
}

// List returns a list of all the items.
func (h *Heap) List() []interface{} {
	h.lock.RLock()
	defer h.lock.RUnlock()
	list := make([]interface{}, 0, len(h.data.items))
	for _, item := range h.data.items {
		list = append(list, item.obj)
	}
	return list
}

// ListKeys returns a list of all the keys of the objects currently in the Heap.
func (h *Heap) ListKeys() []string {
	h.lock.RLock()
	defer h.lock.RUnlock()
	list := make([]string, 0, len(h.data.items))
	for key := range h.data.items {
		list = append(list, key)
	}
	return list
}

// Get returns the requested item, or sets exists=false.
func (h *Heap) Get(obj interface{}) (interface{}, bool, error) {
	key, err := h.data.keyFunc(obj)
	if err != nil {
		return nil, false, KeyError{obj, err}
	}
	return h.GetByKey(key)
}

// GetByKey returns the requested item, or sets exists=false.
func (h *Heap) GetByKey(key string) (interface{}, bool, error) {
	h.lock.RLock()
	defer h.lock.RUnlock()
	item, exists := h.data.items[key]
	if !exists {
		return nil, false, nil
	}
	return item.obj, true, nil
}

// IsClosed returns true if the queue is closed.
func (h *Heap) IsClosed() bool {
	h.lock.RLock()
	defer h.lock.RUnlock()
	if h.closed {
		return true
	}
	return false
}

// NewHeap returns a Heap which can be used to queue up items to process.
func NewHeap(keyFn KeyFunc, lessFn LessFunc) *Heap {
	h := &Heap{
		data: &heapData{
			items:    map[string]*heapItem{},
			queue:    []string{},
			keyFunc:  keyFn,
			lessFunc: lessFn,
		},
	}
	h.cond.L = &h.lock
	return h
}
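// Example (illustrative sketch, not part of the upstream file): a Heap ordered
// by a caller-supplied comparison; "task" and the priority() helper are
// placeholders for the caller's own types:
//
//	h := NewHeap(MetaNamespaceKeyFunc, func(a, b interface{}) bool {
//		return priority(a) < priority(b) // smallest priority pops first
//	})
//	_ = h.Add(task)
//	next, err := h.Pop() // blocks until an item is ready or Close() is called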
87
vendor/k8s.io/client-go/tools/cache/index.go
generated
vendored
Normal file
@ -0,0 +1,87 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/util/sets"
)

// Indexer is a storage interface that lets you list objects using multiple indexing functions
type Indexer interface {
	Store
	// Index retrieves the list of objects that match on the named indexing function
	Index(indexName string, obj interface{}) ([]interface{}, error)
	// IndexKeys returns the set of keys that match on the named indexing function.
	IndexKeys(indexName, indexKey string) ([]string, error)
	// ListIndexFuncValues returns the list of generated values of an Index func
	ListIndexFuncValues(indexName string) []string
	// ByIndex lists objects that match on the named indexing function with the exact key
	ByIndex(indexName, indexKey string) ([]interface{}, error)
	// GetIndexers returns the indexers
	GetIndexers() Indexers

	// AddIndexers adds more indexers to this store. If you call this after you already have data
	// in the store, the results are undefined.
	AddIndexers(newIndexers Indexers) error
}

// IndexFunc knows how to provide an indexed value for an object.
type IndexFunc func(obj interface{}) ([]string, error)

// IndexFuncToKeyFuncAdapter adapts an indexFunc to a keyFunc. This is only useful if your index function returns
// unique values for every object. This conversion can create errors when more than one key is found. You
// should prefer to make proper key and index functions.
func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc {
	return func(obj interface{}) (string, error) {
		indexKeys, err := indexFunc(obj)
		if err != nil {
			return "", err
		}
		if len(indexKeys) > 1 {
			return "", fmt.Errorf("too many keys: %v", indexKeys)
		}
		if len(indexKeys) == 0 {
			return "", fmt.Errorf("unexpected empty indexKeys")
		}
		return indexKeys[0], nil
	}
}
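// Example (illustrative sketch, not part of the upstream file): adapting an
// index function that emits exactly one value per object into a KeyFunc;
// uidIndexFunc below is a hypothetical single-valued IndexFunc:
//
//	keyFunc := IndexFuncToKeyFuncAdapter(uidIndexFunc)
//	key, err := keyFunc(obj) // errors if uidIndexFunc returns zero or many keys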
const (
	NamespaceIndex string = "namespace"
)

// MetaNamespaceIndexFunc is a default index function that indexes based on an object's namespace
func MetaNamespaceIndexFunc(obj interface{}) ([]string, error) {
	meta, err := meta.Accessor(obj)
	if err != nil {
		return []string{""}, fmt.Errorf("object has no meta: %v", err)
	}
	return []string{meta.GetNamespace()}, nil
}

// Index maps the indexed value to a set of keys in the store that match on that value
type Index map[string]sets.String

// Indexers maps a name to an IndexFunc
type Indexers map[string]IndexFunc

// Indices maps a name to an Index
type Indices map[string]Index
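// Example (illustrative sketch, not part of the upstream file): registering
// the namespace index on a cache and listing everything in one namespace;
// "pod" is a placeholder object with ObjectMeta:
//
//	indexer := NewIndexer(MetaNamespaceKeyFunc, Indexers{NamespaceIndex: MetaNamespaceIndexFunc})
//	_ = indexer.Add(pod)
//	objs, err := indexer.ByIndex(NamespaceIndex, "kube-system")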
180
vendor/k8s.io/client-go/tools/cache/listers.go
generated
vendored
Normal file
@ -0,0 +1,180 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"k8s.io/klog"

	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// AppendFunc is used to add a matching item to whatever list the caller is using
type AppendFunc func(interface{})

// ListAll calls appendFn with each object in the store that matches the given selector.
func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {
	selectAll := selector.Empty()
	for _, m := range store.List() {
		if selectAll {
			// Avoid computing labels of the objects to speed up common flows
			// of listing all objects.
			appendFn(m)
			continue
		}
		metadata, err := meta.Accessor(m)
		if err != nil {
			return err
		}
		if selector.Matches(labels.Set(metadata.GetLabels())) {
			appendFn(m)
		}
	}
	return nil
}

// ListAllByNamespace calls appendFn with each object in the given namespace that
// matches the selector, using the namespace index when one is available.
func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error {
	selectAll := selector.Empty()
	if namespace == metav1.NamespaceAll {
		for _, m := range indexer.List() {
			if selectAll {
				// Avoid computing labels of the objects to speed up common flows
				// of listing all objects.
				appendFn(m)
				continue
			}
			metadata, err := meta.Accessor(m)
			if err != nil {
				return err
			}
			if selector.Matches(labels.Set(metadata.GetLabels())) {
				appendFn(m)
			}
		}
		return nil
	}

	items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace})
	if err != nil {
		// Ignore error; do slow search without index.
		klog.Warningf("can not retrieve list of objects using index : %v", err)
		for _, m := range indexer.List() {
			metadata, err := meta.Accessor(m)
			if err != nil {
				return err
			}
			if metadata.GetNamespace() == namespace && selector.Matches(labels.Set(metadata.GetLabels())) {
				appendFn(m)
			}
		}
		return nil
	}
	for _, m := range items {
		if selectAll {
			// Avoid computing labels of the objects to speed up common flows
			// of listing all objects.
			appendFn(m)
			continue
		}
		metadata, err := meta.Accessor(m)
		if err != nil {
			return err
		}
		if selector.Matches(labels.Set(metadata.GetLabels())) {
			appendFn(m)
		}
	}

	return nil
}

// GenericLister is a lister skin on a generic Indexer
type GenericLister interface {
	// List will return all objects across namespaces
	List(selector labels.Selector) (ret []runtime.Object, err error)
	// Get will attempt to retrieve assuming that name==key
	Get(name string) (runtime.Object, error)
	// ByNamespace will give you a GenericNamespaceLister for one namespace
	ByNamespace(namespace string) GenericNamespaceLister
}

// GenericNamespaceLister is a lister skin on a generic Indexer
type GenericNamespaceLister interface {
	// List will return all objects in this namespace
	List(selector labels.Selector) (ret []runtime.Object, err error)
	// Get will attempt to retrieve by namespace and name
	Get(name string) (runtime.Object, error)
}

// NewGenericLister creates a GenericLister backed by the given indexer and resource.
func NewGenericLister(indexer Indexer, resource schema.GroupResource) GenericLister {
	return &genericLister{indexer: indexer, resource: resource}
}

type genericLister struct {
	indexer  Indexer
	resource schema.GroupResource
}

func (s *genericLister) List(selector labels.Selector) (ret []runtime.Object, err error) {
	err = ListAll(s.indexer, selector, func(m interface{}) {
		ret = append(ret, m.(runtime.Object))
	})
	return ret, err
}

func (s *genericLister) ByNamespace(namespace string) GenericNamespaceLister {
	return &genericNamespaceLister{indexer: s.indexer, namespace: namespace, resource: s.resource}
}

func (s *genericLister) Get(name string) (runtime.Object, error) {
	obj, exists, err := s.indexer.GetByKey(name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(s.resource, name)
	}
	return obj.(runtime.Object), nil
}

type genericNamespaceLister struct {
	indexer   Indexer
	namespace string
	resource  schema.GroupResource
}

func (s *genericNamespaceLister) List(selector labels.Selector) (ret []runtime.Object, err error) {
	err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
		ret = append(ret, m.(runtime.Object))
	})
	return ret, err
}

func (s *genericNamespaceLister) Get(name string) (runtime.Object, error) {
	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(s.resource, name)
	}
	return obj.(runtime.Object), nil
}
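// Example (illustrative sketch, not part of the upstream file): wrapping a
// namespace-indexed cache in a GenericLister; the GroupResource shown is just
// an example value for the "not found" errors:
//
//	lister := NewGenericLister(indexer, schema.GroupResource{Group: "", Resource: "pods"})
//	pods, err := lister.ByNamespace("default").List(labels.Everything())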
114
vendor/k8s.io/client-go/tools/cache/listwatch.go
generated
vendored
Normal file
@ -0,0 +1,114 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/pager"
)

// Lister is any object that knows how to perform an initial list.
type Lister interface {
	// List should return a list type object; the Items field will be extracted, and the
	// ResourceVersion field will be used to start the watch in the right place.
	List(options metav1.ListOptions) (runtime.Object, error)
}

// Watcher is any object that knows how to start a watch on a resource.
type Watcher interface {
	// Watch should begin a watch at the specified version.
	Watch(options metav1.ListOptions) (watch.Interface, error)
}

// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource.
type ListerWatcher interface {
	Lister
	Watcher
}

// ListFunc knows how to list resources
type ListFunc func(options metav1.ListOptions) (runtime.Object, error)

// WatchFunc knows how to watch resources
type WatchFunc func(options metav1.ListOptions) (watch.Interface, error)

// ListWatch knows how to list and watch a set of apiserver resources. It satisfies the ListerWatcher interface.
// It is a convenience struct for users of NewReflector, etc.
// ListFunc and WatchFunc must not be nil.
type ListWatch struct {
	ListFunc  ListFunc
	WatchFunc WatchFunc
	// DisableChunking requests no chunking for this list watcher.
	DisableChunking bool
}

// Getter interface knows how to access Get method from RESTClient.
type Getter interface {
	Get() *restclient.Request
}

// NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace and field selector.
func NewListWatchFromClient(c Getter, resource string, namespace string, fieldSelector fields.Selector) *ListWatch {
	optionsModifier := func(options *metav1.ListOptions) {
		options.FieldSelector = fieldSelector.String()
	}
	return NewFilteredListWatchFromClient(c, resource, namespace, optionsModifier)
}

// NewFilteredListWatchFromClient creates a new ListWatch from the specified client, resource, namespace, and option modifier.
// The option modifier is a function that takes a ListOptions and modifies the consumed ListOptions. Provide a customized
// modifier function to apply modifications to ListOptions with a field selector, a label selector, or any other desired options.
func NewFilteredListWatchFromClient(c Getter, resource string, namespace string, optionsModifier func(options *metav1.ListOptions)) *ListWatch {
	listFunc := func(options metav1.ListOptions) (runtime.Object, error) {
		optionsModifier(&options)
		return c.Get().
			Namespace(namespace).
			Resource(resource).
			VersionedParams(&options, metav1.ParameterCodec).
			Do().
			Get()
	}
	watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
		options.Watch = true
		optionsModifier(&options)
		return c.Get().
			Namespace(namespace).
			Resource(resource).
			VersionedParams(&options, metav1.ParameterCodec).
			Watch()
	}
	return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
}

// List a set of apiserver resources
func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) {
	if !lw.DisableChunking {
		return pager.New(pager.SimplePageFunc(lw.ListFunc)).List(context.TODO(), options)
	}
	return lw.ListFunc(options)
}

// Watch a set of apiserver resources
func (lw *ListWatch) Watch(options metav1.ListOptions) (watch.Interface, error) {
	return lw.WatchFunc(options)
}
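// Example (illustrative sketch, not part of the upstream file): building a
// ListWatch for pods in one namespace; "clientset" is assumed to be a typed
// kubernetes.Clientset whose CoreV1().RESTClient() satisfies Getter:
//
//	lw := NewListWatchFromClient(clientset.CoreV1().RESTClient(), "pods", "default", fields.Everything())
//	list, err := lw.List(metav1.ListOptions{}) // initial list, chunked via the pager
//	w, werr := lw.Watch(metav1.ListOptions{ResourceVersion: "0"})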
261
vendor/k8s.io/client-go/tools/cache/mutation_cache.go
generated
vendored
Normal file
@ -0,0 +1,261 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"fmt"
	"strconv"
	"sync"
	"time"

	"k8s.io/klog"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	utilcache "k8s.io/apimachinery/pkg/util/cache"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
)

// MutationCache is able to take the result of update operations and store them in an LRU
// that can be used to provide a more current view of a requested object. It requires interpreting
// resourceVersions for comparisons.
// Implementations must be thread-safe.
// TODO find a way to layer this into an informer/lister
type MutationCache interface {
	GetByKey(key string) (interface{}, bool, error)
	ByIndex(indexName, indexKey string) ([]interface{}, error)
	Mutation(interface{})
}

// ResourceVersionComparator is able to compare the resource versions of two objects.
type ResourceVersionComparator interface {
	CompareResourceVersion(lhs, rhs runtime.Object) int
}

// NewIntegerResourceVersionMutationCache returns a MutationCache that understands how to
// deal with objects that have a resource version that:
//
//   - is an integer
//   - increases when updated
//   - is comparable across the same resource in a namespace
//
// Most backends will have these semantics. Indexer may be nil. ttl controls how long an item
// remains in the mutation cache before it is removed.
//
// If includeAdds is true, objects in the mutation cache will be returned even if they don't exist
// in the underlying store. This is only safe if your use of the cache can handle mutation entries
// remaining in the cache for up to ttl when mutations and deletes occur very closely in time.
func NewIntegerResourceVersionMutationCache(backingCache Store, indexer Indexer, ttl time.Duration, includeAdds bool) MutationCache {
	return &mutationCache{
		backingCache:  backingCache,
		indexer:       indexer,
		mutationCache: utilcache.NewLRUExpireCache(100),
		comparator:    etcdObjectVersioner{},
		ttl:           ttl,
		includeAdds:   includeAdds,
	}
}

// mutationCache doesn't guarantee that it returns values added via Mutation since they can page out and
// since you can't distinguish between "didn't observe create" and "was deleted after create";
// if the key is missing from the backing cache, we always return it as missing.
type mutationCache struct {
	lock          sync.Mutex
	backingCache  Store
	indexer       Indexer
	mutationCache *utilcache.LRUExpireCache
	includeAdds   bool
	ttl           time.Duration

	comparator ResourceVersionComparator
}

// GetByKey is never guaranteed to return back the value set in Mutation. It could be paged out, it could
// be older than another copy, the backingCache may be more recent or, you might have written twice into the same key.
// You get a value that was valid at some snapshot of time and will always return the newer of backingCache and mutationCache.
func (c *mutationCache) GetByKey(key string) (interface{}, bool, error) {
	c.lock.Lock()
	defer c.lock.Unlock()

	obj, exists, err := c.backingCache.GetByKey(key)
	if err != nil {
		return nil, false, err
	}
	if !exists {
		if !c.includeAdds {
			// we can't distinguish between "didn't observe create" and "was deleted after create", so
			// if the key is missing, we always return it as missing
			return nil, false, nil
		}
		obj, exists = c.mutationCache.Get(key)
		if !exists {
			return nil, false, nil
		}
	}
	objRuntime, ok := obj.(runtime.Object)
	if !ok {
		return obj, true, nil
	}
	return c.newerObject(key, objRuntime), true, nil
}

// ByIndex returns the newer objects that match the provided index and indexer key.
// Will return an error if no indexer was provided.
func (c *mutationCache) ByIndex(name string, indexKey string) ([]interface{}, error) {
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.indexer == nil {
		return nil, fmt.Errorf("no indexer has been provided to the mutation cache")
	}
	keys, err := c.indexer.IndexKeys(name, indexKey)
	if err != nil {
		return nil, err
	}
	var items []interface{}
	keySet := sets.NewString()
	for _, key := range keys {
		keySet.Insert(key)
		obj, exists, err := c.indexer.GetByKey(key)
		if err != nil {
			return nil, err
		}
		if !exists {
			continue
		}
		if objRuntime, ok := obj.(runtime.Object); ok {
			items = append(items, c.newerObject(key, objRuntime))
		} else {
			items = append(items, obj)
		}
	}

	if c.includeAdds {
		fn := c.indexer.GetIndexers()[name]
		// Keys() is returned oldest to newest, so full traversal does not alter the LRU behavior
		for _, key := range c.mutationCache.Keys() {
			updated, ok := c.mutationCache.Get(key)
			if !ok {
				continue
			}
			if keySet.Has(key.(string)) {
				continue
			}
			elements, err := fn(updated)
			if err != nil {
				klog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err)
				continue
			}
			for _, inIndex := range elements {
				if inIndex != indexKey {
					continue
				}
				items = append(items, updated)
				break
			}
		}
	}

	return items, nil
}

// newerObject checks the mutation cache for a newer object and returns one if found. If the
// mutated object is older than the backing object, it is removed from the mutation cache.
// Must be called while the lock is held.
func (c *mutationCache) newerObject(key string, backing runtime.Object) runtime.Object {
	mutatedObj, exists := c.mutationCache.Get(key)
	if !exists {
		return backing
	}
	mutatedObjRuntime, ok := mutatedObj.(runtime.Object)
	if !ok {
		return backing
	}
	if c.comparator.CompareResourceVersion(backing, mutatedObjRuntime) >= 0 {
		c.mutationCache.Remove(key)
		return backing
	}
	return mutatedObjRuntime
}

// Mutation adds a change to the cache that can be returned in GetByKey if it is newer than the backingCache
// copy. If you call Mutation twice with the same object on different threads, one will win, but it's not defined
// which one. This doesn't affect correctness, since the GetByKey guarantee of "later of these two caches" is
// preserved, but you may not get the version of the object you want. The object you get is only guaranteed to be
// "one that was valid at some point in time", not "the one that I want".
func (c *mutationCache) Mutation(obj interface{}) {
	c.lock.Lock()
	defer c.lock.Unlock()

	key, err := DeletionHandlingMetaNamespaceKeyFunc(obj)
	if err != nil {
		// this is a "nice to have", so failures shouldn't do anything weird
		utilruntime.HandleError(err)
		return
	}

	if objRuntime, ok := obj.(runtime.Object); ok {
		if mutatedObj, exists := c.mutationCache.Get(key); exists {
			if mutatedObjRuntime, ok := mutatedObj.(runtime.Object); ok {
				if c.comparator.CompareResourceVersion(objRuntime, mutatedObjRuntime) < 0 {
					return
				}
			}
		}
	}
	c.mutationCache.Add(key, obj, c.ttl)
}

// etcdObjectVersioner implements versioning and extracting etcd node information
// for objects that have an embedded ObjectMeta or ListMeta field.
type etcdObjectVersioner struct{}

// ObjectResourceVersion implements Versioner
func (a etcdObjectVersioner) ObjectResourceVersion(obj runtime.Object) (uint64, error) {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return 0, err
	}
	version := accessor.GetResourceVersion()
	if len(version) == 0 {
		return 0, nil
	}
	return strconv.ParseUint(version, 10, 64)
}

// CompareResourceVersion compares etcd resource versions. Outside this API they are all strings,
// but etcd resource versions are special: they're actually ints, so we can easily compare them.
func (a etcdObjectVersioner) CompareResourceVersion(lhs, rhs runtime.Object) int {
	lhsVersion, err := a.ObjectResourceVersion(lhs)
	if err != nil {
		// coder error
		panic(err)
	}
	rhsVersion, err := a.ObjectResourceVersion(rhs)
	if err != nil {
		// coder error
		panic(err)
	}

	if lhsVersion == rhsVersion {
		return 0
	}
	if lhsVersion < rhsVersion {
		return -1
	}

	return 1
}
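// Example (illustrative sketch, not part of the upstream file): layering a
// mutation cache over an informer's store so a controller can see its own
// just-written update before the watch delivers it; the Update call shown is a
// hypothetical typed-client write, and its exact signature varies by version:
//
//	mc := NewIntegerResourceVersionMutationCache(store, indexer, time.Minute, false)
//	updated, err := client.CoreV1().Services(ns).Update(svc) // hypothetical update call
//	if err == nil {
//		mc.Mutation(updated) // remembered until the backing store catches up
//	}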
130
vendor/k8s.io/client-go/tools/cache/mutation_detector.go
generated
vendored
Normal file
@ -0,0 +1,130 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"fmt"
	"os"
	"reflect"
	"strconv"
	"sync"
	"time"

	"k8s.io/klog"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/diff"
)

var mutationDetectionEnabled = false

func init() {
	mutationDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_CACHE_MUTATION_DETECTOR"))
}

// CacheMutationDetector detects mutation of objects that were added to a cache.
type CacheMutationDetector interface {
	AddObject(obj interface{})
	Run(stopCh <-chan struct{})
}

// NewCacheMutationDetector returns a CacheMutationDetector; it is a no-op detector unless
// detection was enabled via the KUBE_CACHE_MUTATION_DETECTOR environment variable.
func NewCacheMutationDetector(name string) CacheMutationDetector {
	if !mutationDetectionEnabled {
		return dummyMutationDetector{}
	}
	klog.Warningln("Mutation detector is enabled, this will result in memory leakage.")
	return &defaultCacheMutationDetector{name: name, period: 1 * time.Second}
}

type dummyMutationDetector struct{}

func (dummyMutationDetector) Run(stopCh <-chan struct{}) {
}
func (dummyMutationDetector) AddObject(obj interface{}) {
}

// defaultCacheMutationDetector gives a way to detect if a cached object has been mutated.
// It has a list of cached objects and their copies. I haven't thought of a way
// to see WHO is mutating it, just that it's getting mutated.
type defaultCacheMutationDetector struct {
	name   string
	period time.Duration

	lock       sync.Mutex
	cachedObjs []cacheObj

	// failureFunc is injectable for unit testing. If you don't have it, the process will panic.
	// This panic is intentional, since turning on this detection indicates you want a strong
	// failure signal. This failure is effectively a p0 bug and you can't trust process results
	// after a mutation anyway.
	failureFunc func(message string)
}

// cacheObj holds the actual object and a copy
type cacheObj struct {
	cached interface{}
	copied interface{}
}

func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) {
	// we DON'T want protection from panics. If we're running this code, we want to die
	for {
		d.CompareObjects()

		select {
		case <-stopCh:
			return
		case <-time.After(d.period):
		}
	}
}

// AddObject makes a deep copy of the object for later comparison. It only works on runtime.Object
// but that covers the vast majority of our cached objects
func (d *defaultCacheMutationDetector) AddObject(obj interface{}) {
	if _, ok := obj.(DeletedFinalStateUnknown); ok {
		return
	}
	if obj, ok := obj.(runtime.Object); ok {
		copiedObj := obj.DeepCopyObject()

		d.lock.Lock()
		defer d.lock.Unlock()
		d.cachedObjs = append(d.cachedObjs, cacheObj{cached: obj, copied: copiedObj})
	}
}

// CompareObjects checks every cached object against its stored copy and fails
// (via failureFunc, or a panic) if any of them has been mutated.
func (d *defaultCacheMutationDetector) CompareObjects() {
	d.lock.Lock()
	defer d.lock.Unlock()

	altered := false
	for i, obj := range d.cachedObjs {
		if !reflect.DeepEqual(obj.cached, obj.copied) {
			fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectDiff(obj.cached, obj.copied))
			altered = true
		}
	}

	if altered {
		msg := fmt.Sprintf("cache %s modified", d.name)
		if d.failureFunc != nil {
			d.failureFunc(msg)
			return
		}
		panic(msg)
	}
}
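// Example (illustrative sketch, not part of the upstream file): the detector
// is wired in by informers via NewCacheMutationDetector and stays a no-op
// unless the environment variable is set before the process starts, e.g.
//
//	KUBE_CACHE_MUTATION_DETECTOR=true ./my-controller
//
// With it enabled, code that mutates an object obtained from a cache will
// eventually trip CompareObjects and panic with "cache ... modified".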
386
vendor/k8s.io/client-go/tools/cache/reflector.go
generated
vendored
Normal file
@ -0,0 +1,386 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
	"errors"
	"fmt"
	"io"
	"math/rand"
	"net"
	"net/url"
	"reflect"
	"strings"
	"sync"
	"syscall"
	"time"

	apierrs "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/clock"
	"k8s.io/apimachinery/pkg/util/naming"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/klog"
	"k8s.io/utils/trace"
)

// Reflector watches a specified resource and causes all changes to be reflected in the given store.
type Reflector struct {
	// name identifies this reflector. By default it will be a file:line if possible.
	name string
	// metrics tracks basic metric information about the reflector
	metrics *reflectorMetrics

	// The type of object we expect to place in the store.
	expectedType reflect.Type
	// The destination to sync up with the watch source
	store Store
	// listerWatcher is used to perform lists and watches.
	listerWatcher ListerWatcher
	// period controls timing between one watch ending and
	// the beginning of the next one.
	period       time.Duration
	resyncPeriod time.Duration
	ShouldResync func() bool
	// clock allows tests to manipulate time
	clock clock.Clock
	// lastSyncResourceVersion is the resource version token last
	// observed when doing a sync with the underlying store;
	// it is thread safe, but not synchronized with the underlying store
	lastSyncResourceVersion string
	// lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion
	lastSyncResourceVersionMutex sync.RWMutex
}

var (
	// We try to spread the load on apiserver by setting timeouts for
	// watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout].
	minWatchTimeout = 5 * time.Minute
)

// NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector.
// The indexer is configured to key on namespace.
func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) {
	indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{"namespace": MetaNamespaceIndexFunc})
	reflector = NewReflector(lw, expectedType, indexer, resyncPeriod)
	return indexer, reflector
}

// NewReflector creates a new Reflector object which will keep the given store up to
// date with the server's contents for the given resource. Reflector promises to
// only put things in the store that have the type of expectedType, unless expectedType
// is nil. If resyncPeriod is non-zero, then lists will be executed after every
// resyncPeriod, so that you can use reflectors to periodically process everything as
// well as incrementally processing the things that change.
func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
	return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod)
}

// NewNamedReflector is the same as NewReflector, but with a specified name for logging.
func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
	r := &Reflector{
		name:          name,
		listerWatcher: lw,
		store:         store,
		expectedType:  reflect.TypeOf(expectedType),
		period:        time.Second,
		resyncPeriod:  resyncPeriod,
		clock:         &clock.RealClock{},
	}
	return r
}
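// Example (illustrative sketch, not part of the upstream file): a reflector
// that mirrors pods into a plain Store until stopCh is closed; "lw" is assumed
// to be a ListerWatcher such as one built with NewListWatchFromClient, and
// v1.Pod refers to k8s.io/api/core/v1 (not imported by this file):
//
//	store := NewStore(MetaNamespaceKeyFunc)
//	r := NewReflector(lw, &v1.Pod{}, store, 30*time.Second)
//	go r.Run(stopCh)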
||||
|
||||

func makeValidPrometheusMetricLabel(in string) string {
    // this isn't perfect, but it removes our common characters
    return strings.NewReplacer("/", "_", ".", "_", "-", "_", ":", "_").Replace(in)
}

// internalPackages are packages that are ignored when creating a default reflector name.
// These packages are in the common call chains to NewReflector, so they'd be low entropy
// names for reflectors.
var internalPackages = []string{"client-go/tools/cache/"}

// Run starts a watch and handles watch events. It will restart the watch if it is closed.
// Run will exit when stopCh is closed.
func (r *Reflector) Run(stopCh <-chan struct{}) {
    klog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name)
    wait.Until(func() {
        if err := r.ListAndWatch(stopCh); err != nil {
            utilruntime.HandleError(err)
        }
    }, r.period, stopCh)
}

var (
    // nothing will ever be sent down this channel
    neverExitWatch <-chan time.Time = make(chan time.Time)

    // Used to indicate that watching stopped so that a resync could happen.
    errorResyncRequested = errors.New("resync channel fired")

    // Used to indicate that watching stopped because of a signal from the stop
    // channel passed in from a client of the reflector.
    errorStopRequested = errors.New("Stop requested")
)

// resyncChan returns a channel which will receive something when a resync is
// required, and a cleanup function.
func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) {
    if r.resyncPeriod == 0 {
        return neverExitWatch, func() bool { return false }
    }
    // The cleanup function is required: imagine the scenario where watches
    // always fail so we end up listing frequently. Then, if we don't
    // manually stop the timer, we could end up with many timers active
    // concurrently.
    t := r.clock.NewTimer(r.resyncPeriod)
    return t.C(), t.Stop
}

// ListAndWatch first lists all items and gets the resource version at the moment of the call,
// and then uses that resource version to watch.
// It returns an error if ListAndWatch didn't even try to initialize the watch.
func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
    klog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name)
    var resourceVersion string

    // Explicitly set "0" as resource version - it's fine for the List()
    // to be served from cache and potentially be delayed relative to
    // etcd contents. Reflector framework will catch up via Watch() eventually.
    options := metav1.ListOptions{ResourceVersion: "0"}

    if err := func() error {
        initTrace := trace.New("Reflector " + r.name + " ListAndWatch")
        defer initTrace.LogIfLong(10 * time.Second)
        var list runtime.Object
        var err error
        listCh := make(chan struct{}, 1)
        panicCh := make(chan interface{}, 1)
        go func() {
            defer func() {
                if r := recover(); r != nil {
                    panicCh <- r
                }
            }()
            list, err = r.listerWatcher.List(options)
            close(listCh)
        }()
        select {
        case <-stopCh:
            return nil
        case r := <-panicCh:
            panic(r)
        case <-listCh:
        }
        if err != nil {
            return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err)
        }
        initTrace.Step("Objects listed")
        listMetaInterface, err := meta.ListAccessor(list)
        if err != nil {
            return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err)
        }
        resourceVersion = listMetaInterface.GetResourceVersion()
        initTrace.Step("Resource version extracted")
        items, err := meta.ExtractList(list)
        if err != nil {
            return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err)
        }
        initTrace.Step("Objects extracted")
        if err := r.syncWith(items, resourceVersion); err != nil {
            return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err)
        }
        initTrace.Step("SyncWith done")
        r.setLastSyncResourceVersion(resourceVersion)
        initTrace.Step("Resource version updated")
        return nil
    }(); err != nil {
        return err
    }

    resyncerrc := make(chan error, 1)
    cancelCh := make(chan struct{})
    defer close(cancelCh)
    go func() {
        resyncCh, cleanup := r.resyncChan()
        defer func() {
            cleanup() // Call the last one written into cleanup
        }()
        for {
            select {
            case <-resyncCh:
            case <-stopCh:
                return
            case <-cancelCh:
                return
            }
            if r.ShouldResync == nil || r.ShouldResync() {
                klog.V(4).Infof("%s: forcing resync", r.name)
                if err := r.store.Resync(); err != nil {
                    resyncerrc <- err
                    return
                }
            }
            cleanup()
            resyncCh, cleanup = r.resyncChan()
        }
    }()

    for {
        // give the stopCh a chance to stop the loop, even in case of continue statements further down on errors
        select {
        case <-stopCh:
            return nil
        default:
        }

        timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
        options = metav1.ListOptions{
            ResourceVersion: resourceVersion,
            // We want to avoid situations of hanging watchers. Stop any watchers that do not
            // receive any events within the timeout window.
            TimeoutSeconds: &timeoutSeconds,
        }

        w, err := r.listerWatcher.Watch(options)
        if err != nil {
            switch err {
            case io.EOF:
                // watch closed normally
            case io.ErrUnexpectedEOF:
                klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err)
            default:
                utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedType, err))
            }
            // If this is a "connection refused" error, it most likely means that the apiserver
            // is not responsive. It doesn't make sense to re-list all objects because most likely
            // we will be able to restart the watch where we ended.
            // If that's the case, wait and resend the watch request.
            if urlError, ok := err.(*url.Error); ok {
                if opError, ok := urlError.Err.(*net.OpError); ok {
                    if errno, ok := opError.Err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED {
                        time.Sleep(time.Second)
                        continue
                    }
                }
            }
            return nil
        }

        if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil {
            if err != errorStopRequested {
                klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
            }
            return nil
        }
    }
}

// syncWith replaces the store's items with the given list.
func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) error {
    found := make([]interface{}, 0, len(items))
    for _, item := range items {
        found = append(found, item)
    }
    return r.store.Replace(found, resourceVersion)
}

// watchHandler watches w and keeps *resourceVersion up to date.
func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error {
    start := r.clock.Now()
    eventCount := 0

    // Stopping the watcher should be idempotent and if we return from this function there's no way
    // we're coming back in with the same watch interface.
    defer w.Stop()

loop:
    for {
        select {
        case <-stopCh:
            return errorStopRequested
        case err := <-errc:
            return err
        case event, ok := <-w.ResultChan():
            if !ok {
                break loop
            }
            if event.Type == watch.Error {
                return apierrs.FromObject(event.Object)
            }
            if e, a := r.expectedType, reflect.TypeOf(event.Object); e != nil && e != a {
                utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a))
                continue
            }
            meta, err := meta.Accessor(event.Object)
            if err != nil {
                utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
                continue
            }
            newResourceVersion := meta.GetResourceVersion()
            switch event.Type {
            case watch.Added:
                err := r.store.Add(event.Object)
                if err != nil {
                    utilruntime.HandleError(fmt.Errorf("%s: unable to add watch event object (%#v) to store: %v", r.name, event.Object, err))
                }
            case watch.Modified:
                err := r.store.Update(event.Object)
                if err != nil {
                    utilruntime.HandleError(fmt.Errorf("%s: unable to update watch event object (%#v) in store: %v", r.name, event.Object, err))
                }
            case watch.Deleted:
                // TODO: Will any consumers need access to the "last known
                // state", which is passed in event.Object? If so, may need
                // to change this.
                err := r.store.Delete(event.Object)
                if err != nil {
                    utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err))
                }
            default:
                utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
            }
            *resourceVersion = newResourceVersion
            r.setLastSyncResourceVersion(newResourceVersion)
            eventCount++
        }
    }

    watchDuration := r.clock.Now().Sub(start)
    if watchDuration < 1*time.Second && eventCount == 0 {
        return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
    }
    klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount)
    return nil
}

// LastSyncResourceVersion is the resource version observed when last synced with the underlying store.
// The value returned is not synchronized with access to the underlying store and is not thread-safe.
func (r *Reflector) LastSyncResourceVersion() string {
    r.lastSyncResourceVersionMutex.RLock()
    defer r.lastSyncResourceVersionMutex.RUnlock()
    return r.lastSyncResourceVersion
}

func (r *Reflector) setLastSyncResourceVersion(v string) {
    r.lastSyncResourceVersionMutex.Lock()
    defer r.lastSyncResourceVersionMutex.Unlock()
    r.lastSyncResourceVersion = v
}

119
vendor/k8s.io/client-go/tools/cache/reflector_metrics.go
generated
vendored
Normal file
@ -0,0 +1,119 @@

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file provides abstractions for setting the provider (e.g., prometheus)
// of metrics.

package cache

import (
    "sync"
)

// GaugeMetric represents a single numerical value that can arbitrarily go up
// and down.
type GaugeMetric interface {
    Set(float64)
}

// CounterMetric represents a single numerical value that only ever
// goes up.
type CounterMetric interface {
    Inc()
}

// SummaryMetric captures individual observations.
type SummaryMetric interface {
    Observe(float64)
}

type noopMetric struct{}

func (noopMetric) Inc()            {}
func (noopMetric) Dec()            {}
func (noopMetric) Observe(float64) {}
func (noopMetric) Set(float64)     {}

type reflectorMetrics struct {
    numberOfLists       CounterMetric
    listDuration        SummaryMetric
    numberOfItemsInList SummaryMetric

    numberOfWatches      CounterMetric
    numberOfShortWatches CounterMetric
    watchDuration        SummaryMetric
    numberOfItemsInWatch SummaryMetric

    lastResourceVersion GaugeMetric
}

// MetricsProvider generates various metrics used by the reflector.
type MetricsProvider interface {
    NewListsMetric(name string) CounterMetric
    NewListDurationMetric(name string) SummaryMetric
    NewItemsInListMetric(name string) SummaryMetric

    NewWatchesMetric(name string) CounterMetric
    NewShortWatchesMetric(name string) CounterMetric
    NewWatchDurationMetric(name string) SummaryMetric
    NewItemsInWatchMetric(name string) SummaryMetric

    NewLastResourceVersionMetric(name string) GaugeMetric
}

type noopMetricsProvider struct{}

func (noopMetricsProvider) NewListsMetric(name string) CounterMetric         { return noopMetric{} }
func (noopMetricsProvider) NewListDurationMetric(name string) SummaryMetric  { return noopMetric{} }
func (noopMetricsProvider) NewItemsInListMetric(name string) SummaryMetric   { return noopMetric{} }
func (noopMetricsProvider) NewWatchesMetric(name string) CounterMetric       { return noopMetric{} }
func (noopMetricsProvider) NewShortWatchesMetric(name string) CounterMetric  { return noopMetric{} }
func (noopMetricsProvider) NewWatchDurationMetric(name string) SummaryMetric { return noopMetric{} }
func (noopMetricsProvider) NewItemsInWatchMetric(name string) SummaryMetric  { return noopMetric{} }
func (noopMetricsProvider) NewLastResourceVersionMetric(name string) GaugeMetric {
    return noopMetric{}
}

var metricsFactory = struct {
    metricsProvider MetricsProvider
    setProviders    sync.Once
}{
    metricsProvider: noopMetricsProvider{},
}

func newReflectorMetrics(name string) *reflectorMetrics {
    var ret *reflectorMetrics
    if len(name) == 0 {
        return ret
    }
    return &reflectorMetrics{
        numberOfLists:        metricsFactory.metricsProvider.NewListsMetric(name),
        listDuration:         metricsFactory.metricsProvider.NewListDurationMetric(name),
        numberOfItemsInList:  metricsFactory.metricsProvider.NewItemsInListMetric(name),
        numberOfWatches:      metricsFactory.metricsProvider.NewWatchesMetric(name),
        numberOfShortWatches: metricsFactory.metricsProvider.NewShortWatchesMetric(name),
        watchDuration:        metricsFactory.metricsProvider.NewWatchDurationMetric(name),
        numberOfItemsInWatch: metricsFactory.metricsProvider.NewItemsInWatchMetric(name),
        lastResourceVersion:  metricsFactory.metricsProvider.NewLastResourceVersionMetric(name),
    }
}

// SetReflectorMetricsProvider sets the metrics provider. It may be called at
// most once; subsequent calls are no-ops.
func SetReflectorMetricsProvider(metricsProvider MetricsProvider) {
    metricsFactory.setProviders.Do(func() {
        metricsFactory.metricsProvider = metricsProvider
    })
}
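
// A hedged sketch (not part of the upstream file) of plugging in a custom
// provider. countingProvider is hypothetical: it backs the list/watch counters
// with process-local atomic counters (assumes "sync/atomic" is imported) and
// embeds noopMetricsProvider for everything else; a real provider would
// usually hand back prometheus collectors instead.
//
//    type atomicCounter struct{ n int64 }
//
//    func (c *atomicCounter) Inc() { atomic.AddInt64(&c.n, 1) }
//
//    type countingProvider struct{ noopMetricsProvider }
//
//    func (countingProvider) NewListsMetric(name string) CounterMetric   { return &atomicCounter{} }
//    func (countingProvider) NewWatchesMetric(name string) CounterMetric { return &atomicCounter{} }
//
//    func init() {
//        SetReflectorMetricsProvider(countingProvider{}) // must run before reflectors are built
//    }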

597
vendor/k8s.io/client-go/tools/cache/shared_informer.go
generated
vendored
Normal file
@ -0,0 +1,597 @@

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
    "fmt"
    "sync"
    "time"

    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/util/clock"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/util/retry"
    "k8s.io/utils/buffer"

    "k8s.io/klog"
)

// SharedInformer has a shared data cache and is capable of distributing notifications for changes
// to the cache to multiple listeners who registered via AddEventHandler. If you use this, there is
// one behavior change compared to a standard Informer. When you receive a notification, the cache
// will be AT LEAST as fresh as the notification, but it MAY be more fresh. You should NOT depend
// on the contents of the cache exactly matching the notification you've received in handler
// functions. If there was a create, followed by a delete, the cache may NOT have your item. This
// has advantages over the broadcaster since it allows us to share a common cache across many
// controllers. Extending the broadcaster would have required us to keep duplicate caches for each
// watch.
type SharedInformer interface {
    // AddEventHandler adds an event handler to the shared informer using the shared informer's resync
    // period. Events to a single handler are delivered sequentially, but there is no coordination
    // between different handlers.
    AddEventHandler(handler ResourceEventHandler)
    // AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the
    // specified resync period. Events to a single handler are delivered sequentially, but there is
    // no coordination between different handlers.
    AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration)
    // GetStore returns the Store.
    GetStore() Store
    // GetController gives back a synthetic interface that "votes" to start the informer.
    GetController() Controller
    // Run starts the shared informer, which will be stopped when stopCh is closed.
    Run(stopCh <-chan struct{})
    // HasSynced returns true if the shared informer's store has synced.
    HasSynced() bool
    // LastSyncResourceVersion is the resource version observed when last synced with the underlying
    // store. The value returned is not synchronized with access to the underlying store and is not
    // thread-safe.
    LastSyncResourceVersion() string
}

type SharedIndexInformer interface {
    SharedInformer
    // AddIndexers adds indexers to the informer before it starts.
    AddIndexers(indexers Indexers) error
    GetIndexer() Indexer
}

// NewSharedInformer creates a new instance for the given ListerWatcher.
func NewSharedInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer {
    return NewSharedIndexInformer(lw, objType, resyncPeriod, Indexers{})
}

// NewSharedIndexInformer creates a new instance for the given ListerWatcher.
func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
    realClock := &clock.RealClock{}
    sharedIndexInformer := &sharedIndexInformer{
        processor:                       &sharedProcessor{clock: realClock},
        indexer:                         NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
        listerWatcher:                   lw,
        objectType:                      objType,
        resyncCheckPeriod:               defaultEventHandlerResyncPeriod,
        defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod,
        cacheMutationDetector:           NewCacheMutationDetector(fmt.Sprintf("%T", objType)),
        clock:                           realClock,
    }
    return sharedIndexInformer
}
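
// A usage sketch (added for illustration; not part of the upstream file):
// a shared index informer for Pods with a namespace index and a logging
// event handler. `podListWatcher` is a hypothetical ListerWatcher and
// &v1.Pod{} assumes k8s.io/api/core/v1 is imported as v1.
//
//    informer := NewSharedIndexInformer(podListWatcher, &v1.Pod{}, 30*time.Second,
//        Indexers{"namespace": MetaNamespaceIndexFunc})
//    informer.AddEventHandler(ResourceEventHandlerFuncs{
//        AddFunc:    func(obj interface{}) { klog.Infof("added: %v", obj) },
//        DeleteFunc: func(obj interface{}) { klog.Infof("deleted: %v", obj) },
//    })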

// InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced.
type InformerSynced func() bool

const (
    // syncedPollPeriod controls how often you look at the status of your sync funcs
    syncedPollPeriod = 100 * time.Millisecond

    // initialBufferSize is the initial number of event notifications that can be buffered.
    initialBufferSize = 1024
)

// WaitForCacheSync waits for caches to populate. It returns true if it was successful, false
// if the controller should shut down.
func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool {
    err := wait.PollUntil(syncedPollPeriod,
        func() (bool, error) {
            for _, syncFunc := range cacheSyncs {
                if !syncFunc() {
                    return false, nil
                }
            }
            return true, nil
        },
        stopCh)
    if err != nil {
        klog.V(2).Infof("stop requested")
        return false
    }

    klog.V(4).Infof("caches populated")
    return true
}
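
// A typical start-up sequence (sketch, not from the upstream file), using the
// informer from the sketch above: run it, then block until the cache has
// synced before reading from the store.
//
//    stopCh := make(chan struct{})
//    defer close(stopCh)
//    go informer.Run(stopCh)
//    if !WaitForCacheSync(stopCh, informer.HasSynced) {
//        return // stopCh closed before the initial list completed
//    }
//    // informer.GetStore() is now at least as fresh as the initial list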

type sharedIndexInformer struct {
    indexer    Indexer
    controller Controller

    processor             *sharedProcessor
    cacheMutationDetector CacheMutationDetector

    // This block is tracked to handle late initialization of the controller
    listerWatcher ListerWatcher
    objectType    runtime.Object

    // resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call
    // shouldResync to check if any of our listeners need a resync.
    resyncCheckPeriod time.Duration
    // defaultEventHandlerResyncPeriod is the default resync period for any handlers added via
    // AddEventHandler (i.e. they don't specify one and just want to use the shared informer's default
    // value).
    defaultEventHandlerResyncPeriod time.Duration
    // clock allows for testability
    clock clock.Clock

    started, stopped bool
    startedLock      sync.Mutex

    // blockDeltas gives a way to stop all event distribution so that a late event handler
    // can safely join the shared informer.
    blockDeltas sync.Mutex
}

// dummyController hides the fact that a SharedInformer is different from a dedicated one
// where a caller can `Run`. The run method is disconnected in this case, because higher
// level logic will decide when to start the SharedInformer and related controller.
// Because returning information back is always asynchronous, the legacy callers shouldn't
// notice any change in behavior.
type dummyController struct {
    informer *sharedIndexInformer
}

func (v *dummyController) Run(stopCh <-chan struct{}) {
}

func (v *dummyController) HasSynced() bool {
    return v.informer.HasSynced()
}

func (c *dummyController) LastSyncResourceVersion() string {
    return ""
}

type updateNotification struct {
    oldObj interface{}
    newObj interface{}
}

type addNotification struct {
    newObj interface{}
}

type deleteNotification struct {
    oldObj interface{}
}

func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
    defer utilruntime.HandleCrash()

    fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, s.indexer)

    cfg := &Config{
        Queue:            fifo,
        ListerWatcher:    s.listerWatcher,
        ObjectType:       s.objectType,
        FullResyncPeriod: s.resyncCheckPeriod,
        RetryOnError:     false,
        ShouldResync:     s.processor.shouldResync,

        Process: s.HandleDeltas,
    }

    func() {
        s.startedLock.Lock()
        defer s.startedLock.Unlock()

        s.controller = New(cfg)
        s.controller.(*controller).clock = s.clock
        s.started = true
    }()

    // Separate stop channel because Processor should be stopped strictly after controller
    processorStopCh := make(chan struct{})
    var wg wait.Group
    defer wg.Wait()              // Wait for Processor to stop
    defer close(processorStopCh) // Tell Processor to stop
    wg.StartWithChannel(processorStopCh, s.cacheMutationDetector.Run)
    wg.StartWithChannel(processorStopCh, s.processor.run)

    defer func() {
        s.startedLock.Lock()
        defer s.startedLock.Unlock()
        s.stopped = true // Don't want any new listeners
    }()
    s.controller.Run(stopCh)
}

func (s *sharedIndexInformer) HasSynced() bool {
    s.startedLock.Lock()
    defer s.startedLock.Unlock()

    if s.controller == nil {
        return false
    }
    return s.controller.HasSynced()
}

func (s *sharedIndexInformer) LastSyncResourceVersion() string {
    s.startedLock.Lock()
    defer s.startedLock.Unlock()

    if s.controller == nil {
        return ""
    }
    return s.controller.LastSyncResourceVersion()
}

func (s *sharedIndexInformer) GetStore() Store {
    return s.indexer
}

func (s *sharedIndexInformer) GetIndexer() Indexer {
    return s.indexer
}

func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error {
    s.startedLock.Lock()
    defer s.startedLock.Unlock()

    if s.started {
        return fmt.Errorf("informer has already started")
    }

    return s.indexer.AddIndexers(indexers)
}

func (s *sharedIndexInformer) GetController() Controller {
    return &dummyController{informer: s}
}

func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) {
    s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod)
}

func determineResyncPeriod(desired, check time.Duration) time.Duration {
    if desired == 0 {
        return desired
    }
    if check == 0 {
        klog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired)
        return 0
    }
    if desired < check {
        klog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check)
        return check
    }
    return desired
}
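
// Worked example (added for illustration): with desired=10*time.Second and
// check=30*time.Second, determineResyncPeriod returns 30s, because a handler
// cannot be resynced more often than the informer checks its listeners;
// desired=0 disables resync for that handler, and any desired >= check is
// returned unchanged.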

const minimumResyncPeriod = 1 * time.Second

func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) {
    s.startedLock.Lock()
    defer s.startedLock.Unlock()

    if s.stopped {
        klog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler)
        return
    }

    if resyncPeriod > 0 {
        if resyncPeriod < minimumResyncPeriod {
            klog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod)
            resyncPeriod = minimumResyncPeriod
        }

        if resyncPeriod < s.resyncCheckPeriod {
            if s.started {
                klog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
                resyncPeriod = s.resyncCheckPeriod
            } else {
                // if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update
                // resyncCheckPeriod to match resyncPeriod and adjust the resync periods of all the listeners
                // accordingly
                s.resyncCheckPeriod = resyncPeriod
                s.processor.resyncCheckPeriodChanged(resyncPeriod)
            }
        }
    }

    listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize)

    if !s.started {
        s.processor.addListener(listener)
        return
    }

    // in order to safely join, we have to
    // 1. stop sending add/update/delete notifications
    // 2. do a list against the store
    // 3. send synthetic "Add" events to the new handler
    // 4. unblock
    s.blockDeltas.Lock()
    defer s.blockDeltas.Unlock()

    s.processor.addListener(listener)
    for _, item := range s.indexer.List() {
        listener.add(addNotification{newObj: item})
    }
}

func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
    s.blockDeltas.Lock()
    defer s.blockDeltas.Unlock()

    // from oldest to newest
    for _, d := range obj.(Deltas) {
        switch d.Type {
        case Sync, Added, Updated:
            isSync := d.Type == Sync
            s.cacheMutationDetector.AddObject(d.Object)
            if old, exists, err := s.indexer.Get(d.Object); err == nil && exists {
                if err := s.indexer.Update(d.Object); err != nil {
                    return err
                }
                s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object}, isSync)
            } else {
                if err := s.indexer.Add(d.Object); err != nil {
                    return err
                }
                s.processor.distribute(addNotification{newObj: d.Object}, isSync)
            }
        case Deleted:
            if err := s.indexer.Delete(d.Object); err != nil {
                return err
            }
            s.processor.distribute(deleteNotification{oldObj: d.Object}, false)
        }
    }
    return nil
}

type sharedProcessor struct {
    listenersStarted bool
    listenersLock    sync.RWMutex
    listeners        []*processorListener
    syncingListeners []*processorListener
    clock            clock.Clock
    wg               wait.Group
}

func (p *sharedProcessor) addListener(listener *processorListener) {
    p.listenersLock.Lock()
    defer p.listenersLock.Unlock()

    p.addListenerLocked(listener)
    if p.listenersStarted {
        p.wg.Start(listener.run)
        p.wg.Start(listener.pop)
    }
}

func (p *sharedProcessor) addListenerLocked(listener *processorListener) {
    p.listeners = append(p.listeners, listener)
    p.syncingListeners = append(p.syncingListeners, listener)
}

func (p *sharedProcessor) distribute(obj interface{}, sync bool) {
    p.listenersLock.RLock()
    defer p.listenersLock.RUnlock()

    if sync {
        for _, listener := range p.syncingListeners {
            listener.add(obj)
        }
    } else {
        for _, listener := range p.listeners {
            listener.add(obj)
        }
    }
}

func (p *sharedProcessor) run(stopCh <-chan struct{}) {
    func() {
        p.listenersLock.RLock()
        defer p.listenersLock.RUnlock()
        for _, listener := range p.listeners {
            p.wg.Start(listener.run)
            p.wg.Start(listener.pop)
        }
        p.listenersStarted = true
    }()
    <-stopCh
    p.listenersLock.RLock()
    defer p.listenersLock.RUnlock()
    for _, listener := range p.listeners {
        close(listener.addCh) // Tell .pop() to stop. .pop() will tell .run() to stop
    }
    p.wg.Wait() // Wait for all .pop() and .run() to stop
}

// shouldResync queries every listener to determine if any of them need a resync, based on each
// listener's resyncPeriod.
func (p *sharedProcessor) shouldResync() bool {
    p.listenersLock.Lock()
    defer p.listenersLock.Unlock()

    p.syncingListeners = []*processorListener{}

    resyncNeeded := false
    now := p.clock.Now()
    for _, listener := range p.listeners {
        // need to loop through all the listeners to see if they need to resync so we can prepare any
        // listeners that are going to be resyncing.
        if listener.shouldResync(now) {
            resyncNeeded = true
            p.syncingListeners = append(p.syncingListeners, listener)
            listener.determineNextResync(now)
        }
    }
    return resyncNeeded
}

func (p *sharedProcessor) resyncCheckPeriodChanged(resyncCheckPeriod time.Duration) {
    p.listenersLock.RLock()
    defer p.listenersLock.RUnlock()

    for _, listener := range p.listeners {
        resyncPeriod := determineResyncPeriod(listener.requestedResyncPeriod, resyncCheckPeriod)
        listener.setResyncPeriod(resyncPeriod)
    }
}

type processorListener struct {
    nextCh chan interface{}
    addCh  chan interface{}

    handler ResourceEventHandler

    // pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed.
    // There is one per listener, but a failing/stalled listener will have infinite pendingNotifications
    // added until we OOM.
    // TODO: This is no worse than before, since reflectors were backed by unbounded DeltaFIFOs, but
    // we should try to do something better.
    pendingNotifications buffer.RingGrowing

    // requestedResyncPeriod is how frequently the listener wants a full resync from the shared informer
    requestedResyncPeriod time.Duration
    // resyncPeriod is how frequently the listener wants a full resync from the shared informer. This
    // value may differ from requestedResyncPeriod if the shared informer adjusts it to align with the
    // informer's overall resync check period.
    resyncPeriod time.Duration
    // nextResync is the earliest time the listener should get a full resync
    nextResync time.Time
    // resyncLock guards access to resyncPeriod and nextResync
    resyncLock sync.Mutex
}

func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int) *processorListener {
    ret := &processorListener{
        nextCh:                make(chan interface{}),
        addCh:                 make(chan interface{}),
        handler:               handler,
        pendingNotifications:  *buffer.NewRingGrowing(bufferSize),
        requestedResyncPeriod: requestedResyncPeriod,
        resyncPeriod:          resyncPeriod,
    }

    ret.determineNextResync(now)

    return ret
}

func (p *processorListener) add(notification interface{}) {
    p.addCh <- notification
}

func (p *processorListener) pop() {
    defer utilruntime.HandleCrash()
    defer close(p.nextCh) // Tell .run() to stop

    var nextCh chan<- interface{}
    var notification interface{}
    for {
        select {
        case nextCh <- notification:
            // Notification dispatched
            var ok bool
            notification, ok = p.pendingNotifications.ReadOne()
            if !ok { // Nothing to pop
                nextCh = nil // Disable this select case
            }
        case notificationToAdd, ok := <-p.addCh:
            if !ok {
                return
            }
            if notification == nil { // No notification to pop (and pendingNotifications is empty)
                // Optimize the case - skip adding to pendingNotifications
                notification = notificationToAdd
                nextCh = p.nextCh
            } else { // There is already a notification waiting to be dispatched
                p.pendingNotifications.WriteOne(notificationToAdd)
            }
        }
    }
}

func (p *processorListener) run() {
    // this call blocks until the channel is closed. When a panic happens during the notification
    // we will catch it, **the offending item will be skipped!**, and after a short delay (one second)
    // the next notification will be attempted. This is usually better than the alternative of never
    // delivering again.
    stopCh := make(chan struct{})
    wait.Until(func() {
        // this gives us a few quick retries before a long pause and then a few more quick retries
        err := wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) {
            for next := range p.nextCh {
                switch notification := next.(type) {
                case updateNotification:
                    p.handler.OnUpdate(notification.oldObj, notification.newObj)
                case addNotification:
                    p.handler.OnAdd(notification.newObj)
                case deleteNotification:
                    p.handler.OnDelete(notification.oldObj)
                default:
                    utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next))
                }
            }
            // the only way to get here is if the p.nextCh is empty and closed
            return true, nil
        })

        // the only way to get here is if the p.nextCh is empty and closed
        if err == nil {
            close(stopCh)
        }
    }, 1*time.Minute, stopCh)
}

// shouldResync determines if the listener needs a resync. If the listener's resyncPeriod is 0,
// this always returns false.
func (p *processorListener) shouldResync(now time.Time) bool {
    p.resyncLock.Lock()
    defer p.resyncLock.Unlock()

    if p.resyncPeriod == 0 {
        return false
    }

    return now.After(p.nextResync) || now.Equal(p.nextResync)
}

func (p *processorListener) determineNextResync(now time.Time) {
    p.resyncLock.Lock()
    defer p.resyncLock.Unlock()

    p.nextResync = now.Add(p.resyncPeriod)
}

func (p *processorListener) setResyncPeriod(resyncPeriod time.Duration) {
    p.resyncLock.Lock()
    defer p.resyncLock.Unlock()

    p.resyncPeriod = resyncPeriod
}

244
vendor/k8s.io/client-go/tools/cache/store.go
generated
vendored
Executable file
@ -0,0 +1,244 @@

/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
    "fmt"
    "strings"

    "k8s.io/apimachinery/pkg/api/meta"
)

// Store is a generic object storage interface. Reflector knows how to watch a server
// and update a store. A generic store is provided, which allows Reflector to be used
// as a local caching system, and an LRU store, which allows Reflector to work like a
// queue of items yet to be processed.
//
// Store makes no assumptions about stored object identity; it is the responsibility
// of a Store implementation to provide a mechanism to correctly key objects and to
// define the contract for obtaining objects by some arbitrary key type.
type Store interface {
    Add(obj interface{}) error
    Update(obj interface{}) error
    Delete(obj interface{}) error
    List() []interface{}
    ListKeys() []string
    Get(obj interface{}) (item interface{}, exists bool, err error)
    GetByKey(key string) (item interface{}, exists bool, err error)

    // Replace will delete the contents of the store, using instead the
    // given list. Store takes ownership of the list; you should not reference
    // it after calling this function.
    Replace([]interface{}, string) error
    Resync() error
}

// KeyFunc knows how to make a key from an object. Implementations should be deterministic.
type KeyFunc func(obj interface{}) (string, error)

// KeyError will be returned any time a KeyFunc gives an error; it includes the object
// at fault.
type KeyError struct {
    Obj interface{}
    Err error
}

// Error gives a human-readable description of the error.
func (k KeyError) Error() string {
    return fmt.Sprintf("couldn't create key for object %+v: %v", k.Obj, k.Err)
}

// ExplicitKey can be passed to MetaNamespaceKeyFunc if you have the key for
// the object but not the object itself.
type ExplicitKey string

// MetaNamespaceKeyFunc is a convenient default KeyFunc which knows how to make
// keys for API objects which implement meta.Interface.
// The key uses the format <namespace>/<name> unless <namespace> is empty, then
// it's just <name>.
//
// TODO: replace key-as-string with a key-as-struct so that this
// packing/unpacking won't be necessary.
func MetaNamespaceKeyFunc(obj interface{}) (string, error) {
    if key, ok := obj.(ExplicitKey); ok {
        return string(key), nil
    }
    meta, err := meta.Accessor(obj)
    if err != nil {
        return "", fmt.Errorf("object has no meta: %v", err)
    }
    if len(meta.GetNamespace()) > 0 {
        return meta.GetNamespace() + "/" + meta.GetName(), nil
    }
    return meta.GetName(), nil
}

// SplitMetaNamespaceKey returns the namespace and name that
// MetaNamespaceKeyFunc encoded into key.
//
// TODO: replace key-as-string with a key-as-struct so that this
// packing/unpacking won't be necessary.
func SplitMetaNamespaceKey(key string) (namespace, name string, err error) {
    parts := strings.Split(key, "/")
    switch len(parts) {
    case 1:
        // name only, no namespace
        return "", parts[0], nil
    case 2:
        // namespace and name
        return parts[0], parts[1], nil
    }

    return "", "", fmt.Errorf("unexpected key format: %q", key)
}
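
// A round-trip sketch (added for illustration; not part of the upstream file):
// MetaNamespaceKeyFunc and SplitMetaNamespaceKey are inverses for well-formed
// objects. `pod` is hypothetical and assumes k8s.io/api/core/v1 (as v1) and
// metav1 are imported.
//
//    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web-0"}}
//    key, _ := MetaNamespaceKeyFunc(pod) // "default/web-0"
//    ns, name, _ := SplitMetaNamespaceKey(key)
//    // ns == "default", name == "web-0"; cluster-scoped objects key to just the name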

// cache responsibilities are limited to:
// 1. Computing keys for objects via keyFunc
// 2. Invoking methods of a ThreadSafeStorage interface
type cache struct {
    // cacheStorage bears the burden of thread safety for the cache
    cacheStorage ThreadSafeStore
    // keyFunc is used to make the key for objects stored in and retrieved from items, and
    // should be deterministic.
    keyFunc KeyFunc
}

var _ Store = &cache{}

// Add inserts an item into the cache.
func (c *cache) Add(obj interface{}) error {
    key, err := c.keyFunc(obj)
    if err != nil {
        return KeyError{obj, err}
    }
    c.cacheStorage.Add(key, obj)
    return nil
}

// Update sets an item in the cache to its updated state.
func (c *cache) Update(obj interface{}) error {
    key, err := c.keyFunc(obj)
    if err != nil {
        return KeyError{obj, err}
    }
    c.cacheStorage.Update(key, obj)
    return nil
}

// Delete removes an item from the cache.
func (c *cache) Delete(obj interface{}) error {
    key, err := c.keyFunc(obj)
    if err != nil {
        return KeyError{obj, err}
    }
    c.cacheStorage.Delete(key)
    return nil
}

// List returns a list of all the items.
// List is completely threadsafe as long as you treat all items as immutable.
func (c *cache) List() []interface{} {
    return c.cacheStorage.List()
}

// ListKeys returns a list of all the keys of the objects currently
// in the cache.
func (c *cache) ListKeys() []string {
    return c.cacheStorage.ListKeys()
}

// GetIndexers returns the indexers of the cache.
func (c *cache) GetIndexers() Indexers {
    return c.cacheStorage.GetIndexers()
}

// Index returns a list of items that match on the index function.
// Index is thread-safe so long as you treat all items as immutable.
func (c *cache) Index(indexName string, obj interface{}) ([]interface{}, error) {
    return c.cacheStorage.Index(indexName, obj)
}

func (c *cache) IndexKeys(indexName, indexKey string) ([]string, error) {
    return c.cacheStorage.IndexKeys(indexName, indexKey)
}

// ListIndexFuncValues returns the list of generated values of an Index func.
func (c *cache) ListIndexFuncValues(indexName string) []string {
    return c.cacheStorage.ListIndexFuncValues(indexName)
}

func (c *cache) ByIndex(indexName, indexKey string) ([]interface{}, error) {
    return c.cacheStorage.ByIndex(indexName, indexKey)
}

func (c *cache) AddIndexers(newIndexers Indexers) error {
    return c.cacheStorage.AddIndexers(newIndexers)
}

// Get returns the requested item, or sets exists=false.
// Get is completely threadsafe as long as you treat all items as immutable.
func (c *cache) Get(obj interface{}) (item interface{}, exists bool, err error) {
    key, err := c.keyFunc(obj)
    if err != nil {
        return nil, false, KeyError{obj, err}
    }
    return c.GetByKey(key)
}

// GetByKey returns the requested item, or exists=false.
// GetByKey is completely threadsafe as long as you treat all items as immutable.
func (c *cache) GetByKey(key string) (item interface{}, exists bool, err error) {
    item, exists = c.cacheStorage.Get(key)
    return item, exists, nil
}

// Replace will delete the contents of 'c', using instead the given list.
// 'c' takes ownership of the list; you should not reference the list again
// after calling this function.
func (c *cache) Replace(list []interface{}, resourceVersion string) error {
    items := make(map[string]interface{}, len(list))
    for _, item := range list {
        key, err := c.keyFunc(item)
        if err != nil {
            return KeyError{item, err}
        }
        items[key] = item
    }
    c.cacheStorage.Replace(items, resourceVersion)
    return nil
}

// Resync touches all items in the store to force processing.
func (c *cache) Resync() error {
    return c.cacheStorage.Resync()
}

// NewStore returns a Store implemented simply with a map and a lock.
func NewStore(keyFunc KeyFunc) Store {
    return &cache{
        cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}),
        keyFunc:      keyFunc,
    }
}

// NewIndexer returns an Indexer implemented simply with a map and a lock.
func NewIndexer(keyFunc KeyFunc, indexers Indexers) Indexer {
    return &cache{
        cacheStorage: NewThreadSafeStore(indexers, Indices{}),
        keyFunc:      keyFunc,
    }
}
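
// A usage sketch (added for illustration; not part of the upstream file): an
// Indexer keyed the usual way plus a secondary index by namespace, so ByIndex
// can answer "all objects in namespace X" without scanning the whole store.
// `pod` is the hypothetical object from the key-function sketch above.
//
//    idx := NewIndexer(MetaNamespaceKeyFunc, Indexers{"namespace": MetaNamespaceIndexFunc})
//    _ = idx.Add(pod)
//    inDefault, _ := idx.ByIndex("namespace", "default") // contains pod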

311
vendor/k8s.io/client-go/tools/cache/thread_safe_store.go
generated
vendored
Normal file
@ -0,0 +1,311 @@

/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
    "fmt"
    "sync"

    "k8s.io/apimachinery/pkg/util/sets"
)

// ThreadSafeStore is an interface that allows concurrent access to a storage backend.
// TL;DR caveats: you must not modify anything returned by Get or List as it will break
// the indexing feature in addition to not being thread safe.
//
// The guarantees of thread safety provided by List/Get are only valid if the caller
// treats returned items as read-only. For example, a pointer inserted in the store
// through `Add` will be returned as is by `Get`. Multiple clients might invoke `Get`
// on the same key and modify the pointer in a non-thread-safe way. Also note that
// modifying objects stored by the indexers (if any) will *not* automatically lead
// to a re-index. So it's not a good idea to directly modify the objects returned by
// Get/List, in general.
type ThreadSafeStore interface {
    Add(key string, obj interface{})
    Update(key string, obj interface{})
    Delete(key string)
    Get(key string) (item interface{}, exists bool)
    List() []interface{}
    ListKeys() []string
    Replace(map[string]interface{}, string)
    Index(indexName string, obj interface{}) ([]interface{}, error)
    IndexKeys(indexName, indexKey string) ([]string, error)
    ListIndexFuncValues(name string) []string
    ByIndex(indexName, indexKey string) ([]interface{}, error)
    GetIndexers() Indexers

    // AddIndexers adds more indexers to this store. If you call this after you already have data
    // in the store, the results are undefined.
    AddIndexers(newIndexers Indexers) error
    Resync() error
}

// threadSafeMap implements ThreadSafeStore
type threadSafeMap struct {
    lock  sync.RWMutex
    items map[string]interface{}

    // indexers maps a name to an IndexFunc
    indexers Indexers
    // indices maps a name to an Index
    indices Indices
}

func (c *threadSafeMap) Add(key string, obj interface{}) {
    c.lock.Lock()
    defer c.lock.Unlock()
    oldObject := c.items[key]
    c.items[key] = obj
    c.updateIndices(oldObject, obj, key)
}

func (c *threadSafeMap) Update(key string, obj interface{}) {
    c.lock.Lock()
    defer c.lock.Unlock()
    oldObject := c.items[key]
    c.items[key] = obj
    c.updateIndices(oldObject, obj, key)
}

func (c *threadSafeMap) Delete(key string) {
    c.lock.Lock()
    defer c.lock.Unlock()
    if obj, exists := c.items[key]; exists {
        c.deleteFromIndices(obj, key)
        delete(c.items, key)
    }
}

func (c *threadSafeMap) Get(key string) (item interface{}, exists bool) {
    c.lock.RLock()
    defer c.lock.RUnlock()
    item, exists = c.items[key]
    return item, exists
}

func (c *threadSafeMap) List() []interface{} {
    c.lock.RLock()
    defer c.lock.RUnlock()
    list := make([]interface{}, 0, len(c.items))
    for _, item := range c.items {
        list = append(list, item)
    }
    return list
}

// ListKeys returns a list of all the keys of the objects currently
// in the threadSafeMap.
func (c *threadSafeMap) ListKeys() []string {
    c.lock.RLock()
    defer c.lock.RUnlock()
    list := make([]string, 0, len(c.items))
    for key := range c.items {
        list = append(list, key)
    }
    return list
}

func (c *threadSafeMap) Replace(items map[string]interface{}, resourceVersion string) {
    c.lock.Lock()
    defer c.lock.Unlock()
    c.items = items

    // rebuild any index
    c.indices = Indices{}
    for key, item := range c.items {
        c.updateIndices(nil, item, key)
    }
}

// Index returns a list of items that match on the index function.
// Index is thread-safe so long as you treat all items as immutable.
func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, error) {
    c.lock.RLock()
    defer c.lock.RUnlock()

    indexFunc := c.indexers[indexName]
    if indexFunc == nil {
        return nil, fmt.Errorf("Index with name %s does not exist", indexName)
    }

    indexKeys, err := indexFunc(obj)
    if err != nil {
        return nil, err
    }
    index := c.indices[indexName]

    var returnKeySet sets.String
    if len(indexKeys) == 1 {
        // In majority of cases, there is exactly one value matching.
        // Optimize the most common path - deduping is not needed here.
        returnKeySet = index[indexKeys[0]]
    } else {
        // Need to de-dupe the return list.
        // Since multiple keys are allowed, this can happen.
        returnKeySet = sets.String{}
        for _, indexKey := range indexKeys {
            for key := range index[indexKey] {
                returnKeySet.Insert(key)
            }
        }
    }

    list := make([]interface{}, 0, returnKeySet.Len())
    for absoluteKey := range returnKeySet {
        list = append(list, c.items[absoluteKey])
    }
    return list, nil
}

// ByIndex returns a list of items that match an exact value on the index function.
func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, error) {
    c.lock.RLock()
    defer c.lock.RUnlock()

    indexFunc := c.indexers[indexName]
    if indexFunc == nil {
        return nil, fmt.Errorf("Index with name %s does not exist", indexName)
    }

    index := c.indices[indexName]

    set := index[indexKey]
    list := make([]interface{}, 0, set.Len())
    for _, key := range set.List() {
        list = append(list, c.items[key])
    }

    return list, nil
}

// IndexKeys returns a list of keys that match on the index function.
// IndexKeys is thread-safe so long as you treat all items as immutable.
func (c *threadSafeMap) IndexKeys(indexName, indexKey string) ([]string, error) {
    c.lock.RLock()
    defer c.lock.RUnlock()

    indexFunc := c.indexers[indexName]
    if indexFunc == nil {
        return nil, fmt.Errorf("Index with name %s does not exist", indexName)
    }

    index := c.indices[indexName]

    set := index[indexKey]
    return set.List(), nil
}

func (c *threadSafeMap) ListIndexFuncValues(indexName string) []string {
    c.lock.RLock()
    defer c.lock.RUnlock()

    index := c.indices[indexName]
    names := make([]string, 0, len(index))
    for key := range index {
        names = append(names, key)
    }
    return names
}

func (c *threadSafeMap) GetIndexers() Indexers {
    return c.indexers
}

func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error {
    c.lock.Lock()
    defer c.lock.Unlock()

    if len(c.items) > 0 {
        return fmt.Errorf("cannot add indexers to running index")
    }

    oldKeys := sets.StringKeySet(c.indexers)
    newKeys := sets.StringKeySet(newIndexers)

    if oldKeys.HasAny(newKeys.List()...) {
        return fmt.Errorf("indexer conflict: %v", oldKeys.Intersection(newKeys))
    }

    for k, v := range newIndexers {
        c.indexers[k] = v
    }
    return nil
}

// updateIndices modifies the object's location in the managed indexes; if this is an
// update, you must provide an oldObj.
// updateIndices must be called from a function that already has a lock on the cache.
func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) {
    // if we got an old object, we need to remove it before we add it again
    if oldObj != nil {
        c.deleteFromIndices(oldObj, key)
    }
    for name, indexFunc := range c.indexers {
        indexValues, err := indexFunc(newObj)
        if err != nil {
            panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
        }
        index := c.indices[name]
        if index == nil {
            index = Index{}
            c.indices[name] = index
        }

        for _, indexValue := range indexValues {
            set := index[indexValue]
            if set == nil {
                set = sets.String{}
                index[indexValue] = set
            }
            set.Insert(key)
        }
    }
}

// deleteFromIndices removes the object from each of the managed indexes.
// It is intended to be called from a function that already has a lock on the cache.
func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) {
    for name, indexFunc := range c.indexers {
        indexValues, err := indexFunc(obj)
        if err != nil {
            panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
        }

        index := c.indices[name]
        if index == nil {
            continue
        }
        for _, indexValue := range indexValues {
            set := index[indexValue]
            if set != nil {
                set.Delete(key)
            }
        }
    }
}

func (c *threadSafeMap) Resync() error {
    // Nothing to do
    return nil
}

func NewThreadSafeStore(indexers Indexers, indices Indices) ThreadSafeStore {
    return &threadSafeMap{
        items:    map[string]interface{}{},
        indexers: indexers,
        indices:  indices,
    }
}

83
vendor/k8s.io/client-go/tools/cache/undelta_store.go
generated
vendored
Normal file
@ -0,0 +1,83 @@

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

// UndeltaStore listens to incremental updates and sends complete state on every change.
// It implements the Store interface so that it can receive a stream of mirrored objects
// from Reflector. Whenever it receives any complete (Store.Replace) or incremental change
// (Store.Add, Store.Update, Store.Delete), it sends the complete state by calling PushFunc.
// It is thread-safe. It guarantees that every change (Add, Update, Replace, Delete) results
// in one call to PushFunc, but sometimes PushFunc may be called twice with the same values.
// PushFunc should be thread safe.
type UndeltaStore struct {
    Store
    PushFunc func([]interface{})
}

// Assert that it implements the Store interface.
var _ Store = &UndeltaStore{}

// Note about thread safety. The Store implementation (cache.cache) uses a lock for all methods.
// In the functions below, the lock gets released and reacquired between the {Add,Delete,etc}
// and the List. So, the following can happen, resulting in two identical calls to PushFunc.
// time            thread 1                  thread 2
// 0               UndeltaStore.Add(a)
// 1                                         UndeltaStore.Add(b)
// 2               Store.Add(a)
// 3                                         Store.Add(b)
// 4               Store.List() -> [a,b]
// 5                                         Store.List() -> [a,b]

func (u *UndeltaStore) Add(obj interface{}) error {
    if err := u.Store.Add(obj); err != nil {
        return err
    }
    u.PushFunc(u.Store.List())
    return nil
}

func (u *UndeltaStore) Update(obj interface{}) error {
    if err := u.Store.Update(obj); err != nil {
        return err
    }
    u.PushFunc(u.Store.List())
    return nil
}

func (u *UndeltaStore) Delete(obj interface{}) error {
    if err := u.Store.Delete(obj); err != nil {
        return err
    }
    u.PushFunc(u.Store.List())
    return nil
}

func (u *UndeltaStore) Replace(list []interface{}, resourceVersion string) error {
    if err := u.Store.Replace(list, resourceVersion); err != nil {
        return err
    }
    u.PushFunc(u.Store.List())
    return nil
}

// NewUndeltaStore returns an UndeltaStore implemented with a Store.
func NewUndeltaStore(pushFunc func([]interface{}), keyFunc KeyFunc) *UndeltaStore {
    return &UndeltaStore{
        Store:    NewStore(keyFunc),
        PushFunc: pushFunc,
    }
}
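
// A usage sketch (added for illustration; not part of the upstream file):
// every mutation pushes the complete current state, which suits consumers
// that want snapshots rather than deltas. The push function and `pod` object
// are illustrative only.
//
//    push := func(state []interface{}) { fmt.Printf("now %d objects\n", len(state)) }
//    u := NewUndeltaStore(push, MetaNamespaceKeyFunc)
//    _ = u.Add(pod)    // push observes the full state including pod
//    _ = u.Delete(pod) // push observes the full state without pod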