Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 10:33:35 +00:00)

commit 2551a0b05f (parent 07b05616a0)
Author: Niels de Vos <ndevos@ibm.com>, committed by mergify[bot]

    rebase: update all k8s packages to 0.27.2

    Signed-off-by: Niels de Vos <ndevos@ibm.com>
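The most visible API change in this client-go bump (see the controller.go hunks below) is the extra isInInitialList parameter on ResourceEventHandler.OnAdd. A minimal, hypothetical sketch of a handler written against the new signature; the handler type and its logic are illustrative, not part of this commit:

package main

import "fmt"

// demoHandler is a hypothetical ResourceEventHandler implementation showing
// the v0.27 shape: OnAdd now reports whether the object was delivered as part
// of the informer's initial listing rather than by a later watch event.
type demoHandler struct{}

func (h demoHandler) OnAdd(obj interface{}, isInInitialList bool) {
	if isInInitialList {
		fmt.Println("seen during initial sync:", obj)
		return
	}
	fmt.Println("created after initial sync:", obj)
}

func (h demoHandler) OnUpdate(oldObj, newObj interface{}) {}
func (h demoHandler) OnDelete(obj interface{})            {}

func main() {
	demoHandler{}.OnAdd("some-object", true)
}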
vendor/k8s.io/client-go/tools/cache/controller.go (generated, vendored): 92 lines changed
@@ -18,6 +18,7 @@ package cache

 import (
     "errors"
+    "os"
     "sync"
     "time"

@@ -50,11 +51,12 @@ type Config struct {
     Process ProcessFunc

     // ObjectType is an example object of the type this controller is
-    // expected to handle. Only the type needs to be right, except
-    // that when that is `unstructured.Unstructured` the object's
-    // `"apiVersion"` and `"kind"` must also be right.
+    // expected to handle.
     ObjectType runtime.Object

+    // ObjectDescription is the description to use when logging type-specific information about this controller.
+    ObjectDescription string
+
     // FullResyncPeriod is the period at which ShouldResync is considered.
     FullResyncPeriod time.Duration

@@ -84,7 +86,7 @@ type Config struct {
 type ShouldResyncFunc func() bool

 // ProcessFunc processes a single object.
-type ProcessFunc func(obj interface{}) error
+type ProcessFunc func(obj interface{}, isInInitialList bool) error

 // `*controller` implements Controller
 type controller struct {

@@ -131,18 +133,24 @@ func (c *controller) Run(stopCh <-chan struct{}) {
         <-stopCh
         c.config.Queue.Close()
     }()
-    r := NewReflector(
+    r := NewReflectorWithOptions(
         c.config.ListerWatcher,
         c.config.ObjectType,
         c.config.Queue,
-        c.config.FullResyncPeriod,
+        ReflectorOptions{
+            ResyncPeriod:    c.config.FullResyncPeriod,
+            TypeDescription: c.config.ObjectDescription,
+            Clock:           c.clock,
+        },
     )
     r.ShouldResync = c.config.ShouldResync
     r.WatchListPageSize = c.config.WatchListPageSize
-    r.clock = c.clock
     if c.config.WatchErrorHandler != nil {
         r.watchErrorHandler = c.config.WatchErrorHandler
     }
+    if s := os.Getenv("ENABLE_CLIENT_GO_WATCH_LIST_ALPHA"); len(s) > 0 {
+        r.UseWatchList = true
+    }

     c.reflectorMutex.Lock()
     c.reflector = r

@@ -211,7 +219,7 @@ func (c *controller) processLoop() {
 // happen if the watch is closed and misses the delete event and we don't
 // notice the deletion until the subsequent re-list.
 type ResourceEventHandler interface {
-    OnAdd(obj interface{})
+    OnAdd(obj interface{}, isInInitialList bool)
     OnUpdate(oldObj, newObj interface{})
     OnDelete(obj interface{})
 }

@@ -220,6 +228,9 @@ type ResourceEventHandler interface {
 // as few of the notification functions as you want while still implementing
 // ResourceEventHandler. This adapter does not remove the prohibition against
 // modifying the objects.
+//
+// See ResourceEventHandlerDetailedFuncs if your use needs to propagate
+// HasSynced.
 type ResourceEventHandlerFuncs struct {
     AddFunc    func(obj interface{})
     UpdateFunc func(oldObj, newObj interface{})

@@ -227,7 +238,7 @@ type ResourceEventHandlerFuncs struct {
 }

 // OnAdd calls AddFunc if it's not nil.
-func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) {
+func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}, isInInitialList bool) {
     if r.AddFunc != nil {
         r.AddFunc(obj)
     }

@@ -247,6 +258,36 @@ func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {
     }
 }

+// ResourceEventHandlerDetailedFuncs is exactly like ResourceEventHandlerFuncs
+// except its AddFunc accepts the isInInitialList parameter, for propagating
+// HasSynced.
+type ResourceEventHandlerDetailedFuncs struct {
+    AddFunc    func(obj interface{}, isInInitialList bool)
+    UpdateFunc func(oldObj, newObj interface{})
+    DeleteFunc func(obj interface{})
+}
+
+// OnAdd calls AddFunc if it's not nil.
+func (r ResourceEventHandlerDetailedFuncs) OnAdd(obj interface{}, isInInitialList bool) {
+    if r.AddFunc != nil {
+        r.AddFunc(obj, isInInitialList)
+    }
+}
+
+// OnUpdate calls UpdateFunc if it's not nil.
+func (r ResourceEventHandlerDetailedFuncs) OnUpdate(oldObj, newObj interface{}) {
+    if r.UpdateFunc != nil {
+        r.UpdateFunc(oldObj, newObj)
+    }
+}
+
+// OnDelete calls DeleteFunc if it's not nil.
+func (r ResourceEventHandlerDetailedFuncs) OnDelete(obj interface{}) {
+    if r.DeleteFunc != nil {
+        r.DeleteFunc(obj)
+    }
+}
+
 // FilteringResourceEventHandler applies the provided filter to all events coming
 // in, ensuring the appropriate nested handler method is invoked. An object
 // that starts passing the filter after an update is considered an add, and an

@@ -258,11 +299,11 @@ type FilteringResourceEventHandler struct {
 }

 // OnAdd calls the nested handler only if the filter succeeds
-func (r FilteringResourceEventHandler) OnAdd(obj interface{}) {
+func (r FilteringResourceEventHandler) OnAdd(obj interface{}, isInInitialList bool) {
     if !r.FilterFunc(obj) {
         return
     }
-    r.Handler.OnAdd(obj)
+    r.Handler.OnAdd(obj, isInInitialList)
 }

 // OnUpdate ensures the proper handler is called depending on whether the filter matches

@@ -273,7 +314,7 @@ func (r FilteringResourceEventHandler) OnUpdate(oldObj, newObj interface{}) {
     case newer && older:
         r.Handler.OnUpdate(oldObj, newObj)
     case newer && !older:
-        r.Handler.OnAdd(newObj)
+        r.Handler.OnAdd(newObj, false)
     case !newer && older:
         r.Handler.OnDelete(oldObj)
     default:

@@ -353,17 +394,6 @@ func NewIndexerInformer(
     return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil)
 }

-// TransformFunc allows for transforming an object before it will be processed
-// and put into the controller cache and before the corresponding handlers will
-// be called on it.
-// TransformFunc (similarly to ResourceEventHandler functions) should be able
-// to correctly handle the tombstone of type cache.DeletedFinalStateUnknown
-//
-// The most common usage pattern is to clean-up some parts of the object to
-// reduce component memory usage if a given component doesn't care about them.
-// given controller doesn't care for them
-type TransformFunc func(interface{}) (interface{}, error)
-
 // NewTransformingInformer returns a Store and a controller for populating
 // the store while also providing event notifications. You should only used
 // the returned Store for Get/List operations; Add/Modify/Deletes will cause

@@ -411,19 +441,12 @@ func processDeltas(
     // Object which receives event notifications from the given deltas
     handler ResourceEventHandler,
     clientState Store,
-    transformer TransformFunc,
     deltas Deltas,
+    isInInitialList bool,
 ) error {
     // from oldest to newest
     for _, d := range deltas {
         obj := d.Object
-        if transformer != nil {
-            var err error
-            obj, err = transformer(obj)
-            if err != nil {
-                return err
-            }
-        }

         switch d.Type {
         case Sync, Replaced, Added, Updated:

@@ -436,7 +459,7 @@ func processDeltas(
             if err := clientState.Add(obj); err != nil {
                 return err
             }
-            handler.OnAdd(obj)
+            handler.OnAdd(obj, isInInitialList)
         }
     case Deleted:
         if err := clientState.Delete(obj); err != nil {

@@ -475,6 +498,7 @@ func newInformer(
     fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
         KnownObjects:          clientState,
         EmitDeltaTypeReplaced: true,
+        Transformer:           transformer,
     })

     cfg := &Config{

@@ -484,9 +508,9 @@ func newInformer(
         FullResyncPeriod: resyncPeriod,
         RetryOnError:     false,

-        Process: func(obj interface{}) error {
+        Process: func(obj interface{}, isInInitialList bool) error {
             if deltas, ok := obj.(Deltas); ok {
-                return processDeltas(h, clientState, transformer, deltas)
+                return processDeltas(h, clientState, deltas, isInInitialList)
             }
             return errors.New("object given as Process argument is not Deltas")
         },
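For consumers that use the adapter structs instead of implementing ResourceEventHandler directly, the new ResourceEventHandlerDetailedFuncs shown above is the migration path when the AddFunc needs the new flag. A small, self-contained usage sketch; the printed output is illustrative:

package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

func main() {
	// ResourceEventHandlerDetailedFuncs is exactly like
	// ResourceEventHandlerFuncs except its AddFunc receives isInInitialList.
	handler := cache.ResourceEventHandlerDetailedFuncs{
		AddFunc: func(obj interface{}, isInInitialList bool) {
			fmt.Printf("add (initial=%v): %v\n", isInInitialList, obj)
		},
		UpdateFunc: func(oldObj, newObj interface{}) {
			fmt.Println("update")
		},
		DeleteFunc: func(obj interface{}) {
			fmt.Println("delete")
		},
	}
	// OnAdd fans out to AddFunc, forwarding the flag unchanged.
	handler.OnAdd("example-object", true)
}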
vendor/k8s.io/client-go/tools/cache/delta_fifo.go (generated, vendored): 142 lines changed
@@ -51,6 +51,10 @@ type DeltaFIFOOptions struct {
     // When true, `Replaced` events will be sent for items passed to a Replace() call.
     // When false, `Sync` events will be sent instead.
     EmitDeltaTypeReplaced bool
+
+    // If set, will be called for objects before enqueueing them. Please
+    // see the comment on TransformFunc for details.
+    Transformer TransformFunc
 }

 // DeltaFIFO is like FIFO, but differs in two ways. One is that the

@@ -129,8 +133,32 @@ type DeltaFIFO struct {
     // emitDeltaTypeReplaced is whether to emit the Replaced or Sync
     // DeltaType when Replace() is called (to preserve backwards compat).
     emitDeltaTypeReplaced bool
+
+    // Called with every object if non-nil.
+    transformer TransformFunc
 }

+// TransformFunc allows for transforming an object before it will be processed.
+// TransformFunc (similarly to ResourceEventHandler functions) should be able
+// to correctly handle the tombstone of type cache.DeletedFinalStateUnknown.
+//
+// New in v1.27: In such cases, the contained object will already have gone
+// through the transform object separately (when it was added / updated prior
+// to the delete), so the TransformFunc can likely safely ignore such objects
+// (i.e., just return the input object).
+//
+// The most common usage pattern is to clean-up some parts of the object to
+// reduce component memory usage if a given component doesn't care about them.
+//
+// New in v1.27: unless the object is a DeletedFinalStateUnknown, TransformFunc
+// sees the object before any other actor, and it is now safe to mutate the
+// object in place instead of making a copy.
+//
+// Note that TransformFunc is called while inserting objects into the
+// notification queue and is therefore extremely performance sensitive; please
+// do not do anything that will take a long time.
+type TransformFunc func(interface{}) (interface{}, error)
+
 // DeltaType is the type of a change (addition, deletion, etc)
 type DeltaType string

@@ -227,6 +255,7 @@ func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO {
         knownObjects: opts.KnownObjects,

         emitDeltaTypeReplaced: opts.EmitDeltaTypeReplaced,
+        transformer:           opts.Transformer,
     }
     f.cond.L = &f.lock
     return f

@@ -271,6 +300,10 @@ func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) {
 func (f *DeltaFIFO) HasSynced() bool {
     f.lock.Lock()
     defer f.lock.Unlock()
+    return f.hasSynced_locked()
+}
+
+func (f *DeltaFIFO) hasSynced_locked() bool {
     return f.populated && f.initialPopulationCount == 0
 }

@@ -411,6 +444,21 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err
     if err != nil {
         return KeyError{obj, err}
     }
+
+    // Every object comes through this code path once, so this is a good
+    // place to call the transform func. If obj is a
+    // DeletedFinalStateUnknown tombstone, then the containted inner object
+    // will already have gone through the transformer, but we document that
+    // this can happen. In cases involving Replace(), such an object can
+    // come through multiple times.
+    if f.transformer != nil {
+        var err error
+        obj, err = f.transformer(obj)
+        if err != nil {
+            return err
+        }
+    }
+
     oldDeltas := f.items[id]
     newDeltas := append(oldDeltas, Delta{actionType, obj})
     newDeltas = dedupDeltas(newDeltas)

@@ -526,6 +574,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {

             f.cond.Wait()
         }
+        isInInitialList := !f.hasSynced_locked()
         id := f.queue[0]
         f.queue = f.queue[1:]
         depth := len(f.queue)

@@ -551,7 +600,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
                 utiltrace.Field{Key: "Reason", Value: "slow event handlers blocking the queue"})
             defer trace.LogIfLong(100 * time.Millisecond)
         }
-        err := process(item)
+        err := process(item, isInInitialList)
         if e, ok := err.(ErrRequeue); ok {
             f.addIfNotPresent(id, item)
             err = e.Err

@@ -566,12 +615,11 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
 // using the Sync or Replace DeltaType and then (2) it does some deletions.
 // In particular: for every pre-existing key K that is not the key of
 // an object in `list` there is the effect of
-// `Delete(DeletedFinalStateUnknown{K, O})` where O is current object
-// of K. If `f.knownObjects == nil` then the pre-existing keys are
-// those in `f.items` and the current object of K is the `.Newest()`
-// of the Deltas associated with K. Otherwise the pre-existing keys
-// are those listed by `f.knownObjects` and the current object of K is
-// what `f.knownObjects.GetByKey(K)` returns.
+// `Delete(DeletedFinalStateUnknown{K, O})` where O is the latest known
+// object of K. The pre-existing keys are those in the union set of the keys in
+// `f.items` and `f.knownObjects` (if not nil). The last known object for key K is
+// the one present in the last delta in `f.items`. If there is no delta for K
+// in `f.items`, it is the object in `f.knownObjects`
 func (f *DeltaFIFO) Replace(list []interface{}, _ string) error {
     f.lock.Lock()
     defer f.lock.Unlock()

@@ -595,56 +643,54 @@ func (f *DeltaFIFO) Replace(list []interface{}, _ string) error {
         }
     }

-    if f.knownObjects == nil {
-        // Do deletion detection against our own list.
-        queuedDeletions := 0
-        for k, oldItem := range f.items {
-            if keys.Has(k) {
-                continue
-            }
-            // Delete pre-existing items not in the new list.
-            // This could happen if watch deletion event was missed while
-            // disconnected from apiserver.
-            var deletedObj interface{}
-            if n := oldItem.Newest(); n != nil {
-                deletedObj = n.Object
-            }
-            queuedDeletions++
-            if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
-                return err
-            }
-        }
-
-        if !f.populated {
-            f.populated = true
-            // While there shouldn't be any queued deletions in the initial
-            // population of the queue, it's better to be on the safe side.
-            f.initialPopulationCount = keys.Len() + queuedDeletions
-        }
-
-        return nil
-    }
-
-    // Detect deletions not already in the queue.
-    knownKeys := f.knownObjects.ListKeys()
-    queuedDeletions := 0
-    for _, k := range knownKeys {
-        if keys.Has(k) {
-            continue
-        }
-
-        deletedObj, exists, err := f.knownObjects.GetByKey(k)
-        if err != nil {
-            deletedObj = nil
-            klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k)
-        } else if !exists {
-            deletedObj = nil
-            klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k)
-        }
-        queuedDeletions++
-        if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
-            return err
-        }
-    }
+    // Do deletion detection against objects in the queue
+    queuedDeletions := 0
+    for k, oldItem := range f.items {
+        if keys.Has(k) {
+            continue
+        }
+        // Delete pre-existing items not in the new list.
+        // This could happen if watch deletion event was missed while
+        // disconnected from apiserver.
+        var deletedObj interface{}
+        if n := oldItem.Newest(); n != nil {
+            deletedObj = n.Object
+
+            // if the previous object is a DeletedFinalStateUnknown, we have to extract the actual Object
+            if d, ok := deletedObj.(DeletedFinalStateUnknown); ok {
+                deletedObj = d.Obj
+            }
+        }
+        queuedDeletions++
+        if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
+            return err
+        }
+    }
+
+    if f.knownObjects != nil {
+        // Detect deletions for objects not present in the queue, but present in KnownObjects
+        knownKeys := f.knownObjects.ListKeys()
+        for _, k := range knownKeys {
+            if keys.Has(k) {
+                continue
+            }
+            if len(f.items[k]) > 0 {
+                continue
+            }
+
+            deletedObj, exists, err := f.knownObjects.GetByKey(k)
+            if err != nil {
+                deletedObj = nil
+                klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k)
+            } else if !exists {
+                deletedObj = nil
+                klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k)
+            }
+            queuedDeletions++
+            if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
+                return err
+            }
+        }
+    }

     if !f.populated {
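The TransformFunc contract documented above is most commonly used to shrink cached objects. A sketch under those documented rules; the helper name and the choice to strip managedFields are illustrative assumptions, not part of this commit:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

// stripManagedFields drops a field most controllers never read, reducing
// informer cache memory. Per the contract above, DeletedFinalStateUnknown
// tombstones are passed through untouched, since their inner object already
// went through the transformer earlier.
func stripManagedFields(obj interface{}) (interface{}, error) {
	if _, ok := obj.(cache.DeletedFinalStateUnknown); ok {
		return obj, nil
	}
	if accessor, ok := obj.(metav1.Object); ok {
		accessor.SetManagedFields(nil)
	}
	return obj, nil
}

func main() {
	// The transformer is now wired through DeltaFIFOOptions (see the hunks
	// above); it runs once per object inside queueActionLocked.
	fifo := cache.NewDeltaFIFOWithOptions(cache.DeltaFIFOOptions{
		Transformer: stripManagedFields,
	})
	_ = fifo
}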
vendor/k8s.io/client-go/tools/cache/fifo.go (generated, vendored): 14 lines changed
@@ -25,7 +25,7 @@ import (

 // PopProcessFunc is passed to Pop() method of Queue interface.
 // It is supposed to process the accumulator popped from the queue.
-type PopProcessFunc func(interface{}) error
+type PopProcessFunc func(obj interface{}, isInInitialList bool) error

 // ErrRequeue may be returned by a PopProcessFunc to safely requeue
 // the current item. The value of Err will be returned from Pop.

@@ -82,9 +82,12 @@ type Queue interface {
 // Pop is helper function for popping from Queue.
 // WARNING: Do NOT use this function in non-test code to avoid races
 // unless you really really really really know what you are doing.
+//
+// NOTE: This function is deprecated and may be removed in the future without
+// additional warning.
 func Pop(queue Queue) interface{} {
     var result interface{}
-    queue.Pop(func(obj interface{}) error {
+    queue.Pop(func(obj interface{}, isInInitialList bool) error {
         result = obj
         return nil
     })

@@ -149,6 +152,10 @@ func (f *FIFO) Close() {
 func (f *FIFO) HasSynced() bool {
     f.lock.Lock()
     defer f.lock.Unlock()
+    return f.hasSynced_locked()
+}
+
+func (f *FIFO) hasSynced_locked() bool {
     return f.populated && f.initialPopulationCount == 0
 }

@@ -287,6 +294,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) {

             f.cond.Wait()
         }
+        isInInitialList := !f.hasSynced_locked()
         id := f.queue[0]
         f.queue = f.queue[1:]
         if f.initialPopulationCount > 0 {

@@ -298,7 +306,7 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) {
             continue
         }
         delete(f.items, id)
-        err := process(item)
+        err := process(item, isInInitialList)
         if e, ok := err.(ErrRequeue); ok {
             f.addIfNotPresent(id, item)
             err = e.Err
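The Queue contract change above ripples into any code that calls Pop directly. A hedged, self-contained sketch of the new two-argument PopProcessFunc; the toy key function and strings are illustrative only:

package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

func main() {
	// A toy FIFO keyed by the string value itself.
	f := cache.NewFIFO(func(obj interface{}) (string, error) {
		return obj.(string), nil
	})
	if err := f.Add("example"); err != nil {
		panic(err)
	}
	// The process callback now also learns whether the popped item belongs
	// to the queue's initial population.
	_, _ = f.Pop(func(obj interface{}, isInInitialList bool) error {
		fmt.Printf("popped %v (initial list: %v)\n", obj, isInInitialList)
		return nil
	})
}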
vendor/k8s.io/client-go/tools/cache/reflector.go (generated, vendored): 483 lines changed
@@ -23,6 +23,7 @@ import (
     "io"
     "math/rand"
     "reflect"
+    "strings"
     "sync"
     "time"

@@ -40,6 +41,7 @@ import (
     "k8s.io/client-go/tools/pager"
     "k8s.io/klog/v2"
     "k8s.io/utils/clock"
+    "k8s.io/utils/pointer"
     "k8s.io/utils/trace"
 )

@@ -49,12 +51,11 @@ const defaultExpectedTypeName = "<unspecified>"
 type Reflector struct {
     // name identifies this reflector. By default it will be a file:line if possible.
     name string
-
     // The name of the type we expect to place in the store. The name
     // will be the stringification of expectedGVK if provided, and the
     // stringification of expectedType otherwise. It is for display
     // only, and should not be used for parsing or comparison.
-    expectedTypeName string
+    typeDescription string
     // An example object of the type we expect to place in the store.
     // Only the type needs to be right, except that when that is
     // `unstructured.Unstructured` the object's `"apiVersion"` and

@@ -66,17 +67,11 @@ type Reflector struct {
     store Store
     // listerWatcher is used to perform lists and watches.
     listerWatcher ListerWatcher
-
     // backoff manages backoff of ListWatch
     backoffManager wait.BackoffManager
     // initConnBackoffManager manages backoff the initial connection with the Watch call of ListAndWatch.
     initConnBackoffManager wait.BackoffManager
-    // MaxInternalErrorRetryDuration defines how long we should retry internal errors returned by watch.
-    MaxInternalErrorRetryDuration time.Duration
-
-    resyncPeriod time.Duration
-    // ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked
-    ShouldResync func() bool
+    resyncPeriod time.Duration
     // clock allows tests to manipulate time
     clock clock.Clock
     // paginatedResult defines whether pagination should be forced for list calls.

@@ -91,6 +86,8 @@ type Reflector struct {
     isLastSyncResourceVersionUnavailable bool
     // lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion
     lastSyncResourceVersionMutex sync.RWMutex
+    // Called whenever the ListAndWatch drops the connection with an error.
+    watchErrorHandler WatchErrorHandler
     // WatchListPageSize is the requested chunk size of initial and resync watch lists.
     // If unset, for consistent reads (RV="") or reads that opt-into arbitrarily old data
     // (RV="0") it will default to pager.PageSize, for the rest (RV != "" && RV != "0")

@@ -99,8 +96,19 @@ type Reflector struct {
     // etcd, which is significantly less efficient and may lead to serious performance and
     // scalability problems.
     WatchListPageSize int64
-    // Called whenever the ListAndWatch drops the connection with an error.
-    watchErrorHandler WatchErrorHandler
+    // ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked
+    ShouldResync func() bool
+    // MaxInternalErrorRetryDuration defines how long we should retry internal errors returned by watch.
+    MaxInternalErrorRetryDuration time.Duration
+    // UseWatchList if turned on instructs the reflector to open a stream to bring data from the API server.
+    // Streaming has the primary advantage of using fewer server's resources to fetch data.
+    //
+    // The old behaviour establishes a LIST request which gets data in chunks.
+    // Paginated list is less efficient and depending on the actual size of objects
+    // might result in an increased memory consumption of the APIServer.
+    //
+    // See https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#design-details
+    UseWatchList bool
 }

 // ResourceVersionUpdater is an interface that allows store implementation to

@@ -131,13 +139,13 @@ func DefaultWatchErrorHandler(r *Reflector, err error) {
         // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already
         // has a semantic that it returns data at least as fresh as provided RV.
         // So first try to LIST with setting RV to resource version of last observed object.
-        klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err)
+        klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.typeDescription, err)
     case err == io.EOF:
         // watch closed normally
     case err == io.ErrUnexpectedEOF:
-        klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedTypeName, err)
+        klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.typeDescription, err)
     default:
-        utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedTypeName, err))
+        utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.typeDescription, err))
     }
 }

@@ -155,7 +163,40 @@ func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interfa
     return indexer, reflector
 }

-// NewReflector creates a new Reflector object which will keep the
+// NewReflector creates a new Reflector with its name defaulted to the closest source_file.go:line in the call stack
+// that is outside this package. See NewReflectorWithOptions for further information.
+func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
+    return NewReflectorWithOptions(lw, expectedType, store, ReflectorOptions{ResyncPeriod: resyncPeriod})
+}
+
+// NewNamedReflector creates a new Reflector with the specified name. See NewReflectorWithOptions for further
+// information.
+func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
+    return NewReflectorWithOptions(lw, expectedType, store, ReflectorOptions{Name: name, ResyncPeriod: resyncPeriod})
+}
+
+// ReflectorOptions configures a Reflector.
+type ReflectorOptions struct {
+    // Name is the Reflector's name. If unset/unspecified, the name defaults to the closest source_file.go:line
+    // in the call stack that is outside this package.
+    Name string
+
+    // TypeDescription is the Reflector's type description. If unset/unspecified, the type description is defaulted
+    // using the following rules: if the expectedType passed to NewReflectorWithOptions was nil, the type description is
+    // "<unspecified>". If the expectedType is an instance of *unstructured.Unstructured and its apiVersion and kind fields
+    // are set, the type description is the string encoding of those. Otherwise, the type description is set to the
+    // go type of expectedType..
+    TypeDescription string
+
+    // ResyncPeriod is the Reflector's resync period. If unset/unspecified, the resync period defaults to 0
+    // (do not resync).
+    ResyncPeriod time.Duration
+
+    // Clock allows tests to control time. If unset defaults to clock.RealClock{}
+    Clock clock.Clock
+}
+
+// NewReflectorWithOptions creates a new Reflector object which will keep the
 // given store up to date with the server's contents for the given
 // resource. Reflector promises to only put things in the store that
 // have the type of expectedType, unless expectedType is nil. If

@@ -165,49 +206,74 @@ func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interfa
 // "yes". This enables you to use reflectors to periodically process
 // everything as well as incrementally processing the things that
 // change.
-func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
-    return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod)
-}
-
-// NewNamedReflector same as NewReflector, but with a specified name for logging
-func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
-    realClock := &clock.RealClock{}
+func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store Store, options ReflectorOptions) *Reflector {
+    reflectorClock := options.Clock
+    if reflectorClock == nil {
+        reflectorClock = clock.RealClock{}
+    }
     r := &Reflector{
-        name:          name,
-        listerWatcher: lw,
-        store:         store,
+        name:            options.Name,
+        resyncPeriod:    options.ResyncPeriod,
+        typeDescription: options.TypeDescription,
+        listerWatcher:   lw,
+        store:           store,
         // We used to make the call every 1sec (1 QPS), the goal here is to achieve ~98% traffic reduction when
         // API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is
         // 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff.
-        backoffManager:         wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock),
-        initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock),
-        resyncPeriod:           resyncPeriod,
-        clock:                  realClock,
+        backoffManager:         wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock),
+        initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock),
+        clock:                  reflectorClock,
         watchErrorHandler:      WatchErrorHandler(DefaultWatchErrorHandler),
+        expectedType:           reflect.TypeOf(expectedType),
     }
-    r.setExpectedType(expectedType)
+
+    if r.name == "" {
+        r.name = naming.GetNameFromCallsite(internalPackages...)
+    }
+
+    if r.typeDescription == "" {
+        r.typeDescription = getTypeDescriptionFromObject(expectedType)
+    }
+
+    if r.expectedGVK == nil {
+        r.expectedGVK = getExpectedGVKFromObject(expectedType)
+    }
+
     return r
 }

-func (r *Reflector) setExpectedType(expectedType interface{}) {
-    r.expectedType = reflect.TypeOf(expectedType)
-    if r.expectedType == nil {
-        r.expectedTypeName = defaultExpectedTypeName
-        return
+func getTypeDescriptionFromObject(expectedType interface{}) string {
+    if expectedType == nil {
+        return defaultExpectedTypeName
     }

-    r.expectedTypeName = r.expectedType.String()
+    reflectDescription := reflect.TypeOf(expectedType).String()

-    if obj, ok := expectedType.(*unstructured.Unstructured); ok {
-        // Use gvk to check that watch event objects are of the desired type.
-        gvk := obj.GroupVersionKind()
-        if gvk.Empty() {
-            klog.V(4).Infof("Reflector from %s configured with expectedType of *unstructured.Unstructured with empty GroupVersionKind.", r.name)
-            return
-        }
-        r.expectedGVK = &gvk
-        r.expectedTypeName = gvk.String()
+    obj, ok := expectedType.(*unstructured.Unstructured)
+    if !ok {
+        return reflectDescription
     }
+
+    gvk := obj.GroupVersionKind()
+    if gvk.Empty() {
+        return reflectDescription
+    }
+
+    return gvk.String()
+}
+
+func getExpectedGVKFromObject(expectedType interface{}) *schema.GroupVersionKind {
+    obj, ok := expectedType.(*unstructured.Unstructured)
+    if !ok {
+        return nil
+    }
+
+    gvk := obj.GroupVersionKind()
+    if gvk.Empty() {
+        return nil
+    }
+
+    return &gvk
 }

 // internalPackages are packages that ignored when creating a default reflector name. These packages are in the common

@@ -218,13 +284,13 @@ var internalPackages = []string{"client-go/tools/cache/"}
 // objects and subsequent deltas.
 // Run will exit when stopCh is closed.
 func (r *Reflector) Run(stopCh <-chan struct{}) {
-    klog.V(3).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
+    klog.V(3).Infof("Starting reflector %s (%s) from %s", r.typeDescription, r.resyncPeriod, r.name)
     wait.BackoffUntil(func() {
         if err := r.ListAndWatch(stopCh); err != nil {
             r.watchErrorHandler(r, err)
         }
     }, r.backoffManager, true, stopCh)
-    klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
+    klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.typeDescription, r.resyncPeriod, r.name)
 }

 var (

@@ -254,42 +320,75 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) {
 // and then use the resource version to watch.
 // It returns error if ListAndWatch didn't even try to initialize watch.
 func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
-    klog.V(3).Infof("Listing and watching %v from %s", r.expectedTypeName, r.name)
+    klog.V(3).Infof("Listing and watching %v from %s", r.typeDescription, r.name)
+    var err error
+    var w watch.Interface
+    fallbackToList := !r.UseWatchList

-    err := r.list(stopCh)
-    if err != nil {
-        return err
+    if r.UseWatchList {
+        w, err = r.watchList(stopCh)
+        if w == nil && err == nil {
+            // stopCh was closed
+            return nil
+        }
+        if err != nil {
+            if !apierrors.IsInvalid(err) {
+                return err
+            }
+            klog.Warning("the watch-list feature is not supported by the server, falling back to the previous LIST/WATCH semantic")
+            fallbackToList = true
+            // Ensure that we won't accidentally pass some garbage down the watch.
+            w = nil
+        }
+    }
+
+    if fallbackToList {
+        err = r.list(stopCh)
+        if err != nil {
+            return err
+        }
     }

     resyncerrc := make(chan error, 1)
     cancelCh := make(chan struct{})
     defer close(cancelCh)
-    go func() {
-        resyncCh, cleanup := r.resyncChan()
-        defer func() {
-            cleanup() // Call the last one written into cleanup
-        }()
-        for {
-            select {
-            case <-resyncCh:
-            case <-stopCh:
-                return
-            case <-cancelCh:
-                return
-            }
-            if r.ShouldResync == nil || r.ShouldResync() {
-                klog.V(4).Infof("%s: forcing resync", r.name)
-                if err := r.store.Resync(); err != nil {
-                    resyncerrc <- err
-                    return
-                }
-            }
-            cleanup()
-            resyncCh, cleanup = r.resyncChan()
-        }
-    }()
+    go r.startResync(stopCh, cancelCh, resyncerrc)
+    return r.watch(w, stopCh, resyncerrc)
+}
+
+// startResync periodically calls r.store.Resync() method.
+// Note that this method is blocking and should be
+// called in a separate goroutine.
+func (r *Reflector) startResync(stopCh <-chan struct{}, cancelCh <-chan struct{}, resyncerrc chan error) {
+    resyncCh, cleanup := r.resyncChan()
+    defer func() {
+        cleanup() // Call the last one written into cleanup
+    }()
+    for {
+        select {
+        case <-resyncCh:
+        case <-stopCh:
+            return
+        case <-cancelCh:
+            return
+        }
+        if r.ShouldResync == nil || r.ShouldResync() {
+            klog.V(4).Infof("%s: forcing resync", r.name)
+            if err := r.store.Resync(); err != nil {
+                resyncerrc <- err
+                return
+            }
+        }
+        cleanup()
+        resyncCh, cleanup = r.resyncChan()
+    }
+}
+
+// watch simply starts a watch request with the server.
+func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc chan error) error {
+    var err error
+    retry := NewRetryWithDeadline(r.MaxInternalErrorRetryDuration, time.Minute, apierrors.IsInternalError, r.clock)
+
     for {
         // give the stopCh a chance to stop the loop, even in case of continue statements further down on errors
         select {

@@ -298,35 +397,41 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
         default:
         }

-        timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
-        options := metav1.ListOptions{
-            ResourceVersion: r.LastSyncResourceVersion(),
-            // We want to avoid situations of hanging watchers. Stop any watchers that do not
-            // receive any events within the timeout window.
-            TimeoutSeconds: &timeoutSeconds,
-            // To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks.
-            // Reflector doesn't assume bookmarks are returned at all (if the server do not support
-            // watch bookmarks, it will ignore this field).
-            AllowWatchBookmarks: true,
-        }
-
         // start the clock before sending the request, since some proxies won't flush headers until after the first watch event is sent
         start := r.clock.Now()
-        w, err := r.listerWatcher.Watch(options)
-        if err != nil {
-            // If this is "connection refused" error, it means that most likely apiserver is not responsive.
-            // It doesn't make sense to re-list all objects because most likely we will be able to restart
-            // watch where we ended.
-            // If that's the case begin exponentially backing off and resend watch request.
-            // Do the same for "429" errors.
-            if utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err) {
-                <-r.initConnBackoffManager.Backoff().C()
-                continue
+
+        if w == nil {
+            timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
+            options := metav1.ListOptions{
+                ResourceVersion: r.LastSyncResourceVersion(),
+                // We want to avoid situations of hanging watchers. Stop any watchers that do not
+                // receive any events within the timeout window.
+                TimeoutSeconds: &timeoutSeconds,
+                // To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks.
+                // Reflector doesn't assume bookmarks are returned at all (if the server do not support
+                // watch bookmarks, it will ignore this field).
+                AllowWatchBookmarks: true,
+            }
+
+            w, err = r.listerWatcher.Watch(options)
+            if err != nil {
+                if canRetry := isWatchErrorRetriable(err); canRetry {
+                    klog.V(4).Infof("%s: watch of %v returned %v - backing off", r.name, r.typeDescription, err)
+                    select {
+                    case <-stopCh:
+                        return nil
+                    case <-r.initConnBackoffManager.Backoff().C():
+                        continue
+                    }
+                }
+                return err
             }
-            return err
         }

-        err = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.expectedTypeName, r.setLastSyncResourceVersion, r.clock, resyncerrc, stopCh)
+        err = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.typeDescription, r.setLastSyncResourceVersion, nil, r.clock, resyncerrc, stopCh)
+        // Ensure that watch will not be reused across iterations.
+        w.Stop()
+        w = nil
         retry.After(err)
         if err != nil {
             if err != errorStopRequested {

@@ -335,16 +440,20 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
             // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already
             // has a semantic that it returns data at least as fresh as provided RV.
             // So first try to LIST with setting RV to resource version of last observed object.
-                klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err)
+                klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.typeDescription, err)
             case apierrors.IsTooManyRequests(err):
-                klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.expectedTypeName)
-                <-r.initConnBackoffManager.Backoff().C()
-                continue
+                klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.typeDescription)
+                select {
+                case <-stopCh:
+                    return nil
+                case <-r.initConnBackoffManager.Backoff().C():
+                    continue
+                }
             case apierrors.IsInternalError(err) && retry.ShouldRetry():
-                klog.V(2).Infof("%s: retrying watch of %v internal error: %v", r.name, r.expectedTypeName, err)
+                klog.V(2).Infof("%s: retrying watch of %v internal error: %v", r.name, r.typeDescription, err)
                 continue
             default:
-                klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err)
+                klog.Warningf("%s: watch of %v ended with: %v", r.name, r.typeDescription, err)
             }
         }
         return nil

@@ -421,8 +530,8 @@ func (r *Reflector) list(stopCh <-chan struct{}) error {
     }
     initTrace.Step("Objects listed", trace.Field{Key: "error", Value: err})
     if err != nil {
-        klog.Warningf("%s: failed to list %v: %v", r.name, r.expectedTypeName, err)
-        return fmt.Errorf("failed to list %v: %w", r.expectedTypeName, err)
+        klog.Warningf("%s: failed to list %v: %v", r.name, r.typeDescription, err)
+        return fmt.Errorf("failed to list %v: %w", r.typeDescription, err)
     }

     // We check if the list was paginated and if so set the paginatedResult based on that.

@@ -460,6 +569,114 @@ func (r *Reflector) list(stopCh <-chan struct{}) error {
     return nil
 }

+// watchList establishes a stream to get a consistent snapshot of data
+// from the server as described in https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#proposal
+//
+// case 1: start at Most Recent (RV="", ResourceVersionMatch=ResourceVersionMatchNotOlderThan)
+// Establishes a consistent stream with the server.
+// That means the returned data is consistent, as if, served directly from etcd via a quorum read.
+// It begins with synthetic "Added" events of all resources up to the most recent ResourceVersion.
+// It ends with a synthetic "Bookmark" event containing the most recent ResourceVersion.
+// After receiving a "Bookmark" event the reflector is considered to be synchronized.
+// It replaces its internal store with the collected items and
+// reuses the current watch requests for getting further events.
+//
+// case 2: start at Exact (RV>"0", ResourceVersionMatch=ResourceVersionMatchNotOlderThan)
+// Establishes a stream with the server at the provided resource version.
+// To establish the initial state the server begins with synthetic "Added" events.
+// It ends with a synthetic "Bookmark" event containing the provided or newer resource version.
+// After receiving a "Bookmark" event the reflector is considered to be synchronized.
+// It replaces its internal store with the collected items and
+// reuses the current watch requests for getting further events.
+func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) {
+    var w watch.Interface
+    var err error
+    var temporaryStore Store
+    var resourceVersion string
+    // TODO(#115478): see if this function could be turned
+    // into a method and see if error handling
+    // could be unified with the r.watch method
+    isErrorRetriableWithSideEffectsFn := func(err error) bool {
+        if canRetry := isWatchErrorRetriable(err); canRetry {
+            klog.V(2).Infof("%s: watch-list of %v returned %v - backing off", r.name, r.typeDescription, err)
+            <-r.initConnBackoffManager.Backoff().C()
+            return true
+        }
+        if isExpiredError(err) || isTooLargeResourceVersionError(err) {
+            // we tried to re-establish a watch request but the provided RV
+            // has either expired or it is greater than the server knows about.
+            // In that case we reset the RV and
+            // try to get a consistent snapshot from the watch cache (case 1)
+            r.setIsLastSyncResourceVersionUnavailable(true)
+            return true
+        }
+        return false
+    }
+
+    initTrace := trace.New("Reflector WatchList", trace.Field{Key: "name", Value: r.name})
+    defer initTrace.LogIfLong(10 * time.Second)
+    for {
+        select {
+        case <-stopCh:
+            return nil, nil
+        default:
+        }
+
+        resourceVersion = ""
+        lastKnownRV := r.rewatchResourceVersion()
+        temporaryStore = NewStore(DeletionHandlingMetaNamespaceKeyFunc)
+        // TODO(#115478): large "list", slow clients, slow network, p&f
+        // might slow down streaming and eventually fail.
+        // maybe in such a case we should retry with an increased timeout?
+        timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
+        options := metav1.ListOptions{
+            ResourceVersion:      lastKnownRV,
+            AllowWatchBookmarks:  true,
+            SendInitialEvents:    pointer.Bool(true),
+            ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
+            TimeoutSeconds:       &timeoutSeconds,
+        }
+        start := r.clock.Now()
+
+        w, err = r.listerWatcher.Watch(options)
+        if err != nil {
+            if isErrorRetriableWithSideEffectsFn(err) {
+                continue
+            }
+            return nil, err
+        }
+        bookmarkReceived := pointer.Bool(false)
+        err = watchHandler(start, w, temporaryStore, r.expectedType, r.expectedGVK, r.name, r.typeDescription,
+            func(rv string) { resourceVersion = rv },
+            bookmarkReceived,
+            r.clock, make(chan error), stopCh)
+        if err != nil {
+            w.Stop() // stop and retry with clean state
+            if err == errorStopRequested {
+                return nil, nil
+            }
+            if isErrorRetriableWithSideEffectsFn(err) {
+                continue
+            }
+            return nil, err
+        }
+        if *bookmarkReceived {
+            break
+        }
+    }
+    // We successfully got initial state from watch-list confirmed by the
+    // "k8s.io/initial-events-end" bookmark.
+    initTrace.Step("Objects streamed", trace.Field{Key: "count", Value: len(temporaryStore.List())})
+    r.setIsLastSyncResourceVersionUnavailable(false)
+    if err = r.store.Replace(temporaryStore.List(), resourceVersion); err != nil {
+        return nil, fmt.Errorf("unable to sync watch-list result: %v", err)
+    }
+    initTrace.Step("SyncWith done")
+    r.setLastSyncResourceVersion(resourceVersion)
+
+    return w, nil
+}
+
 // syncWith replaces the store's items with the given list.
 func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) error {
     found := make([]interface{}, 0, len(items))

@@ -478,15 +695,17 @@ func watchHandler(start time.Time,
     name string,
     expectedTypeName string,
     setLastSyncResourceVersion func(string),
+    exitOnInitialEventsEndBookmark *bool,
     clock clock.Clock,
     errc chan error,
     stopCh <-chan struct{},
 ) error {
     eventCount := 0
-
-    // Stopping the watcher should be idempotent and if we return from this function there's no way
-    // we're coming back in with the same watch interface.
-    defer w.Stop()
+    if exitOnInitialEventsEndBookmark != nil {
+        // set it to false just in case somebody
+        // made it positive
+        *exitOnInitialEventsEndBookmark = false
+    }

 loop:
     for {

@@ -541,6 +760,11 @@ loop:
             }
         case watch.Bookmark:
             // A `Bookmark` means watch has synced here, just update the resourceVersion
+            if _, ok := meta.GetAnnotations()["k8s.io/initial-events-end"]; ok {
+                if exitOnInitialEventsEndBookmark != nil {
+                    *exitOnInitialEventsEndBookmark = true
+                }
+            }
         default:
             utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event))
         }

@@ -549,6 +773,11 @@ loop:
             rvu.UpdateResourceVersion(resourceVersion)
         }
         eventCount++
+        if exitOnInitialEventsEndBookmark != nil && *exitOnInitialEventsEndBookmark {
+            watchDuration := clock.Since(start)
+            klog.V(4).Infof("exiting %v Watch because received the bookmark that marks the end of initial events stream, total %v items received in %v", name, eventCount, watchDuration)
+            return nil
+        }
     }
 }

@@ -597,6 +826,18 @@ func (r *Reflector) relistResourceVersion() string {
     return r.lastSyncResourceVersion
 }

+// rewatchResourceVersion determines the resource version the reflector should start streaming from.
+func (r *Reflector) rewatchResourceVersion() string {
+    r.lastSyncResourceVersionMutex.RLock()
+    defer r.lastSyncResourceVersionMutex.RUnlock()
+    if r.isLastSyncResourceVersionUnavailable {
+        // initial stream should return data at the most recent resource version.
+        // the returned data must be consistent i.e. as if served from etcd via a quorum read
+        return ""
+    }
+    return r.lastSyncResourceVersion
+}
+
 // setIsLastSyncResourceVersionUnavailable sets if the last list or watch request with lastSyncResourceVersion returned
 // "expired" or "too large resource version" error.
 func (r *Reflector) setIsLastSyncResourceVersionUnavailable(isUnavailable bool) {

@@ -635,5 +876,25 @@ func isTooLargeResourceVersionError(err error) bool {
             return true
         }
     }
+
+    // Matches the message returned by api server before 1.17.0
+    if strings.Contains(apierr.Status().Message, "Too large resource version") {
+        return true
+    }
+
     return false
 }
+
+// isWatchErrorRetriable determines if it is safe to retry
+// a watch error retrieved from the server.
+func isWatchErrorRetriable(err error) bool {
+    // If this is "connection refused" error, it means that most likely apiserver is not responsive.
+    // It doesn't make sense to re-list all objects because most likely we will be able to restart
+    // watch where we ended.
+    // If that's the case begin exponentially backing off and resend watch request.
+    // Do the same for "429" errors.
+    if utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err) {
+        return true
+    }
+    return false
+}
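The constructor changes above fold the former NewReflector/NewNamedReflector knobs into ReflectorOptions. A hedged sketch of building a Reflector against the new surface; the names and option values are illustrative, and the clientset wiring is assumed rather than shown:

package main

import (
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// newPodReflector shows name, type description, and resync period travelling
// in ReflectorOptions instead of being set on the struct afterwards.
func newPodReflector(clientset kubernetes.Interface) *cache.Reflector {
	lw := cache.NewListWatchFromClient(
		clientset.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	return cache.NewReflectorWithOptions(lw, &v1.Pod{}, store, cache.ReflectorOptions{
		Name:            "pod-reflector", // hypothetical; defaults to caller file:line when empty
		TypeDescription: "*v1.Pod",       // surfaces in the klog lines rewritten above
		ResyncPeriod:    10 * time.Minute,
	})
}

func main() {
	// Building a real clientset (kubeconfig or in-cluster config) is out of
	// scope for this sketch; newPodReflector would then be driven via Run.
}

Note that the streaming path added above (watchList) stays off unless the ENABLE_CLIENT_GO_WATCH_LIST_ALPHA environment variable is set, per the controller.go hunk earlier in this commit.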
vendor/k8s.io/client-go/tools/cache/shared_informer.go (generated, vendored): 147 lines changed
@ -26,6 +26,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/tools/cache/synctrack"
|
||||
"k8s.io/utils/buffer"
|
||||
"k8s.io/utils/clock"
|
||||
|
||||
@ -132,11 +133,13 @@ import (
|
||||
// state, except that its ResourceVersion is replaced with a
|
||||
// ResourceVersion in which the object is actually absent.
|
||||
type SharedInformer interface {
|
||||
// AddEventHandler adds an event handler to the shared informer using the shared informer's resync
|
||||
// period. Events to a single handler are delivered sequentially, but there is no coordination
|
||||
// between different handlers.
|
||||
// It returns a registration handle for the handler that can be used to remove
|
||||
// the handler again.
|
||||
// AddEventHandler adds an event handler to the shared informer using
|
||||
// the shared informer's resync period. Events to a single handler are
|
||||
// delivered sequentially, but there is no coordination between
|
||||
// different handlers.
|
||||
// It returns a registration handle for the handler that can be used to
|
||||
// remove the handler again, or to tell if the handler is synced (has
|
||||
// seen every item in the initial list).
|
||||
AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error)
|
||||
// AddEventHandlerWithResyncPeriod adds an event handler to the
|
||||
// shared informer with the requested resync period; zero means
|
||||
@ -169,6 +172,10 @@ type SharedInformer interface {
|
||||
// HasSynced returns true if the shared informer's store has been
|
||||
// informed by at least one full LIST of the authoritative state
|
||||
// of the informer's object collection. This is unrelated to "resync".
|
||||
//
|
||||
// Note that this doesn't tell you if an individual handler is synced!!
|
||||
// For that, please call HasSynced on the handle returned by
|
||||
// AddEventHandler.
|
||||
HasSynced() bool
|
||||
// LastSyncResourceVersion is the resource version observed when last synced with the underlying
|
||||
// store. The value returned is not synchronized with access to the underlying store and is not
|
||||
@ -198,10 +205,7 @@ type SharedInformer interface {
|
||||
//
|
||||
// Must be set before starting the informer.
|
||||
//
|
||||
// Note: Since the object given to the handler may be already shared with
|
||||
// other goroutines, it is advisable to copy the object being
|
||||
// transform before mutating it at all and returning the copy to prevent
|
||||
// data races.
|
||||
// Please see the comment on TransformFunc for more details.
|
||||
SetTransform(handler TransformFunc) error
|
||||
|
||||
// IsStopped reports whether the informer has already been stopped.
|
||||
@ -213,7 +217,14 @@ type SharedInformer interface {
|
||||
// Opaque interface representing the registration of ResourceEventHandler for
|
||||
// a SharedInformer. Must be supplied back to the same SharedInformer's
|
||||
// `RemoveEventHandler` to unregister the handlers.
|
||||
type ResourceEventHandlerRegistration interface{}
|
||||
//
|
||||
// Also used to tell if the handler is synced (has had all items in the initial
|
||||
// list delivered).
|
||||
type ResourceEventHandlerRegistration interface {
|
||||
// HasSynced reports if both the parent has synced and all pre-sync
|
||||
// events have been delivered.
|
||||
HasSynced() bool
|
||||
}
|
||||
|
||||
// SharedIndexInformer provides add and get Indexers ability based on SharedInformer.
|
||||
type SharedIndexInformer interface {
|
||||
@ -223,14 +234,26 @@ type SharedIndexInformer interface {
|
||||
GetIndexer() Indexer
|
||||
}
|
||||
|
||||
// NewSharedInformer creates a new instance for the listwatcher.
|
||||
// NewSharedInformer creates a new instance for the ListerWatcher. See NewSharedIndexInformerWithOptions for full details.
|
||||
func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) SharedInformer {
|
||||
return NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, Indexers{})
|
||||
}
|
||||
|
||||
// NewSharedIndexInformer creates a new instance for the listwatcher.
|
||||
// The created informer will not do resyncs if the given
|
||||
// defaultEventHandlerResyncPeriod is zero. Otherwise: for each
|
||||
// NewSharedIndexInformer creates a new instance for the ListerWatcher and specified Indexers. See
|
||||
// NewSharedIndexInformerWithOptions for full details.
|
||||
func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
|
||||
return NewSharedIndexInformerWithOptions(
|
||||
lw,
|
||||
exampleObject,
|
||||
SharedIndexInformerOptions{
|
||||
ResyncPeriod: defaultEventHandlerResyncPeriod,
|
||||
Indexers: indexers,
|
||||
},
|
||||
)
|
||||
}

// NewSharedIndexInformerWithOptions creates a new instance for the ListerWatcher.
// The created informer will not do resyncs if options.ResyncPeriod is zero. Otherwise: for each
// handler with a non-zero requested resync period, whether added
// before or after the informer starts, the nominal resync period is
// the requested resync period rounded up to a multiple of the
@ -238,21 +261,36 @@ func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEv
// checking period is established when the informer starts running,
// and is the maximum of (a) the minimum of the resync periods
// requested before the informer starts and the
// defaultEventHandlerResyncPeriod given here and (b) the constant
// options.ResyncPeriod given here and (b) the constant
// `minimumResyncPeriod` defined in this file.
func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
func NewSharedIndexInformerWithOptions(lw ListerWatcher, exampleObject runtime.Object, options SharedIndexInformerOptions) SharedIndexInformer {
	realClock := &clock.RealClock{}
	sharedIndexInformer := &sharedIndexInformer{

	return &sharedIndexInformer{
		indexer:                         NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, options.Indexers),
		processor:                       &sharedProcessor{clock: realClock},
		indexer:                         NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
		listerWatcher:                   lw,
		objectType:                      exampleObject,
		resyncCheckPeriod:               defaultEventHandlerResyncPeriod,
		defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod,
		cacheMutationDetector:           NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)),
		objectDescription:               options.ObjectDescription,
		resyncCheckPeriod:               options.ResyncPeriod,
		defaultEventHandlerResyncPeriod: options.ResyncPeriod,
		clock:                           realClock,
		cacheMutationDetector:           NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)),
	}
	return sharedIndexInformer
}

// SharedIndexInformerOptions configures a sharedIndexInformer.
type SharedIndexInformerOptions struct {
	// ResyncPeriod is the default event handler resync period and resync check
	// period. If unset/unspecified, these are defaulted to 0 (do not resync).
	ResyncPeriod time.Duration

	// Indexers is the sharedIndexInformer's indexers. If unset/unspecified, no indexers are configured.
	Indexers Indexers

	// ObjectDescription is the sharedIndexInformer's object description. This is passed through to the
	// underlying Reflector's type description.
	ObjectDescription string
}
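Construction sketch (not part of the diff): how a caller might reach the options struct directly. The Pod client wiring, namespace, and the 30-second resync are assumptions for illustration; the constructor and option fields are as above.

package example

import (
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

// newPodInformer builds a pod informer through the options-based constructor.
func newPodInformer(cfg *rest.Config) (cache.SharedIndexInformer, error) {
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}
	lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
	return cache.NewSharedIndexInformerWithOptions(
		lw,
		&v1.Pod{}, // example object: events with a mismatching type are dropped
		cache.SharedIndexInformerOptions{
			ResyncPeriod:      30 * time.Second, // illustrative period
			Indexers:          cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
			ObjectDescription: "pods", // flows into the Reflector's type description
		},
	), nil
}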

// InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced.
@ -326,12 +364,13 @@ type sharedIndexInformer struct {

	listerWatcher ListerWatcher

	// objectType is an example object of the type this informer is
	// expected to handle. Only the type needs to be right, except
	// that when that is `unstructured.Unstructured` the object's
	// `"apiVersion"` and `"kind"` must also be right.
	// objectType is an example object of the type this informer is expected to handle. If set, an event
	// with an object with a mismatching type is dropped instead of being delivered to listeners.
	objectType runtime.Object

	// objectDescription is the description of this informer's objects. This typically defaults to
	objectDescription string

	// resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call
	// shouldResync to check if any of our listeners need a resync.
	resyncCheckPeriod time.Duration
@ -381,7 +420,8 @@ type updateNotification struct {
}

type addNotification struct {
	newObj interface{}
	newObj          interface{}
	isInInitialList bool
}

type deleteNotification struct {
@ -422,15 +462,17 @@ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
	fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
		KnownObjects:          s.indexer,
		EmitDeltaTypeReplaced: true,
		Transformer:           s.transform,
	})

	cfg := &Config{
		Queue:            fifo,
		ListerWatcher:    s.listerWatcher,
		ObjectType:       s.objectType,
		FullResyncPeriod: s.resyncCheckPeriod,
		RetryOnError:     false,
		ShouldResync:     s.processor.shouldResync,
		Queue:             fifo,
		ListerWatcher:     s.listerWatcher,
		ObjectType:        s.objectType,
		ObjectDescription: s.objectDescription,
		FullResyncPeriod:  s.resyncCheckPeriod,
		RetryOnError:      false,
		ShouldResync:      s.processor.shouldResync,

		Process:           s.HandleDeltas,
		WatchErrorHandler: s.watchErrorHandler,
@ -559,7 +601,7 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv
		}
	}

	listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize)
	listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize, s.HasSynced)

	if !s.started {
		return s.processor.addListener(listener), nil
@ -575,27 +617,35 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv

	handle := s.processor.addListener(listener)
	for _, item := range s.indexer.List() {
		listener.add(addNotification{newObj: item})
		// Note that we enqueue these notifications with the lock held
		// and before returning the handle. That means there is never a
		// chance for anyone to call the handle's HasSynced method in a
		// state when it would falsely return true (i.e., when the
		// shared informer is synced but it has not observed an Add
		// with isInitialList being true, nor when the thread
		// processing notifications somehow goes faster than this
		// thread adding them and the counter is temporarily zero).
		listener.add(addNotification{newObj: item, isInInitialList: true})
	}
	return handle, nil
}

func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
func (s *sharedIndexInformer) HandleDeltas(obj interface{}, isInInitialList bool) error {
	s.blockDeltas.Lock()
	defer s.blockDeltas.Unlock()

	if deltas, ok := obj.(Deltas); ok {
		return processDeltas(s, s.indexer, s.transform, deltas)
		return processDeltas(s, s.indexer, deltas, isInInitialList)
	}
	return errors.New("object given as Process argument is not Deltas")
}

// Conforms to ResourceEventHandler
func (s *sharedIndexInformer) OnAdd(obj interface{}) {
func (s *sharedIndexInformer) OnAdd(obj interface{}, isInInitialList bool) {
	// Invocation of this function is locked under s.blockDeltas, so it is
	// safe to distribute the notification
	s.cacheMutationDetector.AddObject(obj)
	s.processor.distribute(addNotification{newObj: obj}, false)
	s.processor.distribute(addNotification{newObj: obj, isInInitialList: isInInitialList}, false)
}
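Handler-side sketch (not from the vendored file): client-go 0.27 also ships ResourceEventHandlerDetailedFuncs, whose AddFunc receives the new flag, while plain ResourceEventHandlerFuncs keep compiling because their OnAdd adapter discards it. The logging below is illustrative.

package example

import (
	"log"

	"k8s.io/client-go/tools/cache"
)

// detailedHandler distinguishes replayed initial-list adds from live adds.
var detailedHandler = cache.ResourceEventHandlerDetailedFuncs{
	AddFunc: func(obj interface{}, isInInitialList bool) {
		if isInInitialList {
			log.Println("replay of an object from the initial list")
			return
		}
		log.Println("genuinely new object")
	},
}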

// Conforms to ResourceEventHandler
@ -817,6 +867,8 @@ type processorListener struct {

	handler ResourceEventHandler

	syncTracker *synctrack.SingleFileTracker

	// pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed.
	// There is one per listener, but a failing/stalled listener will have infinite pendingNotifications
	// added until we OOM.
@ -847,11 +899,18 @@ type processorListener struct {
	resyncLock sync.Mutex
}

func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int) *processorListener {
// HasSynced returns true if the source informer has synced, and all
// corresponding events have been delivered.
func (p *processorListener) HasSynced() bool {
	return p.syncTracker.HasSynced()
}

func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int, hasSynced func() bool) *processorListener {
	ret := &processorListener{
		nextCh:                make(chan interface{}),
		addCh:                 make(chan interface{}),
		handler:               handler,
		syncTracker:           &synctrack.SingleFileTracker{UpstreamHasSynced: hasSynced},
		pendingNotifications:  *buffer.NewRingGrowing(bufferSize),
		requestedResyncPeriod: requestedResyncPeriod,
		resyncPeriod:          resyncPeriod,
@ -863,6 +922,9 @@ func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, res
}

func (p *processorListener) add(notification interface{}) {
	if a, ok := notification.(addNotification); ok && a.isInInitialList {
		p.syncTracker.Start()
	}
	p.addCh <- notification
}

@ -908,7 +970,10 @@ func (p *processorListener) run() {
			case updateNotification:
				p.handler.OnUpdate(notification.oldObj, notification.newObj)
			case addNotification:
				p.handler.OnAdd(notification.newObj)
				p.handler.OnAdd(notification.newObj, notification.isInInitialList)
				if notification.isInInitialList {
					p.syncTracker.Finished()
				}
			case deleteNotification:
				p.handler.OnDelete(notification.oldObj)
			default:
83
vendor/k8s.io/client-go/tools/cache/synctrack/lazy.go
generated
vendored
Normal file
@ -0,0 +1,83 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package synctrack

import (
	"sync"
	"sync/atomic"
)

// Lazy defers the computation of `Evaluate` to when it is necessary. It is
// possible that Evaluate will be called in parallel from multiple goroutines.
type Lazy[T any] struct {
	Evaluate func() (T, error)

	cache atomic.Pointer[cacheEntry[T]]
}

type cacheEntry[T any] struct {
	eval   func() (T, error)
	lock   sync.RWMutex
	result *T
}

func (e *cacheEntry[T]) get() (T, error) {
	if cur := func() *T {
		e.lock.RLock()
		defer e.lock.RUnlock()
		return e.result
	}(); cur != nil {
		return *cur, nil
	}

	e.lock.Lock()
	defer e.lock.Unlock()
	if e.result != nil {
		return *e.result, nil
	}
	r, err := e.eval()
	if err == nil {
		e.result = &r
	}
	return r, err
}

func (z *Lazy[T]) newCacheEntry() *cacheEntry[T] {
	return &cacheEntry[T]{eval: z.Evaluate}
}

// Notify should be called when something has changed necessitating a new call
// to Evaluate.
func (z *Lazy[T]) Notify() { z.cache.Swap(z.newCacheEntry()) }

// Get should be called to get the current result of a call to Evaluate. If the
// current cached value is stale (due to a call to Notify), then Evaluate will
// be called synchronously. If subsequent calls to Get happen (without another
// Notify), they will all wait for the same return value.
//
// Error returns are not cached and will cause multiple calls to evaluate!
func (z *Lazy[T]) Get() (T, error) {
	e := z.cache.Load()
	if e == nil {
		// Since we don't force a constructor, nil is a possible value.
		// If multiple Gets race to set this, the swap makes sure only
		// one wins.
		z.cache.CompareAndSwap(nil, z.newCacheEntry())
		e = z.cache.Load()
	}
	return e.get()
}
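Usage sketch for Lazy (not part of the vendored file): a value that is recomputed at most once per Notify. The names and the stand-in computation are illustrative.

package example

import (
	"k8s.io/client-go/tools/cache/synctrack"
)

// expensive stands in for a costly computation, e.g. a discovery call.
func expensive() ([]string, error) {
	return []string{"pods", "services"}, nil
}

var resourceNames = synctrack.Lazy[[]string]{Evaluate: expensive}

func onCacheInvalidated() {
	resourceNames.Notify() // the next Get re-runs Evaluate
}

func currentResourceNames() ([]string, error) {
	return resourceNames.Get() // cached until the next Notify
}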
120
vendor/k8s.io/client-go/tools/cache/synctrack/synctrack.go
generated
vendored
Normal file
@ -0,0 +1,120 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package synctrack contains utilities for helping controllers track whether
// they are "synced" or not, that is, whether they have processed all items
// from the informer's initial list.
package synctrack

import (
	"sync"
	"sync/atomic"

	"k8s.io/apimachinery/pkg/util/sets"
)

// AsyncTracker helps propagate HasSynced in the face of multiple worker threads.
type AsyncTracker[T comparable] struct {
	UpstreamHasSynced func() bool

	lock    sync.Mutex
	waiting sets.Set[T]
}

// Start should be called prior to processing each key which is part of the
// initial list.
func (t *AsyncTracker[T]) Start(key T) {
	t.lock.Lock()
	defer t.lock.Unlock()
	if t.waiting == nil {
		t.waiting = sets.New[T](key)
	} else {
		t.waiting.Insert(key)
	}
}

// Finished should be called when finished processing a key which was part of
// the initial list. Since keys are tracked individually, nothing bad happens
// if you call Finished without a corresponding call to Start. This makes it
// easier to use this in combination with e.g. queues which don't make it easy
// to plumb through the isInInitialList boolean.
func (t *AsyncTracker[T]) Finished(key T) {
	t.lock.Lock()
	defer t.lock.Unlock()
	if t.waiting != nil {
		t.waiting.Delete(key)
	}
}

// HasSynced returns true if the source is synced and every key present in the
// initial list has been processed. This relies on the source not considering
// itself synced until *after* it has delivered the notification for the last
// key, and that notification handler must have called Start.
func (t *AsyncTracker[T]) HasSynced() bool {
	// Call UpstreamHasSynced first: it might take a lock, which might take
	// a significant amount of time, and we can't hold our lock while
	// waiting on that or a user is likely to get a deadlock.
	if !t.UpstreamHasSynced() {
		return false
	}
	t.lock.Lock()
	defer t.lock.Unlock()
	return t.waiting.Len() == 0
}
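Usage sketch for AsyncTracker (not from the vendored file): fanning initial-list keys out to workers while still exposing an accurate HasSynced. All names are illustrative; the upstream function is assumed to come from an informer.

package example

import (
	"k8s.io/client-go/tools/cache/synctrack"
)

// newFanOutSync wires an AsyncTracker between an upstream HasSynced (e.g. an
// informer's) and workers that process keys out of order.
func newFanOutSync(upstreamHasSynced func() bool, process func(string)) (enqueue func(key string, initial bool), hasSynced func() bool) {
	tracker := &synctrack.AsyncTracker[string]{UpstreamHasSynced: upstreamHasSynced}
	enqueue = func(key string, initial bool) {
		if initial {
			tracker.Start(key) // must precede handing the key to a worker
		}
		go func() {
			process(key)
			tracker.Finished(key) // harmless even without a matching Start
		}()
	}
	return enqueue, tracker.HasSynced
}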

// SingleFileTracker helps propagate HasSynced when events are processed in
// order (i.e. via a queue).
type SingleFileTracker struct {
	// Important: count is used with atomic operations so it must be 64-bit
	// aligned, otherwise atomic operations will panic. Having it at the top of
	// the struct will guarantee that, even on 32-bit arches.
	// See https://pkg.go.dev/sync/atomic#pkg-note-BUG for more information.
	count int64

	UpstreamHasSynced func() bool
}

// Start should be called prior to processing each key which is part of the
// initial list.
func (t *SingleFileTracker) Start() {
	atomic.AddInt64(&t.count, 1)
}

// Finished should be called when finished processing a key which was part of
// the initial list. You must never call Finished() before (or without) its
// corresponding Start(), that is a logic error that could cause HasSynced to
// return a wrong value. To help you notice this should it happen, Finished()
// will panic if the internal counter goes negative.
func (t *SingleFileTracker) Finished() {
	result := atomic.AddInt64(&t.count, -1)
	if result < 0 {
		panic("synctrack: negative counter; this logic error means HasSynced may return incorrect value")
	}
}

// HasSynced returns true if the source is synced and every key present in the
// initial list has been processed. This relies on the source not considering
// itself synced until *after* it has delivered the notification for the last
// key, and that notification handler must have called Start.
func (t *SingleFileTracker) HasSynced() bool {
	// Call UpstreamHasSynced first: it might take a lock, which might take
	// a significant amount of time, and we don't want to then act on a
	// stale count value.
	if !t.UpstreamHasSynced() {
		return false
	}
	return atomic.LoadInt64(&t.count) <= 0
}
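Usage sketch for SingleFileTracker (not from the vendored file): the same Start/Finished pairing that processorListener.add and run use above, reduced to one consumer goroutine. The item type and channel sizing are illustrative.

package example

import (
	"k8s.io/client-go/tools/cache/synctrack"
)

type item struct {
	obj     interface{}
	initial bool // true if the object came from the initial list
}

// startConsumer processes items in order and reports sync through the tracker.
// It returns an enqueue function and a HasSynced suitable for this consumer.
func startConsumer(upstreamHasSynced func() bool, handle func(interface{})) (func(item), func() bool) {
	t := &synctrack.SingleFileTracker{UpstreamHasSynced: upstreamHasSynced}
	ch := make(chan item, 16)
	go func() {
		for it := range ch {
			handle(it.obj)
			if it.initial {
				t.Finished() // paired with the Start below
			}
		}
	}()
	enqueue := func(it item) {
		if it.initial {
			t.Start() // counted before HasSynced can observe completion
		}
		ch <- it
	}
	return enqueue, t.HasSynced
}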