Mirror of https://github.com/ceph/ceph-csi.git

rebase: update kubernetes to v1.25.0

Update kubernetes to the latest v1.25.0 release.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Committed by: mergify[bot]
Parent: f47839d73d
Commit: e3bf375035
26  vendor/k8s.io/client-go/tools/auth/clientauth.go  (generated, vendored)

@@ -45,20 +45,20 @@ client.Client from an authcfg.Info.

Example:

    import (
        "pkg/client"
        "pkg/client/auth"
    )

    info, err := auth.LoadFromFile(filename)
    if err != nil {
        // handle error
    }
    clientConfig = client.Config{}
    clientConfig.Host = "example.com:4901"
    clientConfig = info.MergeWithConfig()
    client := client.New(clientConfig)
    client.Pods(ns).List()
*/
package auth
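The doc comment above still shows the long-retired "pkg/client" API. As a rough, hedged sketch of how the same Info type is typically used today, the snippet below loads credentials with LoadFromFile and merges them into a rest.Config; the file path and host are placeholders, not values from this commit.

package main

import (
    "fmt"

    "k8s.io/client-go/rest"
    clientauth "k8s.io/client-go/tools/auth"
)

func main() {
    // Load an authcfg.Info from a file (placeholder path).
    info, err := clientauth.LoadFromFile("/var/run/kubernetes/.kubernetes_auth")
    if err != nil {
        panic(err)
    }

    // Merge the loaded credentials into a rest.Config pointed at the API server.
    cfg, err := info.MergeWithConfig(rest.Config{Host: "https://example.com:6443"})
    if err != nil {
        panic(err)
    }
    fmt.Println("configured host:", cfg.Host)
}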
77  vendor/k8s.io/client-go/tools/cache/controller.go  (generated, vendored)

@@ -199,17 +199,17 @@ func (c *controller) processLoop() {
// can't return an error. The handlers MUST NOT modify the objects
// received; this concerns not only the top level of structure but all
// the data structures reachable from it.
-//  * OnAdd is called when an object is added.
-//  * OnUpdate is called when an object is modified. Note that oldObj is the
-//      last known state of the object-- it is possible that several changes
-//      were combined together, so you can't use this to see every single
-//      change. OnUpdate is also called when a re-list happens, and it will
-//      get called even if nothing changed. This is useful for periodically
-//      evaluating or syncing something.
-//  * OnDelete will get the final state of the item if it is known, otherwise
-//      it will get an object of type DeletedFinalStateUnknown. This can
-//      happen if the watch is closed and misses the delete event and we don't
-//      notice the deletion until the subsequent re-list.
+//   - OnAdd is called when an object is added.
+//   - OnUpdate is called when an object is modified. Note that oldObj is the
+//     last known state of the object-- it is possible that several changes
+//     were combined together, so you can't use this to see every single
+//     change. OnUpdate is also called when a re-list happens, and it will
+//     get called even if nothing changed. This is useful for periodically
+//     evaluating or syncing something.
+//   - OnDelete will get the final state of the item if it is known, otherwise
+//     it will get an object of type DeletedFinalStateUnknown. This can
+//     happen if the watch is closed and misses the delete event and we don't
+//     notice the deletion until the subsequent re-list.
type ResourceEventHandler interface {
    OnAdd(obj interface{})
    OnUpdate(oldObj, newObj interface{})
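In practice the ResourceEventHandler contract described above is usually satisfied with cache.ResourceEventHandlerFuncs rather than a hand-written type. A minimal, hedged sketch (the printed messages are illustrative only):

package main

import (
    "fmt"

    "k8s.io/client-go/tools/cache"
)

func main() {
    // ResourceEventHandlerFuncs adapts three closures to the ResourceEventHandler
    // interface; any nil func is simply skipped.
    handler := cache.ResourceEventHandlerFuncs{
        AddFunc:    func(obj interface{}) { fmt.Println("added:", obj) },
        UpdateFunc: func(oldObj, newObj interface{}) { fmt.Println("updated:", newObj) },
        DeleteFunc: func(obj interface{}) {
            // obj may be a DeletedFinalStateUnknown tombstone, as the comment above notes.
            if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
                obj = tombstone.Obj
            }
            fmt.Println("deleted:", obj)
        },
    }

    var _ cache.ResourceEventHandler = handler // compile-time check of the contract
    handler.OnAdd("example")
}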
@@ -305,15 +305,14 @@ func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
// notifications to be faulty.
//
// Parameters:
-//  * lw is list and watch functions for the source of the resource you want to
-//      be informed of.
-//  * objType is an object of the type that you expect to receive.
-//  * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
-//      calls, even if nothing changed). Otherwise, re-list will be delayed as
-//      long as possible (until the upstream source closes the watch or times out,
-//      or you stop the controller).
-//  * h is the object you want notifications sent to.
-//
+//   - lw is list and watch functions for the source of the resource you want to
+//     be informed of.
+//   - objType is an object of the type that you expect to receive.
+//   - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
+//     calls, even if nothing changed). Otherwise, re-list will be delayed as
+//     long as possible (until the upstream source closes the watch or times out,
+//     or you stop the controller).
+//   - h is the object you want notifications sent to.
func NewInformer(
    lw ListerWatcher,
    objType runtime.Object,
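A hedged sketch of wiring the parameters listed above into NewInformer, assuming an in-cluster client and Pods in the "default" namespace as the watched resource:

package main

import (
    "time"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/cache"
)

func main() {
    cfg, err := rest.InClusterConfig()
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(cfg)

    // lw: list/watch Pods in "default"; objType: &v1.Pod{}; resyncPeriod: 30s so
    // OnUpdate fires periodically even when nothing changed; h: a no-op handler.
    lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "pods", "default", fields.Everything())
    store, controller := cache.NewInformer(lw, &v1.Pod{}, 30*time.Second, cache.ResourceEventHandlerFuncs{})

    stop := make(chan struct{})
    defer close(stop)
    go controller.Run(stop)

    cache.WaitForCacheSync(stop, controller.HasSynced)
    _ = store.ListKeys() // the Store is kept up to date by the controller
}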
@@ -332,16 +331,15 @@ func NewInformer(
// notifications to be faulty.
//
// Parameters:
-//  * lw is list and watch functions for the source of the resource you want to
-//      be informed of.
-//  * objType is an object of the type that you expect to receive.
-//  * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
-//      calls, even if nothing changed). Otherwise, re-list will be delayed as
-//      long as possible (until the upstream source closes the watch or times out,
-//      or you stop the controller).
-//  * h is the object you want notifications sent to.
-//  * indexers is the indexer for the received object type.
-//
+//   - lw is list and watch functions for the source of the resource you want to
+//     be informed of.
+//   - objType is an object of the type that you expect to receive.
+//   - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
+//     calls, even if nothing changed). Otherwise, re-list will be delayed as
+//     long as possible (until the upstream source closes the watch or times out,
+//     or you stop the controller).
+//   - h is the object you want notifications sent to.
+//   - indexers is the indexer for the received object type.
func NewIndexerInformer(
    lw ListerWatcher,
    objType runtime.Object,

@@ -454,16 +452,15 @@ func processDeltas(
// providing event notifications.
//
// Parameters
-//  * lw is list and watch functions for the source of the resource you want to
-//      be informed of.
-//  * objType is an object of the type that you expect to receive.
-//  * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
-//      calls, even if nothing changed). Otherwise, re-list will be delayed as
-//      long as possible (until the upstream source closes the watch or times out,
-//      or you stop the controller).
-//  * h is the object you want notifications sent to.
-//  * clientState is the store you want to populate
-//
+//   - lw is list and watch functions for the source of the resource you want to
+//     be informed of.
+//   - objType is an object of the type that you expect to receive.
+//   - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
+//     calls, even if nothing changed). Otherwise, re-list will be delayed as
+//     long as possible (until the upstream source closes the watch or times out,
+//     or you stop the controller).
+//   - h is the object you want notifications sent to.
+//   - clientState is the store you want to populate
func newInformer(
    lw ListerWatcher,
    objType runtime.Object,
38  vendor/k8s.io/client-go/tools/cache/delta_fifo.go  (generated, vendored)

@@ -74,11 +74,11 @@ type DeltaFIFOOptions struct {
// the Pop() method.
//
// DeltaFIFO solves this use case:
-//  * You want to process every object change (delta) at most once.
-//  * When you process an object, you want to see everything
-//    that's happened to it since you last processed it.
-//  * You want to process the deletion of some of the objects.
-//  * You might want to periodically reprocess objects.
+//   - You want to process every object change (delta) at most once.
+//   - When you process an object, you want to see everything
+//     that's happened to it since you last processed it.
+//   - You want to process the deletion of some of the objects.
+//   - You might want to periodically reprocess objects.
//
// DeltaFIFO's Pop(), Get(), and GetByKey() methods return
// interface{} to satisfy the Store/Queue interfaces, but they
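A minimal, hedged sketch of the use case in the bullets above, assuming the Pop callback signature of this client-go generation (a single interface{} argument); the Pod name is a placeholder:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/cache"
)

func main() {
    // Key objects by namespace/name, the same convention the rest of the package uses.
    fifo := cache.NewDeltaFIFOWithOptions(cache.DeltaFIFOOptions{
        KeyFunction: cache.MetaNamespaceKeyFunc,
    })

    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "demo"}}
    _ = fifo.Add(pod)
    _ = fifo.Update(pod)

    // Pop hands over every delta accumulated for one key since it was last processed.
    _, _ = fifo.Pop(func(obj interface{}) error {
        for _, d := range obj.(cache.Deltas) {
            fmt.Println(d.Type) // Added, then Updated
        }
        return nil
    })
}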
@@ -179,21 +179,21 @@ type Deltas []Delta
// "known" keys when Pop() is called. Have to think about how that
// affects error retrying.
//
// NOTE: It is possible to misuse this and cause a race when using an
// external known object source.
// Whether there is a potential race depends on how the consumer
// modifies knownObjects. In Pop(), process function is called under
// lock, so it is safe to update data structures in it that need to be
// in sync with the queue (e.g. knownObjects).
//
// Example:
// In case of sharedIndexInformer being a consumer
// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/src/k8s.io/client-go/tools/cache/shared_informer.go#L192),
// there is no race as knownObjects (s.indexer) is modified safely
// under DeltaFIFO's lock. The only exceptions are GetStore() and
// GetIndexer() methods, which expose ways to modify the underlying
// storage. Currently these two methods are used for creating Lister
// and internal tests.
//
// Also see the comment on DeltaFIFO.
//
15  vendor/k8s.io/client-go/tools/cache/expiration_cache.go  (generated, vendored)

@@ -25,13 +25,14 @@ import (
)

// ExpirationCache implements the store interface
//  1. All entries are automatically time stamped on insert
//     a. The key is computed based off the original item/keyFunc
//     b. The value inserted under that key is the timestamped item
//  2. Expiration happens lazily on read based on the expiration policy
//     a. No item can be inserted into the store while we're expiring
//        *any* item in the cache.
//  3. Time-stamps are stripped off unexpired entries before return
//
// Note that the ExpirationCache is inherently slower than a normal
// threadSafeStore because it takes a write lock every time it checks if
// an item has expired.
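A hedged sketch of the lazily-expiring behaviour described above, using the package's TTL-based constructor; the 5-second TTL and string keys are illustrative only:

package main

import (
    "fmt"
    "time"

    "k8s.io/client-go/tools/cache"
)

func main() {
    // NewTTLStore builds an ExpirationCache whose policy expires entries after a fixed TTL.
    store := cache.NewTTLStore(func(obj interface{}) (string, error) {
        return obj.(string), nil // the object itself is the key in this toy example
    }, 5*time.Second)

    _ = store.Add("hello")

    // Reads strip the internal timestamp and expire lazily, as the comment explains.
    if item, exists, _ := store.GetByKey("hello"); exists {
        fmt.Println("still cached:", item)
    }

    time.Sleep(6 * time.Second)
    if _, exists, _ := store.GetByKey("hello"); !exists {
        fmt.Println("entry expired")
    }
}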
9  vendor/k8s.io/client-go/tools/cache/fifo.go  (generated, vendored)

@@ -103,10 +103,11 @@ func Pop(queue Queue) interface{} {
// recent version will be processed. This can't be done with a channel
//
// FIFO solves this use case:
-//  * You want to process every object (exactly) once.
-//  * You want to process the most recent version of the object when you process it.
-//  * You do not want to process deleted objects, they should be removed from the queue.
-//  * You do not want to periodically reprocess objects.
+//   - You want to process every object (exactly) once.
+//   - You want to process the most recent version of the object when you process it.
+//   - You do not want to process deleted objects, they should be removed from the queue.
+//   - You do not want to periodically reprocess objects.
+//
// Compare with DeltaFIFO for other use cases.
type FIFO struct {
    lock sync.RWMutex
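A minimal, hedged sketch of the FIFO use case above; the items serve as their own keys purely for illustration, and cache.Pop is the helper named in the hunk header:

package main

import (
    "fmt"

    "k8s.io/client-go/tools/cache"
)

func main() {
    // Key items by a simple string for the sake of the example.
    fifo := cache.NewFIFO(func(obj interface{}) (string, error) {
        return obj.(string), nil
    })

    _ = fifo.Add("item-a")
    _ = fifo.Add("item-b")

    // cache.Pop is a convenience wrapper that pops one item and returns it.
    fmt.Println(cache.Pop(fifo)) // item-a
    fmt.Println(cache.Pop(fifo)) // item-b
}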
10  vendor/k8s.io/client-go/tools/cache/index.go  (generated, vendored)

@@ -28,10 +28,10 @@ import (
// Delete).
//
// There are three kinds of strings here:
//  1. a storage key, as defined in the Store interface,
//  2. a name of an index, and
//  3. an "indexed value", which is produced by an IndexFunc and
//     can be a field value or any other string computed from the object.
type Indexer interface {
    Store
    // Index returns the stored objects whose set of indexed values

@@ -47,7 +47,7 @@ type Indexer interface {
    // ByIndex returns the stored objects whose set of indexed values
    // for the named index includes the given indexed value
    ByIndex(indexName, indexedValue string) ([]interface{}, error)
-   // GetIndexer return the indexers
+   // GetIndexers return the indexers
    GetIndexers() Indexers

    // AddIndexers adds more indexers to this store. If you call this after you already have data
287  vendor/k8s.io/client-go/tools/cache/reflector.go  (generated, vendored)

@@ -71,6 +71,8 @@ type Reflector struct {
    backoffManager wait.BackoffManager
    // initConnBackoffManager manages backoff the initial connection with the Watch call of ListAndWatch.
    initConnBackoffManager wait.BackoffManager
+   // MaxInternalErrorRetryDuration defines how long we should retry internal errors returned by watch.
+   MaxInternalErrorRetryDuration time.Duration

    resyncPeriod time.Duration
    // ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked
@@ -253,112 +255,9 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) {
// It returns error if ListAndWatch didn't even try to initialize watch.
func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
    klog.V(3).Infof("Listing and watching %v from %s", r.expectedTypeName, r.name)
-   var resourceVersion string
-
-   options := metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}
-
-   if err := func() error {
-       initTrace := trace.New("Reflector ListAndWatch", trace.Field{Key: "name", Value: r.name})
-       defer initTrace.LogIfLong(10 * time.Second)
-       var list runtime.Object
-       var paginatedResult bool
-       var err error
-       listCh := make(chan struct{}, 1)
-       panicCh := make(chan interface{}, 1)
-       go func() {
-           defer func() {
-               if r := recover(); r != nil {
-                   panicCh <- r
-               }
-           }()
-           // Attempt to gather list in chunks, if supported by listerWatcher, if not, the first
-           // list request will return the full response.
-           pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
-               return r.listerWatcher.List(opts)
-           }))
-           switch {
-           case r.WatchListPageSize != 0:
-               pager.PageSize = r.WatchListPageSize
-           case r.paginatedResult:
-               // We got a paginated result initially. Assume this resource and server honor
-               // paging requests (i.e. watch cache is probably disabled) and leave the default
-               // pager size set.
-           case options.ResourceVersion != "" && options.ResourceVersion != "0":
-               // User didn't explicitly request pagination.
-               //
-               // With ResourceVersion != "", we have a possibility to list from watch cache,
-               // but we do that (for ResourceVersion != "0") only if Limit is unset.
-               // To avoid thundering herd on etcd (e.g. on master upgrades), we explicitly
-               // switch off pagination to force listing from watch cache (if enabled).
-               // With the existing semantic of RV (result is at least as fresh as provided RV),
-               // this is correct and doesn't lead to going back in time.
-               //
-               // We also don't turn off pagination for ResourceVersion="0", since watch cache
-               // is ignoring Limit in that case anyway, and if watch cache is not enabled
-               // we don't introduce regression.
-               pager.PageSize = 0
-           }
-
-           list, paginatedResult, err = pager.List(context.Background(), options)
-           if isExpiredError(err) || isTooLargeResourceVersionError(err) {
-               r.setIsLastSyncResourceVersionUnavailable(true)
-               // Retry immediately if the resource version used to list is unavailable.
-               // The pager already falls back to full list if paginated list calls fail due to an "Expired" error on
-               // continuation pages, but the pager might not be enabled, the full list might fail because the
-               // resource version it is listing at is expired or the cache may not yet be synced to the provided
-               // resource version. So we need to fallback to resourceVersion="" in all to recover and ensure
-               // the reflector makes forward progress.
-               list, paginatedResult, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()})
-           }
-           close(listCh)
-       }()
-       select {
-       case <-stopCh:
-           return nil
-       case r := <-panicCh:
-           panic(r)
-       case <-listCh:
-       }
-       initTrace.Step("Objects listed", trace.Field{Key: "error", Value: err})
-       if err != nil {
-           klog.Warningf("%s: failed to list %v: %v", r.name, r.expectedTypeName, err)
-           return fmt.Errorf("failed to list %v: %v", r.expectedTypeName, err)
-       }
-
-       // We check if the list was paginated and if so set the paginatedResult based on that.
-       // However, we want to do that only for the initial list (which is the only case
-       // when we set ResourceVersion="0"). The reasoning behind it is that later, in some
-       // situations we may force listing directly from etcd (by setting ResourceVersion="")
-       // which will return paginated result, even if watch cache is enabled. However, in
-       // that case, we still want to prefer sending requests to watch cache if possible.
-       //
-       // Paginated result returned for request with ResourceVersion="0" mean that watch
-       // cache is disabled and there are a lot of objects of a given type. In such case,
-       // there is no need to prefer listing from watch cache.
-       if options.ResourceVersion == "0" && paginatedResult {
-           r.paginatedResult = true
-       }
-
-       r.setIsLastSyncResourceVersionUnavailable(false) // list was successful
-       listMetaInterface, err := meta.ListAccessor(list)
-       if err != nil {
-           return fmt.Errorf("unable to understand list result %#v: %v", list, err)
-       }
-       resourceVersion = listMetaInterface.GetResourceVersion()
-       initTrace.Step("Resource version extracted")
-       items, err := meta.ExtractList(list)
-       if err != nil {
-           return fmt.Errorf("unable to understand list result %#v (%v)", list, err)
-       }
-       initTrace.Step("Objects extracted")
-       if err := r.syncWith(items, resourceVersion); err != nil {
-           return fmt.Errorf("unable to sync list result: %v", err)
-       }
-       initTrace.Step("SyncWith done")
-       r.setLastSyncResourceVersion(resourceVersion)
-       initTrace.Step("Resource version updated")
-       return nil
-   }(); err != nil {
+   err := r.list(stopCh)
+   if err != nil {
        return err
    }

@@ -390,6 +289,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
        }
    }()

+   retry := NewRetryWithDeadline(r.MaxInternalErrorRetryDuration, time.Minute, apierrors.IsInternalError, r.clock)
    for {
        // give the stopCh a chance to stop the loop, even in case of continue statements further down on errors
        select {
@@ -399,8 +299,8 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
        }

        timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
-       options = metav1.ListOptions{
-           ResourceVersion: resourceVersion,
+       options := metav1.ListOptions{
+           ResourceVersion: r.LastSyncResourceVersion(),
            // We want to avoid situations of hanging watchers. Stop any watchers that do not
            // receive any events within the timeout window.
            TimeoutSeconds: &timeoutSeconds,
@@ -426,7 +326,9 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
            return err
        }

-       if err := r.watchHandler(start, w, &resourceVersion, resyncerrc, stopCh); err != nil {
+       err = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.expectedTypeName, r.setLastSyncResourceVersion, r.clock, resyncerrc, stopCh)
+       retry.After(err)
+       if err != nil {
            if err != errorStopRequested {
                switch {
                case isExpiredError(err):
@@ -438,6 +340,9 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
                    klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.expectedTypeName)
                    <-r.initConnBackoffManager.Backoff().C()
                    continue
+               case apierrors.IsInternalError(err) && retry.ShouldRetry():
+                   klog.V(2).Infof("%s: retrying watch of %v internal error: %v", r.name, r.expectedTypeName, err)
+                   continue
                default:
                    klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err)
                }
@@ -447,6 +352,114 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
    }
}

+// list simply lists all items and records a resource version obtained from the server at the moment of the call.
+// the resource version can be used for further progress notification (aka. watch).
+func (r *Reflector) list(stopCh <-chan struct{}) error {
+   var resourceVersion string
+   options := metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}
+
+   initTrace := trace.New("Reflector ListAndWatch", trace.Field{Key: "name", Value: r.name})
+   defer initTrace.LogIfLong(10 * time.Second)
+   var list runtime.Object
+   var paginatedResult bool
+   var err error
+   listCh := make(chan struct{}, 1)
+   panicCh := make(chan interface{}, 1)
+   go func() {
+       defer func() {
+           if r := recover(); r != nil {
+               panicCh <- r
+           }
+       }()
+       // Attempt to gather list in chunks, if supported by listerWatcher, if not, the first
+       // list request will return the full response.
+       pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
+           return r.listerWatcher.List(opts)
+       }))
+       switch {
+       case r.WatchListPageSize != 0:
+           pager.PageSize = r.WatchListPageSize
+       case r.paginatedResult:
+           // We got a paginated result initially. Assume this resource and server honor
+           // paging requests (i.e. watch cache is probably disabled) and leave the default
+           // pager size set.
+       case options.ResourceVersion != "" && options.ResourceVersion != "0":
+           // User didn't explicitly request pagination.
+           //
+           // With ResourceVersion != "", we have a possibility to list from watch cache,
+           // but we do that (for ResourceVersion != "0") only if Limit is unset.
+           // To avoid thundering herd on etcd (e.g. on master upgrades), we explicitly
+           // switch off pagination to force listing from watch cache (if enabled).
+           // With the existing semantic of RV (result is at least as fresh as provided RV),
+           // this is correct and doesn't lead to going back in time.
+           //
+           // We also don't turn off pagination for ResourceVersion="0", since watch cache
+           // is ignoring Limit in that case anyway, and if watch cache is not enabled
+           // we don't introduce regression.
+           pager.PageSize = 0
+       }
+
+       list, paginatedResult, err = pager.List(context.Background(), options)
+       if isExpiredError(err) || isTooLargeResourceVersionError(err) {
+           r.setIsLastSyncResourceVersionUnavailable(true)
+           // Retry immediately if the resource version used to list is unavailable.
+           // The pager already falls back to full list if paginated list calls fail due to an "Expired" error on
+           // continuation pages, but the pager might not be enabled, the full list might fail because the
+           // resource version it is listing at is expired or the cache may not yet be synced to the provided
+           // resource version. So we need to fallback to resourceVersion="" in all to recover and ensure
+           // the reflector makes forward progress.
+           list, paginatedResult, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()})
+       }
+       close(listCh)
+   }()
+   select {
+   case <-stopCh:
+       return nil
+   case r := <-panicCh:
+       panic(r)
+   case <-listCh:
+   }
+   initTrace.Step("Objects listed", trace.Field{Key: "error", Value: err})
+   if err != nil {
+       klog.Warningf("%s: failed to list %v: %v", r.name, r.expectedTypeName, err)
+       return fmt.Errorf("failed to list %v: %w", r.expectedTypeName, err)
+   }
+
+   // We check if the list was paginated and if so set the paginatedResult based on that.
+   // However, we want to do that only for the initial list (which is the only case
+   // when we set ResourceVersion="0"). The reasoning behind it is that later, in some
+   // situations we may force listing directly from etcd (by setting ResourceVersion="")
+   // which will return paginated result, even if watch cache is enabled. However, in
+   // that case, we still want to prefer sending requests to watch cache if possible.
+   //
+   // Paginated result returned for request with ResourceVersion="0" mean that watch
+   // cache is disabled and there are a lot of objects of a given type. In such case,
+   // there is no need to prefer listing from watch cache.
+   if options.ResourceVersion == "0" && paginatedResult {
+       r.paginatedResult = true
+   }
+
+   r.setIsLastSyncResourceVersionUnavailable(false) // list was successful
+   listMetaInterface, err := meta.ListAccessor(list)
+   if err != nil {
+       return fmt.Errorf("unable to understand list result %#v: %v", list, err)
+   }
+   resourceVersion = listMetaInterface.GetResourceVersion()
+   initTrace.Step("Resource version extracted")
+   items, err := meta.ExtractList(list)
+   if err != nil {
+       return fmt.Errorf("unable to understand list result %#v (%v)", list, err)
+   }
+   initTrace.Step("Objects extracted")
+   if err := r.syncWith(items, resourceVersion); err != nil {
+       return fmt.Errorf("unable to sync list result: %v", err)
+   }
+   initTrace.Step("SyncWith done")
+   r.setLastSyncResourceVersion(resourceVersion)
+   initTrace.Step("Resource version updated")
+   return nil
+}

// syncWith replaces the store's items with the given list.
func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) error {
    found := make([]interface{}, 0, len(items))
@@ -456,8 +469,19 @@ func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) err
    return r.store.Replace(found, resourceVersion)
}

-// watchHandler watches w and keeps *resourceVersion up to date.
-func (r *Reflector) watchHandler(start time.Time, w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error {
+// watchHandler watches w and sets setLastSyncResourceVersion
+func watchHandler(start time.Time,
+   w watch.Interface,
+   store Store,
+   expectedType reflect.Type,
+   expectedGVK *schema.GroupVersionKind,
+   name string,
+   expectedTypeName string,
+   setLastSyncResourceVersion func(string),
+   clock clock.Clock,
+   errc chan error,
+   stopCh <-chan struct{},
+) error {
    eventCount := 0

    // Stopping the watcher should be idempotent and if we return from this function there's no way
@@ -478,62 +502,61 @@ loop:
            if event.Type == watch.Error {
                return apierrors.FromObject(event.Object)
            }
-           if r.expectedType != nil {
-               if e, a := r.expectedType, reflect.TypeOf(event.Object); e != a {
-                   utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a))
+           if expectedType != nil {
+               if e, a := expectedType, reflect.TypeOf(event.Object); e != a {
+                   utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", name, e, a))
                    continue
                }
            }
-           if r.expectedGVK != nil {
-               if e, a := *r.expectedGVK, event.Object.GetObjectKind().GroupVersionKind(); e != a {
-                   utilruntime.HandleError(fmt.Errorf("%s: expected gvk %v, but watch event object had gvk %v", r.name, e, a))
+           if expectedGVK != nil {
+               if e, a := *expectedGVK, event.Object.GetObjectKind().GroupVersionKind(); e != a {
+                   utilruntime.HandleError(fmt.Errorf("%s: expected gvk %v, but watch event object had gvk %v", name, e, a))
                    continue
                }
            }
            meta, err := meta.Accessor(event.Object)
            if err != nil {
-               utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
+               utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event))
                continue
            }
-           newResourceVersion := meta.GetResourceVersion()
+           resourceVersion := meta.GetResourceVersion()
            switch event.Type {
            case watch.Added:
-               err := r.store.Add(event.Object)
+               err := store.Add(event.Object)
                if err != nil {
-                   utilruntime.HandleError(fmt.Errorf("%s: unable to add watch event object (%#v) to store: %v", r.name, event.Object, err))
+                   utilruntime.HandleError(fmt.Errorf("%s: unable to add watch event object (%#v) to store: %v", name, event.Object, err))
                }
            case watch.Modified:
-               err := r.store.Update(event.Object)
+               err := store.Update(event.Object)
                if err != nil {
-                   utilruntime.HandleError(fmt.Errorf("%s: unable to update watch event object (%#v) to store: %v", r.name, event.Object, err))
+                   utilruntime.HandleError(fmt.Errorf("%s: unable to update watch event object (%#v) to store: %v", name, event.Object, err))
                }
            case watch.Deleted:
                // TODO: Will any consumers need access to the "last known
                // state", which is passed in event.Object? If so, may need
                // to change this.
-               err := r.store.Delete(event.Object)
+               err := store.Delete(event.Object)
                if err != nil {
-                   utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err))
+                   utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", name, event.Object, err))
                }
            case watch.Bookmark:
                // A `Bookmark` means watch has synced here, just update the resourceVersion
            default:
-               utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
+               utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event))
            }
-           *resourceVersion = newResourceVersion
-           r.setLastSyncResourceVersion(newResourceVersion)
-           if rvu, ok := r.store.(ResourceVersionUpdater); ok {
-               rvu.UpdateResourceVersion(newResourceVersion)
+           setLastSyncResourceVersion(resourceVersion)
+           if rvu, ok := store.(ResourceVersionUpdater); ok {
+               rvu.UpdateResourceVersion(resourceVersion)
            }
            eventCount++
        }
    }

-   watchDuration := r.clock.Since(start)
+   watchDuration := clock.Since(start)
    if watchDuration < 1*time.Second && eventCount == 0 {
-       return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
+       return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", name)
    }
-   klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedTypeName, eventCount)
+   klog.V(4).Infof("%s: Watch close - %v total %v items received", name, expectedTypeName, eventCount)
    return nil
}
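Most callers reach this code through informers, but a Reflector can also be driven directly. A hedged sketch, with the ListWatch, store and durations chosen only for illustration (MaxInternalErrorRetryDuration is the field added in this hunk):

package main

import (
    "time"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/cache"
)

func main() {
    cfg, err := rest.InClusterConfig()
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(cfg)

    lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "pods", "default", fields.Everything())
    store := cache.NewStore(cache.MetaNamespaceKeyFunc)

    // The reflector lists once (the list() path above), then watches and feeds the store;
    // MaxInternalErrorRetryDuration bounds how long internal watch errors are retried.
    r := cache.NewReflector(lw, &v1.Pod{}, store, 30*time.Second)
    r.MaxInternalErrorRetryDuration = 10 * time.Second

    stop := make(chan struct{})
    go r.Run(stop) // Run blocks until stop is closed, re-listing and re-watching on errors

    time.Sleep(10 * time.Second)
    close(stop)
}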
78  vendor/k8s.io/client-go/tools/cache/retry_with_deadline.go  (generated, vendored, new file)

@@ -0,0 +1,78 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
    "k8s.io/utils/clock"
    "time"
)

type RetryWithDeadline interface {
    After(error)
    ShouldRetry() bool
}

type retryWithDeadlineImpl struct {
    firstErrorTime   time.Time
    lastErrorTime    time.Time
    maxRetryDuration time.Duration
    minResetPeriod   time.Duration
    isRetryable      func(error) bool
    clock            clock.Clock
}

func NewRetryWithDeadline(maxRetryDuration, minResetPeriod time.Duration, isRetryable func(error) bool, clock clock.Clock) RetryWithDeadline {
    return &retryWithDeadlineImpl{
        firstErrorTime:   time.Time{},
        lastErrorTime:    time.Time{},
        maxRetryDuration: maxRetryDuration,
        minResetPeriod:   minResetPeriod,
        isRetryable:      isRetryable,
        clock:            clock,
    }
}

func (r *retryWithDeadlineImpl) reset() {
    r.firstErrorTime = time.Time{}
    r.lastErrorTime = time.Time{}
}

func (r *retryWithDeadlineImpl) After(err error) {
    if r.isRetryable(err) {
        if r.clock.Now().Sub(r.lastErrorTime) >= r.minResetPeriod {
            r.reset()
        }

        if r.firstErrorTime.IsZero() {
            r.firstErrorTime = r.clock.Now()
        }
        r.lastErrorTime = r.clock.Now()
    }
}

func (r *retryWithDeadlineImpl) ShouldRetry() bool {
    if r.maxRetryDuration <= time.Duration(0) {
        return false
    }

    if r.clock.Now().Sub(r.firstErrorTime) <= r.maxRetryDuration {
        return true
    }

    r.reset()
    return false
}
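A hedged usage sketch mirroring the reflector's call shown earlier (NewRetryWithDeadline with apierrors.IsInternalError); the durations and errors here are illustrative:

package main

import (
    "fmt"
    "time"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/tools/cache"
    "k8s.io/utils/clock"
)

func main() {
    // Retry internal errors for up to 30s, and reset the window once errors
    // have stopped for at least a minute.
    retry := cache.NewRetryWithDeadline(30*time.Second, time.Minute, apierrors.IsInternalError, clock.RealClock{})

    internal := apierrors.NewInternalError(fmt.Errorf("etcd hiccup"))
    retry.After(internal)
    fmt.Println(retry.ShouldRetry()) // true while still inside the 30s deadline

    notRetryable := apierrors.NewNotFound(schema.GroupResource{Resource: "pods"}, "demo")
    retry.After(notRetryable) // ignored: not an internal error
}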
13  vendor/k8s.io/client-go/tools/cache/store.go  (generated, vendored)

@@ -199,8 +199,11 @@ func (c *cache) Index(indexName string, obj interface{}) ([]interface{}, error)
    return c.cacheStorage.Index(indexName, obj)
}

-func (c *cache) IndexKeys(indexName, indexKey string) ([]string, error) {
-   return c.cacheStorage.IndexKeys(indexName, indexKey)
+// IndexKeys returns the storage keys of the stored objects whose set of
+// indexed values for the named index includes the given indexed value.
+// The returned keys are suitable to pass to GetByKey().
+func (c *cache) IndexKeys(indexName, indexedValue string) ([]string, error) {
+   return c.cacheStorage.IndexKeys(indexName, indexedValue)
}

// ListIndexFuncValues returns the list of generated values of an Index func

@@ -208,8 +211,10 @@ func (c *cache) ListIndexFuncValues(indexName string) []string {
    return c.cacheStorage.ListIndexFuncValues(indexName)
}

-func (c *cache) ByIndex(indexName, indexKey string) ([]interface{}, error) {
-   return c.cacheStorage.ByIndex(indexName, indexKey)
+// ByIndex returns the stored objects whose set of indexed values
+// for the named index includes the given indexed value.
+func (c *cache) ByIndex(indexName, indexedValue string) ([]interface{}, error) {
+   return c.cacheStorage.ByIndex(indexName, indexedValue)
}

func (c *cache) AddIndexers(newIndexers Indexers) error {
4  vendor/k8s.io/client-go/tools/cache/thread_safe_store.go  (generated, vendored)

@@ -47,9 +47,9 @@ type ThreadSafeStore interface {
    ListKeys() []string
    Replace(map[string]interface{}, string)
    Index(indexName string, obj interface{}) ([]interface{}, error)
-   IndexKeys(indexName, indexKey string) ([]string, error)
+   IndexKeys(indexName, indexedValue string) ([]string, error)
    ListIndexFuncValues(name string) []string
-   ByIndex(indexName, indexKey string) ([]interface{}, error)
+   ByIndex(indexName, indexedValue string) ([]interface{}, error)
    GetIndexers() Indexers

    // AddIndexers adds more indexers to this store. If you call this after you already have data
6  vendor/k8s.io/client-go/tools/clientcmd/loader.go  (generated, vendored)

@@ -160,8 +160,10 @@ func NewDefaultClientConfigLoadingRules() *ClientConfigLoadingRules {

// Load starts by running the MigrationRules and then
// takes the loading rules and returns a Config object based on following rules.
-//   if the ExplicitPath, return the unmerged explicit file
-//   Otherwise, return a merged config based on the Precedence slice
+//
+//   if the ExplicitPath, return the unmerged explicit file
+//   Otherwise, return a merged config based on the Precedence slice
+//
// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
// Read errors or files with non-deserializable content produce errors.
// The first file to set a particular map key wins and map key's value is never changed.
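A hedged sketch of the two loading paths described above: setting ExplicitPath switches Load to the unmerged-explicit-file branch, otherwise the Precedence chain is merged; the kubeconfig path is a placeholder:

package main

import (
    "fmt"

    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    // Default rules: merge the files listed in $KUBECONFIG / ~/.kube/config by precedence.
    rules := clientcmd.NewDefaultClientConfigLoadingRules()

    // An explicit path bypasses merging entirely and must exist.
    rules.ExplicitPath = "/tmp/demo-kubeconfig" // placeholder path

    rawConfig, err := rules.Load()
    if err != nil {
        fmt.Println("load failed:", err) // a missing ExplicitPath file is an error
        return
    }
    fmt.Println("contexts:", len(rawConfig.Contexts))
}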
15  vendor/k8s.io/client-go/tools/clientcmd/validation.go  (generated, vendored)

@@ -204,8 +204,19 @@ func ConfirmUsable(config clientcmdapi.Config, passedContextName string) error {

    if exists {
        validationErrors = append(validationErrors, validateContext(contextName, *context, config)...)
-       validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *config.AuthInfos[context.AuthInfo])...)
-       validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *config.Clusters[context.Cluster])...)
+
+       // Default to empty users and clusters and let the validation function report an error.
+       authInfo := config.AuthInfos[context.AuthInfo]
+       if authInfo == nil {
+           authInfo = &clientcmdapi.AuthInfo{}
+       }
+       validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *authInfo)...)
+
+       cluster := config.Clusters[context.Cluster]
+       if cluster == nil {
+           cluster = &clientcmdapi.Cluster{}
+       }
+       validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *cluster)...)
    }

    return newErrConfigurationInvalid(validationErrors)
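A hedged sketch exercising the hardened path above: a context whose user and cluster were never defined now surfaces validation errors instead of dereferencing nil map entries; the names are placeholders:

package main

import (
    "fmt"

    "k8s.io/client-go/tools/clientcmd"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
    config := clientcmdapi.NewConfig()
    // A context referencing a user and cluster that were never defined.
    config.Contexts["broken"] = &clientcmdapi.Context{AuthInfo: "missing-user", Cluster: "missing-cluster"}
    config.CurrentContext = "broken"

    // The dangling references are validated against empty defaults and reported as errors.
    if err := clientcmd.ConfirmUsable(*config, "broken"); err != nil {
        fmt.Println("invalid config:", err)
    }
}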
10  vendor/k8s.io/client-go/tools/events/event_broadcaster.go  (generated, vendored)

@@ -307,7 +307,15 @@ func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) func
// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function.
// The return value is used to stop recording
func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(event runtime.Object)) func() {
-   watcher := e.Watch()
+   watcher, err := e.Watch()
+   if err != nil {
+       klog.Errorf("Unable start event watcher: '%v' (will not retry!)", err)
+       // TODO: Rewrite the function signature to return an error, for
+       // now just return a no-op function
+       return func() {
+           klog.Error("The event watcher failed to start")
+       }
+   }
    go func() {
        defer utilruntime.HandleCrash()
        for {
2  vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go  (generated, vendored)

@@ -161,7 +161,7 @@ type LeaderElectionConfig struct {
// lifecycle events of the LeaderElector. These are invoked asynchronously.
//
// possible future callbacks:
-//  * OnChallenge()
+//   - OnChallenge()
type LeaderCallbacks struct {
    // OnStartedLeading is called when a LeaderElector client starts leading
    OnStartedLeading func(context.Context)
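A hedged sketch of filling in the callbacks that exist today (OnChallenge above is only listed as a possible future addition); the log messages are illustrative, and a real setup would also populate the lock and timing fields:

package main

import (
    "context"
    "log"

    "k8s.io/client-go/tools/leaderelection"
)

func main() {
    callbacks := leaderelection.LeaderCallbacks{
        OnStartedLeading: func(ctx context.Context) {
            log.Println("started leading; run controllers until ctx is cancelled")
        },
        OnStoppedLeading: func() {
            log.Println("lost leadership; shut down cleanly")
        },
        OnNewLeader: func(identity string) {
            log.Println("observed leader:", identity)
        },
    }

    // A full setup would also set Lock, LeaseDuration, RenewDeadline and
    // RetryPeriod before calling leaderelection.RunOrDie.
    _ = leaderelection.LeaderElectionConfig{Callbacks: callbacks}
}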
18  vendor/k8s.io/client-go/tools/portforward/portforward.go  (generated, vendored)

@@ -62,18 +62,18 @@ type ForwardedPort struct {
}

/*
valid port specifications:

5000
- forwards from localhost:5000 to pod:5000

8888:5000
- forwards from localhost:8888 to pod:5000

0:5000
:5000
- selects a random available local port,
  forwards from localhost:<random port> to pod:5000
*/
func parsePorts(ports []string) ([]ForwardedPort, error) {
    var forwards []ForwardedPort
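The accepted forms above can be parsed with a few lines of standard-library code. This is a hedged, self-contained re-implementation for illustration only, not the package's parsePorts:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// parsePortSpec splits one of the documented forms into a local and remote port.
// local == 0 means "pick a random available local port".
func parsePortSpec(spec string) (local, remote uint16, err error) {
    parts := strings.Split(spec, ":")
    var localStr, remoteStr string
    switch len(parts) {
    case 1: // "5000" forwards localhost:5000 -> pod:5000
        localStr, remoteStr = parts[0], parts[0]
    case 2: // "8888:5000", "0:5000" or ":5000"
        localStr, remoteStr = parts[0], parts[1]
        if localStr == "" {
            localStr = "0"
        }
    default:
        return 0, 0, fmt.Errorf("invalid port format %q", spec)
    }
    l, err := strconv.ParseUint(localStr, 10, 16)
    if err != nil {
        return 0, 0, err
    }
    r, err := strconv.ParseUint(remoteStr, 10, 16)
    if err != nil {
        return 0, 0, err
    }
    return uint16(l), uint16(r), nil
}

func main() {
    for _, s := range []string{"5000", "8888:5000", ":5000"} {
        l, r, _ := parsePortSpec(s)
        fmt.Printf("%-10s local=%d remote=%d\n", s, l, r)
    }
}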
14  vendor/k8s.io/client-go/tools/record/event.go  (generated, vendored)

@@ -92,7 +92,7 @@ type EventRecorder interface {
    // Event constructs an event from the given information and puts it in the queue for sending.
    // 'object' is the object this event is about. Event will make a reference-- or you may also
    // pass a reference to the object directly.
-   // 'type' of this event, and can be one of Normal, Warning. New types could be added in future
+   // 'eventtype' of this event, and can be one of Normal, Warning. New types could be added in future
    // 'reason' is the reason this event is generated. 'reason' should be short and unique; it
    // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used
    // to automate handling of events, so imagine people writing switch statements to handle them.
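A hedged sketch of the recorder contract described above (object, eventtype, reason, message), wired through record.NewBroadcaster; the component name and Pod are placeholders, and events are only logged locally to keep the sketch self-contained:

package main

import (
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/tools/record"
)

func main() {
    broadcaster := record.NewBroadcaster()
    defer broadcaster.Shutdown()

    // Log events instead of sending them to the API server.
    broadcaster.StartLogging(func(format string, args ...interface{}) {})

    recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "demo-controller"})

    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "demo"}}
    // object, eventtype (Normal or Warning), UpperCamelCase reason, human-readable message.
    recorder.Event(pod, v1.EventTypeNormal, "Scheduled", "pod has been scheduled")
}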
@@ -298,7 +298,10 @@ func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) watc
// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function.
// The return value can be ignored or used to stop recording, if desired.
func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface {
-   watcher := e.Watch()
+   watcher, err := e.Watch()
+   if err != nil {
+       klog.Errorf("Unable start event watcher: '%v' (will not retry!)", err)
+   }
    go func() {
        defer utilruntime.HandleCrash()
        for watchEvent := range watcher.ResultChan() {

@@ -346,7 +349,12 @@ func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations m
    // when we go to shut down this broadcaster. Just drop events if we get overloaded,
    // and log an error if that happens (we've configured the broadcaster to drop
    // outgoing events anyway).
-   if sent := recorder.ActionOrDrop(watch.Added, event); !sent {
+   sent, err := recorder.ActionOrDrop(watch.Added, event)
+   if err != nil {
+       klog.Errorf("unable to record event: %v (will not retry!)", err)
+       return
+   }
+   if !sent {
        klog.Errorf("unable to record event: too many queued events, dropped event %#v", event)
    }
}
14  vendor/k8s.io/client-go/tools/record/events_cache.go  (generated, vendored)

@@ -235,10 +235,10 @@ type aggregateRecord struct {
// EventAggregate checks if a similar event has been seen according to the
// aggregation configuration (max events, max interval, etc) and returns:
//
//   - The (potentially modified) event that should be created
//   - The cache key for the event, for correlation purposes. This will be set to
//     the full key for normal events, and to the result of
//     EventAggregatorMessageFunc for aggregate events.
func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, string) {
    now := metav1.NewTime(e.clock.Now())
    var record aggregateRecord

@@ -427,14 +427,14 @@ type EventCorrelateResult struct {
// prior to interacting with the API server to record the event.
//
// The default behavior is as follows:
-//  * Aggregation is performed if a similar event is recorded 10 times
+//   - Aggregation is performed if a similar event is recorded 10 times
//     in a 10 minute rolling interval. A similar event is an event that varies only by
//     the Event.Message field. Rather than recording the precise event, aggregation
//     will create a new event whose message reports that it has combined events with
//     the same reason.
-//  * Events are incrementally counted if the exact same event is encountered multiple
+//   - Events are incrementally counted if the exact same event is encountered multiple
//     times.
-//  * A source may burst 25 events about an object, but has a refill rate budget
+//   - A source may burst 25 events about an object, but has a refill rate budget
//     per object of 1 event every 5 minutes to control long-tail of spam.
func NewEventCorrelator(clock clock.PassiveClock) *EventCorrelator {
    cacheSize := maxLruCacheEntries
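A hedged sketch feeding the same event through the correlator twice to show the incremental-count behaviour described above; the event fields are placeholders and Count is preset to 1 the way the recorder normally does:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/record"
    "k8s.io/utils/clock"
)

func main() {
    correlator := record.NewEventCorrelator(clock.RealClock{})

    event := &v1.Event{
        ObjectMeta:     metav1.ObjectMeta{Namespace: "default", Name: "demo.1"},
        InvolvedObject: v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "demo"},
        Reason:         "FailedScheduling",
        Message:        "0/3 nodes are available",
        Type:           v1.EventTypeWarning,
        Source:         v1.EventSource{Component: "demo-scheduler"},
        Count:          1, // the recorder initializes Count to 1 before correlating
    }

    // The exact same event seen twice is incrementally counted rather than duplicated.
    first, _ := correlator.EventCorrelate(event)
    second, _ := correlator.EventCorrelate(event)
    fmt.Println(first.Event.Count, second.Event.Count) // expected: 1, then 2
}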
4  vendor/k8s.io/client-go/tools/watch/until.go  (generated, vendored)

@@ -101,7 +101,9 @@ func UntilWithoutRetry(ctx context.Context, watcher watch.Interface, conditions
// It guarantees you to see all events and in the order they happened.
// Due to this guarantee there is no way it can deal with 'Resource version too old error'. It will fail in this case.
// (See `UntilWithSync` if you'd prefer to recover from all the errors including RV too old by re-listing
-//  those items. In normal code you should care about being level driven so you'd not care about not seeing all the edges.)
+//
+//  those items. In normal code you should care about being level driven so you'd not care about not seeing all the edges.)
+//
// The most frequent usage for Until would be a test where you want to verify exact order of events ("edges").
func Until(ctx context.Context, initialResourceVersion string, watcherClient cache.Watcher, conditions ...ConditionFunc) (*watch.Event, error) {
    w, err := NewRetryWatcher(initialResourceVersion, watcherClient)
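A hedged sketch using the Until signature shown above to wait for the first Pod Added event; the in-cluster client, namespace and timeout are placeholders, and the initial resource version comes from a plain List:

package main

import (
    "context"
    "fmt"
    "time"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/cache"
    watchtools "k8s.io/client-go/tools/watch"
)

func main() {
    cfg, err := rest.InClusterConfig()
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(cfg)

    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
    defer cancel()

    // An initial List provides the resource version the retry watcher starts from.
    pods, err := client.CoreV1().Pods("default").List(ctx, metav1.ListOptions{})
    if err != nil {
        panic(err)
    }

    lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "pods", "default", fields.Everything())
    ev, err := watchtools.Until(ctx, pods.ResourceVersion, lw, func(event watch.Event) (bool, error) {
        return event.Type == watch.Added, nil
    })
    if err != nil {
        panic(err)
    }
    fmt.Println("first pod event for:", ev.Object.(*v1.Pod).Name)
}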