Migrate from snapClient.VolumesnapshotV1alpha1Client to snapClient.SnapshotV1alpha1Client and also update kube dependency

Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
Author: Humble Chirammal
Date: 2019-06-24 14:38:09 +05:30
Committed by: mergify[bot]
Parent: 3bc6771df8
Commit: 22ff5c0911
1031 changed files with 34242 additions and 177906 deletions
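For context (not part of this diff), a minimal sketch of the call-site change implied by the client rename; the helper name and the external-snapshotter clientset import path below are assumptions for illustration:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	// Assumed import path for the external-snapshotter clientset at the time
	// of this change; the exact vendored path in this repo may differ.
	snapclientset "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned"
)

// listSnapshots is a hypothetical helper showing the renamed typed-client
// accessor: VolumesnapshotV1alpha1() becomes SnapshotV1alpha1().
func listSnapshots(snapClient snapclientset.Interface, ns string) error {
	// Before this commit:
	//   _, err := snapClient.VolumesnapshotV1alpha1().VolumeSnapshots(ns).List(metav1.ListOptions{})
	_, err := snapClient.SnapshotV1alpha1().VolumeSnapshots(ns).List(metav1.ListOptions{})
	return err
}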

View File

@ -105,7 +105,7 @@ func LoadFromFile(path string) (*Info, error) {
// The fields of client.Config with a corresponding field in the Info are set
// with the value from the Info.
func (info Info) MergeWithConfig(c restclient.Config) (restclient.Config, error) {
var config restclient.Config = c
var config = c
config.Username = info.User
config.Password = info.Password
config.CAFile = info.CAFile
@ -118,6 +118,7 @@ func (info Info) MergeWithConfig(c restclient.Config) (restclient.Config, error)
return config, nil
}
// Complete returns true if the Kubernetes API authorization info is complete.
func (info Info) Complete() bool {
return len(info.User) > 0 ||
len(info.CertFile) > 0 ||

View File

@ -48,7 +48,7 @@ type ExpirationCache struct {
// ExpirationPolicy dictates when an object expires. Currently only abstracted out
// so unittests don't rely on the system clock.
type ExpirationPolicy interface {
IsExpired(obj *timestampedEntry) bool
IsExpired(obj *TimestampedEntry) bool
}
// TTLPolicy implements a ttl based ExpirationPolicy.
@ -63,26 +63,29 @@ type TTLPolicy struct {
// IsExpired returns true if the given object is older than the ttl, or it can't
// determine its age.
func (p *TTLPolicy) IsExpired(obj *timestampedEntry) bool {
return p.Ttl > 0 && p.Clock.Since(obj.timestamp) > p.Ttl
func (p *TTLPolicy) IsExpired(obj *TimestampedEntry) bool {
return p.Ttl > 0 && p.Clock.Since(obj.Timestamp) > p.Ttl
}
// timestampedEntry is the only type allowed in a ExpirationCache.
type timestampedEntry struct {
obj interface{}
timestamp time.Time
// TimestampedEntry is the only type allowed in an ExpirationCache.
// Keep in mind that it is not safe to share timestamps between computers.
// Behavior may be inconsistent if you get a timestamp from the API Server and
// use it on the client machine as part of your ExpirationCache.
type TimestampedEntry struct {
Obj interface{}
Timestamp time.Time
}
// getTimestampedEntry returns the timestampedEntry stored under the given key.
func (c *ExpirationCache) getTimestampedEntry(key string) (*timestampedEntry, bool) {
// getTimestampedEntry returns the TimestampedEntry stored under the given key.
func (c *ExpirationCache) getTimestampedEntry(key string) (*TimestampedEntry, bool) {
item, _ := c.cacheStorage.Get(key)
if tsEntry, ok := item.(*timestampedEntry); ok {
if tsEntry, ok := item.(*TimestampedEntry); ok {
return tsEntry, true
}
return nil, false
}
// getOrExpire retrieves the object from the timestampedEntry if and only if it hasn't
// getOrExpire retrieves the object from the TimestampedEntry if and only if it hasn't
// already expired. It holds a write lock across deletion.
func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
// Prevent all inserts from the time we deem an item as "expired" to when we
@ -95,11 +98,11 @@ func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
return nil, false
}
if c.expirationPolicy.IsExpired(timestampedItem) {
klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj)
klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.Obj)
c.cacheStorage.Delete(key)
return nil, false
}
return timestampedItem.obj, true
return timestampedItem.Obj, true
}
// GetByKey returns the item stored under the key, or sets exists=false.
@ -126,7 +129,7 @@ func (c *ExpirationCache) List() []interface{} {
list := make([]interface{}, 0, len(items))
for _, item := range items {
obj := item.(*timestampedEntry).obj
obj := item.(*TimestampedEntry).Obj
if key, err := c.keyFunc(obj); err != nil {
list = append(list, obj)
} else if obj, exists := c.getOrExpire(key); exists {
@ -151,7 +154,7 @@ func (c *ExpirationCache) Add(obj interface{}) error {
c.expirationLock.Lock()
defer c.expirationLock.Unlock()
c.cacheStorage.Add(key, &timestampedEntry{obj, c.clock.Now()})
c.cacheStorage.Add(key, &TimestampedEntry{obj, c.clock.Now()})
return nil
}
@ -184,7 +187,7 @@ func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) er
if err != nil {
return KeyError{item, err}
}
items[key] = &timestampedEntry{item, ts}
items[key] = &TimestampedEntry{item, ts}
}
c.expirationLock.Lock()
defer c.expirationLock.Unlock()
@ -199,10 +202,15 @@ func (c *ExpirationCache) Resync() error {
// NewTTLStore creates and returns an ExpirationCache with a TTLPolicy
func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store {
return NewExpirationStore(keyFunc, &TTLPolicy{ttl, clock.RealClock{}})
}
// NewExpirationStore creates and returns an ExpirationCache for a given policy
func NewExpirationStore(keyFunc KeyFunc, expirationPolicy ExpirationPolicy) Store {
return &ExpirationCache{
cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}),
keyFunc: keyFunc,
clock: clock.RealClock{},
expirationPolicy: &TTLPolicy{ttl, clock.RealClock{}},
expirationPolicy: expirationPolicy,
}
}
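For illustration (not part of this diff), a minimal sketch of how the TTL-backed store and the new NewExpirationStore constructor can be used; the string key function is purely illustrative:

package main

import (
	"fmt"
	"time"

	// clock import path as vendored by client-go at the time; may differ today.
	"k8s.io/apimachinery/pkg/util/clock"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Key objects by their string value; real callers usually key on object metadata.
	keyFunc := func(obj interface{}) (string, error) { return obj.(string), nil }

	// Entries added to this store expire 30 seconds after insertion.
	store := cache.NewTTLStore(keyFunc, 30*time.Second)
	_ = store.Add("item-a")

	if obj, exists, _ := store.GetByKey("item-a"); exists {
		fmt.Println("still cached:", obj)
	}

	// The new constructor accepts any ExpirationPolicy, e.g. a custom TTLPolicy.
	_ = cache.NewExpirationStore(keyFunc, &cache.TTLPolicy{Ttl: time.Minute, Clock: clock.RealClock{}})
}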

View File

@ -38,7 +38,7 @@ type FakeExpirationPolicy struct {
RetrieveKeyFunc KeyFunc
}
func (p *FakeExpirationPolicy) IsExpired(obj *timestampedEntry) bool {
func (p *FakeExpirationPolicy) IsExpired(obj *TimestampedEntry) bool {
key, _ := p.RetrieveKeyFunc(obj)
return !p.NeverExpire.Has(key)
}

View File

@ -17,6 +17,7 @@ limitations under the License.
package cache
import (
"context"
"errors"
"fmt"
"io"
@ -24,7 +25,6 @@ import (
"net"
"net/url"
"reflect"
"strings"
"sync"
"syscall"
"time"
@ -38,6 +38,7 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/pager"
"k8s.io/klog"
"k8s.io/utils/trace"
)
@ -68,6 +69,9 @@ type Reflector struct {
lastSyncResourceVersion string
// lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion
lastSyncResourceVersionMutex sync.RWMutex
// WatchListPageSize is the requested chunk size of initial and resync watch lists.
// Defaults to pager.PageSize.
WatchListPageSize int64
}
var (
@ -79,7 +83,7 @@ var (
// NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector
// The indexer is configured to key on namespace
func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) {
indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{"namespace": MetaNamespaceIndexFunc})
indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{NamespaceIndex: MetaNamespaceIndexFunc})
reflector = NewReflector(lw, expectedType, indexer, resyncPeriod)
return indexer, reflector
}
@ -108,11 +112,6 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{},
return r
}
func makeValidPrometheusMetricLabel(in string) string {
// this isn't perfect, but it removes our common characters
return strings.NewReplacer("/", "_", ".", "_", "-", "_", ":", "_").Replace(in)
}
// internalPackages are packages that are ignored when creating a default reflector name. These packages are in the common
// call chains to NewReflector, so they'd be low entropy names for reflectors
var internalPackages = []string{"client-go/tools/cache/"}
@ -179,7 +178,16 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
panicCh <- r
}
}()
list, err = r.listerWatcher.List(options)
// Attempt to gather list in chunks, if supported by listerWatcher, if not, the first
// list request will return the full response.
pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
return r.listerWatcher.List(opts)
}))
if r.WatchListPageSize != 0 {
pager.PageSize = r.WatchListPageSize
}
// Pager falls back to full list if paginated list calls fail due to an "Expired" error.
list, err = pager.List(context.Background(), options)
close(listCh)
}()
select {
@ -257,6 +265,11 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
// We want to avoid situations of hanging watchers. Stop any watchers that do not
// receive any events within the timeout window.
TimeoutSeconds: &timeoutSeconds,
// To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks.
// Reflector doesn't assume bookmarks are returned at all (if the server does not support
// watch bookmarks, it will ignore this field).
// Disabled in Alpha release of watch bookmarks feature.
AllowWatchBookmarks: false,
}
w, err := r.listerWatcher.Watch(options)
@ -354,6 +367,8 @@ loop:
if err != nil {
utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err))
}
case watch.Bookmark:
// A `Bookmark` means watch has synced here, just update the resourceVersion
default:
utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
}
@ -363,7 +378,7 @@ loop:
}
}
watchDuration := r.clock.Now().Sub(start)
watchDuration := r.clock.Since(start)
if watchDuration < 1*time.Second && eventCount == 0 {
return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
}
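For illustration (not part of this diff), a rough sketch of wiring the new WatchListPageSize field on a Reflector; the helper name, clientset variable, and chunk size are assumptions:

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// runPodReflector is a hypothetical helper showing the new WatchListPageSize knob.
func runPodReflector(client kubernetes.Interface, stopCh <-chan struct{}) {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())

	r := cache.NewReflector(lw, &v1.Pod{}, store, 0)
	// Initial and resync LISTs are requested in chunks of 500; 0 keeps the pager default.
	r.WatchListPageSize = 500

	// Blocks until stopCh is closed.
	r.Run(stopCh)
}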

View File

@ -94,23 +94,6 @@ var metricsFactory = struct {
metricsProvider: noopMetricsProvider{},
}
func newReflectorMetrics(name string) *reflectorMetrics {
var ret *reflectorMetrics
if len(name) == 0 {
return ret
}
return &reflectorMetrics{
numberOfLists: metricsFactory.metricsProvider.NewListsMetric(name),
listDuration: metricsFactory.metricsProvider.NewListDurationMetric(name),
numberOfItemsInList: metricsFactory.metricsProvider.NewItemsInListMetric(name),
numberOfWatches: metricsFactory.metricsProvider.NewWatchesMetric(name),
numberOfShortWatches: metricsFactory.metricsProvider.NewShortWatchesMetric(name),
watchDuration: metricsFactory.metricsProvider.NewWatchDurationMetric(name),
numberOfItemsInWatch: metricsFactory.metricsProvider.NewItemsInWatchMetric(name),
lastResourceVersion: metricsFactory.metricsProvider.NewLastResourceVersionMetric(name),
}
}
// SetReflectorMetricsProvider sets the metrics provider
func SetReflectorMetricsProvider(metricsProvider MetricsProvider) {
metricsFactory.setProviders.Do(func() {

View File

@ -31,31 +31,84 @@ import (
"k8s.io/klog"
)
// SharedInformer has a shared data cache and is capable of distributing notifications for changes
// to the cache to multiple listeners who registered via AddEventHandler. If you use this, there is
// one behavior change compared to a standard Informer. When you receive a notification, the cache
// will be AT LEAST as fresh as the notification, but it MAY be more fresh. You should NOT depend
// on the contents of the cache exactly matching the notification you've received in handler
// functions. If there was a create, followed by a delete, the cache may NOT have your item. This
// has advantages over the broadcaster since it allows us to share a common cache across many
// controllers. Extending the broadcaster would have required us keep duplicate caches for each
// watch.
// SharedInformer provides eventually consistent linkage of its
// clients to the authoritative state of a given collection of
// objects. An object is identified by its API group, kind/resource,
// namespace, and name. One SharedInformer provides linkage to objects
// of a particular API group and kind/resource. The linked object
// collection of a SharedInformer may be further restricted to one
// namespace and/or by label selector and/or field selector.
//
// The authoritative state of an object is what apiservers provide
// access to, and an object goes through a strict sequence of states.
// A state is either "absent" or present with a ResourceVersion and
// other appropriate content.
//
// A SharedInformer maintains a local cache, exposed by Store(), of
// the state of each relevant object. This cache is eventually
// consistent with the authoritative state. This means that, unless
// prevented by persistent communication problems, if ever a
// particular object ID X is authoritatively associated with a state S
// then for every SharedInformer I whose collection includes (X, S)
// eventually either (1) I's cache associates X with S or a later
// state of X, (2) I is stopped, or (3) the authoritative state
// service for X terminates. To be formally complete, we say that the
// absent state meets any restriction by label selector or field
// selector.
//
// As a simple example, if a collection of objects is henceforth
// unchanging and a SharedInformer is created that links to that
// collection then that SharedInformer's cache eventually holds an
// exact copy of that collection (unless it is stopped too soon, the
// authoritative state service ends, or communication problems between
// the two persistently thwart achievement).
//
// As another simple example, if the local cache ever holds a
// non-absent state for some object ID and the object is eventually
// removed from the authoritative state then eventually the object is
// removed from the local cache (unless the SharedInformer is stopped
// too soon, the authoritative state service ends, or communication
// problems persistently thwart the desired result).
//
// The keys in Store() are of the form namespace/name for namespaced
// objects, and are simply the name for non-namespaced objects.
//
// A client is identified here by a ResourceEventHandler. For every
// update to the SharedInformer's local cache and for every client,
// eventually either the SharedInformer is stopped or the client is
// notified of the update. These notifications happen after the
// corresponding cache update and, in the case of a
// SharedIndexInformer, after the corresponding index updates. It is
// possible that additional cache and index updates happen before such
// a prescribed notification. For a given SharedInformer and client,
// all notifications are delivered sequentially. For a given
// SharedInformer, client, and object ID, the notifications are
// delivered in order.
//
// A delete notification exposes the last locally known non-absent
// state, except that its ResourceVersion is replaced with a
// ResourceVersion in which the object is actually absent.
type SharedInformer interface {
// AddEventHandler adds an event handler to the shared informer using the shared informer's resync
// period. Events to a single handler are delivered sequentially, but there is no coordination
// between different handlers.
AddEventHandler(handler ResourceEventHandler)
// AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the
// specified resync period. Events to a single handler are delivered sequentially, but there is
// no coordination between different handlers.
// AddEventHandlerWithResyncPeriod adds an event handler to the
// shared informer using the specified resync period. The resync
// operation consists of delivering to the handler a create
// notification for every object in the informer's local cache; it
// does not add any interactions with the authoritative storage.
AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration)
// GetStore returns the Store.
// GetStore returns the informer's local cache as a Store.
GetStore() Store
// GetController gives back a synthetic interface that "votes" to start the informer
GetController() Controller
// Run starts the shared informer, which will be stopped when stopCh is closed.
// Run starts and runs the shared informer, returning after it stops.
// The informer will be stopped when stopCh is closed.
Run(stopCh <-chan struct{})
// HasSynced returns true if the shared informer's store has synced.
// HasSynced returns true if the shared informer's store has been
// informed by at least one full LIST of the authoritative state
// of the informer's object collection. This is unrelated to "resync".
HasSynced() bool
// LastSyncResourceVersion is the resource version observed when last synced with the underlying
// store. The value returned is not synchronized with access to the underlying store and is not
@ -555,7 +608,7 @@ func (p *processorListener) run() {
case deleteNotification:
p.handler.OnDelete(notification.oldObj)
default:
utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next))
utilruntime.HandleError(fmt.Errorf("unrecognized notification: %T", next))
}
}
// the only way to get here is if the p.nextCh is empty and closed
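For illustration (not part of this diff), a minimal sketch of registering a handler with its own resync period on a shared informer; the clientset variable and empty handler bodies are placeholders:

package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// watchPods is a hypothetical helper wiring a pod informer with a 30s resync.
func watchPods(clientset kubernetes.Interface, stopCh <-chan struct{}) {
	factory := informers.NewSharedInformerFactory(clientset, 0)
	podInformer := factory.Core().V1().Pods().Informer()

	// Resync periodically re-delivers notifications for every object already in
	// the local cache; it does not contact the apiserver.
	podInformer.AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) {},
		UpdateFunc: func(oldObj, newObj interface{}) {},
		DeleteFunc: func(obj interface{}) {},
	}, 30*time.Second)

	factory.Start(stopCh)
	cache.WaitForCacheSync(stopCh, podInformer.HasSynced)
}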

View File

@ -185,7 +185,7 @@ func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, erro
set := index[indexKey]
list := make([]interface{}, 0, set.Len())
for _, key := range set.List() {
for key := range set {
list = append(list, c.items[key])
}

View File

@ -228,6 +228,7 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI
// blindly overwrite existing values based on precedence
if len(configAuthInfo.Token) > 0 {
mergedConfig.BearerToken = configAuthInfo.Token
mergedConfig.BearerTokenFile = configAuthInfo.TokenFile
} else if len(configAuthInfo.TokenFile) > 0 {
tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile)
if err != nil {
@ -296,16 +297,6 @@ func makeUserIdentificationConfig(info clientauth.Info) *restclient.Config {
return config
}
// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged using mergo for only server identification information
func makeServerIdentificationConfig(info clientauth.Info) restclient.Config {
config := restclient.Config{}
config.CAFile = info.CAFile
if info.Insecure != nil {
config.Insecure = *info.Insecure
}
return config
}
func canIdentifyUser(config restclient.Config) bool {
return len(config.Username) > 0 ||
(len(config.CertFile) > 0 || len(config.CertData) > 0) ||
@ -499,8 +490,9 @@ func (config *inClusterClientConfig) ClientConfig() (*restclient.Config, error)
if server := config.overrides.ClusterInfo.Server; len(server) > 0 {
icc.Host = server
}
if token := config.overrides.AuthInfo.Token; len(token) > 0 {
icc.BearerToken = token
if len(config.overrides.AuthInfo.Token) > 0 || len(config.overrides.AuthInfo.TokenFile) > 0 {
icc.BearerToken = config.overrides.AuthInfo.Token
icc.BearerTokenFile = config.overrides.AuthInfo.TokenFile
}
if certificateAuthorityFile := config.overrides.ClusterInfo.CertificateAuthority; len(certificateAuthorityFile) > 0 {
icc.TLSClientConfig.CAFile = certificateAuthorityFile

View File

@ -356,7 +356,7 @@ func LoadFromFile(filename string) (*clientcmdapi.Config, error) {
if err != nil {
return nil, err
}
klog.V(6).Infoln("Config loaded from file", filename)
klog.V(6).Infoln("Config loaded from file: ", filename)
// set LocationOfOrigin on every Cluster, User, and Context
for key, obj := range config.AuthInfos {

View File

@ -25,9 +25,11 @@ import (
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
const defaultPageSize = 500
const defaultPageBufferSize = 10
// ListPageFunc returns a list object for the given list options.
type ListPageFunc func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error)
@ -48,6 +50,9 @@ type ListPager struct {
PageFn ListPageFunc
FullListIfExpired bool
// Number of pages to buffer
PageBufferSize int32
}
// New creates a new pager from the provided pager function using the default
@ -58,6 +63,7 @@ func New(fn ListPageFunc) *ListPager {
PageSize: defaultPageSize,
PageFn: fn,
FullListIfExpired: true,
PageBufferSize: defaultPageBufferSize,
}
}
@ -73,6 +79,12 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti
}
var list *metainternalversion.List
for {
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
obj, err := p.PageFn(ctx, options)
if err != nil {
if !errors.IsResourceExpired(err) || !p.FullListIfExpired {
@ -115,3 +127,105 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti
options.Continue = m.GetContinue()
}
}
// EachListItem fetches runtime.Object items using this ListPager and invokes fn on each item. If
// fn returns an error, processing stops and that error is returned. If fn does not return an error,
// any error encountered while retrieving the list from the server is returned. If the context
// cancels or times out, the context error is returned. Since the list is retrieved in paginated
// chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if the pagination list
// requests exceed the expiration limit of the apiserver being called.
//
// Items are retrieved in chunks from the server to reduce the impact on the server with up to
// ListPager.PageBufferSize chunks buffered concurrently in the background.
func (p *ListPager) EachListItem(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error {
return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error {
return meta.EachListItem(obj, fn)
})
}
// eachListChunkBuffered fetches runtimeObject list chunks using this ListPager and invokes fn on
// each list chunk. If fn returns an error, processing stops and that error is returned. If fn does
// not return an error, any error encountered while retrieving the list from the server is
// returned. If the context cancels or times out, the context error is returned. Since the list is
// retrieved in paginated chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if
// the pagination list requests exceed the expiration limit of the apiserver being called.
//
// Up to ListPager.PageBufferSize chunks are buffered concurrently in the background.
func (p *ListPager) eachListChunkBuffered(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error {
if p.PageBufferSize < 0 {
return fmt.Errorf("ListPager.PageBufferSize must be >= 0, got %d", p.PageBufferSize)
}
// Ensure background goroutine is stopped if this call exits before all list items are
// processed. Cancelation error from this deferred cancel call is never returned to caller;
// either the list result has already been sent to bgResultC or the fn error is returned and
// the cancelation error is discarded.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
chunkC := make(chan runtime.Object, p.PageBufferSize)
bgResultC := make(chan error, 1)
go func() {
defer utilruntime.HandleCrash()
var err error
defer func() {
close(chunkC)
bgResultC <- err
}()
err = p.eachListChunk(ctx, options, func(chunk runtime.Object) error {
select {
case chunkC <- chunk: // buffer the chunk, this can block
case <-ctx.Done():
return ctx.Err()
}
return nil
})
}()
for o := range chunkC {
err := fn(o)
if err != nil {
return err // any fn error should be returned immediately
}
}
// promote the results of our background goroutine to the foreground
return <-bgResultC
}
// eachListChunk fetches runtimeObject list chunks using this ListPager and invokes fn on each list
// chunk. If fn returns an error, processing stops and that error is returned. If fn does not return
// an error, any error encountered while retrieving the list from the server is returned. If the
// context cancels or times out, the context error is returned. Since the list is retrieved in
// paginated chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if the
// pagination list requests exceed the expiration limit of the apiserver being called.
func (p *ListPager) eachListChunk(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error {
if options.Limit == 0 {
options.Limit = p.PageSize
}
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
obj, err := p.PageFn(ctx, options)
if err != nil {
return err
}
m, err := meta.ListAccessor(obj)
if err != nil {
return fmt.Errorf("returned object must be a list: %v", err)
}
if err := fn(obj); err != nil {
return err
}
// if we have no more items, return.
if len(m.GetContinue()) == 0 {
return nil
}
// set the next loop up
options.Continue = m.GetContinue()
}
}
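For illustration (not part of this diff), a small sketch of paging through pods with EachListItem and the new PageBufferSize field; the helper name and clientset variable are assumptions:

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/pager"
)

// printPodNames is a hypothetical helper that streams pods page by page.
func printPodNames(ctx context.Context, client kubernetes.Interface) error {
	p := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
		return client.CoreV1().Pods(metav1.NamespaceAll).List(opts)
	}))
	p.PageBufferSize = 5 // up to 5 chunks fetched ahead in the background

	return p.EachListItem(ctx, metav1.ListOptions{}, func(obj runtime.Object) error {
		pod := obj.(*v1.Pod)
		fmt.Println(pod.Namespace + "/" + pod.Name)
		return nil
	})
}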

View File

@ -50,6 +50,40 @@ type EventSink interface {
Patch(oldEvent *v1.Event, data []byte) (*v1.Event, error)
}
// CorrelatorOptions allows you to change the default of the EventSourceObjectSpamFilter
// and EventAggregator in EventCorrelator
type CorrelatorOptions struct {
// The lru cache size used for both EventSourceObjectSpamFilter and the EventAggregator
// If not specified (zero value), the default specified in events_cache.go will be picked
// This means that the LRUCacheSize has to be greater than 0.
LRUCacheSize int
// The burst size used by the token bucket rate filtering in EventSourceObjectSpamFilter
// If not specified (zero value), the default specified in events_cache.go will be picked
// This means that the BurstSize has to be greater than 0.
BurstSize int
// The fill rate of the token bucket in queries per second in EventSourceObjectSpamFilter
// If not specified (zero value), the default specified in events_cache.go will be picked
// This means that the QPS has to be greater than 0.
QPS float32
// The func used by the EventAggregator to group event keys for aggregation
// If not specified (zero value), EventAggregatorByReasonFunc will be used
KeyFunc EventAggregatorKeyFunc
// The func used by the EventAggregator to produce the aggregated message
// If not specified (zero value), EventAggregatorByReasonMessageFunc will be used
MessageFunc EventAggregatorMessageFunc
// The number of events in an interval before aggregation happens by the EventAggregator
// If not specified (zero value), the default specified in events_cache.go will be picked
// This means that the MaxEvents has to be greater than 0
MaxEvents int
// The amount of time in seconds that must transpire since the last occurrence of a similar event before it is considered new by the EventAggregator
// If not specified (zero value), the default specified in events_cache.go will be picked
// This means that the MaxIntervalInSeconds has to be greater than 0
MaxIntervalInSeconds int
// The clock used by the EventAggregator to allow for testing
// If not specified (zero value), clock.RealClock{} will be used
Clock clock.Clock
}
// EventRecorder knows how to record events on behalf of an EventSource.
type EventRecorder interface {
// Event constructs an event from the given information and puts it in the queue for sending.
@ -97,16 +131,31 @@ type EventBroadcaster interface {
// Creates a new event broadcaster.
func NewBroadcaster() EventBroadcaster {
return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration}
return &eventBroadcasterImpl{
Broadcaster: watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull),
sleepDuration: defaultSleepDuration,
}
}
func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster {
return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration}
return &eventBroadcasterImpl{
Broadcaster: watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull),
sleepDuration: sleepDuration,
}
}
func NewBroadcasterWithCorrelatorOptions(options CorrelatorOptions) EventBroadcaster {
return &eventBroadcasterImpl{
Broadcaster: watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull),
sleepDuration: defaultSleepDuration,
options: options,
}
}
type eventBroadcasterImpl struct {
*watch.Broadcaster
sleepDuration time.Duration
options CorrelatorOptions
}
// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink.
@ -116,7 +165,7 @@ func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSin
// The default math/rand package functions aren't thread safe, so create a
// new Rand object for each StartRecording call.
randGen := rand.New(rand.NewSource(time.Now().UnixNano()))
eventCorrelator := NewEventCorrelator(clock.RealClock{})
eventCorrelator := NewEventCorrelatorWithOptions(eventBroadcaster.options)
return eventBroadcaster.StartEventWatcher(
func(event *v1.Event) {
recordToSink(sink, event, eventCorrelator, randGen, eventBroadcaster.sleepDuration)
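For illustration (not part of this diff), a rough sketch of building a broadcaster with tuned correlator options; the numbers and the component name are arbitrary:

package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/record"
)

// newTunedRecorder is a hypothetical helper using the new constructor.
func newTunedRecorder() record.EventRecorder {
	broadcaster := record.NewBroadcasterWithCorrelatorOptions(record.CorrelatorOptions{
		// Zero values fall back to the defaults in events_cache.go.
		LRUCacheSize: 4096,
		BurstSize:    25,
		QPS:          1.0 / 300.0,
	})
	return broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "example-controller"})
}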

View File

@ -443,6 +443,52 @@ func NewEventCorrelator(clock clock.Clock) *EventCorrelator {
}
}
func NewEventCorrelatorWithOptions(options CorrelatorOptions) *EventCorrelator {
optionsWithDefaults := populateDefaults(options)
spamFilter := NewEventSourceObjectSpamFilter(optionsWithDefaults.LRUCacheSize,
optionsWithDefaults.BurstSize, optionsWithDefaults.QPS, optionsWithDefaults.Clock)
return &EventCorrelator{
filterFunc: spamFilter.Filter,
aggregator: NewEventAggregator(
optionsWithDefaults.LRUCacheSize,
optionsWithDefaults.KeyFunc,
optionsWithDefaults.MessageFunc,
optionsWithDefaults.MaxEvents,
optionsWithDefaults.MaxIntervalInSeconds,
optionsWithDefaults.Clock),
logger: newEventLogger(optionsWithDefaults.LRUCacheSize, optionsWithDefaults.Clock),
}
}
// populateDefaults populates the zero value options with defaults
func populateDefaults(options CorrelatorOptions) CorrelatorOptions {
if options.LRUCacheSize == 0 {
options.LRUCacheSize = maxLruCacheEntries
}
if options.BurstSize == 0 {
options.BurstSize = defaultSpamBurst
}
if options.QPS == 0 {
options.QPS = defaultSpamQPS
}
if options.KeyFunc == nil {
options.KeyFunc = EventAggregatorByReasonFunc
}
if options.MessageFunc == nil {
options.MessageFunc = EventAggregatorByReasonMessageFunc
}
if options.MaxEvents == 0 {
options.MaxEvents = defaultAggregateMaxEvents
}
if options.MaxIntervalInSeconds == 0 {
options.MaxIntervalInSeconds = defaultAggregateIntervalInSeconds
}
if options.Clock == nil {
options.Clock = clock.RealClock{}
}
return options
}
// EventCorrelate filters, aggregates, counts, and de-duplicates all incoming events
func (c *EventCorrelator) EventCorrelate(newEvent *v1.Event) (*EventCorrelateResult, error) {
if newEvent == nil {

View File

@ -18,42 +18,86 @@ package watch
import (
"sync"
"sync/atomic"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
)
func newTicketer() *ticketer {
return &ticketer{
func newEventProcessor(out chan<- watch.Event) *eventProcessor {
return &eventProcessor{
out: out,
cond: sync.NewCond(&sync.Mutex{}),
done: make(chan struct{}),
}
}
type ticketer struct {
counter uint64
// eventProcessor buffers events and writes them to an out chan when a reader
// is waiting. Because of the requirement to buffer events, it synchronizes
// input with a condition, and synchronizes output with a channel. It needs to
// be able to yield while both waiting on an input condition and while blocked
// on writing to the output channel.
type eventProcessor struct {
out chan<- watch.Event
cond *sync.Cond
current uint64
cond *sync.Cond
buff []watch.Event
done chan struct{}
}
func (t *ticketer) GetTicket() uint64 {
// -1 to start from 0
return atomic.AddUint64(&t.counter, 1) - 1
func (e *eventProcessor) run() {
for {
batch := e.takeBatch()
e.writeBatch(batch)
if e.stopped() {
return
}
}
}
func (t *ticketer) WaitForTicket(ticket uint64, f func()) {
t.cond.L.Lock()
defer t.cond.L.Unlock()
for ticket != t.current {
t.cond.Wait()
func (e *eventProcessor) takeBatch() []watch.Event {
e.cond.L.Lock()
defer e.cond.L.Unlock()
for len(e.buff) == 0 && !e.stopped() {
e.cond.Wait()
}
f()
batch := e.buff
e.buff = nil
return batch
}
t.current++
t.cond.Broadcast()
func (e *eventProcessor) writeBatch(events []watch.Event) {
for _, event := range events {
select {
case e.out <- event:
case <-e.done:
return
}
}
}
func (e *eventProcessor) push(event watch.Event) {
e.cond.L.Lock()
defer e.cond.L.Unlock()
defer e.cond.Signal()
e.buff = append(e.buff, event)
}
func (e *eventProcessor) stopped() bool {
select {
case <-e.done:
return true
default:
return false
}
}
func (e *eventProcessor) stop() {
close(e.done)
e.cond.Signal()
}
// NewIndexerInformerWatcher will create an IndexerInformer and wrap it into watch.Interface
@ -61,55 +105,44 @@ func (t *ticketer) WaitForTicket(ticket uint64, f func()) {
// it also returns a channel you can use to wait for the informers to fully shutdown.
func NewIndexerInformerWatcher(lw cache.ListerWatcher, objType runtime.Object) (cache.Indexer, cache.Controller, watch.Interface, <-chan struct{}) {
ch := make(chan watch.Event)
doneCh := make(chan struct{})
w := watch.NewProxyWatcher(ch)
t := newTicketer()
e := newEventProcessor(ch)
indexer, informer := cache.NewIndexerInformer(lw, objType, 0, cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
go t.WaitForTicket(t.GetTicket(), func() {
select {
case ch <- watch.Event{
Type: watch.Added,
Object: obj.(runtime.Object),
}:
case <-w.StopChan():
}
e.push(watch.Event{
Type: watch.Added,
Object: obj.(runtime.Object),
})
},
UpdateFunc: func(old, new interface{}) {
go t.WaitForTicket(t.GetTicket(), func() {
select {
case ch <- watch.Event{
Type: watch.Modified,
Object: new.(runtime.Object),
}:
case <-w.StopChan():
}
e.push(watch.Event{
Type: watch.Modified,
Object: new.(runtime.Object),
})
},
DeleteFunc: func(obj interface{}) {
go t.WaitForTicket(t.GetTicket(), func() {
staleObj, stale := obj.(cache.DeletedFinalStateUnknown)
if stale {
// We have no means of passing the additional information down using watch API based on watch.Event
// but the caller can filter such objects by checking if metadata.deletionTimestamp is set
obj = staleObj
}
staleObj, stale := obj.(cache.DeletedFinalStateUnknown)
if stale {
// We have no means of passing the additional information down using
// watch API based on watch.Event but the caller can filter such
// objects by checking if metadata.deletionTimestamp is set
obj = staleObj
}
select {
case ch <- watch.Event{
Type: watch.Deleted,
Object: obj.(runtime.Object),
}:
case <-w.StopChan():
}
e.push(watch.Event{
Type: watch.Deleted,
Object: obj.(runtime.Object),
})
},
}, cache.Indexers{})
go e.run()
doneCh := make(chan struct{})
go func() {
defer close(doneCh)
defer e.stop()
informer.Run(w.StopChan())
}()
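For illustration (not part of this diff), a minimal sketch of consuming the informer-backed watch.Interface; the helper name and clientset variable are assumptions:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// watchConfigMaps is a hypothetical helper; received events are simply printed.
func watchConfigMaps(client kubernetes.Interface) {
	lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "configmaps", metav1.NamespaceDefault, fields.Everything())
	_, _, w, done := watchtools.NewIndexerInformerWatcher(lw, &v1.ConfigMap{})
	defer w.Stop()

	for event := range w.ResultChan() {
		fmt.Println(event.Type)
	}
	<-done // the underlying informer has fully shut down
}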

View File

@ -153,7 +153,7 @@ func (rw *RetryWatcher) doReceive() (bool, time.Duration) {
// We need to inspect the event and get ResourceVersion out of it
switch event.Type {
case watch.Added, watch.Modified, watch.Deleted:
case watch.Added, watch.Modified, watch.Deleted, watch.Bookmark:
metaObject, ok := event.Object.(resourceVersionGetter)
if !ok {
_ = rw.send(watch.Event{