Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 02:33:34 +00:00
rebase: update kubernetes dep to 1.24.0
As Kubernetes 1.24.0 is released, update the Kubernetes dependencies to 1.24.0.

Updates: #3086

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Commit c4f79d455f (parent fc1529f268), committed by mergify[bot].
vendor/k8s.io/client-go/tools/auth/OWNERS (generated, vendored, 7 changed lines)

@@ -1,9 +1,8 @@
 # See the OWNERS docs at https://go.k8s.io/owners
 
 approvers:
-- sig-auth-authenticators-approvers
+  - sig-auth-authenticators-approvers
 reviewers:
-- sig-auth-authenticators-reviewers
+  - sig-auth-authenticators-reviewers
 labels:
-- sig/auth
-
+  - sig/auth
vendor/k8s.io/client-go/tools/cache/OWNERS (generated, vendored, 58 changed lines)

@@ -1,38 +1,28 @@
 # See the OWNERS docs at https://go.k8s.io/owners
 
 approvers:
-- thockin
-- lavalamp
-- smarterclayton
-- wojtek-t
-- deads2k
-- caesarxuchao
-- liggitt
-- ncdc
+  - thockin
+  - lavalamp
+  - smarterclayton
+  - wojtek-t
+  - deads2k
+  - caesarxuchao
+  - liggitt
+  - ncdc
 reviewers:
-- thockin
-- lavalamp
-- smarterclayton
-- wojtek-t
-- deads2k
-- brendandburns
-- derekwaynecarr
-- caesarxuchao
-- mikedanese
-- liggitt
-- davidopp
-- pmorie
-- janetkuo
-- justinsb
-- soltysh
-- jsafrane
-- dims
-- hongchaodeng
-- krousey
-- xiang90
-- ingvagabund
-- resouer
-- jessfraz
-- mfojtik
-- sdminonne
-- ncdc
+  - thockin
+  - lavalamp
+  - smarterclayton
+  - wojtek-t
+  - deads2k
+  - derekwaynecarr
+  - caesarxuchao
+  - mikedanese
+  - liggitt
+  - janetkuo
+  - justinsb
+  - soltysh
+  - jsafrane
+  - dims
+  - ingvagabund
+  - ncdc
vendor/k8s.io/client-go/tools/cache/controller.go (generated, vendored, 82 changed lines)

@@ -17,6 +17,7 @@ limitations under the License.
 package cache
 
 import (
+    "errors"
     "sync"
     "time"
 
@@ -370,8 +371,8 @@ type TransformFunc func(interface{}) (interface{}, error)
 // the returned Store for Get/List operations; Add/Modify/Deletes will cause
 // the event notifications to be faulty.
 // The given transform function will be called on all objects before they will
-// put put into the Store and corresponding Add/Modify/Delete handlers will
-// be invokved for them.
+// put into the Store and corresponding Add/Modify/Delete handlers will
+// be invoked for them.
 func NewTransformingInformer(
     lw ListerWatcher,
     objType runtime.Object,
@@ -406,6 +407,49 @@ func NewTransformingIndexerInformer(
     return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer)
 }
 
+// Multiplexes updates in the form of a list of Deltas into a Store, and informs
+// a given handler of events OnUpdate, OnAdd, OnDelete
+func processDeltas(
+    // Object which receives event notifications from the given deltas
+    handler ResourceEventHandler,
+    clientState Store,
+    transformer TransformFunc,
+    deltas Deltas,
+) error {
+    // from oldest to newest
+    for _, d := range deltas {
+        obj := d.Object
+        if transformer != nil {
+            var err error
+            obj, err = transformer(obj)
+            if err != nil {
+                return err
+            }
+        }
+
+        switch d.Type {
+        case Sync, Replaced, Added, Updated:
+            if old, exists, err := clientState.Get(obj); err == nil && exists {
+                if err := clientState.Update(obj); err != nil {
+                    return err
+                }
+                handler.OnUpdate(old, obj)
+            } else {
+                if err := clientState.Add(obj); err != nil {
+                    return err
+                }
+                handler.OnAdd(obj)
+            }
+        case Deleted:
+            if err := clientState.Delete(obj); err != nil {
+                return err
+            }
+            handler.OnDelete(obj)
+        }
+    }
+    return nil
+}
+
 // newInformer returns a controller for populating the store while also
 // providing event notifications.
 //
@@ -444,38 +488,10 @@ func newInformer(
         RetryOnError: false,
 
         Process: func(obj interface{}) error {
-            // from oldest to newest
-            for _, d := range obj.(Deltas) {
-                obj := d.Object
-                if transformer != nil {
-                    var err error
-                    obj, err = transformer(obj)
-                    if err != nil {
-                        return err
-                    }
-                }
-
-                switch d.Type {
-                case Sync, Replaced, Added, Updated:
-                    if old, exists, err := clientState.Get(obj); err == nil && exists {
-                        if err := clientState.Update(obj); err != nil {
-                            return err
-                        }
-                        h.OnUpdate(old, obj)
-                    } else {
-                        if err := clientState.Add(obj); err != nil {
-                            return err
-                        }
-                        h.OnAdd(obj)
-                    }
-                case Deleted:
-                    if err := clientState.Delete(obj); err != nil {
-                        return err
-                    }
-                    h.OnDelete(obj)
-                }
+            if deltas, ok := obj.(Deltas); ok {
+                return processDeltas(h, clientState, transformer, deltas)
             }
-            return nil
+            return errors.New("object given as Process argument is not Deltas")
         },
     }
     return New(cfg)
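The refactor above extracts the delta fan-out into processDeltas so that NewTransformingInformer and newInformer share one code path, and Process now returns an error for non-Deltas input instead of panicking on the type assertion. As a usage illustration, here is a minimal sketch (not part of this commit; the pod informer wiring and the stripManagedFields helper are illustrative assumptions) of a TransformFunc that strips managedFields before objects reach the store:

```go
package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// stripManagedFields is a hypothetical TransformFunc: it drops the often
// bulky managedFields metadata before each object is stored, copying first
// because the object may be shared with other goroutines.
func stripManagedFields(obj interface{}) (interface{}, error) {
	pod, ok := obj.(*corev1.Pod)
	if !ok {
		return obj, nil
	}
	pod = pod.DeepCopy()
	pod.ManagedFields = nil
	return pod, nil
}

func newPodInformer(client kubernetes.Interface) (cache.Store, cache.Controller) {
	lw := cache.NewListWatchFromClient(
		client.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
	return cache.NewTransformingInformer(lw, &corev1.Pod{}, 30*time.Second,
		cache.ResourceEventHandlerFuncs{
			// Handlers see the transformed object: processDeltas invokes
			// them only after the TransformFunc has run.
			AddFunc: func(obj interface{}) { fmt.Println("added:", obj.(*corev1.Pod).Name) },
		},
		stripManagedFields)
}
```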
vendor/k8s.io/client-go/tools/cache/reflector.go (generated, vendored, 8 changed lines)

@@ -231,7 +231,7 @@ var (
 
     // Used to indicate that watching stopped because of a signal from the stop
     // channel passed in from a client of the reflector.
-    errorStopRequested = errors.New("Stop requested")
+    errorStopRequested = errors.New("stop requested")
 )
 
 // resyncChan returns a channel which will receive something when a resync is
@@ -258,7 +258,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
     options := metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}
 
     if err := func() error {
-        initTrace := trace.New("Reflector ListAndWatch", trace.Field{"name", r.name})
+        initTrace := trace.New("Reflector ListAndWatch", trace.Field{Key: "name", Value: r.name})
         defer initTrace.LogIfLong(10 * time.Second)
         var list runtime.Object
         var paginatedResult bool
@@ -319,7 +319,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
             panic(r)
         case <-listCh:
         }
-        initTrace.Step("Objects listed", trace.Field{"error", err})
+        initTrace.Step("Objects listed", trace.Field{Key: "error", Value: err})
         if err != nil {
             klog.Warningf("%s: failed to list %v: %v", r.name, r.expectedTypeName, err)
             return fmt.Errorf("failed to list %v: %v", r.expectedTypeName, err)
@@ -401,7 +401,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
         timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
         options = metav1.ListOptions{
             ResourceVersion: resourceVersion,
-            // We want to avoid situations of hanging watchers. Stop any wachers that do not
+            // We want to avoid situations of hanging watchers. Stop any watchers that do not
             // receive any events within the timeout window.
             TimeoutSeconds: &timeoutSeconds,
             // To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks.
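The trace.Field changes replace positional composite literals with named Key/Value fields, which is what go vet's composites check expects for structs from other packages. A minimal sketch of the same pattern with k8s.io/utils/trace (the function and field values here are illustrative, not from this diff):

```go
package main

import (
	"time"

	utiltrace "k8s.io/utils/trace"
)

func tracedList() {
	// Named fields (Key/Value) instead of positional initialization.
	t := utiltrace.New("Example List", utiltrace.Field{Key: "name", Value: "pods"})
	defer t.LogIfLong(10 * time.Second)

	// ... perform the expensive list here ...

	t.Step("Objects listed", utiltrace.Field{Key: "error", Value: nil})
}
```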
vendor/k8s.io/client-go/tools/cache/shared_informer.go (generated, vendored, 105 changed lines)

@@ -17,6 +17,7 @@ limitations under the License.
 package cache
 
 import (
+    "errors"
     "fmt"
     "sync"
     "time"
@@ -180,6 +181,20 @@ type SharedInformer interface {
     // The handler should return quickly - any expensive processing should be
     // offloaded.
     SetWatchErrorHandler(handler WatchErrorHandler) error
+
+    // The TransformFunc is called for each object which is about to be stored.
+    //
+    // This function is intended for you to take the opportunity to
+    // remove, transform, or normalize fields. One use case is to strip unused
+    // metadata fields out of objects to save on RAM cost.
+    //
+    // Must be set before starting the informer.
+    //
+    // Note: Since the object given to the handler may be already shared with
+    // other goroutines, it is advisable to copy the object being
+    // transform before mutating it at all and returning the copy to prevent
+    // data races.
+    SetTransform(handler TransformFunc) error
 }
 
 // SharedIndexInformer provides add and get Indexers ability based on SharedInformer.
@@ -244,7 +259,7 @@ func WaitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool {
         return false
     }
 
-    klog.Infof("Caches are synced for %s ", controllerName)
+    klog.Infof("Caches are synced for %s", controllerName)
     return true
 }
 
@@ -318,6 +333,8 @@ type sharedIndexInformer struct {
 
     // Called whenever the ListAndWatch drops the connection with an error.
     watchErrorHandler WatchErrorHandler
+
+    transform TransformFunc
 }
 
 // dummyController hides the fact that a SharedInformer is different from a dedicated one
@@ -365,6 +382,18 @@ func (s *sharedIndexInformer) SetWatchErrorHandler(handler WatchErrorHandler) error {
     return nil
 }
 
+func (s *sharedIndexInformer) SetTransform(handler TransformFunc) error {
+    s.startedLock.Lock()
+    defer s.startedLock.Unlock()
+
+    if s.started {
+        return fmt.Errorf("informer has already started")
+    }
+
+    s.transform = handler
+    return nil
+}
+
 func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
     defer utilruntime.HandleCrash()
 
@@ -538,45 +567,47 @@ func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
     s.blockDeltas.Lock()
     defer s.blockDeltas.Unlock()
 
-    // from oldest to newest
-    for _, d := range obj.(Deltas) {
-        switch d.Type {
-        case Sync, Replaced, Added, Updated:
-            s.cacheMutationDetector.AddObject(d.Object)
-            if old, exists, err := s.indexer.Get(d.Object); err == nil && exists {
-                if err := s.indexer.Update(d.Object); err != nil {
-                    return err
-                }
-
-                isSync := false
-                switch {
-                case d.Type == Sync:
-                    // Sync events are only propagated to listeners that requested resync
-                    isSync = true
-                case d.Type == Replaced:
-                    if accessor, err := meta.Accessor(d.Object); err == nil {
-                        if oldAccessor, err := meta.Accessor(old); err == nil {
-                            // Replaced events that didn't change resourceVersion are treated as resync events
-                            // and only propagated to listeners that requested resync
-                            isSync = accessor.GetResourceVersion() == oldAccessor.GetResourceVersion()
-                        }
-                    }
-                }
-                s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object}, isSync)
-            } else {
-                if err := s.indexer.Add(d.Object); err != nil {
-                    return err
-                }
-                s.processor.distribute(addNotification{newObj: d.Object}, false)
-            }
-        case Deleted:
-            if err := s.indexer.Delete(d.Object); err != nil {
-                return err
-            }
-            s.processor.distribute(deleteNotification{oldObj: d.Object}, false)
-        }
-    }
-    return nil
+    if deltas, ok := obj.(Deltas); ok {
+        return processDeltas(s, s.indexer, s.transform, deltas)
+    }
+    return errors.New("object given as Process argument is not Deltas")
 }
+
+// Conforms to ResourceEventHandler
+func (s *sharedIndexInformer) OnAdd(obj interface{}) {
+    // Invocation of this function is locked under s.blockDeltas, so it is
+    // save to distribute the notification
+    s.cacheMutationDetector.AddObject(obj)
+    s.processor.distribute(addNotification{newObj: obj}, false)
+}
+
+// Conforms to ResourceEventHandler
+func (s *sharedIndexInformer) OnUpdate(old, new interface{}) {
+    isSync := false
+
+    // If is a Sync event, isSync should be true
+    // If is a Replaced event, isSync is true if resource version is unchanged.
+    // If RV is unchanged: this is a Sync/Replaced event, so isSync is true
+
+    if accessor, err := meta.Accessor(new); err == nil {
+        if oldAccessor, err := meta.Accessor(old); err == nil {
+            // Events that didn't change resourceVersion are treated as resync events
+            // and only propagated to listeners that requested resync
+            isSync = accessor.GetResourceVersion() == oldAccessor.GetResourceVersion()
+        }
+    }
+
+    // Invocation of this function is locked under s.blockDeltas, so it is
+    // save to distribute the notification
+    s.cacheMutationDetector.AddObject(new)
+    s.processor.distribute(updateNotification{oldObj: old, newObj: new}, isSync)
+}
+
+// Conforms to ResourceEventHandler
+func (s *sharedIndexInformer) OnDelete(old interface{}) {
+    // Invocation of this function is locked under s.blockDeltas, so it is
+    // save to distribute the notification
+    s.processor.distribute(deleteNotification{oldObj: old}, false)
+}
 
 // sharedProcessor has a collection of processorListener and can
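SetTransform is the new public entry point for transform support: it must be called before the informer starts, and the transform should copy shared objects before mutating them, as the doc comment above warns. A minimal sketch of wiring it through a shared informer factory (the factory setup and function name are illustrative, not from this commit):

```go
package main

import (
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

func startPodInformer(client kubernetes.Interface, stopCh <-chan struct{}) error {
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	podInformer := factory.Core().V1().Pods().Informer()

	// Must be set before the informer starts, or an error is returned.
	if err := podInformer.SetTransform(func(obj interface{}) (interface{}, error) {
		pod, ok := obj.(*corev1.Pod)
		if !ok {
			return obj, nil
		}
		pod = pod.DeepCopy() // the object may be shared; copy before mutating
		pod.ManagedFields = nil
		return pod, nil
	}); err != nil {
		return err
	}

	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
	return nil
}
```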
vendor/k8s.io/client-go/tools/cache/thread_safe_store.go (generated, vendored, 6 changed lines)

@@ -71,11 +71,7 @@ type threadSafeMap struct {
 }
 
 func (c *threadSafeMap) Add(key string, obj interface{}) {
-    c.lock.Lock()
-    defer c.lock.Unlock()
-    oldObject := c.items[key]
-    c.items[key] = obj
-    c.updateIndices(oldObject, obj, key)
+    c.Update(key, obj)
 }
 
 func (c *threadSafeMap) Update(key string, obj interface{}) {
vendor/k8s.io/client-go/tools/clientcmd/overrides.go (generated, vendored, 4 changed lines)

@@ -73,6 +73,7 @@ type ClusterOverrideFlags struct {
     CertificateAuthority  FlagInfo
     InsecureSkipTLSVerify FlagInfo
     TLSServerName         FlagInfo
+    ProxyURL              FlagInfo
 }
 
 // FlagInfo contains information about how to register a flag. This struct is useful if you want to provide a way for an extender to
@@ -160,6 +161,7 @@ const (
     FlagUsername = "username"
     FlagPassword = "password"
     FlagTimeout  = "request-timeout"
+    FlagProxyURL = "proxy-url"
 )
 
 // RecommendedConfigOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
@@ -195,6 +197,7 @@ func RecommendedClusterOverrideFlags(prefix string) ClusterOverrideFlags {
         CertificateAuthority:  FlagInfo{prefix + FlagCAFile, "", "", "Path to a cert file for the certificate authority"},
         InsecureSkipTLSVerify: FlagInfo{prefix + FlagInsecure, "", "false", "If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure"},
         TLSServerName:         FlagInfo{prefix + FlagTLSServerName, "", "", "If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used."},
+        ProxyURL:              FlagInfo{prefix + FlagProxyURL, "", "", "If provided, this URL will be used to connect via proxy"},
     }
 }
 
@@ -234,6 +237,7 @@ func BindClusterFlags(clusterInfo *clientcmdapi.Cluster, flags *pflag.FlagSet, flagNames ClusterOverrideFlags) {
     flagNames.CertificateAuthority.BindStringFlag(flags, &clusterInfo.CertificateAuthority)
     flagNames.InsecureSkipTLSVerify.BindBoolFlag(flags, &clusterInfo.InsecureSkipTLSVerify)
     flagNames.TLSServerName.BindStringFlag(flags, &clusterInfo.TLSServerName)
+    flagNames.ProxyURL.BindStringFlag(flags, &clusterInfo.ProxyURL)
 }
 
 // BindFlags is a convenience method to bind the specified flags to their associated variables
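The new proxy-url override rides the same FlagInfo/Bind pattern as the existing cluster flags. A small sketch of consuming it from a CLI (the function name is illustrative, not from this commit):

```go
package main

import (
	"github.com/spf13/pflag"
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func bindClusterFlags(flags *pflag.FlagSet) *clientcmdapi.Cluster {
	cluster := &clientcmdapi.Cluster{}
	// RecommendedClusterOverrideFlags now includes the new "proxy-url" flag,
	// so BindClusterFlags wires it to cluster.ProxyURL automatically.
	overrides := clientcmd.RecommendedClusterOverrideFlags("")
	clientcmd.BindClusterFlags(cluster, flags, overrides)
	return cluster
}
```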
vendor/k8s.io/client-go/tools/events/OWNERS (generated, vendored, 12 changed lines)

@@ -1,10 +1,10 @@
 # See the OWNERS docs at https://go.k8s.io/owners
 
 approvers:
-- sig-instrumentation-approvers
-- yastij
-- wojtek-t
+  - sig-instrumentation-approvers
+  - wojtek-t
 reviewers:
-- sig-instrumentation-reviewers
-- yastij
-- wojtek-t
+  - sig-instrumentation-reviewers
+  - wojtek-t
+emeritus_approvers:
+  - yastij
vendor/k8s.io/client-go/tools/leaderelection/OWNERS (generated, vendored, 15 changed lines)

@@ -1,12 +1,11 @@
 # See the OWNERS docs at https://go.k8s.io/owners
 
 approvers:
-- mikedanese
-- timothysc
+  - mikedanese
 reviewers:
-- wojtek-t
-- deads2k
-- mikedanese
-- timothysc
-- ingvagabund
-- resouer
+  - wojtek-t
+  - deads2k
+  - mikedanese
+  - ingvagabund
+emeritus_approvers:
+  - timothysc
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go (generated, vendored, 14 changed lines)

@@ -32,7 +32,7 @@ import (
 // and use ConfigMaps as the means to pass that configuration
 // data we will likely move to deprecate the Endpoints lock.
 
-type ConfigMapLock struct {
+type configMapLock struct {
     // ConfigMapMeta should contain a Name and a Namespace of a
     // ConfigMapMeta object that the LeaderElector will attempt to lead.
     ConfigMapMeta metav1.ObjectMeta
@@ -42,7 +42,7 @@ type ConfigMapLock struct {
 }
 
 // Get returns the election record from a ConfigMap Annotation
-func (cml *ConfigMapLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
+func (cml *configMapLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
     var record LeaderElectionRecord
     var err error
     cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Get(ctx, cml.ConfigMapMeta.Name, metav1.GetOptions{})
@@ -63,7 +63,7 @@ func (cml *ConfigMapLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
 }
 
 // Create attempts to create a LeaderElectionRecord annotation
-func (cml *ConfigMapLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
+func (cml *configMapLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
     recordBytes, err := json.Marshal(ler)
     if err != nil {
         return err
@@ -81,7 +81,7 @@ func (cml *ConfigMapLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
 }
 
 // Update will update an existing annotation on a given resource.
-func (cml *ConfigMapLock) Update(ctx context.Context, ler LeaderElectionRecord) error {
+func (cml *configMapLock) Update(ctx context.Context, ler LeaderElectionRecord) error {
     if cml.cm == nil {
         return errors.New("configmap not initialized, call get or create first")
     }
@@ -102,7 +102,7 @@ func (cml *ConfigMapLock) Update(ctx context.Context, ler LeaderElectionRecord) error {
 }
 
 // RecordEvent in leader election while adding meta-data
-func (cml *ConfigMapLock) RecordEvent(s string) {
+func (cml *configMapLock) RecordEvent(s string) {
     if cml.LockConfig.EventRecorder == nil {
         return
     }
@@ -116,11 +116,11 @@ func (cml *ConfigMapLock) RecordEvent(s string) {
 
 // Describe is used to convert details on current resource lock
 // into a string
-func (cml *ConfigMapLock) Describe() string {
+func (cml *configMapLock) Describe() string {
     return fmt.Sprintf("%v/%v", cml.ConfigMapMeta.Namespace, cml.ConfigMapMeta.Name)
 }
 
 // Identity returns the Identity of the lock
-func (cml *ConfigMapLock) Identity() string {
+func (cml *configMapLock) Identity() string {
     return cml.LockConfig.Identity
 }
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go (generated, vendored, 14 changed lines)

@@ -27,7 +27,7 @@ import (
     corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
 )
 
-type EndpointsLock struct {
+type endpointsLock struct {
     // EndpointsMeta should contain a Name and a Namespace of an
     // Endpoints object that the LeaderElector will attempt to lead.
     EndpointsMeta metav1.ObjectMeta
@@ -37,7 +37,7 @@ type EndpointsLock struct {
 }
 
 // Get returns the election record from a Endpoints Annotation
-func (el *EndpointsLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
+func (el *endpointsLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
     var record LeaderElectionRecord
     var err error
     el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Get(ctx, el.EndpointsMeta.Name, metav1.GetOptions{})
@@ -58,7 +58,7 @@ func (el *EndpointsLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
 }
 
 // Create attempts to create a LeaderElectionRecord annotation
-func (el *EndpointsLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
+func (el *endpointsLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
     recordBytes, err := json.Marshal(ler)
     if err != nil {
         return err
@@ -76,7 +76,7 @@ func (el *EndpointsLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
 }
 
 // Update will update and existing annotation on a given resource.
-func (el *EndpointsLock) Update(ctx context.Context, ler LeaderElectionRecord) error {
+func (el *endpointsLock) Update(ctx context.Context, ler LeaderElectionRecord) error {
     if el.e == nil {
         return errors.New("endpoint not initialized, call get or create first")
     }
@@ -97,7 +97,7 @@ func (el *EndpointsLock) Update(ctx context.Context, ler LeaderElectionRecord) error {
 }
 
 // RecordEvent in leader election while adding meta-data
-func (el *EndpointsLock) RecordEvent(s string) {
+func (el *endpointsLock) RecordEvent(s string) {
     if el.LockConfig.EventRecorder == nil {
         return
     }
@@ -111,11 +111,11 @@ func (el *EndpointsLock) RecordEvent(s string) {
 
 // Describe is used to convert details on current resource lock
 // into a string
-func (el *EndpointsLock) Describe() string {
+func (el *endpointsLock) Describe() string {
     return fmt.Sprintf("%v/%v", el.EndpointsMeta.Namespace, el.EndpointsMeta.Name)
 }
 
 // Identity returns the Identity of the lock
-func (el *EndpointsLock) Identity() string {
+func (el *endpointsLock) Identity() string {
     return el.LockConfig.Identity
 }
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go (generated, vendored, 86 changed lines)

@@ -31,11 +31,77 @@ import (
 
 const (
     LeaderElectionRecordAnnotationKey = "control-plane.alpha.kubernetes.io/leader"
-    EndpointsResourceLock             = "endpoints"
-    ConfigMapsResourceLock            = "configmaps"
+    endpointsResourceLock             = "endpoints"
+    configMapsResourceLock            = "configmaps"
     LeasesResourceLock                = "leases"
-    EndpointsLeasesResourceLock       = "endpointsleases"
-    ConfigMapsLeasesResourceLock      = "configmapsleases"
+    // When using EndpointsLeasesResourceLock, you need to ensure that
+    // API Priority & Fairness is configured with non-default flow-schema
+    // that will catch the necessary operations on leader-election related
+    // endpoint objects.
+    //
+    // The example of such flow scheme could look like this:
+    //   apiVersion: flowcontrol.apiserver.k8s.io/v1beta2
+    //   kind: FlowSchema
+    //   metadata:
+    //     name: my-leader-election
+    //   spec:
+    //     distinguisherMethod:
+    //       type: ByUser
+    //     matchingPrecedence: 200
+    //     priorityLevelConfiguration:
+    //       name: leader-election # reference the <leader-election> PL
+    //     rules:
+    //     - resourceRules:
+    //       - apiGroups:
+    //         - ""
+    //         namespaces:
+    //         - '*'
+    //         resources:
+    //         - endpoints
+    //         verbs:
+    //         - get
+    //         - create
+    //         - update
+    //       subjects:
+    //       - kind: ServiceAccount
+    //         serviceAccount:
+    //           name: '*'
+    //           namespace: kube-system
+    EndpointsLeasesResourceLock = "endpointsleases"
+    // When using ConfigMapsLeasesResourceLock, you need to ensure that
+    // API Priority & Fairness is configured with non-default flow-schema
+    // that will catch the necessary operations on leader-election related
+    // configmap objects.
+    //
+    // The example of such flow scheme could look like this:
+    //   apiVersion: flowcontrol.apiserver.k8s.io/v1beta2
+    //   kind: FlowSchema
+    //   metadata:
+    //     name: my-leader-election
+    //   spec:
+    //     distinguisherMethod:
+    //       type: ByUser
+    //     matchingPrecedence: 200
+    //     priorityLevelConfiguration:
+    //       name: leader-election # reference the <leader-election> PL
+    //     rules:
+    //     - resourceRules:
+    //       - apiGroups:
+    //         - ""
+    //         namespaces:
+    //         - '*'
+    //         resources:
+    //         - configmaps
+    //         verbs:
+    //         - get
+    //         - create
+    //         - update
+    //       subjects:
+    //       - kind: ServiceAccount
+    //         serviceAccount:
+    //           name: '*'
+    //           namespace: kube-system
+    ConfigMapsLeasesResourceLock = "configmapsleases"
 )
 
 // LeaderElectionRecord is the record that is stored in the leader election annotation.
@@ -98,7 +164,7 @@ type Interface interface {
 
 // Manufacture will create a lock of a given type according to the input parameters
 func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interface, coordinationClient coordinationv1.CoordinationV1Interface, rlc ResourceLockConfig) (Interface, error) {
-    endpointsLock := &EndpointsLock{
+    endpointsLock := &endpointsLock{
         EndpointsMeta: metav1.ObjectMeta{
             Namespace: ns,
             Name:      name,
@@ -106,7 +172,7 @@ func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interface, coordinationClient coordinationv1.CoordinationV1Interface, rlc ResourceLockConfig) (Interface, error) {
         Client:     coreClient,
         LockConfig: rlc,
     }
-    configmapLock := &ConfigMapLock{
+    configmapLock := &configMapLock{
         ConfigMapMeta: metav1.ObjectMeta{
             Namespace: ns,
             Name:      name,
@@ -123,10 +189,10 @@ func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interface, coordinationClient coordinationv1.CoordinationV1Interface, rlc ResourceLockConfig) (Interface, error) {
         LockConfig: rlc,
     }
     switch lockType {
-    case EndpointsResourceLock:
-        return endpointsLock, nil
-    case ConfigMapsResourceLock:
-        return configmapLock, nil
+    case endpointsResourceLock:
+        return nil, fmt.Errorf("endpoints lock is removed, migrate to %s", EndpointsLeasesResourceLock)
+    case configMapsResourceLock:
+        return nil, fmt.Errorf("configmaps lock is removed, migrate to %s", ConfigMapsLeasesResourceLock)
     case LeasesResourceLock:
         return leaseLock, nil
    case EndpointsLeasesResourceLock:
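With the endpoints and configmaps lock types now returning hard errors, callers must construct leases-backed locks (or the transitional endpointsleases/configmapsleases variants). A minimal migration sketch, with illustrative function and parameter names:

```go
package main

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func newLeaseLock(client kubernetes.Interface, ns, name, id string) (resourcelock.Interface, error) {
	// resourcelock.New now rejects "endpoints" and "configmaps";
	// "leases" is the recommended lock type going forward.
	return resourcelock.New(
		resourcelock.LeasesResourceLock,
		ns,
		name,
		client.CoreV1(),
		client.CoordinationV1(),
		resourcelock.ResourceLockConfig{Identity: id},
	)
}
```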
vendor/k8s.io/client-go/tools/metrics/OWNERS (generated, vendored, 5 changed lines)

@@ -1,6 +1,5 @@
 # See the OWNERS docs at https://go.k8s.io/owners
 
 reviewers:
-- wojtek-t
-- krousey
-- jayunit100
+  - wojtek-t
+  - jayunit100
vendor/k8s.io/client-go/tools/metrics/metrics.go (generated, vendored, 21 changed lines)

@@ -42,6 +42,11 @@ type LatencyMetric interface {
     Observe(ctx context.Context, verb string, u url.URL, latency time.Duration)
 }
 
+// SizeMetric observes client response size partitioned by verb and host.
+type SizeMetric interface {
+    Observe(ctx context.Context, verb string, host string, size float64)
+}
+
 // ResultMetric counts response codes partitioned by method and host.
 type ResultMetric interface {
     Increment(ctx context.Context, code string, method string, host string)
@@ -60,6 +65,10 @@ var (
     ClientCertRotationAge DurationMetric = noopDuration{}
     // RequestLatency is the latency metric that rest clients will update.
     RequestLatency LatencyMetric = noopLatency{}
+    // RequestSize is the request size metric that rest clients will update.
+    RequestSize SizeMetric = noopSize{}
+    // ResponseSize is the response size metric that rest clients will update.
+    ResponseSize SizeMetric = noopSize{}
     // RateLimiterLatency is the client side rate limiter latency metric.
     RateLimiterLatency LatencyMetric = noopLatency{}
     // RequestResult is the result metric that rest clients will update.
@@ -74,6 +83,8 @@ type RegisterOpts struct {
     ClientCertExpiry      ExpiryMetric
     ClientCertRotationAge DurationMetric
     RequestLatency        LatencyMetric
+    RequestSize           SizeMetric
+    ResponseSize          SizeMetric
     RateLimiterLatency    LatencyMetric
     RequestResult         ResultMetric
     ExecPluginCalls       CallsMetric
@@ -92,6 +103,12 @@ func Register(opts RegisterOpts) {
         if opts.RequestLatency != nil {
             RequestLatency = opts.RequestLatency
         }
+        if opts.RequestSize != nil {
+            RequestSize = opts.RequestSize
+        }
+        if opts.ResponseSize != nil {
+            ResponseSize = opts.ResponseSize
+        }
         if opts.RateLimiterLatency != nil {
             RateLimiterLatency = opts.RateLimiterLatency
         }
@@ -116,6 +133,10 @@ type noopLatency struct{}
 
 func (noopLatency) Observe(context.Context, string, url.URL, time.Duration) {}
 
+type noopSize struct{}
+
+func (noopSize) Observe(context.Context, string, string, float64) {}
+
 type noopResult struct{}
 
 func (noopResult) Increment(context.Context, string, string, string) {}
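RequestSize and ResponseSize extend the pluggable client metrics with byte-size observations, defaulting to no-ops until something calls Register. A sketch of registering a Prometheus-backed implementation (the metric name and bucket layout are illustrative assumptions, not from this commit):

```go
package main

import (
	"context"

	"github.com/prometheus/client_golang/prometheus"
	"k8s.io/client-go/tools/metrics"
)

// requestSize adapts a Prometheus histogram to the new SizeMetric interface.
type requestSize struct {
	hist *prometheus.HistogramVec
}

func (s *requestSize) Observe(ctx context.Context, verb string, host string, size float64) {
	s.hist.WithLabelValues(verb, host).Observe(size)
}

func registerClientMetrics() {
	hist := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "rest_client_request_size_bytes",
		Help:    "Request size in bytes, partitioned by verb and host.",
		Buckets: prometheus.ExponentialBuckets(64, 4, 8),
	}, []string{"verb", "host"})
	prometheus.MustRegister(hist)

	// Unset fields in RegisterOpts keep their no-op defaults.
	metrics.Register(metrics.RegisterOpts{
		RequestSize: &requestSize{hist: hist},
	})
}
```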
vendor/k8s.io/client-go/tools/record/OWNERS (generated, vendored, 4 changed lines)

@@ -1,6 +1,6 @@
 # See the OWNERS docs at https://go.k8s.io/owners
 
 reviewers:
-- sig-instrumentation-reviewers
+  - sig-instrumentation-reviewers
 approvers:
-- sig-instrumentation-approvers
+  - sig-instrumentation-approvers
vendor/k8s.io/client-go/tools/record/event.go (generated, vendored, 2 changed lines)

@@ -291,7 +291,7 @@ func (e *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface {
 func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) watch.Interface {
     return e.StartEventWatcher(
         func(e *v1.Event) {
-            klog.V(verbosity).InfoS("Event occurred", "object", klog.KRef(e.InvolvedObject.Namespace, e.InvolvedObject.Name), "kind", e.InvolvedObject.Kind, "apiVersion", e.InvolvedObject.APIVersion, "type", e.Type, "reason", e.Reason, "message", e.Message)
+            klog.V(verbosity).InfoS("Event occurred", "object", klog.KRef(e.InvolvedObject.Namespace, e.InvolvedObject.Name), "fieldPath", e.InvolvedObject.FieldPath, "kind", e.InvolvedObject.Kind, "apiVersion", e.InvolvedObject.APIVersion, "type", e.Type, "reason", e.Reason, "message", e.Message)
         })
 }
 
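The extra fieldPath key lets structured event logs distinguish events that target a sub-object, such as a specific container within a pod. A sketch of a broadcaster that emits these structured lines (the component name and wiring are illustrative, not from this commit):

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

func newRecorder(client kubernetes.Interface) record.EventRecorder {
	broadcaster := record.NewBroadcaster()
	// Logs each event via klog at verbosity 4, now including "fieldPath".
	broadcaster.StartStructuredLogging(4)
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
		Interface: client.CoreV1().Events(""),
	})
	return broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "example-controller"})
}
```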
vendor/k8s.io/client-go/tools/watch/retrywatcher.go (generated, vendored, 8 changed lines)

@@ -268,7 +268,13 @@ func (rw *RetryWatcher) receive() {
             return
         }
 
-        time.Sleep(retryAfter)
+        timer := time.NewTimer(retryAfter)
+        select {
+        case <-ctx.Done():
+            timer.Stop()
+            return
+        case <-timer.C:
+        }
 
         klog.V(4).Infof("Restarting RetryWatcher at RV=%q", rw.lastResourceVersion)
     }, rw.minRestartDelay)
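Replacing the bare time.Sleep with a timer plus select makes the retry wait cancellable, so a stopped watcher no longer blocks for the remainder of the interval. The same pattern written as a standalone helper (the name sleepCtx is illustrative):

```go
package main

import (
	"context"
	"time"
)

// sleepCtx waits for d or until ctx is cancelled, whichever comes first,
// mirroring the cancellable wait the RetryWatcher now performs.
func sleepCtx(ctx context.Context, d time.Duration) error {
	timer := time.NewTimer(d)
	defer timer.Stop() // release the timer if the context fires first
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-timer.C:
		return nil
	}
}
```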