mirror of https://github.com/ceph/ceph-csi.git
rebase: update kubernetes to 1.28.0 in main
Update kubernetes to 1.28.0 in the main repo.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
committed by mergify[bot]
parent b2fdc269c3
commit ff3e84ad67
vendor/k8s.io/client-go/tools/cache/OWNERS | 4 changes (generated, vendored)

@@ -2,7 +2,6 @@
 
 approvers:
   - thockin
-  - lavalamp
   - smarterclayton
   - wojtek-t
   - deads2k
@@ -11,7 +10,6 @@ approvers:
   - ncdc
 reviewers:
   - thockin
-  - lavalamp
   - smarterclayton
   - wojtek-t
   - deads2k
@@ -26,3 +24,5 @@ reviewers:
   - dims
   - ingvagabund
   - ncdc
+emeritus_approvers:
+  - lavalamp
vendor/k8s.io/client-go/tools/cache/controller.go | 4 changes (generated, vendored)

@@ -18,7 +18,6 @@ package cache
 
 import (
 	"errors"
-	"os"
 	"sync"
 	"time"
 
@@ -148,9 +147,6 @@ func (c *controller) Run(stopCh <-chan struct{}) {
 	if c.config.WatchErrorHandler != nil {
 		r.watchErrorHandler = c.config.WatchErrorHandler
 	}
-	if s := os.Getenv("ENABLE_CLIENT_GO_WATCH_LIST_ALPHA"); len(s) > 0 {
-		r.UseWatchList = true
-	}
 
 	c.reflectorMutex.Lock()
 	c.reflector = r
vendor/k8s.io/client-go/tools/cache/object-names.go | 65 changes (generated, vendored, new file)

@@ -0,0 +1,65 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// ObjectName is a reference to an object of some implicit kind
+type ObjectName struct {
+	Namespace string
+	Name      string
+}
+
+// NewObjectName constructs a new one
+func NewObjectName(namespace, name string) ObjectName {
+	return ObjectName{Namespace: namespace, Name: name}
+}
+
+// Parts is the inverse of the constructor
+func (objName ObjectName) Parts() (namespace, name string) {
+	return objName.Namespace, objName.Name
+}
+
+// String returns the standard string encoding,
+// which is designed to match the historical behavior of MetaNamespaceKeyFunc.
+// Note this behavior is different from the String method of types.NamespacedName.
+func (objName ObjectName) String() string {
+	if len(objName.Namespace) > 0 {
+		return objName.Namespace + "/" + objName.Name
+	}
+	return objName.Name
+}
+
+// ParseObjectName tries to parse the standard encoding
+func ParseObjectName(str string) (ObjectName, error) {
+	var objName ObjectName
+	var err error
+	objName.Namespace, objName.Name, err = SplitMetaNamespaceKey(str)
+	return objName, err
+}
+
+// NamespacedNameAsObjectName rebrands the given NamespacedName as an ObjectName
+func NamespacedNameAsObjectName(nn types.NamespacedName) ObjectName {
+	return NewObjectName(nn.Namespace, nn.Name)
+}
+
+// AsNamespacedName rebrands as a NamespacedName
+func (objName ObjectName) AsNamespacedName() types.NamespacedName {
+	return types.NamespacedName{Namespace: objName.Namespace, Name: objName.Name}
+}
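The new ObjectName type gives cache users a structured alternative to the "<namespace>/<name>" string keys. A minimal sketch of how it round-trips (not part of the commit; assumes client-go v0.28.x on the import path):

package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

func main() {
	n := cache.NewObjectName("kube-system", "coredns")
	fmt.Println(n.String()) // "kube-system/coredns", same encoding as MetaNamespaceKeyFunc

	parsed, err := cache.ParseObjectName("kube-system/coredns")
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed == n) // true: ObjectName is a comparable struct

	// Conversion to the apimachinery type differs only in String():
	// a cluster-scoped ObjectName renders as "name", not "/name".
	nn := n.AsNamespacedName()
	fmt.Println(nn.Namespace, nn.Name) // kube-system coredns
}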
vendor/k8s.io/client-go/tools/cache/reflector.go | 30 changes (generated, vendored)

@@ -22,6 +22,7 @@ import (
 	"fmt"
 	"io"
 	"math/rand"
+	"os"
 	"reflect"
 	"strings"
 	"sync"
@@ -69,9 +70,7 @@ type Reflector struct {
 	listerWatcher ListerWatcher
 	// backoff manages backoff of ListWatch
 	backoffManager wait.BackoffManager
-	// initConnBackoffManager manages backoff the initial connection with the Watch call of ListAndWatch.
-	initConnBackoffManager wait.BackoffManager
-	resyncPeriod           time.Duration
+	resyncPeriod   time.Duration
 	// clock allows tests to manipulate time
 	clock clock.Clock
 	// paginatedResult defines whether pagination should be forced for list calls.
@@ -220,11 +219,10 @@ func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store S
 		// We used to make the call every 1sec (1 QPS), the goal here is to achieve ~98% traffic reduction when
 		// API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is
 		// 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff.
-		backoffManager:         wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock),
-		initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock),
-		clock:                  reflectorClock,
-		watchErrorHandler:      WatchErrorHandler(DefaultWatchErrorHandler),
-		expectedType:           reflect.TypeOf(expectedType),
+		backoffManager:    wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock),
+		clock:             reflectorClock,
+		watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler),
+		expectedType:      reflect.TypeOf(expectedType),
 	}
 
 	if r.name == "" {
@@ -239,6 +237,10 @@ func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store S
 		r.expectedGVK = getExpectedGVKFromObject(expectedType)
 	}
 
+	if s := os.Getenv("ENABLE_CLIENT_GO_WATCH_LIST_ALPHA"); len(s) > 0 {
+		r.UseWatchList = true
+	}
+
 	return r
 }
 
@@ -420,7 +422,7 @@ func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc
 		select {
 		case <-stopCh:
 			return nil
-		case <-r.initConnBackoffManager.Backoff().C():
+		case <-r.backoffManager.Backoff().C():
 			continue
 		}
 	}
@@ -446,7 +448,7 @@ func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc
 				select {
 				case <-stopCh:
 					return nil
-				case <-r.initConnBackoffManager.Backoff().C():
+				case <-r.backoffManager.Backoff().C():
 					continue
 				}
 			case apierrors.IsInternalError(err) && retry.ShouldRetry():
@@ -508,7 +510,7 @@ func (r *Reflector) list(stopCh <-chan struct{}) error {
 			pager.PageSize = 0
 		}
 
-		list, paginatedResult, err = pager.List(context.Background(), options)
+		list, paginatedResult, err = pager.ListWithAlloc(context.Background(), options)
 		if isExpiredError(err) || isTooLargeResourceVersionError(err) {
 			r.setIsLastSyncResourceVersionUnavailable(true)
 			// Retry immediately if the resource version used to list is unavailable.
@@ -517,7 +519,7 @@ func (r *Reflector) list(stopCh <-chan struct{}) error {
 			// resource version it is listing at is expired or the cache may not yet be synced to the provided
 			// resource version. So we need to fallback to resourceVersion="" in all to recover and ensure
 			// the reflector makes forward progress.
-			list, paginatedResult, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()})
+			list, paginatedResult, err = pager.ListWithAlloc(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()})
 		}
 		close(listCh)
 	}()
@@ -555,7 +557,7 @@ func (r *Reflector) list(stopCh <-chan struct{}) error {
 	}
 	resourceVersion = listMetaInterface.GetResourceVersion()
 	initTrace.Step("Resource version extracted")
-	items, err := meta.ExtractList(list)
+	items, err := meta.ExtractListWithAlloc(list)
 	if err != nil {
 		return fmt.Errorf("unable to understand list result %#v (%v)", list, err)
 	}
@@ -599,7 +601,7 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) {
 	isErrorRetriableWithSideEffectsFn := func(err error) bool {
 		if canRetry := isWatchErrorRetriable(err); canRetry {
 			klog.V(2).Infof("%s: watch-list of %v returned %v - backing off", r.name, r.typeDescription, err)
-			<-r.initConnBackoffManager.Backoff().C()
+			<-r.backoffManager.Backoff().C()
 			return true
 		}
 		if isExpiredError(err) || isTooLargeResourceVersionError(err) {
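Moving the ENABLE_CLIENT_GO_WATCH_LIST_ALPHA check from controller.Run into NewReflectorWithOptions means the opt-in now applies to every reflector built in the process, not only those driven through a controller. A sketch of enabling it (illustrative, not from the commit; the informer wiring and in-cluster config are assumptions):

package main

import (
	"os"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Must be set before any reflector is constructed, since the check now
	// runs in NewReflectorWithOptions rather than at controller start; any
	// non-empty value enables it (the code checks len(s) > 0).
	os.Setenv("ENABLE_CLIENT_GO_WATCH_LIST_ALPHA", "true")

	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(clientset, 10*time.Minute)
	_ = factory.Core().V1().Pods().Informer() // its reflector gets UseWatchList = true
}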
vendor/k8s.io/client-go/tools/cache/shared_informer.go | 37 changes (generated, vendored)

@@ -459,29 +459,30 @@ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
 		klog.Warningf("The sharedIndexInformer has started, run more than once is not allowed")
 		return
 	}
-	fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
-		KnownObjects:          s.indexer,
-		EmitDeltaTypeReplaced: true,
-		Transformer:           s.transform,
-	})
-
-	cfg := &Config{
-		Queue:             fifo,
-		ListerWatcher:     s.listerWatcher,
-		ObjectType:        s.objectType,
-		ObjectDescription: s.objectDescription,
-		FullResyncPeriod:  s.resyncCheckPeriod,
-		RetryOnError:      false,
-		ShouldResync:      s.processor.shouldResync,
-
-		Process:           s.HandleDeltas,
-		WatchErrorHandler: s.watchErrorHandler,
-	}
-
 	func() {
 		s.startedLock.Lock()
 		defer s.startedLock.Unlock()
 
+		fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
+			KnownObjects:          s.indexer,
+			EmitDeltaTypeReplaced: true,
+			Transformer:           s.transform,
+		})
+
+		cfg := &Config{
+			Queue:             fifo,
+			ListerWatcher:     s.listerWatcher,
+			ObjectType:        s.objectType,
+			ObjectDescription: s.objectDescription,
+			FullResyncPeriod:  s.resyncCheckPeriod,
+			RetryOnError:      false,
+			ShouldResync:      s.processor.shouldResync,
+
+			Process:           s.HandleDeltas,
+			WatchErrorHandler: s.watchErrorHandler,
+		}
+
 		s.controller = New(cfg)
 		s.controller.(*controller).clock = s.clock
 		s.started = true
vendor/k8s.io/client-go/tools/cache/store.go | 31 changes (generated, vendored)

@@ -21,6 +21,7 @@ import (
 	"strings"
 
 	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // Store is a generic object storage and processing interface. A
@@ -99,20 +100,38 @@ type ExplicitKey string
 // The key uses the format <namespace>/<name> unless <namespace> is empty, then
 // it's just <name>.
 //
-// TODO: replace key-as-string with a key-as-struct so that this
-// packing/unpacking won't be necessary.
+// Clients that want a structured alternative can use ObjectToName or MetaObjectToName.
+// Note: this would not be a client that wants a key for a Store because those are
+// necessarily strings.
+//
+// TODO maybe some day?: change Store to be keyed differently
 func MetaNamespaceKeyFunc(obj interface{}) (string, error) {
 	if key, ok := obj.(ExplicitKey); ok {
 		return string(key), nil
 	}
-	meta, err := meta.Accessor(obj)
+	objName, err := ObjectToName(obj)
 	if err != nil {
-		return "", fmt.Errorf("object has no meta: %v", err)
+		return "", err
 	}
-	if len(meta.GetNamespace()) > 0 {
-		return meta.GetNamespace() + "/" + meta.GetName(), nil
+	return objName.String(), nil
+}
+
+// ObjectToName returns the structured name for the given object,
+// if indeed it can be viewed as a metav1.Object.
+func ObjectToName(obj interface{}) (ObjectName, error) {
+	meta, err := meta.Accessor(obj)
+	if err != nil {
+		return ObjectName{}, fmt.Errorf("object has no meta: %v", err)
+	}
+	return MetaObjectToName(meta), nil
+}
+
+// MetaObjectToName returns the structured name for the given object
+func MetaObjectToName(obj metav1.Object) ObjectName {
+	if len(obj.GetNamespace()) > 0 {
+		return ObjectName{Namespace: obj.GetNamespace(), Name: obj.GetName()}
 	}
-	return meta.GetName(), nil
+	return ObjectName{Namespace: "", Name: obj.GetName()}
 }
 
 // SplitMetaNamespaceKey returns the namespace and name that
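MetaNamespaceKeyFunc keeps its string contract but is now layered on the structured helpers above. A small sketch of the relationship (illustrative; the Pod literal is an assumption):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web-0"}}

	key, _ := cache.MetaNamespaceKeyFunc(pod) // "default/web-0", exactly as before

	objName, _ := cache.ObjectToName(pod) // same result, structured
	fmt.Println(key == objName.String())  // true

	// MetaObjectToName skips the meta.Accessor lookup when a metav1.Object
	// is already in hand.
	fmt.Println(cache.MetaObjectToName(&pod.ObjectMeta)) // default/web-0
}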
vendor/k8s.io/client-go/tools/clientcmd/api/types.go | 14 changes (generated, vendored)

@@ -67,7 +67,7 @@ type Preferences struct {
 type Cluster struct {
 	// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
 	// +k8s:conversion-gen=false
-	LocationOfOrigin string
+	LocationOfOrigin string `json:"-"`
 	// Server is the address of the kubernetes cluster (https://hostname:port).
 	Server string `json:"server"`
 	// TLSServerName is used to check server certificate. If TLSServerName is empty, the hostname used to contact the server is used.
@@ -107,7 +107,7 @@ type Cluster struct {
 type AuthInfo struct {
 	// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
 	// +k8s:conversion-gen=false
-	LocationOfOrigin string
+	LocationOfOrigin string `json:"-"`
 	// ClientCertificate is the path to a client cert file for TLS.
 	// +optional
 	ClientCertificate string `json:"client-certificate,omitempty"`
@@ -159,7 +159,7 @@ type AuthInfo struct {
 type Context struct {
 	// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
 	// +k8s:conversion-gen=false
-	LocationOfOrigin string
+	LocationOfOrigin string `json:"-"`
 	// Cluster is the name of the cluster for this context
 	Cluster string `json:"cluster"`
 	// AuthInfo is the name of the authInfo for this context
@@ -252,7 +252,7 @@ type ExecConfig struct {
 	// recommended as one of the prime benefits of exec plugins is that no secrets need
 	// to be stored directly in the kubeconfig.
 	// +k8s:conversion-gen=false
-	Config runtime.Object
+	Config runtime.Object `json:"-"`
 
 	// InteractiveMode determines this plugin's relationship with standard input. Valid
 	// values are "Never" (this exec plugin never uses standard input), "IfAvailable" (this
@@ -264,7 +264,7 @@ type ExecConfig struct {
 	// client.authentication.k8s.io/v1beta1, then this field is optional and defaults
 	// to "IfAvailable" when unset. Otherwise, this field is required.
 	// +optional
-	InteractiveMode ExecInteractiveMode
+	InteractiveMode ExecInteractiveMode `json:"interactiveMode,omitempty"`
 
 	// StdinUnavailable indicates whether the exec authenticator can pass standard
 	// input through to this exec plugin. For example, a higher level entity might be using
@@ -272,14 +272,14 @@ type ExecConfig struct {
 	// plugin to use standard input. This is kept here in order to keep all of the exec configuration
 	// together, but it is never serialized.
 	// +k8s:conversion-gen=false
-	StdinUnavailable bool
+	StdinUnavailable bool `json:"-"`
 
 	// StdinUnavailableMessage is an optional message to be displayed when the exec authenticator
 	// cannot successfully run this exec plugin because it needs to use standard input and
 	// StdinUnavailable is true. For example, a process that is already using standard input to
 	// read user instructions might set this to "used by my-program to read user instructions".
 	// +k8s:conversion-gen=false
-	StdinUnavailableMessage string
+	StdinUnavailableMessage string `json:"-"`
 }
 
 var _ fmt.Stringer = new(ExecConfig)
vendor/k8s.io/client-go/tools/clientcmd/loader.go | 24 changes (generated, vendored)

@@ -128,6 +128,28 @@ type ClientConfigLoadingRules struct {
 	// WarnIfAllMissing indicates whether the configuration files pointed by KUBECONFIG environment variable are present or not.
 	// In case of missing files, it warns the user about the missing files.
 	WarnIfAllMissing bool
+
+	// Warner is the warning log callback to use in case of missing files.
+	Warner WarningHandler
 }
+
+// WarningHandler allows to set the logging function to use
+type WarningHandler func(error)
+
+func (handler WarningHandler) Warn(err error) {
+	if handler == nil {
+		klog.V(1).Info(err)
+	} else {
+		handler(err)
+	}
+}
+
+type MissingConfigError struct {
+	Missing []string
+}
+
+func (c MissingConfigError) Error() string {
+	return fmt.Sprintf("Config not found: %s", strings.Join(c.Missing, ", "))
+}
 
 // ClientConfigLoadingRules implements the ClientConfigLoader interface.
@@ -219,7 +241,7 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) {
 	}
 
 	if rules.WarnIfAllMissing && len(missingList) > 0 && len(kubeconfigs) == 0 {
-		klog.Warningf("Config not found: %s", strings.Join(missingList, ", "))
+		rules.Warner.Warn(MissingConfigError{Missing: missingList})
 	}
 
 	// first merge all of our maps
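Load() now reports missing kubeconfig files through the pluggable Warner instead of calling klog directly, wrapping them in the typed MissingConfigError. A sketch of hooking it (illustrative):

package main

import (
	"log"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	rules := clientcmd.NewDefaultClientConfigLoadingRules()
	rules.WarnIfAllMissing = true
	// A nil Warner falls back to klog.V(1); a custom handler receives the
	// MissingConfigError value introduced by this change.
	rules.Warner = func(err error) { log.Printf("kubeconfig: %v", err) }

	if _, err := rules.Load(); err != nil {
		log.Fatal(err)
	}
}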
vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go | 5 changes (generated, vendored)

@@ -99,6 +99,11 @@ func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) {
 	if lec.Lock == nil {
 		return nil, fmt.Errorf("Lock must not be nil.")
 	}
+	id := lec.Lock.Identity()
+	if id == "" {
+		return nil, fmt.Errorf("Lock identity is empty")
+	}
+
 	le := LeaderElector{
 		config: lec,
 		clock:  clock.RealClock{},
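With the new guard, a lock whose ResourceLockConfig.Identity is empty fails fast in NewLeaderElector instead of misbehaving later during elections. A sketch of the required wiring (illustrative; clientset construction and the POD_NAME convention are assumptions):

package main

import (
	"context"
	"os"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func newElector(clientset kubernetes.Interface) (*leaderelection.LeaderElector, error) {
	lock := &resourcelock.LeaseLock{
		LeaseMeta: metav1.ObjectMeta{Namespace: "kube-system", Name: "my-controller"},
		Client:    clientset.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{
			// Must be non-empty now; NewLeaderElector returns an error otherwise.
			Identity: os.Getenv("POD_NAME"),
		},
	}
	return leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) { /* start work */ },
			OnStoppedLeading: func() { /* stop work */ },
		},
	})
}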
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go | 126 changes (generated, vendored, file deleted)

@@ -1,126 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package resourcelock
-
-import (
-	"context"
-	"encoding/json"
-	"errors"
-	"fmt"
-
-	"k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
-)
-
-// TODO: This is almost a exact replica of Endpoints lock.
-// going forwards as we self host more and more components
-// and use ConfigMaps as the means to pass that configuration
-// data we will likely move to deprecate the Endpoints lock.
-
-type configMapLock struct {
-	// ConfigMapMeta should contain a Name and a Namespace of a
-	// ConfigMapMeta object that the LeaderElector will attempt to lead.
-	ConfigMapMeta metav1.ObjectMeta
-	Client        corev1client.ConfigMapsGetter
-	LockConfig    ResourceLockConfig
-	cm            *v1.ConfigMap
-}
-
-// Get returns the election record from a ConfigMap Annotation
-func (cml *configMapLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
-	var record LeaderElectionRecord
-	cm, err := cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Get(ctx, cml.ConfigMapMeta.Name, metav1.GetOptions{})
-	if err != nil {
-		return nil, nil, err
-	}
-	cml.cm = cm
-	if cml.cm.Annotations == nil {
-		cml.cm.Annotations = make(map[string]string)
-	}
-	recordStr, found := cml.cm.Annotations[LeaderElectionRecordAnnotationKey]
-	recordBytes := []byte(recordStr)
-	if found {
-		if err := json.Unmarshal(recordBytes, &record); err != nil {
-			return nil, nil, err
-		}
-	}
-	return &record, recordBytes, nil
-}
-
-// Create attempts to create a LeaderElectionRecord annotation
-func (cml *configMapLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
-	recordBytes, err := json.Marshal(ler)
-	if err != nil {
-		return err
-	}
-	cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Create(ctx, &v1.ConfigMap{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      cml.ConfigMapMeta.Name,
-			Namespace: cml.ConfigMapMeta.Namespace,
-			Annotations: map[string]string{
-				LeaderElectionRecordAnnotationKey: string(recordBytes),
-			},
-		},
-	}, metav1.CreateOptions{})
-	return err
-}
-
-// Update will update an existing annotation on a given resource.
-func (cml *configMapLock) Update(ctx context.Context, ler LeaderElectionRecord) error {
-	if cml.cm == nil {
-		return errors.New("configmap not initialized, call get or create first")
-	}
-	recordBytes, err := json.Marshal(ler)
-	if err != nil {
-		return err
-	}
-	if cml.cm.Annotations == nil {
-		cml.cm.Annotations = make(map[string]string)
-	}
-	cml.cm.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
-	cm, err := cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(ctx, cml.cm, metav1.UpdateOptions{})
-	if err != nil {
-		return err
-	}
-	cml.cm = cm
-	return nil
-}
-
-// RecordEvent in leader election while adding meta-data
-func (cml *configMapLock) RecordEvent(s string) {
-	if cml.LockConfig.EventRecorder == nil {
-		return
-	}
-	events := fmt.Sprintf("%v %v", cml.LockConfig.Identity, s)
-	subject := &v1.ConfigMap{ObjectMeta: cml.cm.ObjectMeta}
-	// Populate the type meta, so we don't have to get it from the schema
-	subject.Kind = "ConfigMap"
-	subject.APIVersion = v1.SchemeGroupVersion.String()
-	cml.LockConfig.EventRecorder.Eventf(subject, v1.EventTypeNormal, "LeaderElection", events)
-}
-
-// Describe is used to convert details on current resource lock
-// into a string
-func (cml *configMapLock) Describe() string {
-	return fmt.Sprintf("%v/%v", cml.ConfigMapMeta.Namespace, cml.ConfigMapMeta.Name)
-}
-
-// Identity returns the Identity of the lock
-func (cml *configMapLock) Identity() string {
-	return cml.LockConfig.Identity
-}
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go | 121 changes (generated, vendored, file deleted)

@@ -1,121 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package resourcelock
-
-import (
-	"context"
-	"encoding/json"
-	"errors"
-	"fmt"
-
-	"k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
-)
-
-type endpointsLock struct {
-	// EndpointsMeta should contain a Name and a Namespace of an
-	// Endpoints object that the LeaderElector will attempt to lead.
-	EndpointsMeta metav1.ObjectMeta
-	Client        corev1client.EndpointsGetter
-	LockConfig    ResourceLockConfig
-	e             *v1.Endpoints
-}
-
-// Get returns the election record from a Endpoints Annotation
-func (el *endpointsLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
-	var record LeaderElectionRecord
-	ep, err := el.Client.Endpoints(el.EndpointsMeta.Namespace).Get(ctx, el.EndpointsMeta.Name, metav1.GetOptions{})
-	if err != nil {
-		return nil, nil, err
-	}
-	el.e = ep
-	if el.e.Annotations == nil {
-		el.e.Annotations = make(map[string]string)
-	}
-	recordStr, found := el.e.Annotations[LeaderElectionRecordAnnotationKey]
-	recordBytes := []byte(recordStr)
-	if found {
-		if err := json.Unmarshal(recordBytes, &record); err != nil {
-			return nil, nil, err
-		}
-	}
-	return &record, recordBytes, nil
-}
-
-// Create attempts to create a LeaderElectionRecord annotation
-func (el *endpointsLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
-	recordBytes, err := json.Marshal(ler)
-	if err != nil {
-		return err
-	}
-	el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Create(ctx, &v1.Endpoints{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      el.EndpointsMeta.Name,
-			Namespace: el.EndpointsMeta.Namespace,
-			Annotations: map[string]string{
-				LeaderElectionRecordAnnotationKey: string(recordBytes),
-			},
-		},
-	}, metav1.CreateOptions{})
-	return err
-}
-
-// Update will update and existing annotation on a given resource.
-func (el *endpointsLock) Update(ctx context.Context, ler LeaderElectionRecord) error {
-	if el.e == nil {
-		return errors.New("endpoint not initialized, call get or create first")
-	}
-	recordBytes, err := json.Marshal(ler)
-	if err != nil {
-		return err
-	}
-	if el.e.Annotations == nil {
-		el.e.Annotations = make(map[string]string)
-	}
-	el.e.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
-	e, err := el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(ctx, el.e, metav1.UpdateOptions{})
-	if err != nil {
-		return err
-	}
-	el.e = e
-	return nil
-}
-
-// RecordEvent in leader election while adding meta-data
-func (el *endpointsLock) RecordEvent(s string) {
-	if el.LockConfig.EventRecorder == nil {
-		return
-	}
-	events := fmt.Sprintf("%v %v", el.LockConfig.Identity, s)
-	subject := &v1.Endpoints{ObjectMeta: el.e.ObjectMeta}
-	// Populate the type meta, so we don't have to get it from the schema
-	subject.Kind = "Endpoints"
-	subject.APIVersion = v1.SchemeGroupVersion.String()
-	el.LockConfig.EventRecorder.Eventf(subject, v1.EventTypeNormal, "LeaderElection", events)
-}
-
-// Describe is used to convert details on current resource lock
-// into a string
-func (el *endpointsLock) Describe() string {
-	return fmt.Sprintf("%v/%v", el.EndpointsMeta.Namespace, el.EndpointsMeta.Name)
-}
-
-// Identity returns the Identity of the lock
-func (el *endpointsLock) Identity() string {
-	return el.LockConfig.Identity
-}
vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go | 42 changes (generated, vendored)

@@ -34,7 +34,7 @@ const (
 	endpointsResourceLock  = "endpoints"
 	configMapsResourceLock = "configmaps"
 	LeasesResourceLock     = "leases"
-	// When using EndpointsLeasesResourceLock, you need to ensure that
+	// When using endpointsLeasesResourceLock, you need to ensure that
 	// API Priority & Fairness is configured with non-default flow-schema
 	// that will catch the necessary operations on leader-election related
 	// endpoint objects.
@@ -67,8 +67,8 @@ const (
 	//   serviceAccount:
 	//     name: '*'
 	//     namespace: kube-system
-	EndpointsLeasesResourceLock = "endpointsleases"
-	// When using ConfigMapsLeasesResourceLock, you need to ensure that
+	endpointsLeasesResourceLock = "endpointsleases"
+	// When using configMapsLeasesResourceLock, you need to ensure that
 	// API Priority & Fairness is configured with non-default flow-schema
 	// that will catch the necessary operations on leader-election related
 	// configmap objects.
@@ -101,7 +101,7 @@ const (
 	//   serviceAccount:
 	//     name: '*'
 	//     namespace: kube-system
-	ConfigMapsLeasesResourceLock = "configmapsleases"
+	configMapsLeasesResourceLock = "configmapsleases"
 )
 
 // LeaderElectionRecord is the record that is stored in the leader election annotation.
@@ -164,22 +164,6 @@ type Interface interface {
 
 // Manufacture will create a lock of a given type according to the input parameters
 func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interface, coordinationClient coordinationv1.CoordinationV1Interface, rlc ResourceLockConfig) (Interface, error) {
-	endpointsLock := &endpointsLock{
-		EndpointsMeta: metav1.ObjectMeta{
-			Namespace: ns,
-			Name:      name,
-		},
-		Client:     coreClient,
-		LockConfig: rlc,
-	}
-	configmapLock := &configMapLock{
-		ConfigMapMeta: metav1.ObjectMeta{
-			Namespace: ns,
-			Name:      name,
-		},
-		Client:     coreClient,
-		LockConfig: rlc,
-	}
 	leaseLock := &LeaseLock{
 		LeaseMeta: metav1.ObjectMeta{
 			Namespace: ns,
@@ -190,21 +174,15 @@ func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interf
 	}
 	switch lockType {
 	case endpointsResourceLock:
-		return nil, fmt.Errorf("endpoints lock is removed, migrate to %s", EndpointsLeasesResourceLock)
+		return nil, fmt.Errorf("endpoints lock is removed, migrate to %s (using version v0.27.x)", endpointsLeasesResourceLock)
 	case configMapsResourceLock:
-		return nil, fmt.Errorf("configmaps lock is removed, migrate to %s", ConfigMapsLeasesResourceLock)
+		return nil, fmt.Errorf("configmaps lock is removed, migrate to %s (using version v0.27.x)", configMapsLeasesResourceLock)
 	case LeasesResourceLock:
 		return leaseLock, nil
-	case EndpointsLeasesResourceLock:
-		return &MultiLock{
-			Primary:   endpointsLock,
-			Secondary: leaseLock,
-		}, nil
-	case ConfigMapsLeasesResourceLock:
-		return &MultiLock{
-			Primary:   configmapLock,
-			Secondary: leaseLock,
-		}, nil
+	case endpointsLeasesResourceLock:
+		return nil, fmt.Errorf("endpointsleases lock is removed, migrate to %s", LeasesResourceLock)
+	case configMapsLeasesResourceLock:
+		return nil, fmt.Errorf("configmapsleases lock is removed, migrated to %s", LeasesResourceLock)
	default:
 		return nil, fmt.Errorf("Invalid lock-type %s", lockType)
 	}
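After this change resourcelock.New can only construct "leases" locks; the endpoints and configmap variants, and the transitional multilocks built on them, all return migration errors. A sketch of the surviving call (illustrative wrapper; clientset construction assumed):

package main

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func newLock(clientset kubernetes.Interface, identity string) (resourcelock.Interface, error) {
	// LeasesResourceLock is the only exported, constructible lock type left;
	// "endpointsleases" and "configmapsleases" now yield "lock is removed"
	// errors pointing at "leases" (older locks must hop through v0.27.x).
	return resourcelock.New(
		resourcelock.LeasesResourceLock,
		"kube-system", "my-controller",
		clientset.CoreV1(),
		clientset.CoordinationV1(),
		resourcelock.ResourceLockConfig{Identity: identity},
	)
}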
vendor/k8s.io/client-go/tools/metrics/metrics.go | 48 changes (generated, vendored)

@@ -42,6 +42,10 @@ type LatencyMetric interface {
 	Observe(ctx context.Context, verb string, u url.URL, latency time.Duration)
 }
 
+type ResolverLatencyMetric interface {
+	Observe(ctx context.Context, host string, latency time.Duration)
+}
+
 // SizeMetric observes client response size partitioned by verb and host.
 type SizeMetric interface {
 	Observe(ctx context.Context, verb string, host string, size float64)
@@ -64,6 +68,17 @@ type RetryMetric interface {
 	IncrementRetry(ctx context.Context, code string, method string, host string)
 }
 
+// TransportCacheMetric shows the number of entries in the internal transport cache
+type TransportCacheMetric interface {
+	Observe(value int)
+}
+
+// TransportCreateCallsMetric counts the number of times a transport is created
+// partitioned by the result of the cache: hit, miss, uncacheable
+type TransportCreateCallsMetric interface {
+	Increment(result string)
+}
+
 var (
 	// ClientCertExpiry is the expiry time of a client certificate
 	ClientCertExpiry ExpiryMetric = noopExpiry{}
@@ -71,6 +86,8 @@ var (
 	ClientCertRotationAge DurationMetric = noopDuration{}
 	// RequestLatency is the latency metric that rest clients will update.
 	RequestLatency LatencyMetric = noopLatency{}
+	// ResolverLatency is the latency metric that DNS resolver will update
+	ResolverLatency ResolverLatencyMetric = noopResolverLatency{}
 	// RequestSize is the request size metric that rest clients will update.
 	RequestSize SizeMetric = noopSize{}
 	// ResponseSize is the response size metric that rest clients will update.
@@ -85,6 +102,12 @@ var (
 	// RequestRetry is the retry metric that tracks the number of
 	// retries sent to the server.
 	RequestRetry RetryMetric = noopRetry{}
+	// TransportCacheEntries is the metric that tracks the number of entries in the
+	// internal transport cache.
+	TransportCacheEntries TransportCacheMetric = noopTransportCache{}
+	// TransportCreateCalls is the metric that counts the number of times a new transport
+	// is created
+	TransportCreateCalls TransportCreateCallsMetric = noopTransportCreateCalls{}
 )
 
 // RegisterOpts contains all the metrics to register. Metrics may be nil.
@@ -92,12 +115,15 @@ type RegisterOpts struct {
 	ClientCertExpiry      ExpiryMetric
 	ClientCertRotationAge DurationMetric
 	RequestLatency        LatencyMetric
+	ResolverLatency       ResolverLatencyMetric
 	RequestSize           SizeMetric
 	ResponseSize          SizeMetric
 	RateLimiterLatency    LatencyMetric
 	RequestResult         ResultMetric
 	ExecPluginCalls       CallsMetric
 	RequestRetry          RetryMetric
+	TransportCacheEntries TransportCacheMetric
+	TransportCreateCalls  TransportCreateCallsMetric
 }
 
 // Register registers metrics for the rest client to use. This can
@@ -113,6 +139,9 @@ func Register(opts RegisterOpts) {
 		if opts.RequestLatency != nil {
 			RequestLatency = opts.RequestLatency
 		}
+		if opts.ResolverLatency != nil {
+			ResolverLatency = opts.ResolverLatency
+		}
 		if opts.RequestSize != nil {
 			RequestSize = opts.RequestSize
 		}
@@ -131,6 +160,12 @@ func Register(opts RegisterOpts) {
 		if opts.RequestRetry != nil {
 			RequestRetry = opts.RequestRetry
 		}
+		if opts.TransportCacheEntries != nil {
+			TransportCacheEntries = opts.TransportCacheEntries
+		}
+		if opts.TransportCreateCalls != nil {
+			TransportCreateCalls = opts.TransportCreateCalls
+		}
 	})
 }
 
@@ -146,6 +181,11 @@ type noopLatency struct{}
 
 func (noopLatency) Observe(context.Context, string, url.URL, time.Duration) {}
 
+type noopResolverLatency struct{}
+
+func (n noopResolverLatency) Observe(ctx context.Context, host string, latency time.Duration) {
+}
+
 type noopSize struct{}
 
 func (noopSize) Observe(context.Context, string, string, float64) {}
@@ -161,3 +201,11 @@ func (noopCalls) Increment(int, string) {}
 type noopRetry struct{}
 
 func (noopRetry) IncrementRetry(context.Context, string, string, string) {}
+
+type noopTransportCache struct{}
+
+func (noopTransportCache) Observe(int) {}
+
+type noopTransportCreateCalls struct{}
+
+func (noopTransportCreateCalls) Increment(string) {}
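The two transport metrics are plain interfaces with no-op defaults, registered once through RegisterOpts like the existing ones. A toy sketch of wiring them up (illustrative; a real adapter would wrap Prometheus collectors):

package main

import (
	"k8s.io/client-go/tools/metrics"
)

type cacheGauge struct{ last int }

// Observe records the current number of entries in the transport cache.
func (g *cacheGauge) Observe(value int) { g.last = value }

type createCounter struct{ counts map[string]int }

// Increment counts transport creations by cache result: hit, miss, uncacheable.
func (c *createCounter) Increment(result string) { c.counts[result]++ }

func main() {
	// Fields left nil keep their no-op defaults; Register only takes effect
	// the first time it runs in a process.
	metrics.Register(metrics.RegisterOpts{
		TransportCacheEntries: &cacheGauge{},
		TransportCreateCalls:  &createCounter{counts: map[string]int{}},
	})
}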
vendor/k8s.io/client-go/tools/pager/pager.go | 36 changes (generated, vendored)

@@ -73,7 +73,23 @@ func New(fn ListPageFunc) *ListPager {
 // List returns a single list object, but attempts to retrieve smaller chunks from the
 // server to reduce the impact on the server. If the chunk attempt fails, it will load
 // the full list instead. The Limit field on options, if unset, will default to the page size.
+//
+// If items in the returned list are retained for different durations, and you want to avoid
+// retaining the whole slice returned by p.PageFn as long as any item is referenced,
+// use ListWithAlloc instead.
 func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runtime.Object, bool, error) {
+	return p.list(ctx, options, false)
+}
+
+// ListWithAlloc works like List, but avoids retaining references to the items slice returned by p.PageFn.
+// It does this by making a shallow copy of non-pointer items in the slice returned by p.PageFn.
+//
+// If the items in the returned list are not retained, or are retained for the same duration, use List instead for memory efficiency.
+func (p *ListPager) ListWithAlloc(ctx context.Context, options metav1.ListOptions) (runtime.Object, bool, error) {
+	return p.list(ctx, options, true)
+}
+
+func (p *ListPager) list(ctx context.Context, options metav1.ListOptions, allocNew bool) (runtime.Object, bool, error) {
 	if options.Limit == 0 {
 		options.Limit = p.PageSize
 	}
@@ -123,7 +139,11 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti
 			list.ResourceVersion = m.GetResourceVersion()
 			list.SelfLink = m.GetSelfLink()
 		}
-		if err := meta.EachListItem(obj, func(obj runtime.Object) error {
+		eachListItemFunc := meta.EachListItem
+		if allocNew {
+			eachListItemFunc = meta.EachListItemWithAlloc
+		}
+		if err := eachListItemFunc(obj, func(obj runtime.Object) error {
 			list.Items = append(list.Items, obj)
 			return nil
 		}); err != nil {
@@ -156,12 +176,26 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti
 //
 // Items are retrieved in chunks from the server to reduce the impact on the server with up to
 // ListPager.PageBufferSize chunks buffered concurrently in the background.
+//
+// If items passed to fn are retained for different durations, and you want to avoid
+// retaining the whole slice returned by p.PageFn as long as any item is referenced,
+// use EachListItemWithAlloc instead.
 func (p *ListPager) EachListItem(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error {
 	return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error {
 		return meta.EachListItem(obj, fn)
 	})
 }
 
+// EachListItemWithAlloc works like EachListItem, but avoids retaining references to the items slice returned by p.PageFn.
+// It does this by making a shallow copy of non-pointer items in the slice returned by p.PageFn.
+//
+// If the items passed to fn are not retained, or are retained for the same duration, use EachListItem instead for memory efficiency.
+func (p *ListPager) EachListItemWithAlloc(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error {
+	return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error {
+		return meta.EachListItemWithAlloc(obj, fn)
+	})
+}
+
 // eachListChunkBuffered fetches runtimeObject list chunks using this ListPager and invokes fn on
 // each list chunk. If fn returns an error, processing stops and that error is returned. If fn does
 // not return an error, any error encountered while retrieving the list from the server is
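The reflector now calls the *WithAlloc variants (see reflector.go above) so that keeping a few items alive no longer pins whole list chunks in memory. A sketch of the trade-off from the caller side (illustrative; clientset construction assumed elsewhere):

package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/pager"
)

func listPods(ctx context.Context, clientset kubernetes.Interface) error {
	p := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) {
		return clientset.CoreV1().Pods("").List(ctx, opts)
	})

	// ListWithAlloc shallow-copies non-pointer items out of each page, at the
	// cost of extra allocations; plain List is cheaper when the whole result
	// is retained (or dropped) together.
	list, paginated, err := p.ListWithAlloc(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	items, err := meta.ExtractList(list)
	if err != nil {
		return err
	}
	fmt.Println("items:", len(items), "paginated:", paginated)
	return nil
}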
vendor/k8s.io/client-go/tools/record/event.go | 5 changes (generated, vendored)

@@ -274,7 +274,7 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv
 		klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err)
 		return true
 	case *errors.StatusError:
-		if errors.IsAlreadyExists(err) {
+		if errors.IsAlreadyExists(err) || errors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
 			klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err)
 		} else {
 			klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err)
@@ -357,6 +357,9 @@ func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations m
 	event := recorder.makeEvent(ref, annotations, eventtype, reason, message)
 	event.Source = recorder.source
 
+	event.ReportingInstance = recorder.source.Host
+	event.ReportingController = recorder.source.Component
+
 	// NOTE: events should be a non-blocking operation, but we also need to not
 	// put this in a goroutine, otherwise we'll race to write to a closed channel
 	// when we go to shut down this broadcaster. Just drop events if we get overloaded,
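generateEvent now copies the recorder's source into the ReportingInstance and ReportingController fields, so events carry them with no extra caller work. A sketch of where those values originate (illustrative):

package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/record"
)

func newRecorder(nodeName string) record.EventRecorder {
	broadcaster := record.NewBroadcaster()
	// Component becomes ReportingController and Host becomes ReportingInstance
	// on every event this recorder emits after this change.
	return broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{
		Component: "my-controller",
		Host:      nodeName,
	})
}

func emit(rec record.EventRecorder, obj runtime.Object) {
	rec.Event(obj, v1.EventTypeNormal, "Reconciled", "object synced")
}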
vendor/k8s.io/client-go/tools/watch/retrywatcher.go | 7 changes (generated, vendored)

@@ -24,10 +24,9 @@ import (
 	"net/http"
 	"time"
 
-	"github.com/davecgh/go-spew/spew"
-
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/dump"
 	"k8s.io/apimachinery/pkg/util/net"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
@@ -191,7 +190,7 @@ func (rw *RetryWatcher) doReceive() (bool, time.Duration) {
 			errObject := apierrors.FromObject(event.Object)
 			statusErr, ok := errObject.(*apierrors.StatusError)
 			if !ok {
-				klog.Error(spew.Sprintf("Received an error which is not *metav1.Status but %#+v", event.Object))
+				klog.Error(fmt.Sprintf("Received an error which is not *metav1.Status but %s", dump.Pretty(event.Object)))
 				// Retry unknown errors
 				return false, 0
 			}
@@ -220,7 +219,7 @@ func (rw *RetryWatcher) doReceive() (bool, time.Duration) {
 
 			// Log here so we have a record of hitting the unexpected error
 			// and we can whitelist some error codes if we missed any that are expected.
-			klog.V(5).Info(spew.Sprintf("Retrying after unexpected error: %#+v", event.Object))
+			klog.V(5).Info(fmt.Sprintf("Retrying after unexpected error: %s", dump.Pretty(event.Object)))
 
 			// Retry
 			return false, statusDelay
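The retry watcher drops its direct go-spew import in favor of apimachinery's dump package, which produces a comparable deep-printed string. A sketch of the replacement call (illustrative):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/dump"
)

func main() {
	type event struct {
		Type   string
		Object any
	}
	// Before: klog.Error(spew.Sprintf("... %#+v", obj))
	// After:  klog.Error(fmt.Sprintf("... %s", dump.Pretty(obj)))
	fmt.Println(dump.Pretty(event{Type: "ERROR", Object: map[string]int{"code": 410}}))
}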