rebase: update kubernetes to v1.21.2

Updated Kubernetes packages to the latest release.
The resizefs package has been moved into the k8s.io/mount-utils
package; the code has been updated to use it from its new location.

Updates: #1968

Signed-off-by: Rakshith R <rar@redhat.com>
Rakshith R
2021-06-25 10:29:51 +05:30
committed by mergify[bot]
parent 8ce5ae16c1
commit 1b23d78113
1115 changed files with 98825 additions and 12365 deletions
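As context for the resizefs note above, here is a minimal sketch of the new call path, assuming the v1.21 k8s.io/mount-utils API (NewResizeFs takes an exec implementation; resizing is only supported on Linux builds). The device and mount paths are hypothetical:

```go
package main

import (
	"fmt"

	mount "k8s.io/mount-utils"
	utilexec "k8s.io/utils/exec"
)

func main() {
	// resizefs now ships inside k8s.io/mount-utils; the constructor only
	// needs an exec implementation for running resize2fs/xfs_growfs.
	resizer := mount.NewResizeFs(utilexec.New())

	// Hypothetical paths, purely for illustration.
	resized, err := resizer.Resize("/dev/rbd0", "/var/lib/kubelet/plugins/mount-point")
	if err != nil {
		fmt.Println("resize failed:", err)
		return
	}
	fmt.Println("filesystem resized:", resized)
}
```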

View File

@@ -20,7 +20,6 @@ reviewers:
- caesarxuchao
- mikedanese
- liggitt
- nikhiljindal
- erictune
- davidopp
- pmorie
@@ -29,7 +28,6 @@ reviewers:
- soltysh
- jsafrane
- dims
- madhusudancs
- hongchaodeng
- krousey
- xiang90

View File

@@ -26,54 +26,6 @@ import (
"k8s.io/klog/v2"
)
// NewDeltaFIFO returns a Queue which can be used to process changes to items.
//
// keyFunc is used to figure out what key an object should have. (It is
// exposed in the returned DeltaFIFO's KeyOf() method, with additional handling
// around deleted objects and queue state).
//
// 'knownObjects' may be supplied to modify the behavior of Delete,
// Replace, and Resync. It may be nil if you do not need those
// modifications.
//
// TODO: consider merging keyLister with this object, tracking a list of
// "known" keys when Pop() is called. Have to think about how that
// affects error retrying.
// NOTE: It is possible to misuse this and cause a race when using an
// external known object source.
// Whether there is a potential race depends on how the consumer
// modifies knownObjects. In Pop(), process function is called under
// lock, so it is safe to update data structures in it that need to be
// in sync with the queue (e.g. knownObjects).
//
// Example:
// In case of sharedIndexInformer being a consumer
// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/
// src/k8s.io/client-go/tools/cache/shared_informer.go#L192),
// there is no race as knownObjects (s.indexer) is modified safely
// under DeltaFIFO's lock. The only exceptions are GetStore() and
// GetIndexer() methods, which expose ways to modify the underlying
// storage. Currently these two methods are used for creating Lister
// and internal tests.
//
// Also see the comment on DeltaFIFO.
//
// Warning: This constructs a DeltaFIFO that does not differentiate between
// events caused by a call to Replace (e.g., from a relist, which may
// contain object updates), and synthetic events caused by a periodic resync
// (which just emit the existing object). See https://issue.k8s.io/86015 for details.
//
// Use `NewDeltaFIFOWithOptions(DeltaFIFOOptions{..., EmitDeltaTypeReplaced: true})`
// instead to receive a `Replaced` event depending on the type.
//
// Deprecated: Equivalent to NewDeltaFIFOWithOptions(DeltaFIFOOptions{KeyFunction: keyFunc, KnownObjects: knownObjects})
func NewDeltaFIFO(keyFunc KeyFunc, knownObjects KeyListerGetter) *DeltaFIFO {
return NewDeltaFIFOWithOptions(DeltaFIFOOptions{
KeyFunction: keyFunc,
KnownObjects: knownObjects,
})
}
// DeltaFIFOOptions is the configuration parameters for DeltaFIFO. All are
// optional.
type DeltaFIFOOptions struct {
@@ -99,25 +51,6 @@ type DeltaFIFOOptions struct {
EmitDeltaTypeReplaced bool
}
// NewDeltaFIFOWithOptions returns a Queue which can be used to process changes to
// items. See also the comment on DeltaFIFO.
func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO {
if opts.KeyFunction == nil {
opts.KeyFunction = MetaNamespaceKeyFunc
}
f := &DeltaFIFO{
items: map[string]Deltas{},
queue: []string{},
keyFunc: opts.KeyFunction,
knownObjects: opts.KnownObjects,
emitDeltaTypeReplaced: opts.EmitDeltaTypeReplaced,
}
f.cond.L = &f.lock
return f
}
// DeltaFIFO is like FIFO, but differs in two ways. One is that the
// accumulator associated with a given object's key is not that object
// but rather a Deltas, which is a slice of Delta values for that
@@ -128,8 +61,11 @@ func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO {
// Deleted if the older Deleted's object is a
// DeletedFinalStateUnknown.
//
// The other difference is that DeltaFIFO has an additional way that
// an object can be applied to an accumulator, called Sync.
// The other difference is that DeltaFIFO has two additional ways that
// an object can be applied to an accumulator: Replaced and Sync.
// If EmitDeltaTypeReplaced is not set to true, Sync will be used in
// replace events for backwards compatibility. Sync is used for periodic
// resync events.
//
// DeltaFIFO is a producer-consumer queue, where a Reflector is
// intended to be the producer, and the consumer is whatever calls
@@ -193,6 +129,107 @@ type DeltaFIFO struct {
emitDeltaTypeReplaced bool
}
// DeltaType is the type of a change (addition, deletion, etc)
type DeltaType string
// Change type definition
const (
Added DeltaType = "Added"
Updated DeltaType = "Updated"
Deleted DeltaType = "Deleted"
// Replaced is emitted when we encountered watch errors and had to do a
// relist. We don't know if the replaced object has changed.
//
// NOTE: Previous versions of DeltaFIFO would use Sync for Replace events
// as well. Hence, Replaced is only emitted when the option
// EmitDeltaTypeReplaced is true.
Replaced DeltaType = "Replaced"
// Sync is for synthetic events during a periodic resync.
Sync DeltaType = "Sync"
)
// Delta is a member of Deltas (a list of Delta objects) which
// in its turn is the type stored by a DeltaFIFO. It tells you what
// change happened, and the object's state after* that change.
//
// [*] Unless the change is a deletion, and then you'll get the final
// state of the object before it was deleted.
type Delta struct {
Type DeltaType
Object interface{}
}
// Deltas is a list of one or more 'Delta's to an individual object.
// The oldest delta is at index 0, the newest delta is the last one.
type Deltas []Delta
// NewDeltaFIFO returns a Queue which can be used to process changes to items.
//
// keyFunc is used to figure out what key an object should have. (It is
// exposed in the returned DeltaFIFO's KeyOf() method, with additional handling
// around deleted objects and queue state).
//
// 'knownObjects' may be supplied to modify the behavior of Delete,
// Replace, and Resync. It may be nil if you do not need those
// modifications.
//
// TODO: consider merging keyLister with this object, tracking a list of
// "known" keys when Pop() is called. Have to think about how that
// affects error retrying.
// NOTE: It is possible to misuse this and cause a race when using an
// external known object source.
// Whether there is a potential race depends on how the consumer
// modifies knownObjects. In Pop(), process function is called under
// lock, so it is safe to update data structures in it that need to be
// in sync with the queue (e.g. knownObjects).
//
// Example:
// In case of sharedIndexInformer being a consumer
// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/
// src/k8s.io/client-go/tools/cache/shared_informer.go#L192),
// there is no race as knownObjects (s.indexer) is modified safely
// under DeltaFIFO's lock. The only exceptions are GetStore() and
// GetIndexer() methods, which expose ways to modify the underlying
// storage. Currently these two methods are used for creating Lister
// and internal tests.
//
// Also see the comment on DeltaFIFO.
//
// Warning: This constructs a DeltaFIFO that does not differentiate between
// events caused by a call to Replace (e.g., from a relist, which may
// contain object updates), and synthetic events caused by a periodic resync
// (which just emit the existing object). See https://issue.k8s.io/86015 for details.
//
// Use `NewDeltaFIFOWithOptions(DeltaFIFOOptions{..., EmitDeltaTypeReplaced: true})`
// instead to receive a `Replaced` event depending on the type.
//
// Deprecated: Equivalent to NewDeltaFIFOWithOptions(DeltaFIFOOptions{KeyFunction: keyFunc, KnownObjects: knownObjects})
func NewDeltaFIFO(keyFunc KeyFunc, knownObjects KeyListerGetter) *DeltaFIFO {
return NewDeltaFIFOWithOptions(DeltaFIFOOptions{
KeyFunction: keyFunc,
KnownObjects: knownObjects,
})
}
// NewDeltaFIFOWithOptions returns a Queue which can be used to process changes to
// items. See also the comment on DeltaFIFO.
func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO {
if opts.KeyFunction == nil {
opts.KeyFunction = MetaNamespaceKeyFunc
}
f := &DeltaFIFO{
items: map[string]Deltas{},
queue: []string{},
keyFunc: opts.KeyFunction,
knownObjects: opts.KnownObjects,
emitDeltaTypeReplaced: opts.EmitDeltaTypeReplaced,
}
f.cond.L = &f.lock
return f
}
var (
_ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue
)
@@ -572,7 +609,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
f.populated = true
// While there shouldn't be any queued deletions in the initial
// population of the queue, it's better to be on the safe side.
f.initialPopulationCount = len(list) + queuedDeletions
f.initialPopulationCount = keys.Len() + queuedDeletions
}
return nil
@@ -602,7 +639,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
if !f.populated {
f.populated = true
f.initialPopulationCount = len(list) + queuedDeletions
f.initialPopulationCount = keys.Len() + queuedDeletions
}
return nil
@@ -673,39 +710,6 @@ type KeyGetter interface {
GetByKey(key string) (value interface{}, exists bool, err error)
}
// DeltaType is the type of a change (addition, deletion, etc)
type DeltaType string
// Change type definition
const (
Added DeltaType = "Added"
Updated DeltaType = "Updated"
Deleted DeltaType = "Deleted"
// Replaced is emitted when we encountered watch errors and had to do a
// relist. We don't know if the replaced object has changed.
//
// NOTE: Previous versions of DeltaFIFO would use Sync for Replace events
// as well. Hence, Replaced is only emitted when the option
// EmitDeltaTypeReplaced is true.
Replaced DeltaType = "Replaced"
// Sync is for synthetic events during a periodic resync.
Sync DeltaType = "Sync"
)
// Delta is the type stored by a DeltaFIFO. It tells you what change
// happened, and the object's state after* that change.
//
// [*] Unless the change is a deletion, and then you'll get the final
// state of the object before it was deleted.
type Delta struct {
Type DeltaType
Object interface{}
}
// Deltas is a list of one or more 'Delta's to an individual object.
// The oldest delta is at index 0, the newest delta is the last one.
type Deltas []Delta
// Oldest is a convenience function that returns the oldest delta, or
// nil if there are no deltas.
func (d Deltas) Oldest() *Delta {
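The constructor deprecation above steers consumers toward NewDeltaFIFOWithOptions. Below is a minimal sketch of opting into the Replaced delta type, using a hypothetical string-keyed queue instead of real API objects so it stays self-contained:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

func main() {
	// Hypothetical key function: the queued "objects" are plain strings.
	fifo := cache.NewDeltaFIFOWithOptions(cache.DeltaFIFOOptions{
		KeyFunction:           func(obj interface{}) (string, error) { return obj.(string), nil },
		EmitDeltaTypeReplaced: true,
	})

	// A relist replaces the queue contents; with EmitDeltaTypeReplaced set,
	// consumers observe Replaced rather than Sync for these items.
	if err := fifo.Replace([]interface{}{"pod-a"}, "42"); err != nil {
		panic(err)
	}

	if _, err := fifo.Pop(func(obj interface{}) error {
		for _, d := range obj.(cache.Deltas) {
			switch d.Type {
			case cache.Replaced:
				fmt.Println("relist delivered:", d.Object)
			case cache.Sync:
				fmt.Println("periodic resync:", d.Object)
			default:
				fmt.Println(d.Type, d.Object)
			}
		}
		return nil
	}); err != nil {
		panic(err)
	}
}
```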

View File

@@ -216,13 +216,13 @@ var internalPackages = []string{"client-go/tools/cache/"}
// objects and subsequent deltas.
// Run will exit when stopCh is closed.
func (r *Reflector) Run(stopCh <-chan struct{}) {
klog.V(2).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
klog.V(3).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
wait.BackoffUntil(func() {
if err := r.ListAndWatch(stopCh); err != nil {
r.watchErrorHandler(r, err)
}
}, r.backoffManager, true, stopCh)
klog.V(2).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
}
var (

View File

@@ -23,7 +23,7 @@ import (
"io/ioutil"
"os"
"golang.org/x/crypto/ssh/terminal"
"golang.org/x/term"
clientauth "k8s.io/client-go/tools/auth"
)
@@ -90,8 +90,8 @@ func promptForString(field string, r io.Reader, show bool) (result string, err e
_, err = fmt.Fscan(r, &result)
} else {
var data []byte
if terminal.IsTerminal(int(os.Stdin.Fd())) {
data, err = terminal.ReadPassword(int(os.Stdin.Fd()))
if term.IsTerminal(int(os.Stdin.Fd())) {
data, err = term.ReadPassword(int(os.Stdin.Fd()))
result = string(data)
} else {
return "", fmt.Errorf("error reading input for %s", field)

View File

@@ -198,13 +198,13 @@ func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) {
if err != nil {
return nil, err
}
mergo.MergeWithOverwrite(clientConfig, userAuthPartialConfig)
mergo.Merge(clientConfig, userAuthPartialConfig, mergo.WithOverride)
serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
if err != nil {
return nil, err
}
mergo.MergeWithOverwrite(clientConfig, serverAuthPartialConfig)
mergo.Merge(clientConfig, serverAuthPartialConfig, mergo.WithOverride)
}
return clientConfig, nil
@@ -225,7 +225,7 @@ func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo,
configClientConfig.CAData = configClusterInfo.CertificateAuthorityData
configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
configClientConfig.ServerName = configClusterInfo.TLSServerName
mergo.MergeWithOverwrite(mergedConfig, configClientConfig)
mergo.Merge(mergedConfig, configClientConfig, mergo.WithOverride)
return mergedConfig, nil
}
@@ -294,8 +294,8 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI
promptedConfig := makeUserIdentificationConfig(*promptedAuthInfo)
previouslyMergedConfig := mergedConfig
mergedConfig = &restclient.Config{}
mergo.MergeWithOverwrite(mergedConfig, promptedConfig)
mergo.MergeWithOverwrite(mergedConfig, previouslyMergedConfig)
mergo.Merge(mergedConfig, promptedConfig, mergo.WithOverride)
mergo.Merge(mergedConfig, previouslyMergedConfig, mergo.WithOverride)
config.promptedCredentials.username = mergedConfig.Username
config.promptedCredentials.password = mergedConfig.Password
}
@@ -463,12 +463,12 @@ func (config *DirectClientConfig) getContext() (clientcmdapi.Context, error) {
mergedContext := clientcmdapi.NewContext()
if configContext, exists := contexts[contextName]; exists {
mergo.MergeWithOverwrite(mergedContext, configContext)
mergo.Merge(mergedContext, configContext, mergo.WithOverride)
} else if required {
return clientcmdapi.Context{}, fmt.Errorf("context %q does not exist", contextName)
}
if config.overrides != nil {
mergo.MergeWithOverwrite(mergedContext, config.overrides.Context)
mergo.Merge(mergedContext, config.overrides.Context, mergo.WithOverride)
}
return *mergedContext, nil
@@ -481,12 +481,12 @@ func (config *DirectClientConfig) getAuthInfo() (clientcmdapi.AuthInfo, error) {
mergedAuthInfo := clientcmdapi.NewAuthInfo()
if configAuthInfo, exists := authInfos[authInfoName]; exists {
mergo.MergeWithOverwrite(mergedAuthInfo, configAuthInfo)
mergo.Merge(mergedAuthInfo, configAuthInfo, mergo.WithOverride)
} else if required {
return clientcmdapi.AuthInfo{}, fmt.Errorf("auth info %q does not exist", authInfoName)
}
if config.overrides != nil {
mergo.MergeWithOverwrite(mergedAuthInfo, config.overrides.AuthInfo)
mergo.Merge(mergedAuthInfo, config.overrides.AuthInfo, mergo.WithOverride)
}
return *mergedAuthInfo, nil
@@ -499,15 +499,15 @@ func (config *DirectClientConfig) getCluster() (clientcmdapi.Cluster, error) {
mergedClusterInfo := clientcmdapi.NewCluster()
if config.overrides != nil {
mergo.MergeWithOverwrite(mergedClusterInfo, config.overrides.ClusterDefaults)
mergo.Merge(mergedClusterInfo, config.overrides.ClusterDefaults, mergo.WithOverride)
}
if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {
mergo.MergeWithOverwrite(mergedClusterInfo, configClusterInfo)
mergo.Merge(mergedClusterInfo, configClusterInfo, mergo.WithOverride)
} else if required {
return clientcmdapi.Cluster{}, fmt.Errorf("cluster %q does not exist", clusterInfoName)
}
if config.overrides != nil {
mergo.MergeWithOverwrite(mergedClusterInfo, config.overrides.ClusterInfo)
mergo.Merge(mergedClusterInfo, config.overrides.ClusterInfo, mergo.WithOverride)
}
// * An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data
@@ -548,11 +548,12 @@ func (config *inClusterClientConfig) RawConfig() (clientcmdapi.Config, error) {
}
func (config *inClusterClientConfig) ClientConfig() (*restclient.Config, error) {
if config.inClusterConfigProvider == nil {
config.inClusterConfigProvider = restclient.InClusterConfig
inClusterConfigProvider := config.inClusterConfigProvider
if inClusterConfigProvider == nil {
inClusterConfigProvider = restclient.InClusterConfig
}
icc, err := config.inClusterConfigProvider()
icc, err := inClusterConfigProvider()
if err != nil {
return nil, err
}
@@ -572,7 +573,7 @@ func (config *inClusterClientConfig) ClientConfig() (*restclient.Config, error)
}
}
return icc, err
return icc, nil
}
func (config *inClusterClientConfig) Namespace() (string, bool, error) {
@@ -611,7 +612,7 @@ func (config *inClusterClientConfig) Possible() bool {
// to the default config.
func BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*restclient.Config, error) {
if kubeconfigPath == "" && masterUrl == "" {
klog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.")
klog.Warning("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.")
kubeconfig, err := restclient.InClusterConfig()
if err == nil {
return kubeconfig, nil
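All of the call sites in this file follow the same mechanical rewrite: mergo deprecated MergeWithOverwrite in favor of Merge with the WithOverride option. A self-contained sketch with a hypothetical Settings struct standing in for the client config types:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

// Settings is a hypothetical struct, purely for illustration.
type Settings struct {
	Server string
	Token  string
}

func main() {
	dst := Settings{Server: "https://old.example", Token: "keep-me"}
	src := Settings{Server: "https://new.example"}

	// Merge + WithOverride replaces the deprecated MergeWithOverwrite:
	// non-empty fields in src overwrite the corresponding fields in dst.
	if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", dst) // {Server:https://new.example Token:keep-me}
}
```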

View File

@@ -374,7 +374,7 @@ func (p *persister) Persist(config map[string]string) error {
authInfo, ok := newConfig.AuthInfos[p.user]
if ok && authInfo.AuthProvider != nil {
authInfo.AuthProvider.Config = config
ModifyConfig(p.configAccess, *newConfig, false)
return ModifyConfig(p.configAccess, *newConfig, false)
}
return nil
}

View File

@@ -18,10 +18,8 @@ package clientcmd
import (
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"reflect"
goruntime "runtime"
@@ -48,24 +46,24 @@ const (
)
var (
RecommendedConfigDir = path.Join(homedir.HomeDir(), RecommendedHomeDir)
RecommendedHomeFile = path.Join(RecommendedConfigDir, RecommendedFileName)
RecommendedSchemaFile = path.Join(RecommendedConfigDir, RecommendedSchemaName)
RecommendedConfigDir = filepath.Join(homedir.HomeDir(), RecommendedHomeDir)
RecommendedHomeFile = filepath.Join(RecommendedConfigDir, RecommendedFileName)
RecommendedSchemaFile = filepath.Join(RecommendedConfigDir, RecommendedSchemaName)
)
// currentMigrationRules returns a map that holds the history of recommended home directories used in previous versions.
// Any future changes to RecommendedHomeFile and related are expected to add a migration rule here, in order to make
// sure existing config files are migrated to their new locations properly.
func currentMigrationRules() map[string]string {
oldRecommendedHomeFile := path.Join(os.Getenv("HOME"), "/.kube/.kubeconfig")
oldRecommendedWindowsHomeFile := path.Join(os.Getenv("HOME"), RecommendedHomeDir, RecommendedFileName)
migrationRules := map[string]string{}
migrationRules[RecommendedHomeFile] = oldRecommendedHomeFile
var oldRecommendedHomeFileName string
if goruntime.GOOS == "windows" {
migrationRules[RecommendedHomeFile] = oldRecommendedWindowsHomeFile
oldRecommendedHomeFileName = RecommendedFileName
} else {
oldRecommendedHomeFileName = ".kubeconfig"
}
return map[string]string{
RecommendedHomeFile: filepath.Join(os.Getenv("HOME"), RecommendedHomeDir, oldRecommendedHomeFileName),
}
return migrationRules
}
type ClientConfigLoader interface {
@@ -227,7 +225,7 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) {
mapConfig := clientcmdapi.NewConfig()
for _, kubeconfig := range kubeconfigs {
mergo.MergeWithOverwrite(mapConfig, kubeconfig)
mergo.Merge(mapConfig, kubeconfig, mergo.WithOverride)
}
// merge all of the struct values in the reverse order so that priority is given correctly
@@ -235,14 +233,14 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) {
nonMapConfig := clientcmdapi.NewConfig()
for i := len(kubeconfigs) - 1; i >= 0; i-- {
kubeconfig := kubeconfigs[i]
mergo.MergeWithOverwrite(nonMapConfig, kubeconfig)
mergo.Merge(nonMapConfig, kubeconfig, mergo.WithOverride)
}
// since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and
// get the values we expect.
config := clientcmdapi.NewConfig()
mergo.MergeWithOverwrite(config, mapConfig)
mergo.MergeWithOverwrite(config, nonMapConfig)
mergo.Merge(config, mapConfig, mergo.WithOverride)
mergo.Merge(config, nonMapConfig, mergo.WithOverride)
if rules.ResolvePaths() {
if err := ResolveLocalPaths(config); err != nil {
@@ -283,20 +281,15 @@ func (rules *ClientConfigLoadingRules) Migrate() error {
return fmt.Errorf("cannot migrate %v to %v because it is a directory", source, destination)
}
in, err := os.Open(source)
data, err := ioutil.ReadFile(source)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(destination)
// destination is created with mode 0666 before umask
err = ioutil.WriteFile(destination, data, 0666)
if err != nil {
return err
}
defer out.Close()
if _, err = io.Copy(out, in); err != nil {
return err
}
}
return nil
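The Migrate hunk above replaces the Open/Create/io.Copy sequence with a whole-file read and write. A standalone sketch of the simplified pattern (the helper name is hypothetical):

```go
package main

import "io/ioutil"

// migrateFile (hypothetical helper) shows the simplified copy: read the
// whole source file, then write it out in a single call.
func migrateFile(source, destination string) error {
	data, err := ioutil.ReadFile(source)
	if err != nil {
		return err
	}
	// destination is created with mode 0666 before umask
	return ioutil.WriteFile(destination, data, 0666)
}

func main() {
	_ = migrateFile("/tmp/old-kubeconfig", "/tmp/new-kubeconfig")
}
```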

View File

@@ -7,7 +7,6 @@ reviewers:
- wojtek-t
- deads2k
- mikedanese
- gmarek
- timothysc
- ingvagabund
- resouer

View File

@@ -145,10 +145,13 @@ func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interf
}
// NewFromKubeconfig will create a lock of a given type according to the input parameters.
// The timeout set for a client used to contact Kubernetes should be lower than
// RenewDeadline to keep a single hung request from forcing a leader loss.
// Setting it to max(time.Second, RenewDeadline/2) is a reasonable heuristic.
func NewFromKubeconfig(lockType string, ns string, name string, rlc ResourceLockConfig, kubeconfig *restclient.Config, renewDeadline time.Duration) (Interface, error) {
// shallow copy, do not modify the kubeconfig
config := *kubeconfig
timeout := ((renewDeadline / time.Millisecond) / 2) * time.Millisecond
timeout := renewDeadline / 2
if timeout < time.Second {
timeout = time.Second
}
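In other words, the client timeout becomes max(time.Second, renewDeadline/2) instead of a truncation-based computation. A hedged sketch of calling the new constructor, with a hypothetical kubeconfig path, namespace, lock name, and identity:

```go
package main

import (
	"time"

	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func main() {
	// Hypothetical kubeconfig path, for illustration only.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/admin.conf")
	if err != nil {
		panic(err)
	}

	// With renewDeadline = 10s, the client timeout becomes
	// max(time.Second, 10s/2) = 5s per the heuristic above.
	lock, err := resourcelock.NewFromKubeconfig(
		resourcelock.LeasesResourceLock,
		"kube-system",
		"example-controller",
		resourcelock.ResourceLockConfig{Identity: "holder-1"},
		cfg,
		10*time.Second,
	)
	if err != nil {
		panic(err)
	}
	_ = lock
}
```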

View File

@@ -19,6 +19,7 @@ limitations under the License.
package metrics
import (
"context"
"net/url"
"sync"
"time"
@@ -38,12 +39,18 @@ type ExpiryMetric interface {
// LatencyMetric observes client latency partitioned by verb and url.
type LatencyMetric interface {
Observe(verb string, u url.URL, latency time.Duration)
Observe(ctx context.Context, verb string, u url.URL, latency time.Duration)
}
// ResultMetric counts response codes partitioned by method and host.
type ResultMetric interface {
Increment(code string, method string, host string)
Increment(ctx context.Context, code string, method string, host string)
}
// CallsMetric counts calls that take place for a specific exec plugin.
type CallsMetric interface {
// Increment increments a counter per exitCode and callStatus.
Increment(exitCode int, callStatus string)
}
var (
@@ -57,6 +64,9 @@ var (
RateLimiterLatency LatencyMetric = noopLatency{}
// RequestResult is the result metric that rest clients will update.
RequestResult ResultMetric = noopResult{}
// ExecPluginCalls is the number of calls made to an exec plugin, partitioned by
// exit code and call status.
ExecPluginCalls CallsMetric = noopCalls{}
)
// RegisterOpts contains all the metrics to register. Metrics may be nil.
@@ -66,6 +76,7 @@ type RegisterOpts struct {
RequestLatency LatencyMetric
RateLimiterLatency LatencyMetric
RequestResult ResultMetric
ExecPluginCalls CallsMetric
}
// Register registers metrics for the rest client to use. This can
@@ -87,6 +98,9 @@ func Register(opts RegisterOpts) {
if opts.RequestResult != nil {
RequestResult = opts.RequestResult
}
if opts.ExecPluginCalls != nil {
ExecPluginCalls = opts.ExecPluginCalls
}
})
}
@@ -100,8 +114,12 @@ func (noopExpiry) Set(*time.Time) {}
type noopLatency struct{}
func (noopLatency) Observe(string, url.URL, time.Duration) {}
func (noopLatency) Observe(context.Context, string, url.URL, time.Duration) {}
type noopResult struct{}
func (noopResult) Increment(string, string, string) {}
func (noopResult) Increment(context.Context, string, string, string) {}
type noopCalls struct{}
func (noopCalls) Increment(int, string) {}
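Under the new interfaces, custom metric sinks must accept a context, and an ExecPluginCalls implementation can be registered alongside them. A minimal sketch with toy implementations that just print; a real integration would back these with Prometheus collectors:

```go
package main

import (
	"context"
	"fmt"
	"net/url"
	"time"

	"k8s.io/client-go/tools/metrics"
)

// execCalls is a toy CallsMetric, partitioned by exit code and call status.
type execCalls struct{}

func (execCalls) Increment(exitCode int, callStatus string) {
	fmt.Printf("exec plugin call: exit=%d status=%s\n", exitCode, callStatus)
}

// latency implements the new context-aware LatencyMetric signature.
type latency struct{}

func (latency) Observe(_ context.Context, verb string, u url.URL, d time.Duration) {
	fmt.Printf("%s %s took %s\n", verb, u.Host, d)
}

func main() {
	// Register runs once; nil fields keep their no-op defaults.
	metrics.Register(metrics.RegisterOpts{
		RequestLatency:  latency{},
		ExecPluginCalls: execCalls{},
	})
}
```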

View File

@@ -1,28 +1,6 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- lavalamp
- smarterclayton
- wojtek-t
- deads2k
- derekwaynecarr
- caesarxuchao
- vishh
- mikedanese
- liggitt
- nikhiljindal
- erictune
- pmorie
- dchen1107
- saad-ali
- luxas
- yifan-gu
- mwielgus
- timothysc
- jsafrane
- dims
- krousey
- a-robinson
- aveshagarwal
- resouer
- cjcullen
- sig-instrumentation-reviewers
approvers:
- sig-instrumentation-approvers

View File

@@ -155,21 +155,21 @@ func (a *EventRecorderAdapter) Eventf(regarding, _ runtime.Object, eventtype, re
// Creates a new event broadcaster.
func NewBroadcaster() EventBroadcaster {
return &eventBroadcasterImpl{
Broadcaster: watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull),
Broadcaster: watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull),
sleepDuration: defaultSleepDuration,
}
}
func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster {
return &eventBroadcasterImpl{
Broadcaster: watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull),
Broadcaster: watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull),
sleepDuration: sleepDuration,
}
}
func NewBroadcasterWithCorrelatorOptions(options CorrelatorOptions) EventBroadcaster {
return &eventBroadcasterImpl{
Broadcaster: watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull),
Broadcaster: watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull),
sleepDuration: defaultSleepDuration,
options: options,
}
@@ -323,7 +323,7 @@ type recorderImpl struct {
clock clock.Clock
}
func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, timestamp metav1.Time, eventtype, reason, message string) {
func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, eventtype, reason, message string) {
ref, err := ref.GetReference(recorder.scheme, object)
if err != nil {
klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message)
@@ -338,15 +338,18 @@ func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations m
event := recorder.makeEvent(ref, annotations, eventtype, reason, message)
event.Source = recorder.source
go func() {
// NOTE: events should be a non-blocking operation
defer utilruntime.HandleCrash()
recorder.Action(watch.Added, event)
}()
// NOTE: events should be a non-blocking operation, but we also need to not
// put this in a goroutine, otherwise we'll race to write to a closed channel
// when we go to shut down this broadcaster. Just drop events if we get overloaded,
// and log an error if that happens (we've configured the broadcaster to drop
// outgoing events anyway).
if sent := recorder.ActionOrDrop(watch.Added, event); !sent {
klog.Errorf("unable to record event: too many queued events, dropped event %#v", event)
}
}
func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) {
recorder.generateEvent(object, nil, metav1.Now(), eventtype, reason, message)
recorder.generateEvent(object, nil, eventtype, reason, message)
}
func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
@@ -354,7 +357,7 @@ func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, m
}
func (recorder *recorderImpl) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) {
recorder.generateEvent(object, annotations, metav1.Now(), eventtype, reason, fmt.Sprintf(messageFmt, args...))
recorder.generateEvent(object, annotations, eventtype, reason, fmt.Sprintf(messageFmt, args...))
}
func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, annotations map[string]string, eventtype, reason, message string) *v1.Event {
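The broadcaster pieces used above come from k8s.io/apimachinery/pkg/watch: NewLongQueueBroadcaster buffers more incoming events, and ActionOrDrop enqueues without blocking, returning false when the queue is full. A minimal sketch; the nil object is only a placeholder for a real event object:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	// A longer incoming queue plus a non-blocking enqueue: ActionOrDrop
	// returns false instead of blocking when the queue is full.
	b := watch.NewLongQueueBroadcaster(1000, watch.DropIfChannelFull)
	defer b.Shutdown()

	var event runtime.Object // nil placeholder; callers pass a real object
	if sent := b.ActionOrDrop(watch.Added, event); !sent {
		fmt.Println("too many queued events, dropped")
	}
}
```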

View File

@@ -142,6 +142,10 @@ func (p *streamProtocolV2) copyStdout(wg *sync.WaitGroup) {
go func() {
defer runtime.HandleCrash()
defer wg.Done()
// Make sure the packets in the queue can be consumed: a blocked
// queue may lead to a deadlock in conn.server.
// Issue: https://github.com/kubernetes/kubernetes/issues/96339
defer io.Copy(ioutil.Discard, p.remoteStdout)
if _, err := io.Copy(p.Stdout, p.remoteStdout); err != nil {
runtime.HandleError(err)
@@ -158,6 +162,7 @@ func (p *streamProtocolV2) copyStderr(wg *sync.WaitGroup) {
go func() {
defer runtime.HandleCrash()
defer wg.Done()
defer io.Copy(ioutil.Discard, p.remoteStderr)
if _, err := io.Copy(p.Stderr, p.remoteStderr); err != nil {
runtime.HandleError(err)
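Both hunks apply the same defensive pattern: always drain the remote stream so the connection's demux loop can keep delivering packets even after the local copy stops early. A generic, self-contained sketch of that pattern (the helper name is hypothetical):

```go
package main

import (
	"io"
	"io/ioutil"
	"strings"
)

// copyAndDrain (hypothetical helper) copies src into dst and then, no
// matter how the copy ended, drains whatever remains in src so the
// producer behind it never blocks on an unread stream.
func copyAndDrain(dst io.Writer, src io.Reader) error {
	defer io.Copy(ioutil.Discard, src) // no-op at EOF, crucial after an early write error
	_, err := io.Copy(dst, src)
	return err
}

func main() {
	var sb strings.Builder
	_ = copyAndDrain(&sb, strings.NewReader("remote stream contents"))
}
```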

View File

@@ -127,7 +127,7 @@ func NewIndexerInformerWatcher(lw cache.ListerWatcher, objType runtime.Object) (
// We have no means of passing the additional information down using
// watch API based on watch.Event but the caller can filter such
// objects by checking if metadata.deletionTimestamp is set
obj = staleObj
obj = staleObj.Obj
}
e.push(watch.Event{
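The one-character fix above unwraps the DeletedFinalStateUnknown tombstone instead of pushing the tombstone itself into the watch event. A small sketch of that unwrapping (the helper name is hypothetical):

```go
package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

// unwrapTombstone (hypothetical helper) mirrors the fix: take the last
// known object out of a DeletedFinalStateUnknown tombstone instead of
// forwarding the tombstone itself.
func unwrapTombstone(obj interface{}) interface{} {
	if stale, ok := obj.(cache.DeletedFinalStateUnknown); ok {
		return stale.Obj
	}
	return obj
}

func main() {
	tomb := cache.DeletedFinalStateUnknown{Key: "ns/name", Obj: "last known state"}
	fmt.Println(unwrapTombstone(tomb)) // prints the wrapped object, not the tombstone
}
```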

View File

@@ -116,24 +116,24 @@ func (rw *RetryWatcher) doReceive() (bool, time.Duration) {
return false, 0
case io.ErrUnexpectedEOF:
klog.V(1).Infof("Watch closed with unexpected EOF: %v", err)
klog.V(1).InfoS("Watch closed with unexpected EOF", "err", err)
return false, 0
default:
msg := "Watch failed: %v"
msg := "Watch failed"
if net.IsProbableEOF(err) || net.IsTimeout(err) {
klog.V(5).Infof(msg, err)
klog.V(5).InfoS(msg, "err", err)
// Retry
return false, 0
}
klog.Errorf(msg, err)
klog.ErrorS(err, msg)
// Retry
return false, 0
}
if watcher == nil {
klog.Error("Watch returned nil watcher")
klog.ErrorS(nil, "Watch returned nil watcher")
// Retry
return false, 0
}
@@ -144,11 +144,11 @@ func (rw *RetryWatcher) doReceive() (bool, time.Duration) {
for {
select {
case <-rw.stopChan:
klog.V(4).Info("Stopping RetryWatcher.")
klog.V(4).InfoS("Stopping RetryWatcher.")
return true, 0
case event, ok := <-ch:
if !ok {
klog.V(4).Infof("Failed to get event! Re-creating the watcher. Last RV: %s", rw.lastResourceVersion)
klog.V(4).InfoS("Failed to get event! Re-creating the watcher.", "resourceVersion", rw.lastResourceVersion)
return false, 0
}
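The RetryWatcher hunks migrate from format-string logging (Infof/Errorf) to klog's structured variants (InfoS/ErrorS): a constant message plus key/value pairs. A minimal sketch of the same calls in isolation:

```go
package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	defer klog.Flush()

	err := errors.New("connection reset by peer")

	// Structured variants take a constant message plus key/value pairs,
	// so log lines stay machine-parseable across call sites.
	klog.ErrorS(err, "Watch failed")
	klog.V(1).InfoS("Watch closed with unexpected EOF", "err", err)
	klog.V(4).InfoS("Failed to get event! Re-creating the watcher.", "resourceVersion", "12345")
}
```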