Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 02:33:34 +00:00

commit f080b9e0c9 (parent 328a264202), committed by mergify[bot]

    rebase: update k8s.io packages to v0.29.0

    Signed-off-by: Niels de Vos <ndevos@ibm.com>
22 changes: vendor/k8s.io/client-go/tools/cache/reflector.go (generated, vendored)
@@ -334,12 +334,9 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 			return nil
 		}
 		if err != nil {
-			if !apierrors.IsInvalid(err) {
-				return err
-			}
-			klog.Warning("the watch-list feature is not supported by the server, falling back to the previous LIST/WATCH semantic")
+			klog.Warningf("The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = %v", err)
 			fallbackToList = true
-			// Ensure that we won't accidentally pass some garbage down the watch.
+			// ensure that we won't accidentally pass some garbage down the watch.
 			w = nil
 		}
 	}
@@ -351,6 +348,8 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 		}
 	}
 
+	klog.V(2).Infof("Caches populated for %v from %s", r.typeDescription, r.name)
+
 	resyncerrc := make(chan error, 1)
 	cancelCh := make(chan struct{})
 	defer close(cancelCh)
@@ -395,6 +394,11 @@ func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc
 	// give the stopCh a chance to stop the loop, even in case of continue statements further down on errors
 	select {
 	case <-stopCh:
+		// we can only end up here when the stopCh
+		// was closed after a successful watchlist or list request
+		if w != nil {
+			w.Stop()
+		}
 		return nil
 	default:
 	}
@@ -670,6 +674,12 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) {
 	// "k8s.io/initial-events-end" bookmark.
 	initTrace.Step("Objects streamed", trace.Field{Key: "count", Value: len(temporaryStore.List())})
 	r.setIsLastSyncResourceVersionUnavailable(false)
+
+	// we utilize the temporaryStore to ensure independence from the current store implementation.
+	// as of today, the store is implemented as a queue and will be drained by the higher-level
+	// component as soon as it finishes replacing the content.
+	checkWatchListConsistencyIfRequested(stopCh, r.name, resourceVersion, r.listerWatcher, temporaryStore)
+
 	if err = r.store.Replace(temporaryStore.List(), resourceVersion); err != nil {
 		return nil, fmt.Errorf("unable to sync watch-list result: %v", err)
 	}
@@ -762,7 +772,7 @@ loop:
 			}
 		case watch.Bookmark:
 			// A `Bookmark` means watch has synced here, just update the resourceVersion
-			if _, ok := meta.GetAnnotations()["k8s.io/initial-events-end"]; ok {
+			if meta.GetAnnotations()["k8s.io/initial-events-end"] == "true" {
 				if exitOnInitialEventsEndBookmark != nil {
 					*exitOnInitialEventsEndBookmark = true
 				}
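For reference, this is how a consumer opts in to the streaming watch-list path that the hunks above harden: any error on the initial watch-list request now falls back to classic LIST/WATCH instead of aborting the sync loop. A minimal sketch, not vendored code; the exported UseWatchList bool field is an assumption based on the v0.29.0 reflector.

package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// runPodReflector opts a reflector in to the streaming watch-list path.
func runPodReflector(cs kubernetes.Interface, stopCh <-chan struct{}) {
	lw := cache.NewListWatchFromClient(cs.CoreV1().RESTClient(), "pods", "", fields.Everything())
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	r := cache.NewReflector(lw, &v1.Pod{}, store, 0)
	r.UseWatchList = true // assumption: exported bool field, as vendored at v0.29.0
	r.Run(stopCh)
}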
119 changes: vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go (generated, vendored, new file)
@@ -0,0 +1,119 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"context"
+	"os"
+	"sort"
+	"strconv"
+	"time"
+
+	"github.com/google/go-cmp/cmp"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/klog/v2"
+)
+
+var dataConsistencyDetectionEnabled = false
+
+func init() {
+	dataConsistencyDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_WATCHLIST_INCONSISTENCY_DETECTOR"))
+}
+
+// checkWatchListConsistencyIfRequested performs a data consistency check only when
+// the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup.
+//
+// The consistency check is meant to be enforced only in the CI, not in production.
+// The check ensures that data retrieved by the watch-list api call
+// is exactly the same as data received by the standard list api call.
+//
+// Note that this function will panic when data inconsistency is detected.
+// This is intentional because we want to catch it in the CI.
+func checkWatchListConsistencyIfRequested(stopCh <-chan struct{}, identity string, lastSyncedResourceVersion string, listerWatcher Lister, store Store) {
+	if !dataConsistencyDetectionEnabled {
+		return
+	}
+	checkWatchListConsistency(stopCh, identity, lastSyncedResourceVersion, listerWatcher, store)
+}
+
+// checkWatchListConsistency exists solely for testing purposes.
+// we cannot use checkWatchListConsistencyIfRequested because
+// it is guarded by an environmental variable.
+// we cannot manipulate the environmental variable because
+// it will affect other tests in this package.
+func checkWatchListConsistency(stopCh <-chan struct{}, identity string, lastSyncedResourceVersion string, listerWatcher Lister, store Store) {
+	klog.Warningf("%s: data consistency check for the watch-list feature is enabled, this will result in an additional call to the API server.", identity)
+	opts := metav1.ListOptions{
+		ResourceVersion:      lastSyncedResourceVersion,
+		ResourceVersionMatch: metav1.ResourceVersionMatchExact,
+	}
+	var list runtime.Object
+	err := wait.PollUntilContextCancel(wait.ContextForChannel(stopCh), time.Second, true, func(_ context.Context) (done bool, err error) {
+		list, err = listerWatcher.List(opts)
+		if err != nil {
+			// the consistency check will only be enabled in the CI
+			// and LIST calls in general will be retried by the client-go library
+			// if we fail simply log and retry
+			klog.Errorf("failed to list data from the server, retrying until stopCh is closed, err: %v", err)
+			return false, nil
+		}
+		return true, nil
+	})
+	if err != nil {
+		klog.Errorf("failed to list data from the server, the watch-list consistency check won't be performed, stopCh was closed, err: %v", err)
+		return
+	}
+
+	rawListItems, err := meta.ExtractListWithAlloc(list)
+	if err != nil {
+		panic(err) // this should never happen
+	}
+
+	listItems := toMetaObjectSliceOrDie(rawListItems)
+	storeItems := toMetaObjectSliceOrDie(store.List())
+
+	sort.Sort(byUID(listItems))
+	sort.Sort(byUID(storeItems))
+
+	if !cmp.Equal(listItems, storeItems) {
+		klog.Infof("%s: data received by the new watch-list api call is different than received by the standard list api call, diff: %v", identity, cmp.Diff(listItems, storeItems))
+		msg := "data inconsistency detected for the watch-list feature, panicking!"
+		panic(msg)
+	}
+}
+
+type byUID []metav1.Object
+
+func (a byUID) Len() int           { return len(a) }
+func (a byUID) Less(i, j int) bool { return a[i].GetUID() < a[j].GetUID() }
+func (a byUID) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+func toMetaObjectSliceOrDie[T any](s []T) []metav1.Object {
+	result := make([]metav1.Object, len(s))
+	for i, v := range s {
+		m, err := meta.Accessor(v)
+		if err != nil {
+			panic(err)
+		}
+		result[i] = m
+	}
+	return result
+}
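The detector reads KUBE_WATCHLIST_INCONSISTENCY_DETECTOR exactly once, in init(), so it has to be set before the test binary starts. A sketch of a CI harness doing that; the binary name is hypothetical.

package main

import (
	"os"
	"os/exec"
)

// Launch the test binary with the detector enabled; setting the variable
// inside the same process after startup would have no effect.
func main() {
	cmd := exec.Command("./e2e.test")
	cmd.Env = append(os.Environ(), "KUBE_WATCHLIST_INCONSISTENCY_DETECTOR=true")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		os.Exit(1)
	}
}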
2 changes: vendor/k8s.io/client-go/tools/cache/shared_informer.go (generated, vendored)
@@ -334,11 +334,9 @@ func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool
 		},
 		stopCh)
 	if err != nil {
 		klog.V(2).Infof("stop requested")
 		return false
 	}
-
-	klog.V(4).Infof("caches populated")
 	return true
 }
4 changes: vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go (generated, vendored)
@@ -49,12 +49,12 @@ type InClusterConfig interface {
 	Possible() bool
 }
 
-// NewNonInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name
+// NewNonInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed context name
 func NewNonInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides) ClientConfig {
 	return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}}
 }
 
-// NewInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name and the fallback auth reader
+// NewInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed context name and the fallback auth reader
 func NewInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig {
 	return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}, fallbackReader: fallbackReader}
 }
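The constructor whose doc comment is corrected above is typically used like this (standard client-go API; a sketch only):

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
)

// Defer kubeconfig loading (with in-cluster fallback) until ClientConfig()
// is actually called.
func main() {
	rules := clientcmd.NewDefaultClientConfigLoadingRules()
	cc := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, &clientcmd.ConfigOverrides{})
	restCfg, err := cc.ClientConfig()
	if err != nil {
		fmt.Println("no usable config:", err)
		return
	}
	fmt.Println("API server:", restCfg.Host)
}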
121 changes: vendor/k8s.io/client-go/tools/events/event_broadcaster.go (generated, vendored)
@@ -81,27 +81,27 @@ type EventSinkImpl struct {
 }
 
 // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any.
-func (e *EventSinkImpl) Create(event *eventsv1.Event) (*eventsv1.Event, error) {
+func (e *EventSinkImpl) Create(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) {
 	if event.Namespace == "" {
 		return nil, fmt.Errorf("can't create an event with empty namespace")
 	}
-	return e.Interface.Events(event.Namespace).Create(context.TODO(), event, metav1.CreateOptions{})
+	return e.Interface.Events(event.Namespace).Create(ctx, event, metav1.CreateOptions{})
 }
 
 // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any.
-func (e *EventSinkImpl) Update(event *eventsv1.Event) (*eventsv1.Event, error) {
+func (e *EventSinkImpl) Update(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) {
 	if event.Namespace == "" {
 		return nil, fmt.Errorf("can't update an event with empty namespace")
 	}
-	return e.Interface.Events(event.Namespace).Update(context.TODO(), event, metav1.UpdateOptions{})
+	return e.Interface.Events(event.Namespace).Update(ctx, event, metav1.UpdateOptions{})
 }
 
 // Patch applies the patch and returns the patched event, and an error, if there is any.
-func (e *EventSinkImpl) Patch(event *eventsv1.Event, data []byte) (*eventsv1.Event, error) {
+func (e *EventSinkImpl) Patch(ctx context.Context, event *eventsv1.Event, data []byte) (*eventsv1.Event, error) {
 	if event.Namespace == "" {
 		return nil, fmt.Errorf("can't patch an event with empty namespace")
 	}
-	return e.Interface.Events(event.Namespace).Patch(context.TODO(), event.Name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
+	return e.Interface.Events(event.Namespace).Patch(ctx, event.Name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
 }
 
 // NewBroadcaster Creates a new event broadcaster.
@@ -124,13 +124,13 @@ func (e *eventBroadcasterImpl) Shutdown() {
 }
 
 // refreshExistingEventSeries refresh events TTL
-func (e *eventBroadcasterImpl) refreshExistingEventSeries() {
+func (e *eventBroadcasterImpl) refreshExistingEventSeries(ctx context.Context) {
 	// TODO: Investigate whether lock contention won't be a problem
 	e.mu.Lock()
 	defer e.mu.Unlock()
 	for isomorphicKey, event := range e.eventCache {
 		if event.Series != nil {
-			if recordedEvent, retry := recordEvent(e.sink, event); !retry {
+			if recordedEvent, retry := recordEvent(ctx, e.sink, event); !retry {
 				if recordedEvent != nil {
 					e.eventCache[isomorphicKey] = recordedEvent
 				}
@@ -142,7 +142,7 @@ func (e *eventBroadcasterImpl) refreshExistingEventSeries() {
 // finishSeries checks if a series has ended and either:
 // - write final count to the apiserver
 // - delete a singleton event (i.e. series field is nil) from the cache
-func (e *eventBroadcasterImpl) finishSeries() {
+func (e *eventBroadcasterImpl) finishSeries(ctx context.Context) {
 	// TODO: Investigate whether lock contention won't be a problem
 	e.mu.Lock()
 	defer e.mu.Unlock()
@@ -150,7 +150,7 @@ func (e *eventBroadcasterImpl) finishSeries() {
 		eventSerie := event.Series
 		if eventSerie != nil {
 			if eventSerie.LastObservedTime.Time.Before(time.Now().Add(-finishTime)) {
-				if _, retry := recordEvent(e.sink, event); !retry {
+				if _, retry := recordEvent(ctx, e.sink, event); !retry {
 					delete(e.eventCache, isomorphicKey)
 				}
 			}
@@ -161,13 +161,13 @@ func (e *eventBroadcasterImpl) finishSeries() {
 }
 
 // NewRecorder returns an EventRecorder that records events with the given event source.
-func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorder {
+func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorderLogger {
 	hostname, _ := os.Hostname()
 	reportingInstance := reportingController + "-" + hostname
-	return &recorderImpl{scheme, reportingController, reportingInstance, e.Broadcaster, clock.RealClock{}}
+	return &recorderImplLogger{recorderImpl: &recorderImpl{scheme, reportingController, reportingInstance, e.Broadcaster, clock.RealClock{}}, logger: klog.Background()}
 }
 
-func (e *eventBroadcasterImpl) recordToSink(event *eventsv1.Event, clock clock.Clock) {
+func (e *eventBroadcasterImpl) recordToSink(ctx context.Context, event *eventsv1.Event, clock clock.Clock) {
 	// Make a copy before modification, because there could be multiple listeners.
 	eventCopy := event.DeepCopy()
 	go func() {
@@ -197,7 +197,7 @@ func (e *eventBroadcasterImpl) recordToSink(event *eventsv1.Event, clock clock.C
 		}()
 		if evToRecord != nil {
 			// TODO: Add a metric counting the number of recording attempts
-			e.attemptRecording(evToRecord)
+			e.attemptRecording(ctx, evToRecord)
 			// We don't want the new recorded Event to be reflected in the
 			// client's cache because server-side mutations could mess with the
 			// aggregation mechanism used by the client.
@@ -205,40 +205,45 @@ func (e *eventBroadcasterImpl) recordToSink(event *eventsv1.Event, clock clock.C
 	}()
 }
 
-func (e *eventBroadcasterImpl) attemptRecording(event *eventsv1.Event) *eventsv1.Event {
+func (e *eventBroadcasterImpl) attemptRecording(ctx context.Context, event *eventsv1.Event) {
 	tries := 0
 	for {
-		if recordedEvent, retry := recordEvent(e.sink, event); !retry {
-			return recordedEvent
+		if _, retry := recordEvent(ctx, e.sink, event); !retry {
+			return
 		}
 		tries++
 		if tries >= maxTriesPerEvent {
-			klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event)
-			return nil
+			klog.FromContext(ctx).Error(nil, "Unable to write event (retry limit exceeded!)", "event", event)
+			return
 		}
 		// Randomize sleep so that various clients won't all be
-		// synced up if the master goes down.
-		time.Sleep(wait.Jitter(e.sleepDuration, 0.25))
+		// synced up if the master goes down. Give up when
+		// the context is canceled.
+		select {
+		case <-ctx.Done():
+			return
+		case <-time.After(wait.Jitter(e.sleepDuration, 0.25)):
+		}
 	}
 }
 
-func recordEvent(sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool) {
+func recordEvent(ctx context.Context, sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool) {
 	var newEvent *eventsv1.Event
 	var err error
 	isEventSeries := event.Series != nil
 	if isEventSeries {
 		patch, patchBytesErr := createPatchBytesForSeries(event)
 		if patchBytesErr != nil {
-			klog.Errorf("Unable to calculate diff, no merge is possible: %v", patchBytesErr)
+			klog.FromContext(ctx).Error(patchBytesErr, "Unable to calculate diff, no merge is possible")
 			return nil, false
 		}
-		newEvent, err = sink.Patch(event, patch)
+		newEvent, err = sink.Patch(ctx, event, patch)
 	}
 	// Update can fail because the event may have been removed and it no longer exists.
 	if !isEventSeries || (isEventSeries && util.IsKeyNotFoundError(err)) {
 		// Making sure that ResourceVersion is empty on creation
 		event.ResourceVersion = ""
-		newEvent, err = sink.Create(event)
+		newEvent, err = sink.Create(ctx, event)
 	}
 	if err == nil {
 		return newEvent, false
@@ -248,7 +253,7 @@ func recordEvent(sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool)
 	switch err.(type) {
 	case *restclient.RequestConstructionError:
 		// We will construct the request the same next time, so don't keep trying.
-		klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err)
+		klog.FromContext(ctx).Error(err, "Unable to construct event (will not retry!)", "event", event)
 		return nil, false
 	case *errors.StatusError:
 		if errors.IsAlreadyExists(err) {
@@ -260,9 +265,9 @@ func recordEvent(sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool)
 			if isEventSeries {
 				return nil, true
 			}
-			klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err)
+			klog.FromContext(ctx).V(5).Info("Server rejected event (will not retry!)", "event", event, "err", err)
 		} else {
-			klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err)
+			klog.FromContext(ctx).Error(err, "Server rejected event (will not retry!)", "event", event)
 		}
 		return nil, false
 	case *errors.UnexpectedObjectError:
@@ -271,7 +276,7 @@ func recordEvent(sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool)
 	default:
 		// This case includes actual http transport errors. Go ahead and retry.
 	}
-	klog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err)
+	klog.FromContext(ctx).Error(err, "Unable to write event (may retry after sleeping)")
 	return nil, true
 }
@@ -307,29 +312,38 @@ func getKey(event *eventsv1.Event) eventKey {
 // StartStructuredLogging starts sending events received from this EventBroadcaster to the structured logging function.
 // The return value can be ignored or used to stop recording, if desired.
-// TODO: this function should also return an error.
+//
+// Deprecated: use StartLogging instead.
 func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) func() {
-	stopWatcher, err := e.StartEventWatcher(
-		func(obj runtime.Object) {
-			event, ok := obj.(*eventsv1.Event)
-			if !ok {
-				klog.Errorf("unexpected type, expected eventsv1.Event")
-				return
-			}
-			klog.V(verbosity).InfoS("Event occurred", "object", klog.KRef(event.Regarding.Namespace, event.Regarding.Name), "kind", event.Regarding.Kind, "apiVersion", event.Regarding.APIVersion, "type", event.Type, "reason", event.Reason, "action", event.Action, "note", event.Note)
-		})
+	logger := klog.Background().V(int(verbosity))
+	stopWatcher, err := e.StartLogging(logger)
 	if err != nil {
-		klog.Errorf("failed to start event watcher: '%v'", err)
+		logger.Error(err, "Failed to start event watcher")
 		return func() {}
 	}
 	return stopWatcher
 }
 
+// StartLogging starts sending events received from this EventBroadcaster to the structured logger.
+// To adjust verbosity, use the logger's V method (i.e. pass `logger.V(3)` instead of `logger`).
+// The returned function can be ignored or used to stop recording, if desired.
+func (e *eventBroadcasterImpl) StartLogging(logger klog.Logger) (func(), error) {
+	return e.StartEventWatcher(
+		func(obj runtime.Object) {
+			event, ok := obj.(*eventsv1.Event)
+			if !ok {
+				logger.Error(nil, "unexpected type, expected eventsv1.Event")
+				return
+			}
+			logger.Info("Event occurred", "object", klog.KRef(event.Regarding.Namespace, event.Regarding.Name), "kind", event.Regarding.Kind, "apiVersion", event.Regarding.APIVersion, "type", event.Type, "reason", event.Reason, "action", event.Action, "note", event.Note)
+		})
+}
+
 // StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function.
 // The return value is used to stop recording
 func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(event runtime.Object)) (func(), error) {
 	watcher, err := e.Watch()
 	if err != nil {
 		klog.Errorf("Unable start event watcher: '%v' (will not retry!)", err)
 		return nil, err
 	}
 	go func() {
@@ -345,37 +359,42 @@ func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(event runtime
 	return watcher.Stop, nil
 }
 
-func (e *eventBroadcasterImpl) startRecordingEvents(stopCh <-chan struct{}) error {
+func (e *eventBroadcasterImpl) startRecordingEvents(ctx context.Context) error {
 	eventHandler := func(obj runtime.Object) {
 		event, ok := obj.(*eventsv1.Event)
 		if !ok {
-			klog.Errorf("unexpected type, expected eventsv1.Event")
+			klog.FromContext(ctx).Error(nil, "unexpected type, expected eventsv1.Event")
 			return
 		}
-		e.recordToSink(event, clock.RealClock{})
+		e.recordToSink(ctx, event, clock.RealClock{})
 	}
 	stopWatcher, err := e.StartEventWatcher(eventHandler)
 	if err != nil {
 		return err
 	}
 	go func() {
-		<-stopCh
+		<-ctx.Done()
		stopWatcher()
 	}()
 	return nil
 }
 
 // StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink.
+// Deprecated: use StartRecordingToSinkWithContext instead.
 func (e *eventBroadcasterImpl) StartRecordingToSink(stopCh <-chan struct{}) {
-	go wait.Until(e.refreshExistingEventSeries, refreshTime, stopCh)
-	go wait.Until(e.finishSeries, finishTime, stopCh)
-	err := e.startRecordingEvents(stopCh)
+	err := e.StartRecordingToSinkWithContext(wait.ContextForChannel(stopCh))
 	if err != nil {
-		klog.Errorf("unexpected type, expected eventsv1.Event")
-		return
+		klog.Background().Error(err, "Failed to start recording to sink")
 	}
 }
 
+// StartRecordingToSinkWithContext starts sending events received from the specified eventBroadcaster to the given sink.
+func (e *eventBroadcasterImpl) StartRecordingToSinkWithContext(ctx context.Context) error {
+	go wait.UntilWithContext(ctx, e.refreshExistingEventSeries, refreshTime)
+	go wait.UntilWithContext(ctx, e.finishSeries, finishTime)
+	return e.startRecordingEvents(ctx)
+}
+
 type eventBroadcasterAdapterImpl struct {
 	coreClient          typedv1core.EventsGetter
 	coreBroadcaster     record.EventBroadcaster
@@ -409,14 +428,14 @@ func (e *eventBroadcasterAdapterImpl) StartRecordingToSink(stopCh <-chan struct{
 	}
 }
 
-func (e *eventBroadcasterAdapterImpl) NewRecorder(name string) EventRecorder {
+func (e *eventBroadcasterAdapterImpl) NewRecorder(name string) EventRecorderLogger {
 	if e.eventsv1Broadcaster != nil && e.eventsv1Client != nil {
 		return e.eventsv1Broadcaster.NewRecorder(scheme.Scheme, name)
 	}
 	return record.NewEventRecorderAdapter(e.DeprecatedNewLegacyRecorder(name))
 }
 
-func (e *eventBroadcasterAdapterImpl) DeprecatedNewLegacyRecorder(name string) record.EventRecorder {
+func (e *eventBroadcasterAdapterImpl) DeprecatedNewLegacyRecorder(name string) record.EventRecorderLogger {
 	return e.coreBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: name})
 }
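Taken together, the changes above make the events pipeline context-aware end to end: sink calls carry the context, and recording stops when it is canceled. A sketch of the new entry point, using only APIs that appear in this diff (EventSinkImpl, NewBroadcaster, StartRecordingToSinkWithContext):

package main

import (
	"context"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/events"
)

// startEvents wires a recorder whose sink calls and retry loops are bound
// to ctx; canceling ctx stops recording.
func startEvents(ctx context.Context, cs kubernetes.Interface) (events.EventRecorderLogger, error) {
	sink := &events.EventSinkImpl{Interface: cs.EventsV1()}
	broadcaster := events.NewBroadcaster(sink)
	if err := broadcaster.StartRecordingToSinkWithContext(ctx); err != nil {
		return nil, err
	}
	return broadcaster.NewRecorder(scheme.Scheme, "my-controller"), nil
}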
27 changes: vendor/k8s.io/client-go/tools/events/event_recorder.go (generated, vendored)
@@ -40,12 +40,33 @@ type recorderImpl struct {
 	clock               clock.Clock
 }
 
 var _ EventRecorder = &recorderImpl{}
 
+func (recorder *recorderImpl) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) {
+	recorder.eventf(klog.Background(), regarding, related, eventtype, reason, action, note, args...)
+}
+
+type recorderImplLogger struct {
+	*recorderImpl
+	logger klog.Logger
+}
+
+var _ EventRecorderLogger = &recorderImplLogger{}
+
+func (recorder *recorderImplLogger) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) {
+	recorder.eventf(recorder.logger, regarding, related, eventtype, reason, action, note, args...)
+}
+
+func (recorder *recorderImplLogger) WithLogger(logger klog.Logger) EventRecorderLogger {
+	return &recorderImplLogger{recorderImpl: recorder.recorderImpl, logger: logger}
+}
+
-func (recorder *recorderImpl) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) {
+func (recorder *recorderImpl) eventf(logger klog.Logger, regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) {
 	timestamp := metav1.MicroTime{Time: time.Now()}
 	message := fmt.Sprintf(note, args...)
 	refRegarding, err := reference.GetReference(recorder.scheme, regarding)
 	if err != nil {
-		klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", regarding, err, eventtype, reason, message)
+		logger.Error(err, "Could not construct reference, will not report event", "object", regarding, "eventType", eventtype, "reason", reason, "message", message)
 		return
 	}
 
@@ -53,11 +74,11 @@ func (recorder *recorderImpl) Eventf(regarding runtime.Object, related runtime.O
 	if related != nil {
 		refRelated, err = reference.GetReference(recorder.scheme, related)
 		if err != nil {
-			klog.V(9).Infof("Could not construct reference to: '%#v' due to: '%v'.", related, err)
+			logger.V(9).Info("Could not construct reference", "object", related, "err", err)
 		}
 	}
 	if !util.ValidateEventType(eventtype) {
-		klog.Errorf("Unsupported event type: '%v'", eventtype)
+		logger.Error(nil, "Unsupported event type", "eventType", eventtype)
 		return
 	}
 	event := recorder.makeEvent(refRegarding, refRelated, timestamp, eventtype, reason, message, recorder.reportingController, recorder.reportingInstance, action)
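The new recorderImplLogger enables per-call logger injection, which is the pattern described in the WithLogger doc comment. A sketch; the event reason and action strings are illustrative only:

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/events"
	"k8s.io/klog/v2"
)

// reportResize routes the recorder's own error/debug output to the logger
// carried by ctx; WithLogger is a cheap call intended for exactly this.
func reportResize(ctx context.Context, recorder events.EventRecorderLogger, pvc runtime.Object) {
	recorder.WithLogger(klog.FromContext(ctx)).
		Eventf(pvc, nil, corev1.EventTypeNormal, "Resized", "ExpandVolume", "expanded PVC to %s", "10Gi")
}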
7 changes: vendor/k8s.io/client-go/tools/events/fake.go (generated, vendored)
@@ -20,6 +20,7 @@ import (
 	"fmt"
 
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/klog/v2"
 )
 
 // FakeRecorder is used as a fake during tests. It is thread safe. It is usable
@@ -29,6 +30,8 @@ type FakeRecorder struct {
 	Events chan string
 }
 
+var _ EventRecorderLogger = &FakeRecorder{}
+
 // Eventf emits an event
 func (f *FakeRecorder) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) {
 	if f.Events != nil {
@@ -36,6 +39,10 @@ func (f *FakeRecorder) Eventf(regarding runtime.Object, related runtime.Object,
 	}
 }
 
+func (f *FakeRecorder) WithLogger(logger klog.Logger) EventRecorderLogger {
+	return f
+}
+
 // NewFakeRecorder creates new fake event recorder with event channel with
 // buffer of given size.
 func NewFakeRecorder(bufferSize int) *FakeRecorder {
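Since FakeRecorder now satisfies EventRecorderLogger, tests can inject it wherever the wider interface is required; WithLogger simply returns the receiver. A sketch (the package name is hypothetical):

package mypkg

import (
	"testing"

	"k8s.io/client-go/tools/events"
	"k8s.io/klog/v2"
)

// TestEmitsEvent exercises the fake through the EventRecorderLogger interface.
func TestEmitsEvent(t *testing.T) {
	rec := events.NewFakeRecorder(10)
	var recorder events.EventRecorderLogger = rec // compiles after this change
	recorder.WithLogger(klog.Background()).
		Eventf(nil, nil, "Normal", "Tested", "Testing", "hello %s", "world")
	select {
	case msg := <-rec.Events:
		t.Log(msg)
	default:
		t.Fatal("expected an event on the channel")
	}
}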
45 changes: vendor/k8s.io/client-go/tools/events/interfaces.go (generated, vendored)
@@ -17,39 +17,30 @@ limitations under the License.
 package events
 
 import (
+	"context"
+
 	eventsv1 "k8s.io/api/events/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	internalevents "k8s.io/client-go/tools/internal/events"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/klog/v2"
 )
 
-// EventRecorder knows how to record events on behalf of an EventSource.
-type EventRecorder interface {
-	// Eventf constructs an event from the given information and puts it in the queue for sending.
-	// 'regarding' is the object this event is about. Event will make a reference-- or you may also
-	// pass a reference to the object directly.
-	// 'related' is the secondary object for more complex actions. E.g. when regarding object triggers
-	// a creation or deletion of related object.
-	// 'type' of this event, and can be one of Normal, Warning. New types could be added in future
-	// 'reason' is the reason this event is generated. 'reason' should be short and unique; it
-	// should be in UpperCamelCase format (starting with a capital letter). "reason" will be used
-	// to automate handling of events, so imagine people writing switch statements to handle them.
-	// You want to make that easy.
-	// 'action' explains what happened with regarding/what action did the ReportingController
-	// (ReportingController is a type of a Controller reporting an Event, e.g. k8s.io/node-controller, k8s.io/kubelet.)
-	// take in regarding's name; it should be in UpperCamelCase format (starting with a capital letter).
-	// 'note' is intended to be human readable.
-	Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{})
-}
+type EventRecorder = internalevents.EventRecorder
+type EventRecorderLogger = internalevents.EventRecorderLogger
 
 // EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log.
 type EventBroadcaster interface {
 	// StartRecordingToSink starts sending events received from the specified eventBroadcaster.
+	// Deprecated: use StartRecordingToSinkWithContext instead.
 	StartRecordingToSink(stopCh <-chan struct{})
 
+	// StartRecordingToSink starts sending events received from the specified eventBroadcaster.
+	StartRecordingToSinkWithContext(ctx context.Context) error
+
 	// NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster
 	// with the event source set to the given event source.
-	NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorder
+	NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorderLogger
 
 	// StartEventWatcher enables you to watch for emitted events without usage
 	// of StartRecordingToSink. This lets you also process events in a custom way (e.g. in tests).
@@ -59,8 +50,14 @@ type EventBroadcaster interface {
 
 	// StartStructuredLogging starts sending events received from this EventBroadcaster to the structured
 	// logging function. The return value can be ignored or used to stop recording, if desired.
+	// Deprecated: use StartLogging instead.
 	StartStructuredLogging(verbosity klog.Level) func()
 
+	// StartLogging starts sending events received from this EventBroadcaster to the structured logger.
+	// To adjust verbosity, use the logger's V method (i.e. pass `logger.V(3)` instead of `logger`).
+	// The returned function can be ignored or used to stop recording, if desired.
+	StartLogging(logger klog.Logger) (func(), error)
+
 	// Shutdown shuts down the broadcaster
 	Shutdown()
 }
@@ -70,9 +67,9 @@ type EventBroadcaster interface {
 // It is assumed that EventSink will return the same sorts of errors as
 // client-go's REST client.
 type EventSink interface {
-	Create(event *eventsv1.Event) (*eventsv1.Event, error)
-	Update(event *eventsv1.Event) (*eventsv1.Event, error)
-	Patch(oldEvent *eventsv1.Event, data []byte) (*eventsv1.Event, error)
+	Create(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error)
+	Update(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error)
+	Patch(ctx context.Context, oldEvent *eventsv1.Event, data []byte) (*eventsv1.Event, error)
 }
 
 // EventBroadcasterAdapter is a auxiliary interface to simplify migration to
@@ -85,10 +82,10 @@ type EventBroadcasterAdapter interface {
 	StartRecordingToSink(stopCh <-chan struct{})
 
 	// NewRecorder creates a new Event Recorder with specified name.
-	NewRecorder(name string) EventRecorder
+	NewRecorder(name string) EventRecorderLogger
 
 	// DeprecatedNewLegacyRecorder creates a legacy Event Recorder with specific name.
-	DeprecatedNewLegacyRecorder(name string) record.EventRecorder
+	DeprecatedNewLegacyRecorder(name string) record.EventRecorderLogger
 
 	// Shutdown shuts down the broadcaster.
 	Shutdown()
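Custom sinks must adopt the new context-aware EventSink signatures shown above. A minimal conforming implementation, a no-op sink for illustration only:

package main

import (
	"context"

	eventsv1 "k8s.io/api/events/v1"
	"k8s.io/client-go/tools/events"
)

// nullSink discards events; it exists only to show the updated method set.
// The recording machinery passes the broadcaster's cancellation context
// down these calls.
type nullSink struct{}

var _ events.EventSink = nullSink{}

func (nullSink) Create(ctx context.Context, e *eventsv1.Event) (*eventsv1.Event, error) { return e, nil }
func (nullSink) Update(ctx context.Context, e *eventsv1.Event) (*eventsv1.Event, error) { return e, nil }
func (nullSink) Patch(ctx context.Context, e *eventsv1.Event, data []byte) (*eventsv1.Event, error) {
	return e, nil
}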
59 changes: vendor/k8s.io/client-go/tools/internal/events/interfaces.go (generated, vendored, new file)
@@ -0,0 +1,59 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package internal is needed to break an import cycle: record.EventRecorderAdapter
+// needs this interface definition to implement it, but event.NewEventBroadcasterAdapter
+// needs record.NewBroadcaster. Therefore this interface cannot be in event/interfaces.go.
+package internal
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/klog/v2"
+)
+
+// EventRecorder knows how to record events on behalf of an EventSource.
+type EventRecorder interface {
+	// Eventf constructs an event from the given information and puts it in the queue for sending.
+	// 'regarding' is the object this event is about. Event will make a reference-- or you may also
+	// pass a reference to the object directly.
+	// 'related' is the secondary object for more complex actions. E.g. when regarding object triggers
+	// a creation or deletion of related object.
+	// 'type' of this event, and can be one of Normal, Warning. New types could be added in future
+	// 'reason' is the reason this event is generated. 'reason' should be short and unique; it
+	// should be in UpperCamelCase format (starting with a capital letter). "reason" will be used
+	// to automate handling of events, so imagine people writing switch statements to handle them.
+	// You want to make that easy.
+	// 'action' explains what happened with regarding/what action did the ReportingController
+	// (ReportingController is a type of a Controller reporting an Event, e.g. k8s.io/node-controller, k8s.io/kubelet.)
+	// take in regarding's name; it should be in UpperCamelCase format (starting with a capital letter).
+	// 'note' is intended to be human readable.
+	Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{})
+}
+
+// EventRecorderLogger extends EventRecorder such that a logger can
+// be set for methods in EventRecorder. Normally, those methods
+// uses the global default logger to record errors and debug messages.
+// If that is not desired, use WithLogger to provide a logger instance.
+type EventRecorderLogger interface {
+	EventRecorder
+
+	// WithLogger replaces the context used for logging. This is a cheap call
+	// and meant to be used for contextual logging:
+	//    recorder := ...
+	//    logger := klog.FromContext(ctx)
+	//    recorder.WithLogger(logger).Eventf(...)
+	WithLogger(logger klog.Logger) EventRecorderLogger
+}
184 changes: vendor/k8s.io/client-go/tools/record/event.go (generated, vendored)
@@ -29,6 +29,7 @@ import (
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/watch"
 	restclient "k8s.io/client-go/rest"
+	internalevents "k8s.io/client-go/tools/internal/events"
 	"k8s.io/client-go/tools/record/util"
 	ref "k8s.io/client-go/tools/reference"
 	"k8s.io/klog/v2"
@@ -110,6 +111,21 @@ type EventRecorder interface {
 	AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{})
 }
 
+// EventRecorderLogger extends EventRecorder such that a logger can
+// be set for methods in EventRecorder. Normally, those methods
+// uses the global default logger to record errors and debug messages.
+// If that is not desired, use WithLogger to provide a logger instance.
+type EventRecorderLogger interface {
+	EventRecorder
+
+	// WithLogger replaces the context used for logging. This is a cheap call
+	// and meant to be used for contextual logging:
+	//    recorder := ...
+	//    logger := klog.FromContext(ctx)
+	//    recorder.WithLogger(logger).Eventf(...)
+	WithLogger(logger klog.Logger) EventRecorderLogger
+}
+
 // EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log.
 type EventBroadcaster interface {
 	// StartEventWatcher starts sending events received from this EventBroadcaster to the given
@@ -131,7 +147,7 @@ type EventBroadcaster interface {
 
 	// NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster
 	// with the event source set to the given event source.
-	NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder
+	NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorderLogger
 
 	// Shutdown shuts down the broadcaster. Once the broadcaster is shut
 	// down, it will only try to record an event in a sink once before
@@ -142,12 +158,14 @@ type EventBroadcaster interface {
 // EventRecorderAdapter is a wrapper around a "k8s.io/client-go/tools/record".EventRecorder
 // implementing the new "k8s.io/client-go/tools/events".EventRecorder interface.
 type EventRecorderAdapter struct {
-	recorder EventRecorder
+	recorder EventRecorderLogger
 }
 
+var _ internalevents.EventRecorder = &EventRecorderAdapter{}
+
 // NewEventRecorderAdapter returns an adapter implementing the new
 // "k8s.io/client-go/tools/events".EventRecorder interface.
-func NewEventRecorderAdapter(recorder EventRecorder) *EventRecorderAdapter {
+func NewEventRecorderAdapter(recorder EventRecorderLogger) *EventRecorderAdapter {
 	return &EventRecorderAdapter{
 		recorder: recorder,
 	}
@@ -158,28 +176,76 @@ func (a *EventRecorderAdapter) Eventf(regarding, _ runtime.Object, eventtype, re
 	a.recorder.Eventf(regarding, eventtype, reason, note, args...)
 }
 
+func (a *EventRecorderAdapter) WithLogger(logger klog.Logger) internalevents.EventRecorderLogger {
+	return &EventRecorderAdapter{
+		recorder: a.recorder.WithLogger(logger),
+	}
+}
+
 // Creates a new event broadcaster.
-func NewBroadcaster() EventBroadcaster {
-	return newEventBroadcaster(watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration)
+func NewBroadcaster(opts ...BroadcasterOption) EventBroadcaster {
+	c := config{
+		sleepDuration: defaultSleepDuration,
+	}
+	for _, opt := range opts {
+		opt(&c)
+	}
+	eventBroadcaster := &eventBroadcasterImpl{
+		Broadcaster:   watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull),
+		sleepDuration: c.sleepDuration,
+		options:       c.CorrelatorOptions,
+	}
+	ctx := c.Context
+	if ctx == nil {
+		ctx = context.Background()
+	} else {
+		// Calling Shutdown is not required when a context was provided:
+		// when the context is canceled, this goroutine will shut down
+		// the broadcaster.
+		go func() {
+			<-ctx.Done()
+			eventBroadcaster.Broadcaster.Shutdown()
+		}()
+	}
+	eventBroadcaster.cancelationCtx, eventBroadcaster.cancel = context.WithCancel(ctx)
+	return eventBroadcaster
 }
 
 func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster {
-	return newEventBroadcaster(watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration)
+	return NewBroadcaster(WithSleepDuration(sleepDuration))
 }
 
 func NewBroadcasterWithCorrelatorOptions(options CorrelatorOptions) EventBroadcaster {
-	eventBroadcaster := newEventBroadcaster(watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration)
-	eventBroadcaster.options = options
-	return eventBroadcaster
+	return NewBroadcaster(WithCorrelatorOptions(options))
 }
 
-func newEventBroadcaster(broadcaster *watch.Broadcaster, sleepDuration time.Duration) *eventBroadcasterImpl {
-	eventBroadcaster := &eventBroadcasterImpl{
-		Broadcaster:   broadcaster,
-		sleepDuration: sleepDuration,
+func WithCorrelatorOptions(options CorrelatorOptions) BroadcasterOption {
+	return func(c *config) {
+		c.CorrelatorOptions = options
 	}
-	eventBroadcaster.cancelationCtx, eventBroadcaster.cancel = context.WithCancel(context.Background())
-	return eventBroadcaster
+}
+
+// WithContext sets a context for the broadcaster. Canceling the context will
+// shut down the broadcaster, Shutdown doesn't need to be called. The context
+// can also be used to provide a logger.
+func WithContext(ctx context.Context) BroadcasterOption {
+	return func(c *config) {
+		c.Context = ctx
+	}
+}
+
+func WithSleepDuration(sleepDuration time.Duration) BroadcasterOption {
+	return func(c *config) {
+		c.sleepDuration = sleepDuration
+	}
+}
+
+type BroadcasterOption func(*config)
+
+type config struct {
+	CorrelatorOptions
+	context.Context
+	sleepDuration time.Duration
+}
 
 type eventBroadcasterImpl struct {
@@ -220,12 +286,12 @@ func (e *eventBroadcasterImpl) recordToSink(sink EventSink, event *v1.Event, eve
 	}
 	tries := 0
 	for {
-		if recordEvent(sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) {
+		if recordEvent(e.cancelationCtx, sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) {
 			break
 		}
 		tries++
 		if tries >= maxTriesPerEvent {
-			klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event)
+			klog.FromContext(e.cancelationCtx).Error(nil, "Unable to write event (retry limit exceeded!)", "event", event)
 			break
 		}
 
@@ -237,7 +303,7 @@ func (e *eventBroadcasterImpl) recordToSink(sink EventSink, event *v1.Event, eve
 		}
 		select {
 		case <-e.cancelationCtx.Done():
-			klog.Errorf("Unable to write event '%#v' (broadcaster is shut down)", event)
+			klog.FromContext(e.cancelationCtx).Error(nil, "Unable to write event (broadcaster is shut down)", "event", event)
 			return
 		case <-time.After(delay):
 		}
@@ -248,7 +314,7 @@ func (e *eventBroadcasterImpl) recordToSink(sink EventSink, event *v1.Event, eve
 // was successfully recorded or discarded, false if it should be retried.
 // If updateExistingEvent is false, it creates a new event, otherwise it updates
 // existing event.
-func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool {
+func recordEvent(ctx context.Context, sink EventSink, event *v1.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool {
 	var newEvent *v1.Event
 	var err error
 	if updateExistingEvent {
@@ -271,13 +337,13 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv
 	switch err.(type) {
 	case *restclient.RequestConstructionError:
 		// We will construct the request the same next time, so don't keep trying.
-		klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err)
+		klog.FromContext(ctx).Error(err, "Unable to construct event (will not retry!)", "event", event)
 		return true
 	case *errors.StatusError:
 		if errors.IsAlreadyExists(err) || errors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
-			klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err)
+			klog.FromContext(ctx).V(5).Info("Server rejected event (will not retry!)", "event", event, "err", err)
 		} else {
-			klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err)
+			klog.FromContext(ctx).Error(err, "Server rejected event (will not retry!)", "event", event)
 		}
 		return true
 	case *errors.UnexpectedObjectError:
@@ -286,7 +352,7 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv
 	default:
 		// This case includes actual http transport errors. Go ahead and retry.
 	}
-	klog.Errorf("Unable to write event: '%#v': '%v'(may retry after sleeping)", event, err)
+	klog.FromContext(ctx).Error(err, "Unable to write event (may retry after sleeping)", "event", event)
 	return false
 }
 
@@ -299,12 +365,15 @@ func (e *eventBroadcasterImpl) StartLogging(logf func(format string, args ...int
 	})
 }
 
-// StartStructuredLogging starts sending events received from this EventBroadcaster to the structured logging function.
+// StartStructuredLogging starts sending events received from this EventBroadcaster to a structured logger.
+// The logger is retrieved from a context if the broadcaster was constructed with a context, otherwise
+// the global default is used.
 // The return value can be ignored or used to stop recording, if desired.
 func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) watch.Interface {
+	loggerV := klog.FromContext(e.cancelationCtx).V(int(verbosity))
 	return e.StartEventWatcher(
 		func(e *v1.Event) {
-			klog.V(verbosity).InfoS("Event occurred", "object", klog.KRef(e.InvolvedObject.Namespace, e.InvolvedObject.Name), "fieldPath", e.InvolvedObject.FieldPath, "kind", e.InvolvedObject.Kind, "apiVersion", e.InvolvedObject.APIVersion, "type", e.Type, "reason", e.Reason, "message", e.Message)
+			loggerV.Info("Event occurred", "object", klog.KRef(e.InvolvedObject.Namespace, e.InvolvedObject.Name), "fieldPath", e.InvolvedObject.FieldPath, "kind", e.InvolvedObject.Kind, "apiVersion", e.InvolvedObject.APIVersion, "type", e.Type, "reason", e.Reason, "message", e.Message)
 		})
 }
@@ -313,26 +382,32 @@ func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) watc
 func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface {
 	watcher, err := e.Watch()
 	if err != nil {
-		klog.Errorf("Unable start event watcher: '%v' (will not retry!)", err)
+		klog.FromContext(e.cancelationCtx).Error(err, "Unable start event watcher (will not retry!)")
 	}
 	go func() {
 		defer utilruntime.HandleCrash()
-		for watchEvent := range watcher.ResultChan() {
-			event, ok := watchEvent.Object.(*v1.Event)
-			if !ok {
-				// This is all local, so there's no reason this should
-				// ever happen.
-				continue
+		for {
+			select {
+			case <-e.cancelationCtx.Done():
+				watcher.Stop()
+				return
+			case watchEvent := <-watcher.ResultChan():
+				event, ok := watchEvent.Object.(*v1.Event)
+				if !ok {
+					// This is all local, so there's no reason this should
+					// ever happen.
+					continue
+				}
+				eventHandler(event)
 			}
-			eventHandler(event)
 		}
 	}()
 	return watcher
 }
 
 // NewRecorder returns an EventRecorder that records events with the given event source.
-func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder {
-	return &recorderImpl{scheme, source, e.Broadcaster, clock.RealClock{}}
+func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorderLogger {
+	return &recorderImplLogger{recorderImpl: &recorderImpl{scheme, source, e.Broadcaster, clock.RealClock{}}, logger: klog.Background()}
 }
 
 type recorderImpl struct {
@@ -342,15 +417,17 @@ type recorderImpl struct {
 	clock clock.PassiveClock
 }
 
-func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, eventtype, reason, message string) {
+var _ EventRecorder = &recorderImpl{}
+
+func (recorder *recorderImpl) generateEvent(logger klog.Logger, object runtime.Object, annotations map[string]string, eventtype, reason, message string) {
 	ref, err := ref.GetReference(recorder.scheme, object)
 	if err != nil {
-		klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message)
+		logger.Error(err, "Could not construct reference, will not report event", "object", object, "eventType", eventtype, "reason", reason, "message", message)
 		return
 	}
 
 	if !util.ValidateEventType(eventtype) {
-		klog.Errorf("Unsupported event type: '%v'", eventtype)
+		logger.Error(nil, "Unsupported event type", "eventType", eventtype)
 		return
 	}
 
@@ -367,16 +444,16 @@ func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations m
 	// outgoing events anyway).
 	sent, err := recorder.ActionOrDrop(watch.Added, event)
 	if err != nil {
-		klog.Errorf("unable to record event: %v (will not retry!)", err)
+		logger.Error(err, "Unable to record event (will not retry!)")
 		return
 	}
 	if !sent {
-		klog.Errorf("unable to record event: too many queued events, dropped event %#v", event)
+		logger.Error(nil, "Unable to record event: too many queued events, dropped event", "event", event)
 	}
 }
 
 func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) {
-	recorder.generateEvent(object, nil, eventtype, reason, message)
+	recorder.generateEvent(klog.Background(), object, nil, eventtype, reason, message)
 }
 
 func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
@@ -384,7 +461,7 @@ func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, m
 }
 
 func (recorder *recorderImpl) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) {
-	recorder.generateEvent(object, annotations, eventtype, reason, fmt.Sprintf(messageFmt, args...))
+	recorder.generateEvent(klog.Background(), object, annotations, eventtype, reason, fmt.Sprintf(messageFmt, args...))
 }
 
 func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, annotations map[string]string, eventtype, reason, message string) *v1.Event {
@@ -408,3 +485,26 @@ func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, annotations map
 		Type:    eventtype,
 	}
 }
+
+type recorderImplLogger struct {
+	*recorderImpl
+	logger klog.Logger
+}
+
+var _ EventRecorderLogger = &recorderImplLogger{}
+
+func (recorder recorderImplLogger) Event(object runtime.Object, eventtype, reason, message string) {
+	recorder.recorderImpl.generateEvent(recorder.logger, object, nil, eventtype, reason, message)
+}
+
+func (recorder recorderImplLogger) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
+	recorder.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...))
+}
+
+func (recorder recorderImplLogger) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) {
+	recorder.generateEvent(recorder.logger, object, annotations, eventtype, reason, fmt.Sprintf(messageFmt, args...))
+}
+
+func (recorder recorderImplLogger) WithLogger(logger klog.Logger) EventRecorderLogger {
+	return recorderImplLogger{recorderImpl: recorder.recorderImpl, logger: logger}
+}
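NewBroadcaster is now variadic, and a context-bound broadcaster shuts itself down when the context is canceled, so Shutdown does not have to be called explicitly. A sketch using only APIs introduced in this diff:

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/record"
)

// newRecorder builds a recorder from a context-bound broadcaster; canceling
// ctx shuts the broadcaster down.
func newRecorder(ctx context.Context) record.EventRecorderLogger {
	broadcaster := record.NewBroadcaster(record.WithContext(ctx))
	return broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "my-controller"})
}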
7 changes: vendor/k8s.io/client-go/tools/record/fake.go (generated, vendored)
@@ -20,6 +20,7 @@ import (
 	"fmt"
 
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/klog/v2"
 )
 
 // FakeRecorder is used as a fake during tests. It is thread safe. It is usable
@@ -31,6 +32,8 @@ type FakeRecorder struct {
 	IncludeObject bool
 }
 
+var _ EventRecorderLogger = &FakeRecorder{}
+
 func objectString(object runtime.Object, includeObject bool) string {
 	if !includeObject {
 		return ""
@@ -68,6 +71,10 @@ func (f *FakeRecorder) AnnotatedEventf(object runtime.Object, annotations map[st
 	f.writeEvent(object, annotations, eventtype, reason, messageFmt, args...)
 }
 
+func (f *FakeRecorder) WithLogger(logger klog.Logger) EventRecorderLogger {
+	return f
+}
+
 // NewFakeRecorder creates new fake event recorder with event channel with
 // buffer of given size.
 func NewFakeRecorder(bufferSize int) *FakeRecorder {
57
vendor/k8s.io/client-go/tools/remotecommand/fallback.go
generated
vendored
Normal file
@@ -0,0 +1,57 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package remotecommand

import (
	"context"
)

var _ Executor = &fallbackExecutor{}

type fallbackExecutor struct {
	primary        Executor
	secondary      Executor
	shouldFallback func(error) bool
}

// NewFallbackExecutor creates an Executor that first attempts to use the
// WebSocketExecutor, falling back to the legacy SPDYExecutor if the initial
// websocket "StreamWithContext" call fails.
// func NewFallbackExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) {
func NewFallbackExecutor(primary, secondary Executor, shouldFallback func(error) bool) (Executor, error) {
	return &fallbackExecutor{
		primary:        primary,
		secondary:      secondary,
		shouldFallback: shouldFallback,
	}, nil
}

// Stream is deprecated. Please use "StreamWithContext".
func (f *fallbackExecutor) Stream(options StreamOptions) error {
	return f.StreamWithContext(context.Background(), options)
}

// StreamWithContext initially attempts to call "StreamWithContext" using the
// primary executor, falling back to calling the secondary executor if the
// initial primary call to upgrade to a websocket connection fails.
func (f *fallbackExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error {
	err := f.primary.StreamWithContext(ctx, options)
	if f.shouldFallback(err) {
		return f.secondary.StreamWithContext(ctx, options)
	}
	return err
}
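Editor's note: together with the websocket executor added later in this commit, the intended wiring is websocket-first with an SPDY fallback. A sketch of that wiring, using only constructors present in this diff; the shouldFallback predicate below is an illustrative assumption, real callers use a stricter upgrade-failure check:

// Editor's sketch: websocket-first executor with SPDY fallback.
package demo

import (
	"context"
	"net/url"
	"os"

	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

func execWithFallback(ctx context.Context, config *restclient.Config, reqURL *url.URL) error {
	wsExec, err := remotecommand.NewWebSocketExecutor(config, "GET", reqURL.String())
	if err != nil {
		return err
	}
	spdyExec, err := remotecommand.NewSPDYExecutor(config, "POST", reqURL)
	if err != nil {
		return err
	}
	exec, err := remotecommand.NewFallbackExecutor(wsExec, spdyExec, func(err error) bool {
		return err != nil // illustrative only: fall back on any websocket error
	})
	if err != nil {
		return err
	}
	return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
		Stdout: os.Stdout,
		Stderr: os.Stderr,
	})
}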
124
vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go
generated
vendored
@@ -18,17 +18,10 @@ package remotecommand

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"net/url"

	"k8s.io/klog/v2"

	"k8s.io/apimachinery/pkg/util/httpstream"
	"k8s.io/apimachinery/pkg/util/remotecommand"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/transport/spdy"
)

// StreamOptions holds information pertaining to the current streaming session:
@@ -63,120 +56,3 @@ type streamCreator interface {
type streamProtocolHandler interface {
	stream(conn streamCreator) error
}

// streamExecutor handles transporting standard shell streams over an httpstream connection.
type streamExecutor struct {
	upgrader  spdy.Upgrader
	transport http.RoundTripper

	method    string
	url       *url.URL
	protocols []string
}

// NewSPDYExecutor connects to the provided server and upgrades the connection to
// multiplexed bidirectional streams.
func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) {
	wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config)
	if err != nil {
		return nil, err
	}
	return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url)
}

// NewSPDYExecutorForTransports connects to the provided server using the given transport,
// upgrades the response using the given upgrader to multiplexed bidirectional streams.
func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) {
	return NewSPDYExecutorForProtocols(
		transport, upgrader, method, url,
		remotecommand.StreamProtocolV4Name,
		remotecommand.StreamProtocolV3Name,
		remotecommand.StreamProtocolV2Name,
		remotecommand.StreamProtocolV1Name,
	)
}

// NewSPDYExecutorForProtocols connects to the provided server and upgrades the connection to
// multiplexed bidirectional streams using only the provided protocols. Exposed for testing, most
// callers should use NewSPDYExecutor or NewSPDYExecutorForTransports.
func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL, protocols ...string) (Executor, error) {
	return &streamExecutor{
		upgrader:  upgrader,
		transport: transport,
		method:    method,
		url:       url,
		protocols: protocols,
	}, nil
}

// Stream opens a protocol streamer to the server and streams until a client closes
// the connection or the server disconnects.
func (e *streamExecutor) Stream(options StreamOptions) error {
	return e.StreamWithContext(context.Background(), options)
}

// newConnectionAndStream creates a new SPDY connection and a stream protocol handler upon it.
func (e *streamExecutor) newConnectionAndStream(ctx context.Context, options StreamOptions) (httpstream.Connection, streamProtocolHandler, error) {
	req, err := http.NewRequestWithContext(ctx, e.method, e.url.String(), nil)
	if err != nil {
		return nil, nil, fmt.Errorf("error creating request: %v", err)
	}

	conn, protocol, err := spdy.Negotiate(
		e.upgrader,
		&http.Client{Transport: e.transport},
		req,
		e.protocols...,
	)
	if err != nil {
		return nil, nil, err
	}

	var streamer streamProtocolHandler

	switch protocol {
	case remotecommand.StreamProtocolV4Name:
		streamer = newStreamProtocolV4(options)
	case remotecommand.StreamProtocolV3Name:
		streamer = newStreamProtocolV3(options)
	case remotecommand.StreamProtocolV2Name:
		streamer = newStreamProtocolV2(options)
	case "":
		klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name)
		fallthrough
	case remotecommand.StreamProtocolV1Name:
		streamer = newStreamProtocolV1(options)
	}

	return conn, streamer, nil
}

// StreamWithContext opens a protocol streamer to the server and streams until a client closes
// the connection or the server disconnects or the context is done.
func (e *streamExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error {
	conn, streamer, err := e.newConnectionAndStream(ctx, options)
	if err != nil {
		return err
	}
	defer conn.Close()

	panicChan := make(chan any, 1)
	errorChan := make(chan error, 1)
	go func() {
		defer func() {
			if p := recover(); p != nil {
				panicChan <- p
			}
		}()
		errorChan <- streamer.stream(conn)
	}()

	select {
	case p := <-panicChan:
		panic(p)
	case err := <-errorChan:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}
171
vendor/k8s.io/client-go/tools/remotecommand/spdy.go
generated
vendored
Normal file
@@ -0,0 +1,171 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package remotecommand

import (
	"context"
	"fmt"
	"net/http"
	"net/url"

	"k8s.io/apimachinery/pkg/util/httpstream"
	"k8s.io/apimachinery/pkg/util/remotecommand"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/transport/spdy"
	"k8s.io/klog/v2"
)

// spdyStreamExecutor handles transporting standard shell streams over an httpstream connection.
type spdyStreamExecutor struct {
	upgrader  spdy.Upgrader
	transport http.RoundTripper

	method          string
	url             *url.URL
	protocols       []string
	rejectRedirects bool // if true, receiving redirect from upstream is an error
}

// NewSPDYExecutor connects to the provided server and upgrades the connection to
// multiplexed bidirectional streams.
func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) {
	wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config)
	if err != nil {
		return nil, err
	}
	return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url)
}

// NewSPDYExecutorRejectRedirects returns an Executor that will upgrade the future
// connection to a SPDY bi-directional streaming connection when calling "Stream" (deprecated)
// or "StreamWithContext" (preferred). Additionally, if the upstream server returns a redirect
// during the attempted upgrade in these "Stream" calls, an error is returned.
func NewSPDYExecutorRejectRedirects(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) {
	executor, err := NewSPDYExecutorForTransports(transport, upgrader, method, url)
	if err != nil {
		return nil, err
	}
	spdyExecutor := executor.(*spdyStreamExecutor)
	spdyExecutor.rejectRedirects = true
	return spdyExecutor, nil
}

// NewSPDYExecutorForTransports connects to the provided server using the given transport,
// upgrades the response using the given upgrader to multiplexed bidirectional streams.
func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) {
	return NewSPDYExecutorForProtocols(
		transport, upgrader, method, url,
		remotecommand.StreamProtocolV5Name,
		remotecommand.StreamProtocolV4Name,
		remotecommand.StreamProtocolV3Name,
		remotecommand.StreamProtocolV2Name,
		remotecommand.StreamProtocolV1Name,
	)
}

// NewSPDYExecutorForProtocols connects to the provided server and upgrades the connection to
// multiplexed bidirectional streams using only the provided protocols. Exposed for testing, most
// callers should use NewSPDYExecutor or NewSPDYExecutorForTransports.
func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL, protocols ...string) (Executor, error) {
	return &spdyStreamExecutor{
		upgrader:  upgrader,
		transport: transport,
		method:    method,
		url:       url,
		protocols: protocols,
	}, nil
}

// Stream opens a protocol streamer to the server and streams until a client closes
// the connection or the server disconnects.
func (e *spdyStreamExecutor) Stream(options StreamOptions) error {
	return e.StreamWithContext(context.Background(), options)
}

// newConnectionAndStream creates a new SPDY connection and a stream protocol handler upon it.
func (e *spdyStreamExecutor) newConnectionAndStream(ctx context.Context, options StreamOptions) (httpstream.Connection, streamProtocolHandler, error) {
	req, err := http.NewRequestWithContext(ctx, e.method, e.url.String(), nil)
	if err != nil {
		return nil, nil, fmt.Errorf("error creating request: %v", err)
	}

	client := http.Client{Transport: e.transport}
	if e.rejectRedirects {
		client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
			return fmt.Errorf("redirect not allowed")
		}
	}
	conn, protocol, err := spdy.Negotiate(
		e.upgrader,
		&client,
		req,
		e.protocols...,
	)
	if err != nil {
		return nil, nil, err
	}

	var streamer streamProtocolHandler

	switch protocol {
	case remotecommand.StreamProtocolV5Name:
		streamer = newStreamProtocolV5(options)
	case remotecommand.StreamProtocolV4Name:
		streamer = newStreamProtocolV4(options)
	case remotecommand.StreamProtocolV3Name:
		streamer = newStreamProtocolV3(options)
	case remotecommand.StreamProtocolV2Name:
		streamer = newStreamProtocolV2(options)
	case "":
		klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name)
		fallthrough
	case remotecommand.StreamProtocolV1Name:
		streamer = newStreamProtocolV1(options)
	}

	return conn, streamer, nil
}

// StreamWithContext opens a protocol streamer to the server and streams until a client closes
// the connection or the server disconnects or the context is done.
func (e *spdyStreamExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error {
	conn, streamer, err := e.newConnectionAndStream(ctx, options)
	if err != nil {
		return err
	}
	defer conn.Close()

	panicChan := make(chan any, 1)
	errorChan := make(chan error, 1)
	go func() {
		defer func() {
			if p := recover(); p != nil {
				panicChan <- p
			}
		}()
		errorChan <- streamer.stream(conn)
	}()

	select {
	case p := <-panicChan:
		panic(p)
	case err := <-errorChan:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}
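Editor's note: the rejectRedirects path above works entirely through http.Client.CheckRedirect, which turns any 3xx response into a client-side error before the upgrade proceeds. A standalone sketch of that stdlib behavior (the test server is an assumption for illustration, not part of this diff):

// Editor's sketch: http.Client.CheckRedirect converting a redirect into an error.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	srv := httptest.NewServer(http.RedirectHandler("/elsewhere", http.StatusFound))
	defer srv.Close()

	client := http.Client{
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return fmt.Errorf("redirect not allowed")
		},
	}
	_, err := client.Get(srv.URL)
	fmt.Println(err) // Get "...": redirect not allowed
}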
35
vendor/k8s.io/client-go/tools/remotecommand/v5.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package remotecommand

// streamProtocolV5 adds support for V5 of the remote command subprotocol.
// For the streamProtocolHandler, this version is the same as V4.
type streamProtocolV5 struct {
	*streamProtocolV4
}

var _ streamProtocolHandler = &streamProtocolV5{}

func newStreamProtocolV5(options StreamOptions) streamProtocolHandler {
	return &streamProtocolV5{
		streamProtocolV4: newStreamProtocolV4(options).(*streamProtocolV4),
	}
}

func (p *streamProtocolV5) stream(conn streamCreator) error {
	return p.streamProtocolV4.stream(conn)
}
502
vendor/k8s.io/client-go/tools/remotecommand/websocket.go
generated
vendored
Normal file
@@ -0,0 +1,502 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package remotecommand

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"sync"
	"time"

	gwebsocket "github.com/gorilla/websocket"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/httpstream"
	"k8s.io/apimachinery/pkg/util/remotecommand"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/transport/websocket"
	"k8s.io/klog/v2"
)

// writeDeadline defines the time that a write to the websocket connection
// must complete by, otherwise an i/o timeout occurs. The writeDeadline
// has nothing to do with a response from the other websocket connection
// endpoint; only that the message was successfully processed by the
// local websocket connection. The typical write deadline within the websocket
// library is one second.
const writeDeadline = 2 * time.Second

var (
	_ Executor          = &wsStreamExecutor{}
	_ streamCreator     = &wsStreamCreator{}
	_ httpstream.Stream = &stream{}

	streamType2streamID = map[string]byte{
		v1.StreamTypeStdin:  remotecommand.StreamStdIn,
		v1.StreamTypeStdout: remotecommand.StreamStdOut,
		v1.StreamTypeStderr: remotecommand.StreamStdErr,
		v1.StreamTypeError:  remotecommand.StreamErr,
		v1.StreamTypeResize: remotecommand.StreamResize,
	}
)

const (
	// pingPeriod defines how often a heartbeat "ping" message is sent.
	pingPeriod = 5 * time.Second
	// pingReadDeadline defines the time waiting for a response heartbeat
	// "pong" message before a timeout error occurs for websocket reading.
	// This duration must always be greater than the "pingPeriod". By defining
	// this deadline in terms of the ping period, we are essentially saying
	// we can drop "X-1" (e.g. 3-1=2) pings before firing the timeout.
	pingReadDeadline = (pingPeriod * 3) + (1 * time.Second)
)
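Editor's note: with the defaults above, the read deadline works out to 16 seconds: pings go out every 5 seconds, so two consecutive pongs can be lost and the third still lands (at t=15s) inside the deadline. A quick check of that arithmetic (editor's sketch, constants copied from the code above):

// Editor's sketch: the heartbeat deadline arithmetic.
package main

import (
	"fmt"
	"time"
)

func main() {
	pingPeriod := 5 * time.Second
	pingReadDeadline := (pingPeriod * 3) + (1 * time.Second)
	fmt.Println(pingReadDeadline)                       // 16s
	fmt.Println(int(pingReadDeadline/pingPeriod) - 1)   // 2 droppable pings
}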
// wsStreamExecutor handles transporting standard shell streams over an httpstream connection.
type wsStreamExecutor struct {
	transport http.RoundTripper
	upgrader  websocket.ConnectionHolder
	method    string
	url       string
	// requested protocols in priority order (e.g. v5.channel.k8s.io before v4.channel.k8s.io).
	protocols []string
	// selected protocol from the handshake process; could be empty string if handshake fails.
	negotiated string
	// period defines how often a "ping" heartbeat message is sent to the other endpoint.
	heartbeatPeriod time.Duration
	// deadline defines the amount of time before "pong" response must be received.
	heartbeatDeadline time.Duration
}

func NewWebSocketExecutor(config *restclient.Config, method, url string) (Executor, error) {
	// Only supports V5 protocol for correct version skew functionality.
	// Previous api servers will proxy upgrade requests to legacy websocket
	// servers on container runtimes which support V1-V4. These legacy
	// websocket servers will not handle the new CLOSE signal.
	return NewWebSocketExecutorForProtocols(config, method, url, remotecommand.StreamProtocolV5Name)
}

// NewWebSocketExecutorForProtocols allows executing commands via a WebSocket connection.
func NewWebSocketExecutorForProtocols(config *restclient.Config, method, url string, protocols ...string) (Executor, error) {
	transport, upgrader, err := websocket.RoundTripperFor(config)
	if err != nil {
		return nil, fmt.Errorf("error creating websocket transports: %v", err)
	}
	return &wsStreamExecutor{
		transport:         transport,
		upgrader:          upgrader,
		method:            method,
		url:               url,
		protocols:         protocols,
		heartbeatPeriod:   pingPeriod,
		heartbeatDeadline: pingReadDeadline,
	}, nil
}

// Deprecated: use StreamWithContext instead to avoid possible resource leaks.
// See https://github.com/kubernetes/kubernetes/pull/103177 for details.
func (e *wsStreamExecutor) Stream(options StreamOptions) error {
	return e.StreamWithContext(context.Background(), options)
}
// StreamWithContext upgrades an HTTPRequest to a WebSocket connection, and starts the various
// goroutines to implement the necessary streams over the connection. The "options" parameter
// defines which streams are requested. Returns an error if one occurred. This method is NOT
// safe to run concurrently with the same executor (because of the state stored in the upgrader).
func (e *wsStreamExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error {
	req, err := http.NewRequestWithContext(ctx, e.method, e.url, nil)
	if err != nil {
		return err
	}
	conn, err := websocket.Negotiate(e.transport, e.upgrader, req, e.protocols...)
	if err != nil {
		return err
	}
	if conn == nil {
		panic(fmt.Errorf("websocket connection is nil"))
	}
	defer conn.Close()
	e.negotiated = conn.Subprotocol()
	klog.V(4).Infof("The subprotocol is %s", e.negotiated)

	var streamer streamProtocolHandler
	switch e.negotiated {
	case remotecommand.StreamProtocolV5Name:
		streamer = newStreamProtocolV5(options)
	case remotecommand.StreamProtocolV4Name:
		streamer = newStreamProtocolV4(options)
	case remotecommand.StreamProtocolV3Name:
		streamer = newStreamProtocolV3(options)
	case remotecommand.StreamProtocolV2Name:
		streamer = newStreamProtocolV2(options)
	case "":
		klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name)
		fallthrough
	case remotecommand.StreamProtocolV1Name:
		streamer = newStreamProtocolV1(options)
	}

	panicChan := make(chan any, 1)
	errorChan := make(chan error, 1)
	go func() {
		defer func() {
			if p := recover(); p != nil {
				panicChan <- p
			}
		}()
		creator := newWSStreamCreator(conn)
		go creator.readDemuxLoop(
			e.upgrader.DataBufferSize(),
			e.heartbeatPeriod,
			e.heartbeatDeadline,
		)
		errorChan <- streamer.stream(creator)
	}()

	select {
	case p := <-panicChan:
		panic(p)
	case err := <-errorChan:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}
type wsStreamCreator struct {
	conn *gwebsocket.Conn
	// Protects writing to websocket connection; reading is lock-free
	connWriteLock sync.Mutex
	// map of stream id to stream; multiple streams read/write the connection
	streams   map[byte]*stream
	streamsMu sync.Mutex
}

func newWSStreamCreator(conn *gwebsocket.Conn) *wsStreamCreator {
	return &wsStreamCreator{
		conn:    conn,
		streams: map[byte]*stream{},
	}
}

func (c *wsStreamCreator) getStream(id byte) *stream {
	c.streamsMu.Lock()
	defer c.streamsMu.Unlock()
	return c.streams[id]
}

func (c *wsStreamCreator) setStream(id byte, s *stream) {
	c.streamsMu.Lock()
	defer c.streamsMu.Unlock()
	c.streams[id] = s
}

// CreateStream uses id from passed headers to create a stream over "c.conn" connection.
// Returns a Stream structure or nil and an error if one occurred.
func (c *wsStreamCreator) CreateStream(headers http.Header) (httpstream.Stream, error) {
	streamType := headers.Get(v1.StreamType)
	id, ok := streamType2streamID[streamType]
	if !ok {
		return nil, fmt.Errorf("unknown stream type: %s", streamType)
	}
	if s := c.getStream(id); s != nil {
		return nil, fmt.Errorf("duplicate stream for type %s", streamType)
	}
	reader, writer := io.Pipe()
	s := &stream{
		headers:       headers,
		readPipe:      reader,
		writePipe:     writer,
		conn:          c.conn,
		connWriteLock: &c.connWriteLock,
		id:            id,
	}
	c.setStream(id, s)
	return s, nil
}
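Editor's note: every data message on the wire is a binary websocket frame whose first byte is the stream id from streamType2streamID, followed by the payload; readDemuxLoop below peels that byte off and routes the rest to the matching pipe. A standalone sketch of that framing (editor's illustration of the wire format, not the library API; the stream id value is an assumption):

// Editor's sketch: one-byte stream-id framing as used by the V5 protocol.
package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	const stdoutID = byte(1) // assumed stdout stream id, for illustration

	// "Sender" side: prefix the payload with the stream id.
	var frame bytes.Buffer
	frame.WriteByte(stdoutID)
	frame.WriteString("hello from the container\n")

	// "Receiver" side: read the id byte, then route the remaining payload.
	r := bytes.NewReader(frame.Bytes())
	idBuf := make([]byte, 1)
	if _, err := io.ReadFull(r, idBuf); err != nil {
		panic(err)
	}
	payload, _ := io.ReadAll(r)
	fmt.Printf("stream %d: %s", idBuf[0], payload)
}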
// readDemuxLoop is the lock-free reading processor for this endpoint of the websocket
// connection. This loop reads the connection, and demultiplexes the data
// into one of the individual stream pipes (by checking the stream id). This
// loop can *not* be run concurrently, because there can only be one websocket
// connection reader at a time (a read mutex would provide no benefit).
func (c *wsStreamCreator) readDemuxLoop(bufferSize int, period time.Duration, deadline time.Duration) {
	// Initialize and start the ping/pong heartbeat.
	h := newHeartbeat(c.conn, period, deadline)
	// Set initial timeout for websocket connection reading.
	if err := c.conn.SetReadDeadline(time.Now().Add(deadline)); err != nil {
		klog.Errorf("Websocket initial setting read deadline failed %v", err)
		return
	}
	go h.start()
	// Buffer size must correspond to the same size allocated
	// for the read buffer during websocket client creation. A
	// difference can cause incomplete connection reads.
	readBuffer := make([]byte, bufferSize)
	for {
		// NextReader() only returns data messages (BinaryMessage or Text
		// Message). Even though this call will never return control frames
		// such as ping, pong, or close, this call is necessary for these
		// message types to be processed. There can only be one reader
		// at a time, so this reader loop must *not* be run concurrently;
		// there is no lock for reading. Calling "NextReader()" before the
		// current reader has been processed will close the current reader.
		// If the heartbeat read deadline times out, this "NextReader()" will
		// return an i/o error, and error handling will clean up.
		messageType, r, err := c.conn.NextReader()
		if err != nil {
			websocketErr, ok := err.(*gwebsocket.CloseError)
			if ok && websocketErr.Code == gwebsocket.CloseNormalClosure {
				err = nil // readers will get io.EOF as it's a normal closure
			} else {
				err = fmt.Errorf("next reader: %w", err)
			}
			c.closeAllStreamReaders(err)
			return
		}
		// All remote command protocols send/receive only binary data messages.
		if messageType != gwebsocket.BinaryMessage {
			c.closeAllStreamReaders(fmt.Errorf("unexpected message type: %d", messageType))
			return
		}
		// It's ok to read just a single byte because the underlying library wraps the actual
		// connection with a buffered reader anyway.
		_, err = io.ReadFull(r, readBuffer[:1])
		if err != nil {
			c.closeAllStreamReaders(fmt.Errorf("read stream id: %w", err))
			return
		}
		streamID := readBuffer[0]
		s := c.getStream(streamID)
		if s == nil {
			klog.Errorf("Unknown stream id %d, discarding message", streamID)
			continue
		}
		for {
			nr, errRead := r.Read(readBuffer)
			if nr > 0 {
				// Write the data to the stream's pipe. This can block.
				_, errWrite := s.writePipe.Write(readBuffer[:nr])
				if errWrite != nil {
					// Pipe must have been closed by the stream user.
					// Nothing to do, discard the message.
					break
				}
			}
			if errRead != nil {
				if errRead == io.EOF {
					break
				}
				c.closeAllStreamReaders(fmt.Errorf("read message: %w", errRead))
				return
			}
		}
	}
}

// closeAllStreamReaders closes readers in all streams.
// This unblocks all stream.Read() calls.
func (c *wsStreamCreator) closeAllStreamReaders(err error) {
	c.streamsMu.Lock()
	defer c.streamsMu.Unlock()
	for _, s := range c.streams {
		// Closing writePipe unblocks all readPipe.Read() callers and prevents any future writes.
		_ = s.writePipe.CloseWithError(err)
	}
}
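Editor's note: closeAllStreamReaders leans on io.Pipe semantics: CloseWithError(err) on the writer makes every pending and future Read on the paired reader return that error (or io.EOF when err is nil). A standalone illustration (editor's sketch):

// Editor's sketch: io.PipeWriter.CloseWithError unblocks a pending Read.
package main

import (
	"errors"
	"fmt"
	"io"
	"time"
)

func main() {
	r, w := io.Pipe()
	go func() {
		time.Sleep(10 * time.Millisecond)
		w.CloseWithError(errors.New("next reader: connection reset"))
	}()
	buf := make([]byte, 16)
	_, err := r.Read(buf) // blocks until CloseWithError fires
	fmt.Println(err)      // next reader: connection reset
}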
type stream struct {
	headers   http.Header
	readPipe  *io.PipeReader
	writePipe *io.PipeWriter
	// conn is used for writing directly into the connection.
	// Is nil after Close() / Reset() to prevent future writes.
	conn *gwebsocket.Conn
	// connWriteLock protects conn against concurrent write operations. There must be a single writer and a single reader only.
	// The mutex is shared across all streams because the underlying connection is shared.
	connWriteLock *sync.Mutex
	id            byte
}

func (s *stream) Read(p []byte) (n int, err error) {
	return s.readPipe.Read(p)
}

// Write writes directly to the underlying WebSocket connection.
func (s *stream) Write(p []byte) (n int, err error) {
	klog.V(4).Infof("Write() on stream %d", s.id)
	defer klog.V(4).Infof("Write() done on stream %d", s.id)
	s.connWriteLock.Lock()
	defer s.connWriteLock.Unlock()
	if s.conn == nil {
		return 0, fmt.Errorf("write on closed stream %d", s.id)
	}
	err = s.conn.SetWriteDeadline(time.Now().Add(writeDeadline))
	if err != nil {
		klog.V(7).Infof("Websocket setting write deadline failed %v", err)
		return 0, err
	}
	// Message writer buffers the message data, so we don't need to do that ourselves.
	// Just write id and the data as two separate writes to avoid allocating an intermediate buffer.
	w, err := s.conn.NextWriter(gwebsocket.BinaryMessage)
	if err != nil {
		return 0, err
	}
	defer func() {
		if w != nil {
			w.Close()
		}
	}()
	_, err = w.Write([]byte{s.id})
	if err != nil {
		return 0, err
	}
	n, err = w.Write(p)
	if err != nil {
		return n, err
	}
	err = w.Close()
	w = nil
	return n, err
}

// Close half-closes the stream, indicating this side is finished with the stream.
func (s *stream) Close() error {
	klog.V(4).Infof("Close() on stream %d", s.id)
	defer klog.V(4).Infof("Close() done on stream %d", s.id)
	s.connWriteLock.Lock()
	defer s.connWriteLock.Unlock()
	if s.conn == nil {
		return fmt.Errorf("Close() on already closed stream %d", s.id)
	}
	// Communicate the CLOSE stream signal to the other websocket endpoint.
	err := s.conn.WriteMessage(gwebsocket.BinaryMessage, []byte{remotecommand.StreamClose, s.id})
	s.conn = nil
	return err
}

func (s *stream) Reset() error {
	klog.V(4).Infof("Reset() on stream %d", s.id)
	defer klog.V(4).Infof("Reset() done on stream %d", s.id)
	s.Close()
	return s.writePipe.Close()
}

func (s *stream) Headers() http.Header {
	return s.headers
}

func (s *stream) Identifier() uint32 {
	return uint32(s.id)
}
// heartbeat encapsulates data necessary for the websocket ping/pong heartbeat. This
// heartbeat works by setting a read deadline on the websocket connection, then
// pushing this deadline into the future for every successful heartbeat. If the
// heartbeat "pong" fails to respond within the deadline, then the "NextReader()" call
// inside the "readDemuxLoop" will return an i/o error prompting a connection close
// and cleanup.
type heartbeat struct {
	conn *gwebsocket.Conn
	// period defines how often a "ping" heartbeat message is sent to the other endpoint
	period time.Duration
	// closing the "closer" channel will clean up the heartbeat timers
	closer chan struct{}
	// optional data to send with "ping" message
	message []byte
	// optionally received data message with "pong" message, same as sent with ping
	pongMessage []byte
}

// newHeartbeat creates heartbeat structure encapsulating fields necessary to
// run the websocket connection ping/pong mechanism and sets up handlers on
// the websocket connection.
func newHeartbeat(conn *gwebsocket.Conn, period time.Duration, deadline time.Duration) *heartbeat {
	h := &heartbeat{
		conn:   conn,
		period: period,
		closer: make(chan struct{}),
	}
	// Set up handler for receiving returned "pong" message from other endpoint
	// by pushing the read deadline into the future. The "msg" received could
	// be empty.
	h.conn.SetPongHandler(func(msg string) error {
		// Push the read deadline into the future.
		klog.V(8).Infof("Pong message received (%s)--resetting read deadline", msg)
		err := h.conn.SetReadDeadline(time.Now().Add(deadline))
		if err != nil {
			klog.Errorf("Websocket setting read deadline failed %v", err)
			return err
		}
		if len(msg) > 0 {
			h.pongMessage = []byte(msg)
		}
		return nil
	})
	// Set up handler to cleanup timers when this endpoint receives "Close" message.
	closeHandler := h.conn.CloseHandler()
	h.conn.SetCloseHandler(func(code int, text string) error {
		close(h.closer)
		return closeHandler(code, text)
	})
	return h
}

// setMessage is optional data sent with "ping" heartbeat. According to the websocket RFC
// this data sent with "ping" message should be returned in "pong" message.
func (h *heartbeat) setMessage(msg string) {
	h.message = []byte(msg)
}

// start the heartbeat by setting up necessary handlers and looping by sending "ping"
// message every "period" until the "closer" channel is closed.
func (h *heartbeat) start() {
	// Loop to continually send "ping" message through websocket connection every "period".
	t := time.NewTicker(h.period)
	defer t.Stop()
	for {
		select {
		case <-h.closer:
			klog.V(8).Infof("closed channel--returning")
			return
		case <-t.C:
			// "WriteControl" does not need to be protected by a mutex. According to
			// gorilla/websockets library docs: "The Close and WriteControl methods can
			// be called concurrently with all other methods."
			if err := h.conn.WriteControl(gwebsocket.PingMessage, h.message, time.Now().Add(writeDeadline)); err == nil {
				klog.V(8).Infof("Websocket Ping succeeded")
			} else {
				klog.Errorf("Websocket Ping failed: %v", err)
				if errors.Is(err, gwebsocket.ErrCloseSent) {
					// we continue because c.conn.CloseChan will manage closing the connection already
					continue
				} else if e, ok := err.(net.Error); ok && e.Timeout() {
					// Continue, in case this is a transient failure.
					// c.conn.CloseChan above will tell us when the connection is
					// actually closed.
					// If Temporary function hadn't been deprecated, we would have used it.
					// But most of temporary errors are timeout errors anyway.
					continue
				}
				return
			}
		}
	}
}
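Editor's note: the same ping/pong pattern in isolation, for readers unfamiliar with gorilla/websocket control frames. A minimal sketch, assuming a reachable echo endpoint (the dial URL is a placeholder, and the periods mirror the constants above):

// Editor's sketch: a minimal gorilla/websocket heartbeat loop.
package main

import (
	"log"
	"time"

	gwebsocket "github.com/gorilla/websocket"
)

func main() {
	conn, _, err := gwebsocket.DefaultDialer.Dial("ws://localhost:8080/echo", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	deadline := 16 * time.Second
	conn.SetReadDeadline(time.Now().Add(deadline))
	conn.SetPongHandler(func(string) error {
		// Each pong pushes the read deadline into the future.
		return conn.SetReadDeadline(time.Now().Add(deadline))
	})

	// Send a ping every 5 seconds; pongs are processed during reads.
	go func() {
		t := time.NewTicker(5 * time.Second)
		defer t.Stop()
		for range t.C {
			if err := conn.WriteControl(gwebsocket.PingMessage, nil, time.Now().Add(2*time.Second)); err != nil {
				return
			}
		}
	}()

	// Control frames (including pongs) are handled inside read calls.
	for {
		if _, _, err := conn.NextReader(); err != nil {
			log.Printf("read loop ended: %v", err)
			return
		}
	}
}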