mirror of https://github.com/ceph/ceph-csi.git
synced 2025-06-13 02:33:34 +00:00

build: move e2e dependencies into e2e/go.mod

Several packages are only used while running the e2e suite. These packages are less important to update, as they cannot influence the final executable that is part of the Ceph-CSI container image. By moving these dependencies out of the main Ceph-CSI go.mod, it is easier to identify whether a reported CVE affects Ceph-CSI or only the testing (like most of the Kubernetes CVEs).

Signed-off-by: Niels de Vos <ndevos@ibm.com>

committed by mergify[bot]
parent 15da101b1b
commit bec6090996
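
The mechanics behind the change: the e2e suite becomes its own Go module, so its dependencies are resolved from e2e/go.mod and never enter the dependency graph of the shipped executable. A minimal sketch of what such a nested module could look like; the module path, versions, and requirements below are illustrative assumptions, not the literal file added by this commit:

    // e2e/go.mod -- hypothetical sketch, not the file from this commit
    module github.com/ceph/ceph-csi/e2e

    go 1.22

    require (
        github.com/onsi/ginkgo/v2 v2.17.1 // test framework, e2e-only
        github.com/onsi/gomega v1.32.0 // assertion library, e2e-only
        k8s.io/client-go v0.30.0 // cluster access for the tests
    )

    // Use the Ceph-CSI sources from the parent directory instead of a release.
    replace github.com/ceph/ceph-csi => ../

With a split like this, `go list -m all` in the repository root no longer reports the test-only modules, so a CVE scanner looking at the main go.mod stops flagging dependencies that never reach the container image.
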
vendor/k8s.io/client-go/tools/events/OWNERS (generated, vendored): 10 lines removed
@@ -1,10 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-  - sig-instrumentation-approvers
-  - wojtek-t
-reviewers:
-  - sig-instrumentation-reviewers
-  - wojtek-t
-emeritus_approvers:
-  - yastij
vendor/k8s.io/client-go/tools/events/doc.go (generated, vendored): 19 lines removed
@@ -1,19 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package events has all client logic for recording and reporting
-// "k8s.io/api/events/v1".Event events.
-package events // import "k8s.io/client-go/tools/events"
vendor/k8s.io/client-go/tools/events/event_broadcaster.go (generated, vendored): 457 lines removed
@@ -1,457 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package events
-
-import (
-    "context"
-    "fmt"
-    "os"
-    "sync"
-    "time"
-
-    corev1 "k8s.io/api/core/v1"
-    eventsv1 "k8s.io/api/events/v1"
-    "k8s.io/apimachinery/pkg/api/errors"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/runtime"
-    "k8s.io/apimachinery/pkg/types"
-    "k8s.io/apimachinery/pkg/util/json"
-    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-    "k8s.io/apimachinery/pkg/util/strategicpatch"
-    "k8s.io/apimachinery/pkg/util/wait"
-    "k8s.io/apimachinery/pkg/watch"
-    clientset "k8s.io/client-go/kubernetes"
-    "k8s.io/client-go/kubernetes/scheme"
-    typedv1core "k8s.io/client-go/kubernetes/typed/core/v1"
-    typedeventsv1 "k8s.io/client-go/kubernetes/typed/events/v1"
-    restclient "k8s.io/client-go/rest"
-    "k8s.io/client-go/tools/record"
-    "k8s.io/client-go/tools/record/util"
-    "k8s.io/klog/v2"
-    "k8s.io/utils/clock"
-)
-
-const (
-    maxTriesPerEvent = 12
-    finishTime       = 6 * time.Minute
-    refreshTime      = 30 * time.Minute
-    maxQueuedEvents  = 1000
-)
-
-var defaultSleepDuration = 10 * time.Second
-
-// TODO: validate impact of copying and investigate hashing
-type eventKey struct {
-    eventType           string
-    action              string
-    reason              string
-    reportingController string
-    reportingInstance   string
-    regarding           corev1.ObjectReference
-    related             corev1.ObjectReference
-}
-
-type eventBroadcasterImpl struct {
-    *watch.Broadcaster
-    mu            sync.Mutex
-    eventCache    map[eventKey]*eventsv1.Event
-    sleepDuration time.Duration
-    sink          EventSink
-}
-
-// EventSinkImpl wraps EventsV1Interface to implement EventSink.
-// TODO: this makes it easier for testing purpose and masks the logic of performing API calls.
-// Note that rollbacking to raw clientset should also be transparent.
-type EventSinkImpl struct {
-    Interface typedeventsv1.EventsV1Interface
-}
-
-// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any.
-func (e *EventSinkImpl) Create(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) {
-    if event.Namespace == "" {
-        return nil, fmt.Errorf("can't create an event with empty namespace")
-    }
-    return e.Interface.Events(event.Namespace).Create(ctx, event, metav1.CreateOptions{})
-}
-
-// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any.
-func (e *EventSinkImpl) Update(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) {
-    if event.Namespace == "" {
-        return nil, fmt.Errorf("can't update an event with empty namespace")
-    }
-    return e.Interface.Events(event.Namespace).Update(ctx, event, metav1.UpdateOptions{})
-}
-
-// Patch applies the patch and returns the patched event, and an error, if there is any.
-func (e *EventSinkImpl) Patch(ctx context.Context, event *eventsv1.Event, data []byte) (*eventsv1.Event, error) {
-    if event.Namespace == "" {
-        return nil, fmt.Errorf("can't patch an event with empty namespace")
-    }
-    return e.Interface.Events(event.Namespace).Patch(ctx, event.Name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
-}
-
-// NewBroadcaster Creates a new event broadcaster.
-func NewBroadcaster(sink EventSink) EventBroadcaster {
-    return newBroadcaster(sink, defaultSleepDuration, map[eventKey]*eventsv1.Event{})
-}
-
-// NewBroadcasterForTest Creates a new event broadcaster for test purposes.
-func newBroadcaster(sink EventSink, sleepDuration time.Duration, eventCache map[eventKey]*eventsv1.Event) EventBroadcaster {
-    return &eventBroadcasterImpl{
-        Broadcaster:   watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull),
-        eventCache:    eventCache,
-        sleepDuration: sleepDuration,
-        sink:          sink,
-    }
-}
-
-func (e *eventBroadcasterImpl) Shutdown() {
-    e.Broadcaster.Shutdown()
-}
-
-// refreshExistingEventSeries refresh events TTL
-func (e *eventBroadcasterImpl) refreshExistingEventSeries(ctx context.Context) {
-    // TODO: Investigate whether lock contention won't be a problem
-    e.mu.Lock()
-    defer e.mu.Unlock()
-    for isomorphicKey, event := range e.eventCache {
-        if event.Series != nil {
-            if recordedEvent, retry := recordEvent(ctx, e.sink, event); !retry {
-                if recordedEvent != nil {
-                    e.eventCache[isomorphicKey] = recordedEvent
-                }
-            }
-        }
-    }
-}
-
-// finishSeries checks if a series has ended and either:
-// - write final count to the apiserver
-// - delete a singleton event (i.e. series field is nil) from the cache
-func (e *eventBroadcasterImpl) finishSeries(ctx context.Context) {
-    // TODO: Investigate whether lock contention won't be a problem
-    e.mu.Lock()
-    defer e.mu.Unlock()
-    for isomorphicKey, event := range e.eventCache {
-        eventSerie := event.Series
-        if eventSerie != nil {
-            if eventSerie.LastObservedTime.Time.Before(time.Now().Add(-finishTime)) {
-                if _, retry := recordEvent(ctx, e.sink, event); !retry {
-                    delete(e.eventCache, isomorphicKey)
-                }
-            }
-        } else if event.EventTime.Time.Before(time.Now().Add(-finishTime)) {
-            delete(e.eventCache, isomorphicKey)
-        }
-    }
-}
-
-// NewRecorder returns an EventRecorder that records events with the given event source.
-func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorderLogger {
-    hostname, _ := os.Hostname()
-    reportingInstance := reportingController + "-" + hostname
-    return &recorderImplLogger{recorderImpl: &recorderImpl{scheme, reportingController, reportingInstance, e.Broadcaster, clock.RealClock{}}, logger: klog.Background()}
-}
-
-func (e *eventBroadcasterImpl) recordToSink(ctx context.Context, event *eventsv1.Event, clock clock.Clock) {
-    // Make a copy before modification, because there could be multiple listeners.
-    eventCopy := event.DeepCopy()
-    go func() {
-        evToRecord := func() *eventsv1.Event {
-            e.mu.Lock()
-            defer e.mu.Unlock()
-            eventKey := getKey(eventCopy)
-            isomorphicEvent, isIsomorphic := e.eventCache[eventKey]
-            if isIsomorphic {
-                if isomorphicEvent.Series != nil {
-                    isomorphicEvent.Series.Count++
-                    isomorphicEvent.Series.LastObservedTime = metav1.MicroTime{Time: clock.Now()}
-                    return nil
-                }
-                isomorphicEvent.Series = &eventsv1.EventSeries{
-                    Count:            2,
-                    LastObservedTime: metav1.MicroTime{Time: clock.Now()},
-                }
-                // Make a copy of the Event to make sure that recording it
-                // doesn't mess with the object stored in cache.
-                return isomorphicEvent.DeepCopy()
-            }
-            e.eventCache[eventKey] = eventCopy
-            // Make a copy of the Event to make sure that recording it doesn't
-            // mess with the object stored in cache.
-            return eventCopy.DeepCopy()
-        }()
-        if evToRecord != nil {
-            // TODO: Add a metric counting the number of recording attempts
-            e.attemptRecording(ctx, evToRecord)
-            // We don't want the new recorded Event to be reflected in the
-            // client's cache because server-side mutations could mess with the
-            // aggregation mechanism used by the client.
-        }
-    }()
-}
-
-func (e *eventBroadcasterImpl) attemptRecording(ctx context.Context, event *eventsv1.Event) {
-    tries := 0
-    for {
-        if _, retry := recordEvent(ctx, e.sink, event); !retry {
-            return
-        }
-        tries++
-        if tries >= maxTriesPerEvent {
-            klog.FromContext(ctx).Error(nil, "Unable to write event (retry limit exceeded!)", "event", event)
-            return
-        }
-        // Randomize sleep so that various clients won't all be
-        // synced up if the master goes down. Give up when
-        // the context is canceled.
-        select {
-        case <-ctx.Done():
-            return
-        case <-time.After(wait.Jitter(e.sleepDuration, 0.25)):
-        }
-    }
-}
-
-func recordEvent(ctx context.Context, sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool) {
-    var newEvent *eventsv1.Event
-    var err error
-    isEventSeries := event.Series != nil
-    if isEventSeries {
-        patch, patchBytesErr := createPatchBytesForSeries(event)
-        if patchBytesErr != nil {
-            klog.FromContext(ctx).Error(patchBytesErr, "Unable to calculate diff, no merge is possible")
-            return nil, false
-        }
-        newEvent, err = sink.Patch(ctx, event, patch)
-    }
-    // Update can fail because the event may have been removed and it no longer exists.
-    if !isEventSeries || (isEventSeries && util.IsKeyNotFoundError(err)) {
-        // Making sure that ResourceVersion is empty on creation
-        event.ResourceVersion = ""
-        newEvent, err = sink.Create(ctx, event)
-    }
-    if err == nil {
-        return newEvent, false
-    }
-    // If we can't contact the server, then hold everything while we keep trying.
-    // Otherwise, something about the event is malformed and we should abandon it.
-    switch err.(type) {
-    case *restclient.RequestConstructionError:
-        // We will construct the request the same next time, so don't keep trying.
-        klog.FromContext(ctx).Error(err, "Unable to construct event (will not retry!)", "event", event)
-        return nil, false
-    case *errors.StatusError:
-        if errors.IsAlreadyExists(err) {
-            // If we tried to create an Event from an EventSerie, it means that
-            // the original Patch request failed because the Event we were
-            // trying to patch didn't exist. If the creation failed because the
-            // Event now exists, it is safe to retry. This occurs when a new
-            // Event is emitted twice in a very short period of time.
-            if isEventSeries {
-                return nil, true
-            }
-            klog.FromContext(ctx).V(5).Info("Server rejected event (will not retry!)", "event", event, "err", err)
-        } else {
-            klog.FromContext(ctx).Error(err, "Server rejected event (will not retry!)", "event", event)
-        }
-        return nil, false
-    case *errors.UnexpectedObjectError:
-        // We don't expect this; it implies the server's response didn't match a
-        // known pattern. Go ahead and retry.
-    default:
-        // This case includes actual http transport errors. Go ahead and retry.
-    }
-    klog.FromContext(ctx).Error(err, "Unable to write event (may retry after sleeping)")
-    return nil, true
-}
-
-func createPatchBytesForSeries(event *eventsv1.Event) ([]byte, error) {
-    oldEvent := event.DeepCopy()
-    oldEvent.Series = nil
-    oldData, err := json.Marshal(oldEvent)
-    if err != nil {
-        return nil, err
-    }
-    newData, err := json.Marshal(event)
-    if err != nil {
-        return nil, err
-    }
-    return strategicpatch.CreateTwoWayMergePatch(oldData, newData, eventsv1.Event{})
-}
-
-func getKey(event *eventsv1.Event) eventKey {
-    key := eventKey{
-        eventType:           event.Type,
-        action:              event.Action,
-        reason:              event.Reason,
-        reportingController: event.ReportingController,
-        reportingInstance:   event.ReportingInstance,
-        regarding:           event.Regarding,
-    }
-    if event.Related != nil {
-        key.related = *event.Related
-    }
-    return key
-}
-
-// StartStructuredLogging starts sending events received from this EventBroadcaster to the structured logging function.
-// The return value can be ignored or used to stop recording, if desired.
-// TODO: this function should also return an error.
-//
-// Deprecated: use StartLogging instead.
-func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) func() {
-    logger := klog.Background().V(int(verbosity))
-    stopWatcher, err := e.StartLogging(logger)
-    if err != nil {
-        logger.Error(err, "Failed to start event watcher")
-        return func() {}
-    }
-    return stopWatcher
-}
-
-// StartLogging starts sending events received from this EventBroadcaster to the structured logger.
-// To adjust verbosity, use the logger's V method (i.e. pass `logger.V(3)` instead of `logger`).
-// The returned function can be ignored or used to stop recording, if desired.
-func (e *eventBroadcasterImpl) StartLogging(logger klog.Logger) (func(), error) {
-    return e.StartEventWatcher(
-        func(obj runtime.Object) {
-            event, ok := obj.(*eventsv1.Event)
-            if !ok {
-                logger.Error(nil, "unexpected type, expected eventsv1.Event")
-                return
-            }
-            logger.Info("Event occurred", "object", klog.KRef(event.Regarding.Namespace, event.Regarding.Name), "kind", event.Regarding.Kind, "apiVersion", event.Regarding.APIVersion, "type", event.Type, "reason", event.Reason, "action", event.Action, "note", event.Note)
-        })
-}
-
-// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function.
-// The return value is used to stop recording
-func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(event runtime.Object)) (func(), error) {
-    watcher, err := e.Watch()
-    if err != nil {
-        return nil, err
-    }
-    go func() {
-        defer utilruntime.HandleCrash()
-        for {
-            watchEvent, ok := <-watcher.ResultChan()
-            if !ok {
-                return
-            }
-            eventHandler(watchEvent.Object)
-        }
-    }()
-    return watcher.Stop, nil
-}
-
-func (e *eventBroadcasterImpl) startRecordingEvents(ctx context.Context) error {
-    eventHandler := func(obj runtime.Object) {
-        event, ok := obj.(*eventsv1.Event)
-        if !ok {
-            klog.FromContext(ctx).Error(nil, "unexpected type, expected eventsv1.Event")
-            return
-        }
-        e.recordToSink(ctx, event, clock.RealClock{})
-    }
-    stopWatcher, err := e.StartEventWatcher(eventHandler)
-    if err != nil {
-        return err
-    }
-    go func() {
-        <-ctx.Done()
-        stopWatcher()
-    }()
-    return nil
-}
-
-// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink.
-// Deprecated: use StartRecordingToSinkWithContext instead.
-func (e *eventBroadcasterImpl) StartRecordingToSink(stopCh <-chan struct{}) {
-    err := e.StartRecordingToSinkWithContext(wait.ContextForChannel(stopCh))
-    if err != nil {
-        klog.Background().Error(err, "Failed to start recording to sink")
-    }
-}
-
-// StartRecordingToSinkWithContext starts sending events received from the specified eventBroadcaster to the given sink.
-func (e *eventBroadcasterImpl) StartRecordingToSinkWithContext(ctx context.Context) error {
-    go wait.UntilWithContext(ctx, e.refreshExistingEventSeries, refreshTime)
-    go wait.UntilWithContext(ctx, e.finishSeries, finishTime)
-    return e.startRecordingEvents(ctx)
-}
-
-type eventBroadcasterAdapterImpl struct {
-    coreClient          typedv1core.EventsGetter
-    coreBroadcaster     record.EventBroadcaster
-    eventsv1Client      typedeventsv1.EventsV1Interface
-    eventsv1Broadcaster EventBroadcaster
-}
-
-// NewEventBroadcasterAdapter creates a wrapper around new and legacy broadcasters to simplify
-// migration of individual components to the new Event API.
-//
-//logcheck:context // NewEventBroadcasterAdapterWithContext should be used instead because record.NewBroadcaster is called and works better when a context is supplied (contextual logging, cancellation).
-func NewEventBroadcasterAdapter(client clientset.Interface) EventBroadcasterAdapter {
-    return NewEventBroadcasterAdapterWithContext(context.Background(), client)
-}
-
-// NewEventBroadcasterAdapterWithContext creates a wrapper around new and legacy broadcasters to simplify
-// migration of individual components to the new Event API.
-func NewEventBroadcasterAdapterWithContext(ctx context.Context, client clientset.Interface) EventBroadcasterAdapter {
-    eventClient := &eventBroadcasterAdapterImpl{}
-    if _, err := client.Discovery().ServerResourcesForGroupVersion(eventsv1.SchemeGroupVersion.String()); err == nil {
-        eventClient.eventsv1Client = client.EventsV1()
-        eventClient.eventsv1Broadcaster = NewBroadcaster(&EventSinkImpl{Interface: eventClient.eventsv1Client})
-    }
-    // Even though there can soon exist cases when coreBroadcaster won't really be needed,
-    // we create it unconditionally because its overhead is minor and will simplify using usage
-    // patterns of this library in all components.
-    eventClient.coreClient = client.CoreV1()
-    eventClient.coreBroadcaster = record.NewBroadcaster(record.WithContext(ctx))
-    return eventClient
-}
-
-// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink.
-func (e *eventBroadcasterAdapterImpl) StartRecordingToSink(stopCh <-chan struct{}) {
-    if e.eventsv1Broadcaster != nil && e.eventsv1Client != nil {
-        e.eventsv1Broadcaster.StartRecordingToSink(stopCh)
-    }
-    if e.coreBroadcaster != nil && e.coreClient != nil {
-        e.coreBroadcaster.StartRecordingToSink(&typedv1core.EventSinkImpl{Interface: e.coreClient.Events("")})
-    }
-}
-
-func (e *eventBroadcasterAdapterImpl) NewRecorder(name string) EventRecorderLogger {
-    if e.eventsv1Broadcaster != nil && e.eventsv1Client != nil {
-        return e.eventsv1Broadcaster.NewRecorder(scheme.Scheme, name)
-    }
-    return record.NewEventRecorderAdapter(e.DeprecatedNewLegacyRecorder(name))
-}
-
-func (e *eventBroadcasterAdapterImpl) DeprecatedNewLegacyRecorder(name string) record.EventRecorderLogger {
-    return e.coreBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: name})
-}
-
-func (e *eventBroadcasterAdapterImpl) Shutdown() {
-    if e.coreBroadcaster != nil {
-        e.coreBroadcaster.Shutdown()
-    }
-    if e.eventsv1Broadcaster != nil {
-        e.eventsv1Broadcaster.Shutdown()
-    }
-}
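
For orientation, this is how a consumer wires the pieces of the deleted file together; a minimal hypothetical sketch, assuming a ready kubernetes.Interface, not code from this repository:

    // Hypothetical consumer of the vendored events API removed above.
    package main

    import (
        "context"

        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/kubernetes/scheme"
        "k8s.io/client-go/tools/events"
    )

    func startRecorder(ctx context.Context, client kubernetes.Interface) (events.EventRecorderLogger, error) {
        // Sink that writes aggregated events/v1 Events to the API server.
        sink := &events.EventSinkImpl{Interface: client.EventsV1()}
        broadcaster := events.NewBroadcaster(sink)
        // Starts the watcher plus the refresh/finish loops shown above.
        if err := broadcaster.StartRecordingToSinkWithContext(ctx); err != nil {
            return nil, err
        }
        return broadcaster.NewRecorder(scheme.Scheme, "my-controller"), nil
    }
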
vendor/k8s.io/client-go/tools/events/event_recorder.go (generated, vendored): 113 lines removed
@@ -1,113 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package events
-
-import (
-    "fmt"
-    "time"
-
-    v1 "k8s.io/api/core/v1"
-    eventsv1 "k8s.io/api/events/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/runtime"
-    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-    "k8s.io/apimachinery/pkg/watch"
-    "k8s.io/client-go/tools/record/util"
-    "k8s.io/client-go/tools/reference"
-    "k8s.io/klog/v2"
-    "k8s.io/utils/clock"
-)
-
-type recorderImpl struct {
-    scheme              *runtime.Scheme
-    reportingController string
-    reportingInstance   string
-    *watch.Broadcaster
-    clock clock.Clock
-}
-
-var _ EventRecorder = &recorderImpl{}
-
-func (recorder *recorderImpl) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) {
-    recorder.eventf(klog.Background(), regarding, related, eventtype, reason, action, note, args...)
-}
-
-type recorderImplLogger struct {
-    *recorderImpl
-    logger klog.Logger
-}
-
-var _ EventRecorderLogger = &recorderImplLogger{}
-
-func (recorder *recorderImplLogger) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) {
-    recorder.eventf(recorder.logger, regarding, related, eventtype, reason, action, note, args...)
-}
-
-func (recorder *recorderImplLogger) WithLogger(logger klog.Logger) EventRecorderLogger {
-    return &recorderImplLogger{recorderImpl: recorder.recorderImpl, logger: logger}
-}
-
-func (recorder *recorderImpl) eventf(logger klog.Logger, regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) {
-    timestamp := metav1.MicroTime{Time: time.Now()}
-    message := fmt.Sprintf(note, args...)
-    refRegarding, err := reference.GetReference(recorder.scheme, regarding)
-    if err != nil {
-        logger.Error(err, "Could not construct reference, will not report event", "object", regarding, "eventType", eventtype, "reason", reason, "message", message)
-        return
-    }
-
-    var refRelated *v1.ObjectReference
-    if related != nil {
-        refRelated, err = reference.GetReference(recorder.scheme, related)
-        if err != nil {
-            logger.V(9).Info("Could not construct reference", "object", related, "err", err)
-        }
-    }
-    if !util.ValidateEventType(eventtype) {
-        logger.Error(nil, "Unsupported event type", "eventType", eventtype)
-        return
-    }
-    event := recorder.makeEvent(refRegarding, refRelated, timestamp, eventtype, reason, message, recorder.reportingController, recorder.reportingInstance, action)
-    go func() {
-        defer utilruntime.HandleCrash()
-        recorder.Action(watch.Added, event)
-    }()
-}
-
-func (recorder *recorderImpl) makeEvent(refRegarding *v1.ObjectReference, refRelated *v1.ObjectReference, timestamp metav1.MicroTime, eventtype, reason, message string, reportingController string, reportingInstance string, action string) *eventsv1.Event {
-    t := metav1.Time{Time: recorder.clock.Now()}
-    namespace := refRegarding.Namespace
-    if namespace == "" {
-        namespace = metav1.NamespaceDefault
-    }
-    return &eventsv1.Event{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:      fmt.Sprintf("%v.%x", refRegarding.Name, t.UnixNano()),
-            Namespace: namespace,
-        },
-        EventTime:           timestamp,
-        Series:              nil,
-        ReportingController: reportingController,
-        ReportingInstance:   reportingInstance,
-        Action:              action,
-        Reason:              reason,
-        Regarding:           *refRegarding,
-        Related:             refRelated,
-        Note:                message,
-        Type:                eventtype,
-    }
-}
vendor/k8s.io/client-go/tools/events/fake.go (generated, vendored): 52 lines removed
@@ -1,52 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package events
-
-import (
-    "fmt"
-
-    "k8s.io/apimachinery/pkg/runtime"
-    "k8s.io/klog/v2"
-)
-
-// FakeRecorder is used as a fake during tests. It is thread safe. It is usable
-// when created manually and not by NewFakeRecorder, however all events may be
-// thrown away in this case.
-type FakeRecorder struct {
-    Events chan string
-}
-
-var _ EventRecorderLogger = &FakeRecorder{}
-
-// Eventf emits an event
-func (f *FakeRecorder) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) {
-    if f.Events != nil {
-        f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+note, args...)
-    }
-}
-
-func (f *FakeRecorder) WithLogger(logger klog.Logger) EventRecorderLogger {
-    return f
-}
-
-// NewFakeRecorder creates new fake event recorder with event channel with
-// buffer of given size.
-func NewFakeRecorder(bufferSize int) *FakeRecorder {
-    return &FakeRecorder{
-        Events: make(chan string, bufferSize),
-    }
-}
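
The fake above stands in for a real recorder in unit tests; a short hypothetical sketch of typical use (the Pod object and test name are illustrative):

    package events_test

    import (
        "testing"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/client-go/tools/events"
    )

    func TestEmitsEvent(t *testing.T) {
        rec := events.NewFakeRecorder(10) // buffered channel of rendered events
        pod := &corev1.Pod{}              // object the event is about
        rec.Eventf(pod, nil, "Normal", "Tested", "Testing", "hello %s", "world")
        // Eventf renders "<type> <reason> <formatted note>"; action is dropped.
        if got := <-rec.Events; got != "Normal Tested hello world" {
            t.Fatalf("unexpected event: %q", got)
        }
    }
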
vendor/k8s.io/client-go/tools/events/helper.go (generated, vendored): 64 lines removed
@@ -1,64 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package events
-
-import (
-    "fmt"
-
-    corev1 "k8s.io/api/core/v1"
-    eventsv1 "k8s.io/api/events/v1"
-    eventsv1beta1 "k8s.io/api/events/v1beta1"
-    "k8s.io/apimachinery/pkg/fields"
-    "k8s.io/apimachinery/pkg/runtime/schema"
-    "k8s.io/apimachinery/pkg/types"
-)
-
-var mapping = map[schema.GroupVersion]string{
-    eventsv1.SchemeGroupVersion:      "regarding",
-    eventsv1beta1.SchemeGroupVersion: "regarding",
-    corev1.SchemeGroupVersion:        "involvedObject",
-}
-
-// GetFieldSelector returns the appropriate field selector based on the API version being used to communicate with the server.
-// The returned field selector can be used with List and Watch to filter desired events.
-func GetFieldSelector(eventsGroupVersion schema.GroupVersion, regardingGroupVersionKind schema.GroupVersionKind, regardingName string, regardingUID types.UID) (fields.Selector, error) {
-    field := fields.Set{}
-
-    if _, ok := mapping[eventsGroupVersion]; !ok {
-        return nil, fmt.Errorf("unknown version %v", eventsGroupVersion)
-    }
-    prefix := mapping[eventsGroupVersion]
-
-    if len(regardingName) > 0 {
-        field[prefix+".name"] = regardingName
-    }
-
-    if len(regardingGroupVersionKind.Kind) > 0 {
-        field[prefix+".kind"] = regardingGroupVersionKind.Kind
-    }
-
-    regardingGroupVersion := regardingGroupVersionKind.GroupVersion()
-    if !regardingGroupVersion.Empty() {
-        field[prefix+".apiVersion"] = regardingGroupVersion.String()
-    }
-
-    if len(regardingUID) > 0 {
-        field[prefix+".uid"] = string(regardingUID)
-    }
-
-    return field.AsSelector(), nil
-}
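
A short, hypothetical example of how the helper above is typically called, here to list only the events/v1 Events regarding one Pod; the Pod name, namespace, and function are illustrative assumptions:

    package main

    import (
        "context"

        eventsv1 "k8s.io/api/events/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime/schema"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/events"
    )

    func podEvents(ctx context.Context, client kubernetes.Interface) (*eventsv1.EventList, error) {
        // Build a "regarding.*" selector for Pod "web-0" (events/v1 naming).
        sel, err := events.GetFieldSelector(
            eventsv1.SchemeGroupVersion,
            schema.GroupVersionKind{Version: "v1", Kind: "Pod"},
            "web-0",
            "", // no UID filter
        )
        if err != nil {
            return nil, err
        }
        return client.EventsV1().Events("default").List(ctx,
            metav1.ListOptions{FieldSelector: sel.String()})
    }
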
vendor/k8s.io/client-go/tools/events/interfaces.go (generated, vendored): 92 lines removed
@@ -1,92 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package events
-
-import (
-    "context"
-
-    eventsv1 "k8s.io/api/events/v1"
-    "k8s.io/apimachinery/pkg/runtime"
-    internalevents "k8s.io/client-go/tools/internal/events"
-    "k8s.io/client-go/tools/record"
-    "k8s.io/klog/v2"
-)
-
-type EventRecorder = internalevents.EventRecorder
-type EventRecorderLogger = internalevents.EventRecorderLogger
-
-// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log.
-type EventBroadcaster interface {
-    // StartRecordingToSink starts sending events received from the specified eventBroadcaster.
-    // Deprecated: use StartRecordingToSinkWithContext instead.
-    StartRecordingToSink(stopCh <-chan struct{})
-
-    // StartRecordingToSink starts sending events received from the specified eventBroadcaster.
-    StartRecordingToSinkWithContext(ctx context.Context) error
-
-    // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster
-    // with the event source set to the given event source.
-    NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorderLogger
-
-    // StartEventWatcher enables you to watch for emitted events without usage
-    // of StartRecordingToSink. This lets you also process events in a custom way (e.g. in tests).
-    // NOTE: events received on your eventHandler should be copied before being used.
-    // TODO: figure out if this can be removed.
-    StartEventWatcher(eventHandler func(event runtime.Object)) (func(), error)
-
-    // StartStructuredLogging starts sending events received from this EventBroadcaster to the structured
-    // logging function. The return value can be ignored or used to stop recording, if desired.
-    // Deprecated: use StartLogging instead.
-    StartStructuredLogging(verbosity klog.Level) func()
-
-    // StartLogging starts sending events received from this EventBroadcaster to the structured logger.
-    // To adjust verbosity, use the logger's V method (i.e. pass `logger.V(3)` instead of `logger`).
-    // The returned function can be ignored or used to stop recording, if desired.
-    StartLogging(logger klog.Logger) (func(), error)
-
-    // Shutdown shuts down the broadcaster
-    Shutdown()
-}
-
-// EventSink knows how to store events (client-go implements it.)
-// EventSink must respect the namespace that will be embedded in 'event'.
-// It is assumed that EventSink will return the same sorts of errors as
-// client-go's REST client.
-type EventSink interface {
-    Create(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error)
-    Update(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error)
-    Patch(ctx context.Context, oldEvent *eventsv1.Event, data []byte) (*eventsv1.Event, error)
-}
-
-// EventBroadcasterAdapter is a auxiliary interface to simplify migration to
-// the new events API. It is a wrapper around new and legacy broadcasters
-// that smartly chooses which one to use.
-//
-// Deprecated: This interface will be removed once migration is completed.
-type EventBroadcasterAdapter interface {
-    // StartRecordingToSink starts sending events received from the specified eventBroadcaster.
-    StartRecordingToSink(stopCh <-chan struct{})
-
-    // NewRecorder creates a new Event Recorder with specified name.
-    NewRecorder(name string) EventRecorderLogger
-
-    // DeprecatedNewLegacyRecorder creates a legacy Event Recorder with specific name.
-    DeprecatedNewLegacyRecorder(name string) record.EventRecorderLogger
-
-    // Shutdown shuts down the broadcaster.
-    Shutdown()
-}
vendor/k8s.io/client-go/tools/portforward/OWNERS (generated, vendored): 10 lines removed
@@ -1,10 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-  - aojea
-  - liggitt
-  - seans3
-reviewers:
-  - aojea
-  - liggitt
-  - seans3
vendor/k8s.io/client-go/tools/portforward/doc.go (generated, vendored): 19 lines removed
@@ -1,19 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package portforward adds support for SSH-like port forwarding from the client's
-// local host to remote containers.
-package portforward // import "k8s.io/client-go/tools/portforward"
vendor/k8s.io/client-go/tools/portforward/fallback_dialer.go (generated, vendored): 57 lines removed
@@ -1,57 +0,0 @@
-/*
-Copyright 2024 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package portforward
-
-import (
-    "k8s.io/apimachinery/pkg/util/httpstream"
-    "k8s.io/klog/v2"
-)
-
-var _ httpstream.Dialer = &FallbackDialer{}
-
-// FallbackDialer encapsulates a primary and secondary dialer, including
-// the boolean function to determine if the primary dialer failed. Implements
-// the httpstream.Dialer interface.
-type FallbackDialer struct {
-    primary        httpstream.Dialer
-    secondary      httpstream.Dialer
-    shouldFallback func(error) bool
-}
-
-// NewFallbackDialer creates the FallbackDialer with the primary and secondary dialers,
-// as well as the boolean function to determine if the primary dialer failed.
-func NewFallbackDialer(primary, secondary httpstream.Dialer, shouldFallback func(error) bool) httpstream.Dialer {
-    return &FallbackDialer{
-        primary:        primary,
-        secondary:      secondary,
-        shouldFallback: shouldFallback,
-    }
-}
-
-// Dial is the single function necessary to implement the "httpstream.Dialer" interface.
-// It takes the protocol version strings to request, returning an the upgraded
-// httstream.Connection and the negotiated protocol version accepted. If the initial
-// primary dialer fails, this function attempts the secondary dialer. Returns an error
-// if one occurs.
-func (f *FallbackDialer) Dial(protocols ...string) (httpstream.Connection, string, error) {
-    conn, version, err := f.primary.Dial(protocols...)
-    if err != nil && f.shouldFallback(err) {
-        klog.V(4).Infof("fallback to secondary dialer from primary dialer err: %v", err)
-        return f.secondary.Dial(protocols...)
-    }
-    return conn, version, err
-}
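
The fallback pattern above is generic over httpstream.Dialer, so composing it is one call. A hypothetical sketch: both dialers are assumed to be built elsewhere, and httpstream.IsUpgradeFailure is assumed as the fallback predicate (as commonly used for the websocket-to-SPDY fallback):

    import (
        "k8s.io/apimachinery/pkg/util/httpstream"
        "k8s.io/client-go/tools/portforward"
    )

    // Prefer the websocket dialer; fall back to SPDY when the upgrade fails.
    func dialWithFallback(wsDialer, spdyDialer httpstream.Dialer) (httpstream.Connection, string, error) {
        dialer := portforward.NewFallbackDialer(wsDialer, spdyDialer, httpstream.IsUpgradeFailure)
        return dialer.Dial(portforward.PortForwardProtocolV1Name)
    }
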
vendor/k8s.io/client-go/tools/portforward/portforward.go (generated, vendored): 454 lines removed
@@ -1,454 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package portforward
-
-import (
-    "errors"
-    "fmt"
-    "io"
-    "net"
-    "net/http"
-    "sort"
-    "strconv"
-    "strings"
-    "sync"
-
-    v1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/util/httpstream"
-    "k8s.io/apimachinery/pkg/util/runtime"
-    netutils "k8s.io/utils/net"
-)
-
-// PortForwardProtocolV1Name is the subprotocol used for port forwarding.
-// TODO move to API machinery and re-unify with kubelet/server/portfoward
-const PortForwardProtocolV1Name = "portforward.k8s.io"
-
-var (
-    // error returned whenever we lost connection to a pod
-    ErrLostConnectionToPod = errors.New("lost connection to pod")
-
-    // set of error we're expecting during port-forwarding
-    networkClosedError = "use of closed network connection"
-)
-
-// PortForwarder knows how to listen for local connections and forward them to
-// a remote pod via an upgraded HTTP request.
-type PortForwarder struct {
-    addresses []listenAddress
-    ports     []ForwardedPort
-    stopChan  <-chan struct{}
-
-    dialer        httpstream.Dialer
-    streamConn    httpstream.Connection
-    listeners     []io.Closer
-    Ready         chan struct{}
-    requestIDLock sync.Mutex
-    requestID     int
-    out           io.Writer
-    errOut        io.Writer
-}
-
-// ForwardedPort contains a Local:Remote port pairing.
-type ForwardedPort struct {
-    Local  uint16
-    Remote uint16
-}
-
-/*
-valid port specifications:
-
-5000
-- forwards from localhost:5000 to pod:5000
-
-8888:5000
-- forwards from localhost:8888 to pod:5000
-
-0:5000
-:5000
-  - selects a random available local port,
-    forwards from localhost:<random port> to pod:5000
-*/
-func parsePorts(ports []string) ([]ForwardedPort, error) {
-    var forwards []ForwardedPort
-    for _, portString := range ports {
-        parts := strings.Split(portString, ":")
-        var localString, remoteString string
-        if len(parts) == 1 {
-            localString = parts[0]
-            remoteString = parts[0]
-        } else if len(parts) == 2 {
-            localString = parts[0]
-            if localString == "" {
-                // support :5000
-                localString = "0"
-            }
-            remoteString = parts[1]
-        } else {
-            return nil, fmt.Errorf("invalid port format '%s'", portString)
-        }
-
-        localPort, err := strconv.ParseUint(localString, 10, 16)
-        if err != nil {
-            return nil, fmt.Errorf("error parsing local port '%s': %s", localString, err)
-        }
-
-        remotePort, err := strconv.ParseUint(remoteString, 10, 16)
-        if err != nil {
-            return nil, fmt.Errorf("error parsing remote port '%s': %s", remoteString, err)
-        }
-        if remotePort == 0 {
-            return nil, fmt.Errorf("remote port must be > 0")
-        }
-
-        forwards = append(forwards, ForwardedPort{uint16(localPort), uint16(remotePort)})
-    }
-
-    return forwards, nil
-}
-
-type listenAddress struct {
-    address     string
-    protocol    string
-    failureMode string
-}
-
-func parseAddresses(addressesToParse []string) ([]listenAddress, error) {
-    var addresses []listenAddress
-    parsed := make(map[string]listenAddress)
-    for _, address := range addressesToParse {
-        if address == "localhost" {
-            if _, exists := parsed["127.0.0.1"]; !exists {
-                ip := listenAddress{address: "127.0.0.1", protocol: "tcp4", failureMode: "all"}
-                parsed[ip.address] = ip
-            }
-            if _, exists := parsed["::1"]; !exists {
-                ip := listenAddress{address: "::1", protocol: "tcp6", failureMode: "all"}
-                parsed[ip.address] = ip
-            }
-        } else if netutils.ParseIPSloppy(address).To4() != nil {
-            parsed[address] = listenAddress{address: address, protocol: "tcp4", failureMode: "any"}
-        } else if netutils.ParseIPSloppy(address) != nil {
-            parsed[address] = listenAddress{address: address, protocol: "tcp6", failureMode: "any"}
-        } else {
-            return nil, fmt.Errorf("%s is not a valid IP", address)
-        }
-    }
-    addresses = make([]listenAddress, len(parsed))
-    id := 0
-    for _, v := range parsed {
-        addresses[id] = v
-        id++
-    }
-    // Sort addresses before returning to get a stable order
-    sort.Slice(addresses, func(i, j int) bool { return addresses[i].address < addresses[j].address })
-
-    return addresses, nil
-}
-
-// New creates a new PortForwarder with localhost listen addresses.
-func New(dialer httpstream.Dialer, ports []string, stopChan <-chan struct{}, readyChan chan struct{}, out, errOut io.Writer) (*PortForwarder, error) {
-    return NewOnAddresses(dialer, []string{"localhost"}, ports, stopChan, readyChan, out, errOut)
-}
-
-// NewOnAddresses creates a new PortForwarder with custom listen addresses.
-func NewOnAddresses(dialer httpstream.Dialer, addresses []string, ports []string, stopChan <-chan struct{}, readyChan chan struct{}, out, errOut io.Writer) (*PortForwarder, error) {
-    if len(addresses) == 0 {
-        return nil, errors.New("you must specify at least 1 address")
-    }
-    parsedAddresses, err := parseAddresses(addresses)
-    if err != nil {
-        return nil, err
-    }
-    if len(ports) == 0 {
-        return nil, errors.New("you must specify at least 1 port")
-    }
-    parsedPorts, err := parsePorts(ports)
-    if err != nil {
-        return nil, err
-    }
-    return &PortForwarder{
-        dialer:    dialer,
-        addresses: parsedAddresses,
-        ports:     parsedPorts,
-        stopChan:  stopChan,
-        Ready:     readyChan,
-        out:       out,
-        errOut:    errOut,
-    }, nil
-}
-
-// ForwardPorts formats and executes a port forwarding request. The connection will remain
-// open until stopChan is closed.
-func (pf *PortForwarder) ForwardPorts() error {
-    defer pf.Close()
-
-    var err error
-    var protocol string
-    pf.streamConn, protocol, err = pf.dialer.Dial(PortForwardProtocolV1Name)
-    if err != nil {
-        return fmt.Errorf("error upgrading connection: %s", err)
-    }
-    defer pf.streamConn.Close()
-    if protocol != PortForwardProtocolV1Name {
-        return fmt.Errorf("unable to negotiate protocol: client supports %q, server returned %q", PortForwardProtocolV1Name, protocol)
-    }
-
-    return pf.forward()
-}
-
-// forward dials the remote host specific in req, upgrades the request, starts
-// listeners for each port specified in ports, and forwards local connections
-// to the remote host via streams.
-func (pf *PortForwarder) forward() error {
-    var err error
-
-    listenSuccess := false
-    for i := range pf.ports {
-        port := &pf.ports[i]
-        err = pf.listenOnPort(port)
-        switch {
-        case err == nil:
-            listenSuccess = true
-        default:
-            if pf.errOut != nil {
-                fmt.Fprintf(pf.errOut, "Unable to listen on port %d: %v\n", port.Local, err)
-            }
-        }
-    }
-
-    if !listenSuccess {
-        return fmt.Errorf("unable to listen on any of the requested ports: %v", pf.ports)
-    }
-
-    if pf.Ready != nil {
-        close(pf.Ready)
-    }
-
-    // wait for interrupt or conn closure
-    select {
-    case <-pf.stopChan:
-    case <-pf.streamConn.CloseChan():
-        return ErrLostConnectionToPod
-    }
-
-    return nil
-}
-
-// listenOnPort delegates listener creation and waits for connections on requested bind addresses.
-// An error is raised based on address groups (default and localhost) and their failure modes
-func (pf *PortForwarder) listenOnPort(port *ForwardedPort) error {
-    var errors []error
-    failCounters := make(map[string]int, 2)
-    successCounters := make(map[string]int, 2)
-    for _, addr := range pf.addresses {
-        err := pf.listenOnPortAndAddress(port, addr.protocol, addr.address)
-        if err != nil {
-            errors = append(errors, err)
-            failCounters[addr.failureMode]++
-        } else {
-            successCounters[addr.failureMode]++
-        }
-    }
-    if successCounters["all"] == 0 && failCounters["all"] > 0 {
-        return fmt.Errorf("%s: %v", "Listeners failed to create with the following errors", errors)
-    }
-    if failCounters["any"] > 0 {
-        return fmt.Errorf("%s: %v", "Listeners failed to create with the following errors", errors)
-    }
-    return nil
-}
-
-// listenOnPortAndAddress delegates listener creation and waits for new connections
-// in the background f
-func (pf *PortForwarder) listenOnPortAndAddress(port *ForwardedPort, protocol string, address string) error {
-    listener, err := pf.getListener(protocol, address, port)
-    if err != nil {
-        return err
-    }
-    pf.listeners = append(pf.listeners, listener)
-    go pf.waitForConnection(listener, *port)
-    return nil
-}
-
-// getListener creates a listener on the interface targeted by the given hostname on the given port with
-// the given protocol. protocol is in net.Listen style which basically admits values like tcp, tcp4, tcp6
-func (pf *PortForwarder) getListener(protocol string, hostname string, port *ForwardedPort) (net.Listener, error) {
-    listener, err := net.Listen(protocol, net.JoinHostPort(hostname, strconv.Itoa(int(port.Local))))
-    if err != nil {
-        return nil, fmt.Errorf("unable to create listener: Error %s", err)
-    }
-    listenerAddress := listener.Addr().String()
-    host, localPort, _ := net.SplitHostPort(listenerAddress)
-    localPortUInt, err := strconv.ParseUint(localPort, 10, 16)
-
-    if err != nil {
-        fmt.Fprintf(pf.out, "Failed to forward from %s:%d -> %d\n", hostname, localPortUInt, port.Remote)
-        return nil, fmt.Errorf("error parsing local port: %s from %s (%s)", err, listenerAddress, host)
-    }
-    port.Local = uint16(localPortUInt)
-    if pf.out != nil {
-        fmt.Fprintf(pf.out, "Forwarding from %s -> %d\n", net.JoinHostPort(hostname, strconv.Itoa(int(localPortUInt))), port.Remote)
-    }
-
-    return listener, nil
-}
-
-// waitForConnection waits for new connections to listener and handles them in
-// the background.
-func (pf *PortForwarder) waitForConnection(listener net.Listener, port ForwardedPort) {
-    for {
-        select {
-        case <-pf.streamConn.CloseChan():
-            return
-        default:
-            conn, err := listener.Accept()
-            if err != nil {
-                // TODO consider using something like https://github.com/hydrogen18/stoppableListener?
-                if !strings.Contains(strings.ToLower(err.Error()), networkClosedError) {
-                    runtime.HandleError(fmt.Errorf("error accepting connection on port %d: %v", port.Local, err))
-                }
-                return
-            }
-            go pf.handleConnection(conn, port)
-        }
-    }
-}
-
-func (pf *PortForwarder) nextRequestID() int {
-    pf.requestIDLock.Lock()
-    defer pf.requestIDLock.Unlock()
-    id := pf.requestID
-    pf.requestID++
-    return id
-}
-
-// handleConnection copies data between the local connection and the stream to
-// the remote server.
-func (pf *PortForwarder) handleConnection(conn net.Conn, port ForwardedPort) {
-    defer conn.Close()
-
-    if pf.out != nil {
-        fmt.Fprintf(pf.out, "Handling connection for %d\n", port.Local)
-    }
-
-    requestID := pf.nextRequestID()
-
-    // create error stream
-    headers := http.Header{}
-    headers.Set(v1.StreamType, v1.StreamTypeError)
-    headers.Set(v1.PortHeader, fmt.Sprintf("%d", port.Remote))
-    headers.Set(v1.PortForwardRequestIDHeader, strconv.Itoa(requestID))
-    errorStream, err := pf.streamConn.CreateStream(headers)
-    if err != nil {
-        runtime.HandleError(fmt.Errorf("error creating error stream for port %d -> %d: %v", port.Local, port.Remote, err))
-        return
-    }
-    // we're not writing to this stream
-    errorStream.Close()
-    defer pf.streamConn.RemoveStreams(errorStream)
-
-    errorChan := make(chan error)
-    go func() {
-        message, err := io.ReadAll(errorStream)
-        switch {
-        case err != nil:
-            errorChan <- fmt.Errorf("error reading from error stream for port %d -> %d: %v", port.Local, port.Remote, err)
-        case len(message) > 0:
-            errorChan <- fmt.Errorf("an error occurred forwarding %d -> %d: %v", port.Local, port.Remote, string(message))
-        }
-        close(errorChan)
-    }()
-
-    // create data stream
-    headers.Set(v1.StreamType, v1.StreamTypeData)
-    dataStream, err := pf.streamConn.CreateStream(headers)
-    if err != nil {
-        runtime.HandleError(fmt.Errorf("error creating forwarding stream for port %d -> %d: %v", port.Local, port.Remote, err))
-        return
-    }
-    defer pf.streamConn.RemoveStreams(dataStream)
-
-    localError := make(chan struct{})
-    remoteDone := make(chan struct{})
-
-    go func() {
-        // Copy from the remote side to the local port.
-        if _, err := io.Copy(conn, dataStream); err != nil && !strings.Contains(strings.ToLower(err.Error()), networkClosedError) {
-            runtime.HandleError(fmt.Errorf("error copying from remote stream to local connection: %v", err))
-        }
-
-        // inform the select below that the remote copy is done
-        close(remoteDone)
-    }()
-
-    go func() {
-        // inform server we're not sending any more data after copy unblocks
-        defer dataStream.Close()
-
-        // Copy from the local port to the remote side.
-        if _, err := io.Copy(dataStream, conn); err != nil && !strings.Contains(strings.ToLower(err.Error()), networkClosedError) {
-            runtime.HandleError(fmt.Errorf("error copying from local connection to remote stream: %v", err))
-            // break out of the select below without waiting for the other copy to finish
-            close(localError)
-        }
-    }()
-
-    // wait for either a local->remote error or for copying from remote->local to finish
-    select {
-    case <-remoteDone:
-    case <-localError:
-    }
-
-    // reset dataStream to discard any unsent data, preventing port forwarding from being blocked.
-    // we must reset dataStream before waiting on errorChan, otherwise,
-    // the blocking data will affect errorStream and cause <-errorChan to block indefinitely.
-    _ = dataStream.Reset()
-
-    // always expect something on errorChan (it may be nil)
-    err = <-errorChan
-    if err != nil {
-        runtime.HandleError(err)
-        pf.streamConn.Close()
-    }
-}
-
-// Close stops all listeners of PortForwarder.
-func (pf *PortForwarder) Close() {
-    // stop all listeners
-    for _, l := range pf.listeners {
-        if err := l.Close(); err != nil {
-            runtime.HandleError(fmt.Errorf("error closing listener: %v", err))
-        }
-    }
-}
-
-// GetPorts will return the ports that were forwarded; this can be used to
-// retrieve the locally-bound port in cases where the input was port 0. This
-// function will signal an error if the Ready channel is nil or if the
-// listeners are not ready yet; this function will succeed after the Ready
-// channel has been closed.
-func (pf *PortForwarder) GetPorts() ([]ForwardedPort, error) {
-    if pf.Ready == nil {
-        return nil, fmt.Errorf("no Ready channel provided")
-    }
-    select {
-    case <-pf.Ready:
-        return pf.ports, nil
-    default:
-        return nil, fmt.Errorf("listeners not ready")
-    }
-}
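
End to end, the forwarder above is driven like this; a minimal hypothetical sketch, assuming dialer is an httpstream.Dialer aimed at a pod's portforward subresource (built elsewhere, for example from a REST config):

    import (
        "fmt"
        "os"

        "k8s.io/apimachinery/pkg/util/httpstream"
        "k8s.io/client-go/tools/portforward"
    )

    // Forward local port 8080 to port 80 in the pod.
    func forward(dialer httpstream.Dialer) error {
        stopCh := make(chan struct{})  // close to end the session
        readyCh := make(chan struct{}) // closed once listeners are bound
        fw, err := portforward.New(dialer, []string{"8080:80"}, stopCh, readyCh, os.Stdout, os.Stderr)
        if err != nil {
            return err
        }
        go func() {
            <-readyCh
            ports, _ := fw.GetPorts() // resolves ":80"-style random local ports
            fmt.Printf("forwarding %v\n", ports)
        }()
        return fw.ForwardPorts() // blocks until stopCh closes or the connection drops
    }
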
vendor/k8s.io/client-go/tools/portforward/tunneling_connection.go (generated, vendored): 158 lines removed
@ -1,158 +0,0 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package portforward
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
gwebsocket "github.com/gorilla/websocket"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
var _ net.Conn = &TunnelingConnection{}
|
||||
|
||||
// TunnelingConnection implements the "httpstream.Connection" interface, wrapping
|
||||
// a websocket connection that tunnels SPDY.
|
||||
type TunnelingConnection struct {
|
||||
name string
|
||||
conn *gwebsocket.Conn
|
||||
inProgressMessage io.Reader
|
||||
closeOnce sync.Once
|
||||
}
|
||||
|
||||
// NewTunnelingConnection wraps the passed gorilla/websockets connection
|
||||
// with the TunnelingConnection struct (implementing net.Conn).
|
||||
func NewTunnelingConnection(name string, conn *gwebsocket.Conn) *TunnelingConnection {
|
||||
return &TunnelingConnection{
|
||||
name: name,
|
||||
conn: conn,
|
||||
}
|
||||
}
|
||||
|
||||
// Read implements "io.Reader" interface, reading from the stored connection
|
||||
// into the passed buffer "p". Returns the number of bytes read and an error.
|
||||
// Can keep track of the "inProgress" messsage from the tunneled connection.
|
||||
func (c *TunnelingConnection) Read(p []byte) (int, error) {
|
||||
klog.V(7).Infof("%s: tunneling connection read...", c.name)
|
||||
defer klog.V(7).Infof("%s: tunneling connection read...complete", c.name)
|
||||
for {
|
||||
if c.inProgressMessage == nil {
|
||||
klog.V(8).Infof("%s: tunneling connection read before NextReader()...", c.name)
|
||||
messageType, nextReader, err := c.conn.NextReader()
|
||||
if err != nil {
|
||||
closeError := &gwebsocket.CloseError{}
|
||||
if errors.As(err, &closeError) && closeError.Code == gwebsocket.CloseNormalClosure {
|
||||
return 0, io.EOF
|
||||
}
|
||||
klog.V(4).Infof("%s:tunneling connection NextReader() error: %v", c.name, err)
|
||||
return 0, err
|
||||
}
|
||||
if messageType != gwebsocket.BinaryMessage {
|
||||
return 0, fmt.Errorf("invalid message type received")
|
||||
}
|
||||
c.inProgressMessage = nextReader
|
||||
}
|
||||
klog.V(8).Infof("%s: tunneling connection read in progress message...", c.name)
|
||||
i, err := c.inProgressMessage.Read(p)
|
||||
if i == 0 && err == io.EOF {
|
||||
c.inProgressMessage = nil
|
||||
} else {
|
||||
klog.V(8).Infof("%s: read %d bytes, error=%v, bytes=% X", c.name, i, err, p[:i])
|
||||
return i, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Write implements "io.Writer" interface, copying the data in the passed
|
||||
// byte array "p" into the stored tunneled connection. Returns the number
|
||||
// of bytes written and an error.
|
||||
func (c *TunnelingConnection) Write(p []byte) (n int, err error) {
|
||||
klog.V(7).Infof("%s: write: %d bytes, bytes=% X", c.name, len(p), p)
|
||||
defer klog.V(7).Infof("%s: tunneling connection write...complete", c.name)
|
||||
w, err := c.conn.NextWriter(gwebsocket.BinaryMessage)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() {
|
||||
// close, which flushes the message
|
||||
closeErr := w.Close()
|
||||
if closeErr != nil && err == nil {
|
||||
// if closing/flushing errored and we weren't already returning an error, return the close error
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
n, err = w.Write(p)
|
||||
return
|
||||
}
|
||||
|
||||
// Close implements "io.Closer" interface, signaling the other tunneled connection
|
||||
// endpoint, and closing the tunneled connection only once.
|
||||
func (c *TunnelingConnection) Close() error {
|
||||
var err error
|
||||
c.closeOnce.Do(func() {
|
||||
klog.V(7).Infof("%s: tunneling connection Close()...", c.name)
|
||||
// Signal other endpoint that websocket connection is closing; ignore error.
|
||||
normalCloseMsg := gwebsocket.FormatCloseMessage(gwebsocket.CloseNormalClosure, "")
|
||||
writeControlErr := c.conn.WriteControl(gwebsocket.CloseMessage, normalCloseMsg, time.Now().Add(time.Second))
|
||||
closeErr := c.conn.Close()
|
||||
if closeErr != nil {
|
||||
err = closeErr
|
||||
} else if writeControlErr != nil {
|
||||
err = writeControlErr
|
||||
}
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// LocalAddr implements part of the "net.Conn" interface, returning the local
|
||||
// endpoint network address of the tunneled connection.
|
||||
func (c *TunnelingConnection) LocalAddr() net.Addr {
|
||||
return c.conn.LocalAddr()
|
||||
}
|
||||
|
||||
// LocalAddr implements part of the "net.Conn" interface, returning the remote
|
||||
// endpoint network address of the tunneled connection.
|
||||
func (c *TunnelingConnection) RemoteAddr() net.Addr {
|
||||
return c.conn.RemoteAddr()
|
||||
}
|
||||
|
||||
// SetDeadline sets the *absolute* time in the future for both
|
||||
// read and write deadlines. Returns an error if one occurs.
|
||||
func (c *TunnelingConnection) SetDeadline(t time.Time) error {
|
||||
rerr := c.SetReadDeadline(t)
|
||||
werr := c.SetWriteDeadline(t)
|
||||
return errors.Join(rerr, werr)
|
||||
}
|
||||
|
||||
// SetDeadline sets the *absolute* time in the future for the
|
||||
// read deadlines. Returns an error if one occurs.
|
||||
func (c *TunnelingConnection) SetReadDeadline(t time.Time) error {
|
||||
return c.conn.SetReadDeadline(t)
|
||||
}
|
||||
|
||||
// SetDeadline sets the *absolute* time in the future for the
|
||||
// write deadlines. Returns an error if one occurs.
|
||||
func (c *TunnelingConnection) SetWriteDeadline(t time.Time) error {
|
||||
return c.conn.SetWriteDeadline(t)
|
||||
}
|
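To make the deleted type's role concrete: it adapts a gorilla/websocket connection into a net.Conn so a SPDY client connection can be layered over it. A hedged sketch; the endpoint URL is a placeholder and the dial step is assumed:

	// Illustrative only: wrap an already-dialed websocket connection so a
	// SPDY client connection (with pings) can run on top of it.
	wsConn, _, err := gwebsocket.DefaultDialer.Dial("wss://host.example/tunnel", nil) // placeholder URL
	if err != nil {
		return err
	}
	var nc net.Conn = portforward.NewTunnelingConnection("client", wsConn)
	spdyConn, err := spdy.NewClientConnectionWithPings(nc, 10*time.Second)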
93
vendor/k8s.io/client-go/tools/portforward/tunneling_dialer.go
generated
vendored
@ -1,93 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package portforward

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/util/httpstream"
	"k8s.io/apimachinery/pkg/util/httpstream/spdy"
	constants "k8s.io/apimachinery/pkg/util/portforward"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/transport/websocket"
	"k8s.io/klog/v2"
)

const PingPeriod = 10 * time.Second

// tunnelingDialer implements "httpstream.Dial" interface
type tunnelingDialer struct {
	url       *url.URL
	transport http.RoundTripper
	holder    websocket.ConnectionHolder
}

// NewSPDYOverWebsocketDialer creates and returns the tunnelingDialer structure which implements the "httpstream.Dialer"
// interface. The dialer can upgrade a websocket request, creating a websocket connection. This function
// returns an error if one occurs.
func NewSPDYOverWebsocketDialer(url *url.URL, config *restclient.Config) (httpstream.Dialer, error) {
	transport, holder, err := websocket.RoundTripperFor(config)
	if err != nil {
		return nil, err
	}
	return &tunnelingDialer{
		url:       url,
		transport: transport,
		holder:    holder,
	}, nil
}

// Dial upgrades to a tunneling streaming connection, returning a SPDY connection
// containing a WebSockets connection (which implements "net.Conn"). Also
// returns the protocol negotiated, or an error.
func (d *tunnelingDialer) Dial(protocols ...string) (httpstream.Connection, string, error) {
	// There is no passed context, so skip the context when creating request for now.
	// Websockets requires "GET" method: RFC 6455 Sec. 4.1 (page 17).
	req, err := http.NewRequest("GET", d.url.String(), nil)
	if err != nil {
		return nil, "", err
	}
	// Add the spdy tunneling prefix to the requested protocols. The tunneling
	// handler will know how to negotiate these protocols.
	tunnelingProtocols := []string{}
	for _, protocol := range protocols {
		tunnelingProtocol := constants.WebsocketsSPDYTunnelingPrefix + protocol
		tunnelingProtocols = append(tunnelingProtocols, tunnelingProtocol)
	}
	klog.V(4).Infoln("Before WebSocket Upgrade Connection...")
	conn, err := websocket.Negotiate(d.transport, d.holder, req, tunnelingProtocols...)
	if err != nil {
		return nil, "", err
	}
	if conn == nil {
		return nil, "", fmt.Errorf("negotiated websocket connection is nil")
	}
	protocol := conn.Subprotocol()
	protocol = strings.TrimPrefix(protocol, constants.WebsocketsSPDYTunnelingPrefix)
	klog.V(4).Infof("negotiated protocol: %s", protocol)

	// Wrap the websocket connection which implements "net.Conn".
	tConn := NewTunnelingConnection("client", conn)
	// Create SPDY connection injecting the previously created tunneling connection.
	spdyConn, err := spdy.NewClientConnectionWithPings(tConn, PingPeriod)

	return spdyConn, protocol, err
}
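A short usage sketch for the deleted dialer; `restConfig` and `portForwardURL` are assumed inputs built elsewhere, and the subprotocol constant lives in k8s.io/apimachinery/pkg/util/portforward:

	// Illustrative only: dial a SPDY-over-WebSocket port-forward connection.
	dialer, err := portforward.NewSPDYOverWebsocketDialer(portForwardURL, restConfig)
	if err != nil {
		return err
	}
	conn, negotiatedProtocol, err := dialer.Dial(constants.PortForwardV1Name)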
10
vendor/k8s.io/client-go/tools/remotecommand/OWNERS
generated
vendored
@ -1,10 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners

approvers:
- aojea
- liggitt
- seans3
reviewers:
- aojea
- liggitt
- seans3
20
vendor/k8s.io/client-go/tools/remotecommand/doc.go
generated
vendored
@ -1,20 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package remotecommand adds support for executing commands in containers,
// with support for separate stdin, stdout, and stderr streams, as well as
// TTY.
package remotecommand // import "k8s.io/client-go/tools/remotecommand"
54
vendor/k8s.io/client-go/tools/remotecommand/errorstream.go
generated
vendored
@ -1,54 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package remotecommand

import (
	"fmt"
	"io"

	"k8s.io/apimachinery/pkg/util/runtime"
)

// errorStreamDecoder interprets the data on the error channel and creates a go error object from it.
type errorStreamDecoder interface {
	decode(message []byte) error
}

// watchErrorStream watches the errorStream for remote command error data,
// decodes it with the given errorStreamDecoder, sends the decoded error (or nil if the remote
// command exited successfully) to the returned error channel, and closes it.
// This function returns immediately.
func watchErrorStream(errorStream io.Reader, d errorStreamDecoder) chan error {
	errorChan := make(chan error)

	go func() {
		defer runtime.HandleCrash()

		message, err := io.ReadAll(errorStream)
		switch {
		case err != nil && err != io.EOF:
			errorChan <- fmt.Errorf("error reading from error stream: %s", err)
		case len(message) > 0:
			errorChan <- d.decode(message)
		default:
			errorChan <- nil
		}
		close(errorChan)
	}()

	return errorChan
}
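Each protocol version pairs watchErrorStream with its own decoder; the call-site shape, as seen later in this diff's v2-v4 handlers, is:

	// Call-site shape (fragment): start the watcher, run the copies, then wait.
	errorChan := watchErrorStream(p.errorStream, &errorDecoderV2{})
	// ... start stdin/stdout/stderr copy goroutines ...
	return <-errorChan // nil means the remote command exited successfully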
60
vendor/k8s.io/client-go/tools/remotecommand/fallback.go
generated
vendored
@ -1,60 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package remotecommand

import (
	"context"

	"k8s.io/klog/v2"
)

var _ Executor = &FallbackExecutor{}

type FallbackExecutor struct {
	primary        Executor
	secondary      Executor
	shouldFallback func(error) bool
}

// NewFallbackExecutor creates an Executor that first attempts to use the
// WebSocketExecutor, falling back to the legacy SPDYExecutor if the initial
// websocket "StreamWithContext" call fails.
func NewFallbackExecutor(primary, secondary Executor, shouldFallback func(error) bool) (Executor, error) {
	return &FallbackExecutor{
		primary:        primary,
		secondary:      secondary,
		shouldFallback: shouldFallback,
	}, nil
}

// Stream is deprecated. Please use "StreamWithContext".
func (f *FallbackExecutor) Stream(options StreamOptions) error {
	return f.StreamWithContext(context.Background(), options)
}

// StreamWithContext initially attempts to call "StreamWithContext" using the
// primary executor, falling back to calling the secondary executor if the
// initial primary call to upgrade to a websocket connection fails.
func (f *FallbackExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error {
	err := f.primary.StreamWithContext(ctx, options)
	if f.shouldFallback(err) {
		klog.V(4).Infof("RemoteCommand fallback: %v", err)
		return f.secondary.StreamWithContext(ctx, options)
	}
	return err
}
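A typical construction of the deleted FallbackExecutor pairs a WebSocket executor with a SPDY one; httpstream.IsUpgradeFailure (from k8s.io/apimachinery/pkg/util/httpstream) is one plausible fallback predicate, though callers such as kubectl combine it with additional checks. `config` and `execURL` are assumed inputs:

	// Illustrative only: prefer websockets, fall back to SPDY on upgrade failure.
	wsExec, err := remotecommand.NewWebSocketExecutor(config, "GET", execURL.String())
	if err != nil {
		return err
	}
	spdyExec, err := remotecommand.NewSPDYExecutor(config, "POST", execURL)
	if err != nil {
		return err
	}
	exec, err := remotecommand.NewFallbackExecutor(wsExec, spdyExec, httpstream.IsUpgradeFailure)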
41
vendor/k8s.io/client-go/tools/remotecommand/reader.go
generated
vendored
@ -1,41 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package remotecommand

import (
	"io"
)

// readerWrapper delegates to an io.Reader so that only the io.Reader interface is implemented,
// to keep io.Copy from doing things we don't want when copying from the reader to the data stream.
//
// If the Stdin io.Reader provided to remotecommand implements a WriteTo function (like bytes.Buffer does[1]),
// io.Copy calls that method[2] to attempt to write the entire buffer to the stream in one call.
// That results in an oversized call to spdystream.Stream#Write [3],
// which results in a single oversized data frame[4] that is too large.
//
// [1] https://golang.org/pkg/bytes/#Buffer.WriteTo
// [2] https://golang.org/pkg/io/#Copy
// [3] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/stream.go#L66-L73
// [4] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/spdy/write.go#L302-L304
type readerWrapper struct {
	reader io.Reader
}

func (r readerWrapper) Read(p []byte) (int, error) {
	return r.reader.Read(p)
}
58
vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go
generated
vendored
@ -1,58 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package remotecommand

import (
	"context"
	"io"
	"net/http"

	"k8s.io/apimachinery/pkg/util/httpstream"
)

// StreamOptions holds information pertaining to the current streaming session:
// input/output streams, if the client is requesting a TTY, and a terminal size queue to
// support terminal resizing.
type StreamOptions struct {
	Stdin             io.Reader
	Stdout            io.Writer
	Stderr            io.Writer
	Tty               bool
	TerminalSizeQueue TerminalSizeQueue
}

// Executor is an interface for transporting shell-style streams.
type Executor interface {
	// Deprecated: use StreamWithContext instead to avoid possible resource leaks.
	// See https://github.com/kubernetes/kubernetes/pull/103177 for details.
	Stream(options StreamOptions) error

	// StreamWithContext initiates the transport of the standard shell streams. It will
	// transport any non-nil stream to a remote system, and return an error if a problem
	// occurs. If tty is set, the stderr stream is not used (raw TTY manages stdout and
	// stderr over the stdout stream).
	// The context controls the entire lifetime of stream execution.
	StreamWithContext(ctx context.Context, options StreamOptions) error
}

type streamCreator interface {
	CreateStream(headers http.Header) (httpstream.Stream, error)
}

type streamProtocolHandler interface {
	stream(conn streamCreator) error
}
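The Executor interface above is the public entry point for exec/attach. A condensed sketch of driving it; `req` is assumed to be a fully built exec request against the API server:

	// Illustrative only: stream an exec session, wiring local stdio through.
	exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
	if err != nil {
		return err
	}
	err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
		Stdin:  os.Stdin,
		Stdout: os.Stdout,
		Stderr: os.Stderr,
		Tty:    false,
	})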
33
vendor/k8s.io/client-go/tools/remotecommand/resize.go
generated
vendored
@ -1,33 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package remotecommand

// TerminalSize and TerminalSizeQueue were a part of k8s.io/kubernetes/pkg/util/term
// and were moved in order to decouple client from other term dependencies

// TerminalSize represents the width and height of a terminal.
type TerminalSize struct {
	Width  uint16
	Height uint16
}

// TerminalSizeQueue is capable of returning terminal resize events as they occur.
type TerminalSizeQueue interface {
	// Next returns the new terminal size after the terminal has been resized. It returns nil when
	// monitoring has been stopped.
	Next() *TerminalSize
}
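TerminalSizeQueue is typically backed by a channel fed from terminal resize signals. A minimal channel-backed implementation, illustrative only and not the library's own:

	// sizeQueue satisfies remotecommand.TerminalSizeQueue; close the channel
	// to stop monitoring.
	type sizeQueue chan *remotecommand.TerminalSize

	func (q sizeQueue) Next() *remotecommand.TerminalSize {
		size, ok := <-q
		if !ok {
			return nil // nil tells the protocol handler to stop monitoring
		}
		return size
	}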
171
vendor/k8s.io/client-go/tools/remotecommand/spdy.go
generated
vendored
@ -1,171 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package remotecommand

import (
	"context"
	"fmt"
	"net/http"
	"net/url"

	"k8s.io/apimachinery/pkg/util/httpstream"
	"k8s.io/apimachinery/pkg/util/remotecommand"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/transport/spdy"
	"k8s.io/klog/v2"
)

// spdyStreamExecutor handles transporting standard shell streams over an httpstream connection.
type spdyStreamExecutor struct {
	upgrader  spdy.Upgrader
	transport http.RoundTripper

	method          string
	url             *url.URL
	protocols       []string
	rejectRedirects bool // if true, receiving redirect from upstream is an error
}

// NewSPDYExecutor connects to the provided server and upgrades the connection to
// multiplexed bidirectional streams.
func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) {
	wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config)
	if err != nil {
		return nil, err
	}
	return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url)
}

// NewSPDYExecutorRejectRedirects returns an Executor that will upgrade the future
// connection to a SPDY bi-directional streaming connection when calling "Stream" (deprecated)
// or "StreamWithContext" (preferred). Additionally, if the upstream server returns a redirect
// during the attempted upgrade in these "Stream" calls, an error is returned.
func NewSPDYExecutorRejectRedirects(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) {
	executor, err := NewSPDYExecutorForTransports(transport, upgrader, method, url)
	if err != nil {
		return nil, err
	}
	spdyExecutor := executor.(*spdyStreamExecutor)
	spdyExecutor.rejectRedirects = true
	return spdyExecutor, nil
}

// NewSPDYExecutorForTransports connects to the provided server using the given transport,
// upgrades the response using the given upgrader to multiplexed bidirectional streams.
func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) {
	return NewSPDYExecutorForProtocols(
		transport, upgrader, method, url,
		remotecommand.StreamProtocolV5Name,
		remotecommand.StreamProtocolV4Name,
		remotecommand.StreamProtocolV3Name,
		remotecommand.StreamProtocolV2Name,
		remotecommand.StreamProtocolV1Name,
	)
}

// NewSPDYExecutorForProtocols connects to the provided server and upgrades the connection to
// multiplexed bidirectional streams using only the provided protocols. Exposed for testing, most
// callers should use NewSPDYExecutor or NewSPDYExecutorForTransports.
func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL, protocols ...string) (Executor, error) {
	return &spdyStreamExecutor{
		upgrader:  upgrader,
		transport: transport,
		method:    method,
		url:       url,
		protocols: protocols,
	}, nil
}

// Stream opens a protocol streamer to the server and streams until a client closes
// the connection or the server disconnects.
func (e *spdyStreamExecutor) Stream(options StreamOptions) error {
	return e.StreamWithContext(context.Background(), options)
}

// newConnectionAndStream creates a new SPDY connection and a stream protocol handler upon it.
func (e *spdyStreamExecutor) newConnectionAndStream(ctx context.Context, options StreamOptions) (httpstream.Connection, streamProtocolHandler, error) {
	req, err := http.NewRequestWithContext(ctx, e.method, e.url.String(), nil)
	if err != nil {
		return nil, nil, fmt.Errorf("error creating request: %v", err)
	}

	client := http.Client{Transport: e.transport}
	if e.rejectRedirects {
		client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
			return fmt.Errorf("redirect not allowed")
		}
	}
	conn, protocol, err := spdy.Negotiate(
		e.upgrader,
		&client,
		req,
		e.protocols...,
	)
	if err != nil {
		return nil, nil, err
	}

	var streamer streamProtocolHandler

	switch protocol {
	case remotecommand.StreamProtocolV5Name:
		streamer = newStreamProtocolV5(options)
	case remotecommand.StreamProtocolV4Name:
		streamer = newStreamProtocolV4(options)
	case remotecommand.StreamProtocolV3Name:
		streamer = newStreamProtocolV3(options)
	case remotecommand.StreamProtocolV2Name:
		streamer = newStreamProtocolV2(options)
	case "":
		klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name)
		fallthrough
	case remotecommand.StreamProtocolV1Name:
		streamer = newStreamProtocolV1(options)
	}

	return conn, streamer, nil
}

// StreamWithContext opens a protocol streamer to the server and streams until a client closes
// the connection or the server disconnects or the context is done.
func (e *spdyStreamExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error {
	conn, streamer, err := e.newConnectionAndStream(ctx, options)
	if err != nil {
		return err
	}
	defer conn.Close()

	panicChan := make(chan any, 1)
	errorChan := make(chan error, 1)
	go func() {
		defer func() {
			if p := recover(); p != nil {
				panicChan <- p
			}
		}()
		errorChan <- streamer.stream(conn)
	}()

	select {
	case p := <-panicChan:
		panic(p)
	case err := <-errorChan:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}
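Note the offer order above: V5 down to V1, with the server's pick selecting the handler. NewSPDYExecutorForProtocols lets tests pin a single version; `transport` and `upgrader` are assumed to come from spdy.RoundTripperFor(config):

	// Illustrative only: pin protocol V4, e.g. against a server without V5.
	exec, err := remotecommand.NewSPDYExecutorForProtocols(
		transport, upgrader, "POST", execURL,
		utilremotecommand.StreamProtocolV4Name, // k8s.io/apimachinery/pkg/util/remotecommand
	)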
159
vendor/k8s.io/client-go/tools/remotecommand/v1.go
generated
vendored
@ -1,159 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package remotecommand

import (
	"fmt"
	"io"
	"net/http"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/httpstream"
	"k8s.io/klog/v2"
)

// streamProtocolV1 implements the first version of the streaming exec & attach
// protocol. This version has some bugs, such as not being able to detect when
// non-interactive stdin data has ended. See https://issues.k8s.io/13394 and
// https://issues.k8s.io/13395 for more details.
type streamProtocolV1 struct {
	StreamOptions

	errorStream  httpstream.Stream
	remoteStdin  httpstream.Stream
	remoteStdout httpstream.Stream
	remoteStderr httpstream.Stream
}

var _ streamProtocolHandler = &streamProtocolV1{}

func newStreamProtocolV1(options StreamOptions) streamProtocolHandler {
	return &streamProtocolV1{
		StreamOptions: options,
	}
}

func (p *streamProtocolV1) stream(conn streamCreator) error {
	doneChan := make(chan struct{}, 2)
	errorChan := make(chan error)

	cp := func(s string, dst io.Writer, src io.Reader) {
		klog.V(6).Infof("Copying %s", s)
		defer klog.V(6).Infof("Done copying %s", s)
		if _, err := io.Copy(dst, src); err != nil && err != io.EOF {
			klog.Errorf("Error copying %s: %v", s, err)
		}
		if s == v1.StreamTypeStdout || s == v1.StreamTypeStderr {
			doneChan <- struct{}{}
		}
	}

	// set up all the streams first
	var err error
	headers := http.Header{}
	headers.Set(v1.StreamType, v1.StreamTypeError)
	p.errorStream, err = conn.CreateStream(headers)
	if err != nil {
		return err
	}
	defer p.errorStream.Reset()

	// Create all the streams first, then start the copy goroutines. The server doesn't start its copy
	// goroutines until it's received all of the streams. If the client creates the stdin stream and
	// immediately begins copying stdin data to the server, it's possible to overwhelm and wedge the
	// spdy frame handler in the server so that it is full of unprocessed frames. The frames aren't
	// getting processed because the server hasn't started its copying, and it won't do that until it
	// gets all the streams. By creating all the streams first, we ensure that the server is ready to
	// process data before the client starts sending any. See https://issues.k8s.io/16373 for more info.
	if p.Stdin != nil {
		headers.Set(v1.StreamType, v1.StreamTypeStdin)
		p.remoteStdin, err = conn.CreateStream(headers)
		if err != nil {
			return err
		}
		defer p.remoteStdin.Reset()
	}

	if p.Stdout != nil {
		headers.Set(v1.StreamType, v1.StreamTypeStdout)
		p.remoteStdout, err = conn.CreateStream(headers)
		if err != nil {
			return err
		}
		defer p.remoteStdout.Reset()
	}

	if p.Stderr != nil && !p.Tty {
		headers.Set(v1.StreamType, v1.StreamTypeStderr)
		p.remoteStderr, err = conn.CreateStream(headers)
		if err != nil {
			return err
		}
		defer p.remoteStderr.Reset()
	}

	// now that all the streams have been created, proceed with reading & copying

	// always read from errorStream
	go func() {
		message, err := io.ReadAll(p.errorStream)
		if err != nil && err != io.EOF {
			errorChan <- fmt.Errorf("Error reading from error stream: %s", err)
			return
		}
		if len(message) > 0 {
			errorChan <- fmt.Errorf("Error executing remote command: %s", message)
			return
		}
	}()

	if p.Stdin != nil {
		// TODO this goroutine will never exit cleanly (the io.Copy never unblocks)
		// because stdin is not closed until the process exits. If we try to call
		// stdin.Close(), it returns no error but doesn't unblock the copy. It will
		// exit when the process exits, instead.
		go cp(v1.StreamTypeStdin, p.remoteStdin, readerWrapper{p.Stdin})
	}

	waitCount := 0
	completedStreams := 0

	if p.Stdout != nil {
		waitCount++
		go cp(v1.StreamTypeStdout, p.Stdout, p.remoteStdout)
	}

	if p.Stderr != nil && !p.Tty {
		waitCount++
		go cp(v1.StreamTypeStderr, p.Stderr, p.remoteStderr)
	}

Loop:
	for {
		select {
		case <-doneChan:
			completedStreams++
			if completedStreams == waitCount {
				break Loop
			}
		case err := <-errorChan:
			return err
		}
	}

	return nil
}
199
vendor/k8s.io/client-go/tools/remotecommand/v2.go
generated
vendored
@ -1,199 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package remotecommand

import (
	"fmt"
	"io"
	"net/http"
	"sync"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/runtime"
)

// streamProtocolV2 implements version 2 of the streaming protocol for attach
// and exec. The original streaming protocol was metav1. As a result, this
// version is referred to as version 2, even though it is the first actual
// numbered version.
type streamProtocolV2 struct {
	StreamOptions

	errorStream  io.Reader
	remoteStdin  io.ReadWriteCloser
	remoteStdout io.Reader
	remoteStderr io.Reader
}

var _ streamProtocolHandler = &streamProtocolV2{}

func newStreamProtocolV2(options StreamOptions) streamProtocolHandler {
	return &streamProtocolV2{
		StreamOptions: options,
	}
}

func (p *streamProtocolV2) createStreams(conn streamCreator) error {
	var err error
	headers := http.Header{}

	// set up error stream
	headers.Set(v1.StreamType, v1.StreamTypeError)
	p.errorStream, err = conn.CreateStream(headers)
	if err != nil {
		return err
	}

	// set up stdin stream
	if p.Stdin != nil {
		headers.Set(v1.StreamType, v1.StreamTypeStdin)
		p.remoteStdin, err = conn.CreateStream(headers)
		if err != nil {
			return err
		}
	}

	// set up stdout stream
	if p.Stdout != nil {
		headers.Set(v1.StreamType, v1.StreamTypeStdout)
		p.remoteStdout, err = conn.CreateStream(headers)
		if err != nil {
			return err
		}
	}

	// set up stderr stream
	if p.Stderr != nil && !p.Tty {
		headers.Set(v1.StreamType, v1.StreamTypeStderr)
		p.remoteStderr, err = conn.CreateStream(headers)
		if err != nil {
			return err
		}
	}
	return nil
}

func (p *streamProtocolV2) copyStdin() {
	if p.Stdin != nil {
		var once sync.Once

		// copy from client's stdin to container's stdin
		go func() {
			defer runtime.HandleCrash()

			// if p.stdin is noninteractive, e.g. `echo abc | kubectl exec -i <pod> -- cat`, make sure
			// we close remoteStdin as soon as the copy from p.stdin to remoteStdin finishes. Otherwise
			// the executed command will remain running.
			defer once.Do(func() { p.remoteStdin.Close() })

			if _, err := io.Copy(p.remoteStdin, readerWrapper{p.Stdin}); err != nil {
				runtime.HandleError(err)
			}
		}()

		// read from remoteStdin until the stream is closed. this is essential to
		// be able to exit interactive sessions cleanly and not leak goroutines or
		// hang the client's terminal.
		//
		// TODO we aren't using go-dockerclient any more; revisit this to determine if it's still
		// required by engine-api.
		//
		// go-dockerclient's current hijack implementation
		// (https://github.com/fsouza/go-dockerclient/blob/89f3d56d93788dfe85f864a44f85d9738fca0670/client.go#L564)
		// waits for all three streams (stdin/stdout/stderr) to finish copying
		// before returning. When hijack finishes copying stdout/stderr, it calls
		// Close() on its side of remoteStdin, which allows this copy to complete.
		// When that happens, we must Close() on our side of remoteStdin, to
		// allow the copy in hijack to complete, and hijack to return.
		go func() {
			defer runtime.HandleCrash()
			defer once.Do(func() { p.remoteStdin.Close() })

			// this "copy" doesn't actually read anything - it's just here to wait for
			// the server to close remoteStdin.
			if _, err := io.Copy(io.Discard, p.remoteStdin); err != nil {
				runtime.HandleError(err)
			}
		}()
	}
}

func (p *streamProtocolV2) copyStdout(wg *sync.WaitGroup) {
	if p.Stdout == nil {
		return
	}

	wg.Add(1)
	go func() {
		defer runtime.HandleCrash()
		defer wg.Done()
		// make sure, packet in queue can be consumed.
		// block in queue may lead to deadlock in conn.server
		// issue: https://github.com/kubernetes/kubernetes/issues/96339
		defer io.Copy(io.Discard, p.remoteStdout)

		if _, err := io.Copy(p.Stdout, p.remoteStdout); err != nil {
			runtime.HandleError(err)
		}
	}()
}

func (p *streamProtocolV2) copyStderr(wg *sync.WaitGroup) {
	if p.Stderr == nil || p.Tty {
		return
	}

	wg.Add(1)
	go func() {
		defer runtime.HandleCrash()
		defer wg.Done()
		defer io.Copy(io.Discard, p.remoteStderr)

		if _, err := io.Copy(p.Stderr, p.remoteStderr); err != nil {
			runtime.HandleError(err)
		}
	}()
}

func (p *streamProtocolV2) stream(conn streamCreator) error {
	if err := p.createStreams(conn); err != nil {
		return err
	}

	// now that all the streams have been created, proceed with reading & copying

	errorChan := watchErrorStream(p.errorStream, &errorDecoderV2{})

	p.copyStdin()

	var wg sync.WaitGroup
	p.copyStdout(&wg)
	p.copyStderr(&wg)

	// we're waiting for stdout/stderr to finish copying
	wg.Wait()

	// waits for errorStream to finish reading with an error or nil
	return <-errorChan
}

// errorDecoderV2 interprets the error channel data as plain text.
type errorDecoderV2 struct{}

func (d *errorDecoderV2) decode(message []byte) error {
	return fmt.Errorf("error executing remote command: %s", message)
}
111
vendor/k8s.io/client-go/tools/remotecommand/v3.go
generated
vendored
@ -1,111 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package remotecommand

import (
	"encoding/json"
	"io"
	"net/http"
	"sync"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/runtime"
)

// streamProtocolV3 implements version 3 of the streaming protocol for attach
// and exec. This version adds support for resizing the container's terminal.
type streamProtocolV3 struct {
	*streamProtocolV2

	resizeStream io.Writer
}

var _ streamProtocolHandler = &streamProtocolV3{}

func newStreamProtocolV3(options StreamOptions) streamProtocolHandler {
	return &streamProtocolV3{
		streamProtocolV2: newStreamProtocolV2(options).(*streamProtocolV2),
	}
}

func (p *streamProtocolV3) createStreams(conn streamCreator) error {
	// set up the streams from v2
	if err := p.streamProtocolV2.createStreams(conn); err != nil {
		return err
	}

	// set up resize stream
	if p.Tty {
		headers := http.Header{}
		headers.Set(v1.StreamType, v1.StreamTypeResize)
		var err error
		p.resizeStream, err = conn.CreateStream(headers)
		if err != nil {
			return err
		}
	}

	return nil
}

func (p *streamProtocolV3) handleResizes() {
	if p.resizeStream == nil || p.TerminalSizeQueue == nil {
		return
	}
	go func() {
		defer runtime.HandleCrash()

		encoder := json.NewEncoder(p.resizeStream)
		for {
			size := p.TerminalSizeQueue.Next()
			if size == nil {
				return
			}
			if err := encoder.Encode(&size); err != nil {
				runtime.HandleError(err)
			}
		}
	}()
}

func (p *streamProtocolV3) stream(conn streamCreator) error {
	if err := p.createStreams(conn); err != nil {
		return err
	}

	// now that all the streams have been created, proceed with reading & copying

	errorChan := watchErrorStream(p.errorStream, &errorDecoderV3{})

	p.handleResizes()

	p.copyStdin()

	var wg sync.WaitGroup
	p.copyStdout(&wg)
	p.copyStderr(&wg)

	// we're waiting for stdout/stderr to finish copying
	wg.Wait()

	// waits for errorStream to finish reading with an error or nil
	return <-errorChan
}

type errorDecoderV3 struct {
	errorDecoderV2
}
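What travels over the resize stream created above is one JSON document per event from the json.Encoder; since TerminalSize has untagged uint16 fields, a resize to 120x40, for example, is sent as:

	{"Width":120,"Height":40}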
119
vendor/k8s.io/client-go/tools/remotecommand/v4.go
generated
vendored
@ -1,119 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package remotecommand

import (
	"encoding/json"
	"errors"
	"fmt"
	"strconv"
	"sync"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/remotecommand"
	"k8s.io/client-go/util/exec"
)

// streamProtocolV4 implements version 4 of the streaming protocol for attach
// and exec. This version adds support for exit codes on the error stream through
// the use of metav1.Status instead of plain text messages.
type streamProtocolV4 struct {
	*streamProtocolV3
}

var _ streamProtocolHandler = &streamProtocolV4{}

func newStreamProtocolV4(options StreamOptions) streamProtocolHandler {
	return &streamProtocolV4{
		streamProtocolV3: newStreamProtocolV3(options).(*streamProtocolV3),
	}
}

func (p *streamProtocolV4) createStreams(conn streamCreator) error {
	return p.streamProtocolV3.createStreams(conn)
}

func (p *streamProtocolV4) handleResizes() {
	p.streamProtocolV3.handleResizes()
}

func (p *streamProtocolV4) stream(conn streamCreator) error {
	if err := p.createStreams(conn); err != nil {
		return err
	}

	// now that all the streams have been created, proceed with reading & copying

	errorChan := watchErrorStream(p.errorStream, &errorDecoderV4{})

	p.handleResizes()

	p.copyStdin()

	var wg sync.WaitGroup
	p.copyStdout(&wg)
	p.copyStderr(&wg)

	// we're waiting for stdout/stderr to finish copying
	wg.Wait()

	// waits for errorStream to finish reading with an error or nil
	return <-errorChan
}

// errorDecoderV4 interprets the json-marshaled metav1.Status on the error channel
// and creates an exec.ExitError from it.
type errorDecoderV4 struct{}

func (d *errorDecoderV4) decode(message []byte) error {
	status := metav1.Status{}
	err := json.Unmarshal(message, &status)
	if err != nil {
		return fmt.Errorf("error stream protocol error: %v in %q", err, string(message))
	}
	switch status.Status {
	case metav1.StatusSuccess:
		return nil
	case metav1.StatusFailure:
		if status.Reason == remotecommand.NonZeroExitCodeReason {
			if status.Details == nil {
				return errors.New("error stream protocol error: details must be set")
			}
			for i := range status.Details.Causes {
				c := &status.Details.Causes[i]
				if c.Type != remotecommand.ExitCodeCauseType {
					continue
				}

				rc, err := strconv.ParseUint(c.Message, 10, 8)
				if err != nil {
					return fmt.Errorf("error stream protocol error: invalid exit code value %q", c.Message)
				}
				return exec.CodeExitError{
					Err:  fmt.Errorf("command terminated with exit code %d", rc),
					Code: int(rc),
				}
			}

			return fmt.Errorf("error stream protocol error: no %s cause given", remotecommand.ExitCodeCauseType)
		}
	default:
		return errors.New("error stream protocol error: unknown error")
	}

	return errors.New(status.Message)
}
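Concretely, the decoder above turns a metav1.Status payload like the following (abbreviated) into an exec.CodeExitError with Code 42; note that StatusCause.Type serializes under the JSON key "reason":

	{"status":"Failure","reason":"NonZeroExitCode","details":{"causes":[{"reason":"ExitCode","message":"42"}]}}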
35
vendor/k8s.io/client-go/tools/remotecommand/v5.go
generated
vendored
@ -1,35 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package remotecommand

// streamProtocolV5 adds support for V5 of the remote command subprotocol.
// For the streamProtocolHandler, this version is the same as V4.
type streamProtocolV5 struct {
	*streamProtocolV4
}

var _ streamProtocolHandler = &streamProtocolV5{}

func newStreamProtocolV5(options StreamOptions) streamProtocolHandler {
	return &streamProtocolV5{
		streamProtocolV4: newStreamProtocolV4(options).(*streamProtocolV4),
	}
}

func (p *streamProtocolV5) stream(conn streamCreator) error {
	return p.streamProtocolV4.stream(conn)
}
515
vendor/k8s.io/client-go/tools/remotecommand/websocket.go
generated
vendored
515
vendor/k8s.io/client-go/tools/remotecommand/websocket.go
generated
vendored
@ -1,515 +0,0 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package remotecommand
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
gwebsocket "github.com/gorilla/websocket"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/httpstream"
|
||||
"k8s.io/apimachinery/pkg/util/remotecommand"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/transport/websocket"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// writeDeadline defines the time that a client-side write to the websocket
|
||||
// connection must complete before an i/o timeout occurs.
|
||||
const writeDeadline = 60 * time.Second
|
||||
|
||||
var (
|
||||
_ Executor = &wsStreamExecutor{}
|
||||
_ streamCreator = &wsStreamCreator{}
|
||||
_ httpstream.Stream = &stream{}
|
||||
|
||||
streamType2streamID = map[string]byte{
|
||||
v1.StreamTypeStdin: remotecommand.StreamStdIn,
|
||||
v1.StreamTypeStdout: remotecommand.StreamStdOut,
|
||||
v1.StreamTypeStderr: remotecommand.StreamStdErr,
|
||||
v1.StreamTypeError: remotecommand.StreamErr,
|
||||
v1.StreamTypeResize: remotecommand.StreamResize,
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
// pingPeriod defines how often a heartbeat "ping" message is sent.
|
||||
pingPeriod = 5 * time.Second
|
||||
// pingReadDeadline defines the time waiting for a response heartbeat
|
||||
// "pong" message before a timeout error occurs for websocket reading.
|
||||
// This duration must always be greater than the "pingPeriod". By defining
|
||||
// this deadline in terms of the ping period, we are essentially saying
|
||||
// we can drop "X" (e.g. 12) pings before firing the timeout.
|
||||
pingReadDeadline = (pingPeriod * 12) + (1 * time.Second)
|
||||
)
|
||||
|
||||
// wsStreamExecutor handles transporting standard shell streams over an httpstream connection.
|
||||
type wsStreamExecutor struct {
|
||||
transport http.RoundTripper
|
||||
upgrader websocket.ConnectionHolder
|
||||
method string
|
||||
url string
|
||||
// requested protocols in priority order (e.g. v5.channel.k8s.io before v4.channel.k8s.io).
|
||||
protocols []string
|
||||
// selected protocol from the handshake process; could be empty string if handshake fails.
|
||||
negotiated string
|
||||
// period defines how often a "ping" heartbeat message is sent to the other endpoint.
|
||||
heartbeatPeriod time.Duration
|
||||
// deadline defines the amount of time before "pong" response must be received.
|
||||
heartbeatDeadline time.Duration
|
||||
}
|
||||
|
||||
func NewWebSocketExecutor(config *restclient.Config, method, url string) (Executor, error) {
|
||||
// Only supports V5 protocol for correct version skew functionality.
|
||||
// Previous api servers will proxy upgrade requests to legacy websocket
|
||||
// servers on container runtimes which support V1-V4. These legacy
|
||||
// websocket servers will not handle the new CLOSE signal.
|
||||
return NewWebSocketExecutorForProtocols(config, method, url, remotecommand.StreamProtocolV5Name)
|
||||
}
|
||||
|
||||
// NewWebSocketExecutorForProtocols allows to execute commands via a WebSocket connection.
|
||||
func NewWebSocketExecutorForProtocols(config *restclient.Config, method, url string, protocols ...string) (Executor, error) {
|
||||
transport, upgrader, err := websocket.RoundTripperFor(config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating websocket transports: %v", err)
|
||||
}
|
||||
return &wsStreamExecutor{
|
||||
transport: transport,
|
||||
upgrader: upgrader,
|
||||
method: method,
|
||||
url: url,
|
||||
protocols: protocols,
|
||||
heartbeatPeriod: pingPeriod,
|
||||
heartbeatDeadline: pingReadDeadline,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Deprecated: use StreamWithContext instead to avoid possible resource leaks.
|
||||
// See https://github.com/kubernetes/kubernetes/pull/103177 for details.
|
||||
func (e *wsStreamExecutor) Stream(options StreamOptions) error {
|
||||
return e.StreamWithContext(context.Background(), options)
|
||||
}

// StreamWithContext upgrades an HTTP request to a WebSocket connection, and starts the various
// goroutines to implement the necessary streams over the connection. The "options" parameter
// defines which streams are requested. Returns an error if one occurred. This method is NOT
// safe to run concurrently with the same executor (because of the state stored in the upgrader).
func (e *wsStreamExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error {
	req, err := http.NewRequestWithContext(ctx, e.method, e.url, nil)
	if err != nil {
		return err
	}
	conn, err := websocket.Negotiate(e.transport, e.upgrader, req, e.protocols...)
	if err != nil {
		return err
	}
	if conn == nil {
		panic(fmt.Errorf("websocket connection is nil"))
	}
	defer conn.Close()
	e.negotiated = conn.Subprotocol()
	klog.V(4).Infof("The subprotocol is %s", e.negotiated)

	var streamer streamProtocolHandler
	switch e.negotiated {
	case remotecommand.StreamProtocolV5Name:
		streamer = newStreamProtocolV5(options)
	case remotecommand.StreamProtocolV4Name:
		streamer = newStreamProtocolV4(options)
	case remotecommand.StreamProtocolV3Name:
		streamer = newStreamProtocolV3(options)
	case remotecommand.StreamProtocolV2Name:
		streamer = newStreamProtocolV2(options)
	case "":
		klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name)
		fallthrough
	case remotecommand.StreamProtocolV1Name:
		streamer = newStreamProtocolV1(options)
	}

	panicChan := make(chan any, 1)
	errorChan := make(chan error, 1)
	go func() {
		defer func() {
			if p := recover(); p != nil {
				panicChan <- p
			}
		}()
		creator := newWSStreamCreator(conn)
		go creator.readDemuxLoop(
			e.upgrader.DataBufferSize(),
			e.heartbeatPeriod,
			e.heartbeatDeadline,
		)
		errorChan <- streamer.stream(creator)
	}()

	select {
	case p := <-panicChan:
		panic(p)
	case err := <-errorChan:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}

type wsStreamCreator struct {
	conn *gwebsocket.Conn
	// Protects writing to the websocket connection; reading is lock-free.
	connWriteLock sync.Mutex
	// map of stream id to stream; multiple streams read/write the connection
	streams   map[byte]*stream
	streamsMu sync.Mutex
	// setStreamErr holds the error to return to anyone calling setStream.
	// This is populated in closeAllStreamReaders.
	setStreamErr error
}

func newWSStreamCreator(conn *gwebsocket.Conn) *wsStreamCreator {
	return &wsStreamCreator{
		conn:    conn,
		streams: map[byte]*stream{},
	}
}

func (c *wsStreamCreator) getStream(id byte) *stream {
	c.streamsMu.Lock()
	defer c.streamsMu.Unlock()
	return c.streams[id]
}

func (c *wsStreamCreator) setStream(id byte, s *stream) error {
	c.streamsMu.Lock()
	defer c.streamsMu.Unlock()
	if c.setStreamErr != nil {
		return c.setStreamErr
	}
	c.streams[id] = s
	return nil
}

// CreateStream uses the stream type from the passed headers to create a stream over the "c.conn"
// connection. Returns a Stream structure, or nil and an error if one occurred.
func (c *wsStreamCreator) CreateStream(headers http.Header) (httpstream.Stream, error) {
	streamType := headers.Get(v1.StreamType)
	id, ok := streamType2streamID[streamType]
	if !ok {
		return nil, fmt.Errorf("unknown stream type: %s", streamType)
	}
	if s := c.getStream(id); s != nil {
		return nil, fmt.Errorf("duplicate stream for type %s", streamType)
	}
	reader, writer := io.Pipe()
	s := &stream{
		headers:       headers,
		readPipe:      reader,
		writePipe:     writer,
		conn:          c.conn,
		connWriteLock: &c.connWriteLock,
		id:            id,
	}
	if err := c.setStream(id, s); err != nil {
		_ = s.writePipe.Close()
		_ = s.readPipe.Close()
		return nil, err
	}
	return s, nil
}
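
// A minimal sketch of how a protocol handler obtains a stream; "creator" is
// an assumption standing in for the wsStreamCreator passed to the handler,
// and the header values follow the core/v1 stream-type constants:
//
//	headers := http.Header{}
//	headers.Set(v1.StreamType, v1.StreamTypeStdout)
//	stdout, err := creator.CreateStream(headers)
//	if err != nil {
//		return err
//	}
//	go io.Copy(os.Stdout, stdout)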

// readDemuxLoop is the lock-free reading processor for this endpoint of the websocket
// connection. This loop reads the connection, and demultiplexes the data
// into one of the individual stream pipes (by checking the stream id). This
// loop can *not* be run concurrently, because there can only be one websocket
// connection reader at a time (a read mutex would provide no benefit).
func (c *wsStreamCreator) readDemuxLoop(bufferSize int, period time.Duration, deadline time.Duration) {
	// Initialize and start the ping/pong heartbeat.
	h := newHeartbeat(c.conn, period, deadline)
	// Set initial timeout for websocket connection reading.
	if err := c.conn.SetReadDeadline(time.Now().Add(deadline)); err != nil {
		klog.Errorf("Websocket initial setting read deadline failed %v", err)
		return
	}
	go h.start()
	// Buffer size must correspond to the same size allocated
	// for the read buffer during websocket client creation. A
	// difference can cause incomplete connection reads.
	readBuffer := make([]byte, bufferSize)
	for {
		// NextReader() only returns data messages (BinaryMessage or TextMessage).
		// Even though this call will never return control frames
		// such as ping, pong, or close, this call is necessary for these
		// message types to be processed. There can only be one reader
		// at a time, so this reader loop must *not* be run concurrently;
		// there is no lock for reading. Calling "NextReader()" before the
		// current reader has been processed will close the current reader.
		// If the heartbeat read deadline times out, this "NextReader()" will
		// return an i/o error, and error handling will clean up.
		messageType, r, err := c.conn.NextReader()
		if err != nil {
			websocketErr, ok := err.(*gwebsocket.CloseError)
			if ok && websocketErr.Code == gwebsocket.CloseNormalClosure {
				err = nil // readers will get io.EOF as it's a normal closure
			} else {
				err = fmt.Errorf("next reader: %w", err)
			}
			c.closeAllStreamReaders(err)
			return
		}
		// All remote command protocols send/receive only binary data messages.
		if messageType != gwebsocket.BinaryMessage {
			c.closeAllStreamReaders(fmt.Errorf("unexpected message type: %d", messageType))
			return
		}
		// It's ok to read just a single byte because the underlying library wraps the actual
		// connection with a buffered reader anyway.
		_, err = io.ReadFull(r, readBuffer[:1])
		if err != nil {
			c.closeAllStreamReaders(fmt.Errorf("read stream id: %w", err))
			return
		}
		streamID := readBuffer[0]
		s := c.getStream(streamID)
		if s == nil {
			klog.Errorf("Unknown stream id %d, discarding message", streamID)
			continue
		}
		for {
			nr, errRead := r.Read(readBuffer)
			if nr > 0 {
				// Write the data to the stream's pipe. This can block.
				_, errWrite := s.writePipe.Write(readBuffer[:nr])
				if errWrite != nil {
					// Pipe must have been closed by the stream user.
					// Nothing to do, discard the message.
					break
				}
			}
			if errRead != nil {
				if errRead == io.EOF {
					break
				}
				// Wrap errRead here: the outer "err" is nil at this point.
				c.closeAllStreamReaders(fmt.Errorf("read message: %w", errRead))
				return
			}
		}
	}
}
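
// For reference, the wire framing the demux loop above consumes is simple:
// every binary websocket message starts with a one-byte stream id, followed
// by the payload for that stream. A minimal sketch of producing one such
// frame by hand ("conn", "streamID", and "payload" are assumptions):
//
//	var frame []byte
//	frame = append(frame, streamID)   // first byte selects the stream
//	frame = append(frame, payload...) // remainder is the stream data
//	err := conn.WriteMessage(gwebsocket.BinaryMessage, frame)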

// closeAllStreamReaders closes readers in all streams.
// This unblocks all stream.Read() calls, and keeps any future streams from being created.
func (c *wsStreamCreator) closeAllStreamReaders(err error) {
	c.streamsMu.Lock()
	defer c.streamsMu.Unlock()
	for _, s := range c.streams {
		// Closing writePipe unblocks all readPipe.Read() callers and prevents any future writes.
		_ = s.writePipe.CloseWithError(err)
	}
	// Ensure callers of setStream receive an error after this point.
	if err != nil {
		c.setStreamErr = err
	} else {
		c.setStreamErr = fmt.Errorf("closed all streams")
	}
}

type stream struct {
	headers   http.Header
	readPipe  *io.PipeReader
	writePipe *io.PipeWriter
	// conn is used for writing directly into the connection.
	// Is nil after Close() / Reset() to prevent future writes.
	conn *gwebsocket.Conn
	// connWriteLock protects conn against concurrent write operations. There must be a single writer and a single reader only.
	// The mutex is shared across all streams because the underlying connection is shared.
	connWriteLock *sync.Mutex
	id            byte
}

func (s *stream) Read(p []byte) (n int, err error) {
	return s.readPipe.Read(p)
}

// Write writes directly to the underlying WebSocket connection.
func (s *stream) Write(p []byte) (n int, err error) {
	klog.V(4).Infof("Write() on stream %d", s.id)
	defer klog.V(4).Infof("Write() done on stream %d", s.id)
	s.connWriteLock.Lock()
	defer s.connWriteLock.Unlock()
	if s.conn == nil {
		return 0, fmt.Errorf("write on closed stream %d", s.id)
	}
	err = s.conn.SetWriteDeadline(time.Now().Add(writeDeadline))
	if err != nil {
		klog.V(7).Infof("Websocket setting write deadline failed %v", err)
		return 0, err
	}
	// Message writer buffers the message data, so we don't need to do that ourselves.
	// Just write id and the data as two separate writes to avoid allocating an intermediate buffer.
	w, err := s.conn.NextWriter(gwebsocket.BinaryMessage)
	if err != nil {
		return 0, err
	}
	defer func() {
		if w != nil {
			w.Close()
		}
	}()
	_, err = w.Write([]byte{s.id})
	if err != nil {
		return 0, err
	}
	n, err = w.Write(p)
	if err != nil {
		return n, err
	}
	err = w.Close()
	w = nil
	return n, err
}

// Close half-closes the stream, indicating this side is finished with the stream.
func (s *stream) Close() error {
	klog.V(4).Infof("Close() on stream %d", s.id)
	defer klog.V(4).Infof("Close() done on stream %d", s.id)
	s.connWriteLock.Lock()
	defer s.connWriteLock.Unlock()
	if s.conn == nil {
		return fmt.Errorf("Close() on already closed stream %d", s.id)
	}
	// Communicate the CLOSE stream signal to the other websocket endpoint.
	err := s.conn.WriteMessage(gwebsocket.BinaryMessage, []byte{remotecommand.StreamClose, s.id})
	s.conn = nil
	return err
}

func (s *stream) Reset() error {
	klog.V(4).Infof("Reset() on stream %d", s.id)
	defer klog.V(4).Infof("Reset() done on stream %d", s.id)
	s.Close()
	return s.writePipe.Close()
}

func (s *stream) Headers() http.Header {
	return s.headers
}

func (s *stream) Identifier() uint32 {
	return uint32(s.id)
}

// heartbeat encapsulates data necessary for the websocket ping/pong heartbeat. This
// heartbeat works by setting a read deadline on the websocket connection, then
// pushing this deadline into the future for every successful heartbeat. If the
// heartbeat "pong" fails to respond within the deadline, then the "NextReader()" call
// inside the "readDemuxLoop" will return an i/o error prompting a connection close
// and cleanup.
type heartbeat struct {
	conn *gwebsocket.Conn
	// period defines how often a "ping" heartbeat message is sent to the other endpoint
	period time.Duration
	// closing the "closer" channel will clean up the heartbeat timers
	closer chan struct{}
	// optional data to send with "ping" message
	message []byte
	// optionally received data message with "pong" message, same as sent with ping
	pongMessage []byte
}

// newHeartbeat creates a heartbeat structure encapsulating the fields necessary to
// run the websocket connection ping/pong mechanism, and sets up handlers on
// the websocket connection.
func newHeartbeat(conn *gwebsocket.Conn, period time.Duration, deadline time.Duration) *heartbeat {
	h := &heartbeat{
		conn:   conn,
		period: period,
		closer: make(chan struct{}),
	}
	// Set up a handler for receiving the returned "pong" message from the other endpoint
	// by pushing the read deadline into the future. The "msg" received could
	// be empty.
	h.conn.SetPongHandler(func(msg string) error {
		// Push the read deadline into the future.
		klog.V(8).Infof("Pong message received (%s)--resetting read deadline", msg)
		err := h.conn.SetReadDeadline(time.Now().Add(deadline))
		if err != nil {
			klog.Errorf("Websocket setting read deadline failed %v", err)
			return err
		}
		if len(msg) > 0 {
			h.pongMessage = []byte(msg)
		}
		return nil
	})
	// Set up a handler to clean up timers when this endpoint receives a "Close" message.
	closeHandler := h.conn.CloseHandler()
	h.conn.SetCloseHandler(func(code int, text string) error {
		close(h.closer)
		return closeHandler(code, text)
	})
	return h
}

// setMessage sets the optional data sent with the "ping" heartbeat. According to the websocket RFC,
// data sent with a "ping" message should be returned in the "pong" message.
func (h *heartbeat) setMessage(msg string) {
	h.message = []byte(msg)
}

// start starts the heartbeat by setting up the necessary handlers and looping, sending a "ping"
// message every "period" until the "closer" channel is closed.
func (h *heartbeat) start() {
	// Loop to continually send "ping" messages through the websocket connection every "period".
	t := time.NewTicker(h.period)
	defer t.Stop()
	for {
		select {
		case <-h.closer:
			klog.V(8).Infof("closed channel--returning")
			return
		case <-t.C:
			// "WriteControl" does not need to be protected by a mutex. According to
			// gorilla/websockets library docs: "The Close and WriteControl methods can
			// be called concurrently with all other methods."
			if err := h.conn.WriteControl(gwebsocket.PingMessage, h.message, time.Now().Add(pingReadDeadline)); err == nil {
				klog.V(8).Infof("Websocket Ping succeeded")
			} else {
				klog.Errorf("Websocket Ping failed: %v", err)
				if errors.Is(err, gwebsocket.ErrCloseSent) {
					// Continue because c.conn.CloseChan will manage closing the connection already.
					continue
				} else if e, ok := err.(net.Error); ok && e.Timeout() {
					// Continue, in case this is a transient failure.
					// c.conn.CloseChan above will tell us when the connection is
					// actually closed.
					// If the Temporary function hadn't been deprecated, we would have used it.
					// But most temporary errors are timeout errors anyway.
					continue
				}
				return
			}
		}
	}
}
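
// A minimal standalone sketch of the same ping/pong pattern with
// gorilla/websocket; the names and durations here are illustrative, not
// taken from this file:
//
//	const pingEvery = 5 * time.Second
//	// Allow several missed pongs before the read side times out.
//	const readDeadline = pingEvery*12 + time.Second
//
//	conn.SetPongHandler(func(string) error {
//		return conn.SetReadDeadline(time.Now().Add(readDeadline))
//	})
//	go func() {
//		t := time.NewTicker(pingEvery)
//		defer t.Stop()
//		for range t.C {
//			if err := conn.WriteControl(gwebsocket.PingMessage, nil, time.Now().Add(time.Second)); err != nil {
//				return
//			}
//		}
//	}()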
150
vendor/k8s.io/client-go/tools/watch/informerwatcher.go
generated
vendored
@ -1,150 +0,0 @@

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package watch

import (
	"sync"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"
)

func newEventProcessor(out chan<- watch.Event) *eventProcessor {
	return &eventProcessor{
		out:  out,
		cond: sync.NewCond(&sync.Mutex{}),
		done: make(chan struct{}),
	}
}

// eventProcessor buffers events and writes them to an out chan when a reader
// is waiting. Because of the requirement to buffer events, it synchronizes
// input with a condition, and synchronizes output with a channel. It needs to
// be able to yield while both waiting on an input condition and while blocked
// on writing to the output channel.
type eventProcessor struct {
	out chan<- watch.Event

	cond *sync.Cond
	buff []watch.Event

	done chan struct{}
}

func (e *eventProcessor) run() {
	for {
		batch := e.takeBatch()
		e.writeBatch(batch)
		if e.stopped() {
			return
		}
	}
}

func (e *eventProcessor) takeBatch() []watch.Event {
	e.cond.L.Lock()
	defer e.cond.L.Unlock()

	for len(e.buff) == 0 && !e.stopped() {
		e.cond.Wait()
	}

	batch := e.buff
	e.buff = nil
	return batch
}

func (e *eventProcessor) writeBatch(events []watch.Event) {
	for _, event := range events {
		select {
		case e.out <- event:
		case <-e.done:
			return
		}
	}
}

func (e *eventProcessor) push(event watch.Event) {
	e.cond.L.Lock()
	defer e.cond.L.Unlock()
	defer e.cond.Signal()
	e.buff = append(e.buff, event)
}

func (e *eventProcessor) stopped() bool {
	select {
	case <-e.done:
		return true
	default:
		return false
	}
}

func (e *eventProcessor) stop() {
	close(e.done)
	e.cond.Signal()
}

// NewIndexerInformerWatcher will create an IndexerInformer and wrap it into watch.Interface,
// so you can use it anywhere you'd have used a regular Watcher returned from the Watch method.
// It also returns a channel you can use to wait for the informers to fully shut down.
func NewIndexerInformerWatcher(lw cache.ListerWatcher, objType runtime.Object) (cache.Indexer, cache.Controller, watch.Interface, <-chan struct{}) {
	ch := make(chan watch.Event)
	w := watch.NewProxyWatcher(ch)
	e := newEventProcessor(ch)

	indexer, informer := cache.NewIndexerInformer(lw, objType, 0, cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			e.push(watch.Event{
				Type:   watch.Added,
				Object: obj.(runtime.Object),
			})
		},
		UpdateFunc: func(old, new interface{}) {
			e.push(watch.Event{
				Type:   watch.Modified,
				Object: new.(runtime.Object),
			})
		},
		DeleteFunc: func(obj interface{}) {
			staleObj, stale := obj.(cache.DeletedFinalStateUnknown)
			if stale {
				// We have no means of passing the additional information down using
				// the watch API based on watch.Event, but the caller can filter such
				// objects by checking if metadata.deletionTimestamp is set.
				obj = staleObj.Obj
			}

			e.push(watch.Event{
				Type:   watch.Deleted,
				Object: obj.(runtime.Object),
			})
		},
	}, cache.Indexers{})

	go e.run()

	doneCh := make(chan struct{})
	go func() {
		defer close(doneCh)
		defer e.stop()
		informer.Run(w.StopChan())
	}()

	return indexer, informer, w, doneCh
}
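
// A minimal usage sketch; the "client" clientset and the pod resource below
// are assumptions for illustration:
//
//	lw := &cache.ListWatch{
//		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
//			return client.CoreV1().Pods("default").List(context.TODO(), options)
//		},
//		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
//			return client.CoreV1().Pods("default").Watch(context.TODO(), options)
//		},
//	}
//	_, _, w, done := NewIndexerInformerWatcher(lw, &corev1.Pod{})
//	defer func() { w.Stop(); <-done }()
//	for event := range w.ResultChan() {
//		fmt.Println(event.Type)
//	}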
334
vendor/k8s.io/client-go/tools/watch/retrywatcher.go
generated
vendored
@ -1,334 +0,0 @@

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package watch

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"sync"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/dump"
	"k8s.io/apimachinery/pkg/util/net"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"
	"k8s.io/klog/v2"
)

// resourceVersionGetter is an interface used to get the resource version from events.
// We can't reuse an interface from meta, as that would be a cyclic dependency, and we need just this one method.
type resourceVersionGetter interface {
	GetResourceVersion() string
}

// RetryWatcher will make sure that in case the underlying watcher is closed (e.g. due to API timeout or etcd timeout)
// it will get restarted from the last point without the consumer even knowing about it.
// RetryWatcher does that by inspecting events and keeping track of resourceVersion.
// It is especially useful when using watch.UntilWithoutRetry, where premature termination causes issues and flakes.
// Please note that this is not resilient to the etcd cache not having the resource version anymore - you would need to
// use Informers for that.
type RetryWatcher struct {
	lastResourceVersion string
	watcherClient       cache.Watcher
	resultChan          chan watch.Event
	stopChan            chan struct{}
	doneChan            chan struct{}
	minRestartDelay     time.Duration
	stopChanLock        sync.Mutex
}

// NewRetryWatcher creates a new RetryWatcher.
// It will make sure that watches get restarted in case of recoverable errors.
// The initialResourceVersion will be given to the watch method when first called.
func NewRetryWatcher(initialResourceVersion string, watcherClient cache.Watcher) (*RetryWatcher, error) {
	return newRetryWatcher(initialResourceVersion, watcherClient, 1*time.Second)
}

func newRetryWatcher(initialResourceVersion string, watcherClient cache.Watcher, minRestartDelay time.Duration) (*RetryWatcher, error) {
	switch initialResourceVersion {
	case "", "0":
		// TODO: revisit this if we ever get WATCH v2 where it means start "now"
		//       without doing the synthetic list of objects at the beginning (see #74022)
		return nil, fmt.Errorf("initial RV %q is not supported due to issues with underlying WATCH", initialResourceVersion)
	default:
		break
	}

	rw := &RetryWatcher{
		lastResourceVersion: initialResourceVersion,
		watcherClient:       watcherClient,
		stopChan:            make(chan struct{}),
		doneChan:            make(chan struct{}),
		resultChan:          make(chan watch.Event, 0),
		minRestartDelay:     minRestartDelay,
	}

	go rw.receive()
	return rw, nil
}
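
// A minimal usage sketch; the "client" clientset and the initial list used
// to obtain a resource version are assumptions for illustration:
//
//	pods, err := client.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{})
//	if err != nil {
//		return err
//	}
//	rw, err := NewRetryWatcher(pods.ResourceVersion, &cache.ListWatch{
//		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
//			return client.CoreV1().Pods("default").Watch(context.TODO(), options)
//		},
//	})
//	if err != nil {
//		return err
//	}
//	defer rw.Stop()
//	for event := range rw.ResultChan() {
//		fmt.Println(event.Type)
//	}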

func (rw *RetryWatcher) send(event watch.Event) bool {
	// Writing to an unbuffered channel is a blocking operation,
	// and we need to check if stop wasn't requested while doing so.
	select {
	case rw.resultChan <- event:
		return true
	case <-rw.stopChan:
		return false
	}
}

// doReceive returns true when it is done, false otherwise.
// If it is not done, the second return value holds the time to wait before calling it again.
func (rw *RetryWatcher) doReceive() (bool, time.Duration) {
	watcher, err := rw.watcherClient.Watch(metav1.ListOptions{
		ResourceVersion:     rw.lastResourceVersion,
		AllowWatchBookmarks: true,
	})
	// We are very unlikely to hit EOF here since we are just establishing the call,
	// but it may happen that the apiserver is just shutting down (e.g. being restarted).
	// This is consistent with how it is handled for informers.
	switch err {
	case nil:
		break

	case io.EOF:
		// watch closed normally
		return false, 0

	case io.ErrUnexpectedEOF:
		klog.V(1).InfoS("Watch closed with unexpected EOF", "err", err)
		return false, 0

	default:
		msg := "Watch failed"
		if net.IsProbableEOF(err) || net.IsTimeout(err) {
			klog.V(5).InfoS(msg, "err", err)
			// Retry
			return false, 0
		}

		// Check if the watch failed due to the client not having permission to watch the resource or the credentials
		// being invalid (e.g. expired token).
		if apierrors.IsForbidden(err) || apierrors.IsUnauthorized(err) {
			// Add more detail since the forbidden message returned by the Kubernetes API is just "unknown".
			klog.ErrorS(err, msg+": ensure the client has valid credentials and watch permissions on the resource")

			if apiStatus, ok := err.(apierrors.APIStatus); ok {
				statusErr := apiStatus.Status()

				sent := rw.send(watch.Event{
					Type:   watch.Error,
					Object: &statusErr,
				})
				if !sent {
					// This likely means the RetryWatcher is stopping, but return false so the caller of doReceive can
					// verify this and potentially retry.
					klog.Error("Failed to send the Unauthorized or Forbidden watch event")

					return false, 0
				}
			} else {
				// This should never happen since apierrors only handles apierrors.APIStatus. Still, this is an
				// unrecoverable error, so still allow it to return true below.
				klog.ErrorS(err, msg+": encountered an unexpected Unauthorized or Forbidden error type")
			}

			return true, 0
		}

		klog.ErrorS(err, msg)
		// Retry
		return false, 0
	}

	if watcher == nil {
		klog.ErrorS(nil, "Watch returned nil watcher")
		// Retry
		return false, 0
	}

	ch := watcher.ResultChan()
	defer watcher.Stop()

	for {
		select {
		case <-rw.stopChan:
			klog.V(4).InfoS("Stopping RetryWatcher.")
			return true, 0
		case event, ok := <-ch:
			if !ok {
				klog.V(4).InfoS("Failed to get event! Re-creating the watcher.", "resourceVersion", rw.lastResourceVersion)
				return false, 0
			}

			// We need to inspect the event and get ResourceVersion out of it.
			switch event.Type {
			case watch.Added, watch.Modified, watch.Deleted, watch.Bookmark:
				metaObject, ok := event.Object.(resourceVersionGetter)
				if !ok {
					_ = rw.send(watch.Event{
						Type:   watch.Error,
						Object: &apierrors.NewInternalError(errors.New("retryWatcher: doesn't support resourceVersion")).ErrStatus,
					})
					// We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data!
					return true, 0
				}

				resourceVersion := metaObject.GetResourceVersion()
				if resourceVersion == "" {
					_ = rw.send(watch.Event{
						Type:   watch.Error,
						Object: &apierrors.NewInternalError(fmt.Errorf("retryWatcher: object %#v doesn't support resourceVersion", event.Object)).ErrStatus,
					})
					// We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data!
					return true, 0
				}

				// All is fine; send the non-bookmark events and update the resource version.
				if event.Type != watch.Bookmark {
					ok = rw.send(event)
					if !ok {
						return true, 0
					}
				}
				rw.lastResourceVersion = resourceVersion

				continue

			case watch.Error:
				// This round trip allows us to handle unstructured status.
				errObject := apierrors.FromObject(event.Object)
				statusErr, ok := errObject.(*apierrors.StatusError)
				if !ok {
					klog.Error(fmt.Sprintf("Received an error which is not *metav1.Status but %s", dump.Pretty(event.Object)))
					// Retry unknown errors
					return false, 0
				}

				status := statusErr.ErrStatus

				statusDelay := time.Duration(0)
				if status.Details != nil {
					statusDelay = time.Duration(status.Details.RetryAfterSeconds) * time.Second
				}

				switch status.Code {
				case http.StatusGone:
					// Never retry RV too old errors
					_ = rw.send(event)
					return true, 0

				case http.StatusGatewayTimeout, http.StatusInternalServerError:
					// Retry
					return false, statusDelay

				default:
					// We retry by default. RetryWatcher is meant to proceed unless it is certain
					// that it can't. If we are not certain, we proceed with retry and leave it
					// up to the user to timeout if needed.

					// Log here so we have a record of hitting the unexpected error
					// and we can whitelist some error codes if we missed any that are expected.
					klog.V(5).Info(fmt.Sprintf("Retrying after unexpected error: %s", dump.Pretty(event.Object)))

					// Retry
					return false, statusDelay
				}

			default:
				klog.Errorf("Failed to recognize Event type %q", event.Type)
				_ = rw.send(watch.Event{
					Type:   watch.Error,
					Object: &apierrors.NewInternalError(fmt.Errorf("retryWatcher failed to recognize Event type %q", event.Type)).ErrStatus,
				})
				// We are unable to restart the watch and have to stop the loop or this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data!
				return true, 0
			}
		}
	}
}

// receive reads the result from a watcher, restarting it if necessary.
func (rw *RetryWatcher) receive() {
	defer close(rw.doneChan)
	defer close(rw.resultChan)

	klog.V(4).Info("Starting RetryWatcher.")
	defer klog.V(4).Info("Stopping RetryWatcher.")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go func() {
		select {
		case <-rw.stopChan:
			cancel()
			return
		case <-ctx.Done():
			return
		}
	}()

	// We use non-sliding until so we don't introduce delays on the happy path when the WATCH call
	// times out or gets closed and we need to reestablish it, while also avoiding hot loops.
	wait.NonSlidingUntilWithContext(ctx, func(ctx context.Context) {
		done, retryAfter := rw.doReceive()
		if done {
			cancel()
			return
		}

		timer := time.NewTimer(retryAfter)
		select {
		case <-ctx.Done():
			timer.Stop()
			return
		case <-timer.C:
		}

		klog.V(4).Infof("Restarting RetryWatcher at RV=%q", rw.lastResourceVersion)
	}, rw.minRestartDelay)
}

// ResultChan implements Interface.
func (rw *RetryWatcher) ResultChan() <-chan watch.Event {
	return rw.resultChan
}

// Stop implements Interface.
func (rw *RetryWatcher) Stop() {
	rw.stopChanLock.Lock()
	defer rw.stopChanLock.Unlock()

	// Prevent closing an already closed channel to prevent a panic
	select {
	case <-rw.stopChan:
	default:
		close(rw.stopChan)
	}
}

// Done allows the caller to be notified when the Retry watcher stops.
func (rw *RetryWatcher) Done() <-chan struct{} {
	return rw.doneChan
}
168
vendor/k8s.io/client-go/tools/watch/until.go
generated
vendored
@ -1,168 +0,0 @@

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package watch

import (
	"context"
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"
	"k8s.io/klog/v2"
)

// PreconditionFunc returns true if the condition has been reached, false if it has not been reached yet,
// or an error if the condition failed or detected an error state.
type PreconditionFunc func(store cache.Store) (bool, error)

// ConditionFunc returns true if the condition has been reached, false if it has not been reached yet,
// or an error if the condition cannot be checked and should terminate. In general, it is better to define
// level driven conditions over edge driven conditions (pod has ready=true, vs pod modified and ready changed
// from false to true).
type ConditionFunc func(event watch.Event) (bool, error)

// ErrWatchClosed is returned when the watch channel is closed before timeout in UntilWithoutRetry.
var ErrWatchClosed = errors.New("watch closed before UntilWithoutRetry timeout")

// UntilWithoutRetry reads items from the watch until each provided condition succeeds, and then returns the last watch
// encountered. The first condition that returns an error terminates the watch (and the event is also returned).
// If no event has been received, the returned event will be nil.
// Conditions are satisfied sequentially so as to provide a useful primitive for higher level composition.
// Waits until context deadline or until context is canceled.
//
// Warning: Unless you have a very specific use case (probably a special Watcher), don't use this function!!!
// Warning: This will fail e.g. on API timeouts and/or 'too old resource version' error.
// Warning: You are most probably looking for a function *Until* or *UntilWithSync* below,
// Warning: solving such issues.
// TODO: Consider making this function private to prevent misuse when the other occurrences in our codebase are gone.
func UntilWithoutRetry(ctx context.Context, watcher watch.Interface, conditions ...ConditionFunc) (*watch.Event, error) {
	ch := watcher.ResultChan()
	defer watcher.Stop()
	var lastEvent *watch.Event
	for _, condition := range conditions {
		// check the next condition against the previous event and short circuit waiting for the next watch
		if lastEvent != nil {
			done, err := condition(*lastEvent)
			if err != nil {
				return lastEvent, err
			}
			if done {
				continue
			}
		}
	ConditionSucceeded:
		for {
			select {
			case event, ok := <-ch:
				if !ok {
					return lastEvent, ErrWatchClosed
				}
				lastEvent = &event

				done, err := condition(event)
				if err != nil {
					return lastEvent, err
				}
				if done {
					break ConditionSucceeded
				}

			case <-ctx.Done():
				return lastEvent, wait.ErrWaitTimeout
			}
		}
	}
	return lastEvent, nil
}

// Until wraps the watcherClient's watch function with a RetryWatcher, making sure that the watcher gets restarted in case of errors.
// The initialResourceVersion will be given to the watch method when first called. It shall not be "" or "0",
// given the underlying WATCH call issues (#74022).
// The remaining behaviour is identical to function UntilWithoutRetry. (See above.)
// Until can deal with API timeouts and lost connections.
// It guarantees that you will see all events, in the order they happened.
// Due to this guarantee there is no way it can deal with the 'Resource version too old' error. It will fail in this case.
// (See `UntilWithSync` if you'd prefer to recover from all the errors including RV too old by re-listing
// those items. In normal code you should care about being level driven so you'd not care about not seeing all the edges.)
//
// The most frequent usage for Until would be a test where you want to verify the exact order of events ("edges").
func Until(ctx context.Context, initialResourceVersion string, watcherClient cache.Watcher, conditions ...ConditionFunc) (*watch.Event, error) {
	w, err := NewRetryWatcher(initialResourceVersion, watcherClient)
	if err != nil {
		return nil, err
	}

	return UntilWithoutRetry(ctx, w, conditions...)
}
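
// A minimal usage sketch, reusing the "pods" list and ListWatch-based
// watcherClient assumptions from the NewRetryWatcher example above; corev1
// is k8s.io/api/core/v1:
//
//	ctx, cancel := ContextWithOptionalTimeout(context.Background(), 30*time.Second)
//	defer cancel()
//	lastEvent, err := Until(ctx, pods.ResourceVersion, watcherClient,
//		func(event watch.Event) (bool, error) {
//			pod, ok := event.Object.(*corev1.Pod)
//			if !ok {
//				return false, fmt.Errorf("unexpected object type: %T", event.Object)
//			}
//			return pod.Status.Phase == corev1.PodRunning, nil
//		})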

// UntilWithSync creates an informer from lw, optionally checks the precondition when the store is synced,
// and watches the output until each provided condition succeeds, in a way that is identical
// to function UntilWithoutRetry. (See above.)
// UntilWithSync can deal with all errors like API timeout, lost connections and 'Resource version too old'.
// It is the only function that can recover from 'Resource version too old'; Until and UntilWithoutRetry will
// just fail in that case. On the other hand it can't provide you with guarantees as strong as using a simple
// Watch method with Until. It can skip some intermediate events in case of the watch function failing, but it will
// re-list to recover, and you always get an event, if there has been a change, after recovery.
// Also, with the current implementation based on DeltaFIFO, the order of events is guaranteed only for a
// particular object, not across objects, even if they belong to the same resource.
// The most frequent usage would be a command that needs to watch the "state of the world" and shouldn't fail, like:
// waiting for an object to reach a state, "small" controllers, ...
func UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime.Object, precondition PreconditionFunc, conditions ...ConditionFunc) (*watch.Event, error) {
	indexer, informer, watcher, done := NewIndexerInformerWatcher(lw, objType)
	// We need to wait for the internal informers to fully stop so it's easier to reason about
	// and it works with non-thread safe clients.
	defer func() { <-done }()
	// The proxy watcher can be stopped multiple times, so it's fine to use defer here to cover alternative branches and
	// to let UntilWithoutRetry stop it.
	defer watcher.Stop()

	if precondition != nil {
		if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
			return nil, fmt.Errorf("UntilWithSync: unable to sync caches: %w", ctx.Err())
		}

		done, err := precondition(indexer)
		if err != nil {
			return nil, err
		}

		if done {
			return nil, nil
		}
	}

	return UntilWithoutRetry(ctx, watcher, conditions...)
}
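
// A minimal usage sketch; the ListerWatcher "lw" and the conditions are
// assumptions, as in the Until example above:
//
//	event, err := UntilWithSync(ctx, lw, &corev1.Pod{},
//		func(store cache.Store) (bool, error) {
//			// Precondition: succeed immediately if the synced store already holds any pods.
//			return len(store.List()) > 0, nil
//		},
//		func(event watch.Event) (bool, error) {
//			return event.Type == watch.Deleted, nil
//		},
//	)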

// ContextWithOptionalTimeout wraps context.WithTimeout and handles infinite timeouts expressed as 0 duration.
func ContextWithOptionalTimeout(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
	if timeout < 0 {
		// This should be handled in validation
		klog.Errorf("Timeout for context shall not be negative!")
		timeout = 0
	}

	if timeout == 0 {
		return context.WithCancel(parent)
	}

	return context.WithTimeout(parent, timeout)
}
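
// A minimal usage sketch:
//
//	// A timeout of 0 means "no timeout": only cancellation applies.
//	ctx, cancel := ContextWithOptionalTimeout(context.Background(), 0)
//	defer cancel()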