rebase: update k8s.io packages to v0.29.0

Signed-off-by: Niels de Vos <ndevos@ibm.com>
Niels de Vos
2023-12-20 13:23:59 +01:00
committed by mergify[bot]
parent 328a264202
commit f080b9e0c9
367 changed files with 21340 additions and 11878 deletions

View File

@ -267,7 +267,7 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatc
}
requestInfo, _ := request.RequestInfoFrom(ctx)
metrics.RecordLongRunning(req, requestInfo, metrics.APIServerComponent, func() {
serveWatch(watcher, scope, outputMediaType, req, w, timeout)
serveWatch(watcher, scope, outputMediaType, req, w, timeout, metrics.CleanListScope(ctx, &opts))
})
return
}

View File

@ -77,6 +77,96 @@ func (lazy *lazyAccept) String() string {
return "unknown"
}
// lazyAPIGroup implements String() string and it will
// lazily get Group from request info.
type lazyAPIGroup struct {
req *http.Request
}
func (lazy *lazyAPIGroup) String() string {
if lazy.req != nil {
ctx := lazy.req.Context()
requestInfo, ok := apirequest.RequestInfoFrom(ctx)
if ok {
return requestInfo.APIGroup
}
}
return "unknown"
}
// lazyAPIVersion implements String() string and it will
// lazily get APIVersion from request info.
type lazyAPIVersion struct {
req *http.Request
}
func (lazy *lazyAPIVersion) String() string {
if lazy.req != nil {
ctx := lazy.req.Context()
requestInfo, ok := apirequest.RequestInfoFrom(ctx)
if ok {
return requestInfo.APIVersion
}
}
return "unknown"
}
// lazyName implements String() string and it will
// lazily get Name from request info.
type lazyName struct {
req *http.Request
}
func (lazy *lazyName) String() string {
if lazy.req != nil {
ctx := lazy.req.Context()
requestInfo, ok := apirequest.RequestInfoFrom(ctx)
if ok {
return requestInfo.Name
}
}
return "unknown"
}
// lazySubresource implements String() string and it will
// lazily get Subresource from request info.
type lazySubresource struct {
req *http.Request
}
func (lazy *lazySubresource) String() string {
if lazy.req != nil {
ctx := lazy.req.Context()
requestInfo, ok := apirequest.RequestInfoFrom(ctx)
if ok {
return requestInfo.Subresource
}
}
return "unknown"
}
// lazyNamespace implements String() string and it will
// lazily get Namespace from request info.
type lazyNamespace struct {
req *http.Request
}
func (lazy *lazyNamespace) String() string {
if lazy.req != nil {
ctx := lazy.req.Context()
requestInfo, ok := apirequest.RequestInfoFrom(ctx)
if ok {
return requestInfo.Namespace
}
}
return "unknown"
}
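All of the lazy* helpers added above share one pattern: they hold the *http.Request and postpone the RequestInfo lookup until String() is actually invoked, so the work is only done when a tracing backend really serializes the attribute. A minimal, self-contained sketch of that pattern (toy lazyPath type and URL, not part of the apiserver code):

```go
package main

import (
	"fmt"
	"net/http"
)

// lazyPath mirrors the lazyAPIGroup/lazyName/... helpers: it stores the
// request and resolves its value only when String() is called.
type lazyPath struct {
	req *http.Request
}

func (l *lazyPath) String() string {
	if l.req != nil && l.req.URL != nil {
		return l.req.URL.Path
	}
	return "unknown"
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://example.test/apis/apps/v1/deployments", nil)
	attr := &lazyPath{req: req}
	fmt.Println(attr) // the path is only extracted here, when the Stringer is formatted
}
```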
// lazyAuditID implements Stringer interface to lazily retrieve
// the audit ID associated with the request.
type lazyAuditID struct {

View File

@ -18,7 +18,10 @@ package metrics
import (
"context"
"sync"
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
)
type RequestBodyVerb string
@ -35,8 +38,8 @@ var (
RequestBodySizes = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: "apiserver",
Name: "request_body_sizes",
Help: "Apiserver request body sizes broken out by size.",
Name: "request_body_size_bytes",
Help: "Apiserver request body size in bytes broken out by resource and verb.",
// we use 0.05 MB as the smallest bucket with 0.1 MB increments up to the
// apiserver limit.
Buckets: metrics.LinearBuckets(50000, 100000, 31),
@ -46,6 +49,15 @@ var (
)
)
var registerMetrics sync.Once
// Register all metrics.
func Register() {
registerMetrics.Do(func() {
legacyregistry.MustRegister(RequestBodySizes)
})
}
func RecordRequestBodySize(ctx context.Context, resource string, verb RequestBodyVerb, size int) {
RequestBodySizes.WithContext(ctx).WithLabelValues(resource, string(verb)).Observe(float64(size))
}
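Two details in this hunk are easy to miss: LinearBuckets(50000, 100000, 31) yields 31 buckets from 50 KB in 100 KB steps up to about 3.05 MB, in line with the apiserver's default request body limit of 3 MiB, and Register is wrapped in sync.Once so any number of callers can trigger registration exactly once per process. A stdlib-only sketch of that idempotent-registration pattern (hypothetical register function, not the package's API):

```go
package main

import (
	"fmt"
	"sync"
)

var registerOnce sync.Once

// register stands in for Register() above: no matter how many code paths
// call it, the body guarded by sync.Once runs a single time.
func register() {
	registerOnce.Do(func() {
		fmt.Println("request_body_size_bytes histogram registered")
	})
}

func main() {
	for i := 0; i < 3; i++ {
		register()
	}
	// The registration message is printed only once.
}
```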

View File

@ -18,8 +18,11 @@ package handlers
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"reflect"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@ -29,48 +32,228 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
"k8s.io/apiserver/pkg/endpoints/metrics"
endpointsrequest "k8s.io/apiserver/pkg/endpoints/request"
klog "k8s.io/klog/v2"
)
// transformObject takes the object as returned by storage and ensures it is in
// the client's desired form, as well as ensuring any API level fields like self-link
// are properly set.
func transformObject(ctx context.Context, obj runtime.Object, opts interface{}, mediaType negotiation.MediaTypeOptions, scope *RequestScope, req *http.Request) (runtime.Object, error) {
if co, ok := obj.(runtime.CacheableObject); ok {
if mediaType.Convert != nil {
// Non-nil mediaType.Convert means that some conversion of the object
// has to happen. Currently conversion may potentially modify the
// object or assume something about it (e.g. asTable operates on
// reflection, which won't work for any wrapper).
// To ensure it will work correctly, let's operate on base objects
// and not cache it for now.
//
// TODO: Long-term, transformObject should be changed so that it
// implements runtime.Encoder interface.
return doTransformObject(ctx, co.GetObject(), opts, mediaType, scope, req)
}
}
return doTransformObject(ctx, obj, opts, mediaType, scope, req)
// watchEmbeddedEncoder performs encoding of the embedded object.
//
// NOTE: watchEmbeddedEncoder is NOT thread-safe.
type watchEmbeddedEncoder struct {
encoder runtime.Encoder
ctx context.Context
// target, if non-nil, configures transformation type.
// The other options are ignored if target is nil.
target *schema.GroupVersionKind
tableOptions *metav1.TableOptions
scope *RequestScope
// identifier of the encoder, computed lazily
identifier runtime.Identifier
}
func doTransformObject(ctx context.Context, obj runtime.Object, opts interface{}, mediaType negotiation.MediaTypeOptions, scope *RequestScope, req *http.Request) (runtime.Object, error) {
func newWatchEmbeddedEncoder(ctx context.Context, encoder runtime.Encoder, target *schema.GroupVersionKind, tableOptions *metav1.TableOptions, scope *RequestScope) *watchEmbeddedEncoder {
return &watchEmbeddedEncoder{
encoder: encoder,
ctx: ctx,
target: target,
tableOptions: tableOptions,
scope: scope,
}
}
// Encode implements runtime.Encoder interface.
func (e *watchEmbeddedEncoder) Encode(obj runtime.Object, w io.Writer) error {
if co, ok := obj.(runtime.CacheableObject); ok {
return co.CacheEncode(e.Identifier(), e.doEncode, w)
}
return e.doEncode(obj, w)
}
func (e *watchEmbeddedEncoder) doEncode(obj runtime.Object, w io.Writer) error {
result, err := doTransformObject(e.ctx, obj, e.tableOptions, e.target, e.scope)
if err != nil {
utilruntime.HandleError(fmt.Errorf("failed to transform object %v: %v", reflect.TypeOf(obj), err))
result = obj
}
// When we are transforming to a table, print headers only for the first
// object - for every subsequent event the table options are switched to
// NoHeaders so the headers are omitted.
if e.tableOptions != nil && !e.tableOptions.NoHeaders {
e.tableOptions.NoHeaders = true
// With options change, we should recompute the identifier.
// Clearing this will trigger lazy recompute when needed.
e.identifier = ""
}
return e.encoder.Encode(result, w)
}
// Identifier implements runtime.Encoder interface.
func (e *watchEmbeddedEncoder) Identifier() runtime.Identifier {
if e.identifier == "" {
e.identifier = e.embeddedIdentifier()
}
return e.identifier
}
type watchEmbeddedEncoderIdentifier struct {
Name string `json:"name,omitempty"`
Encoder string `json:"encoder,omitempty"`
Target string `json:"target,omitempty"`
Options metav1.TableOptions `json:"options,omitempty"`
NoHeaders bool `json:"noHeaders,omitempty"`
}
func (e *watchEmbeddedEncoder) embeddedIdentifier() runtime.Identifier {
if e.target == nil {
// If no conversion is performed, we effectively only use
// the embedded identifier.
return e.encoder.Identifier()
}
identifier := watchEmbeddedEncoderIdentifier{
Name: "watch-embedded",
Encoder: string(e.encoder.Identifier()),
Target: e.target.String(),
}
if e.target.Kind == "Table" && e.tableOptions != nil {
identifier.Options = *e.tableOptions
identifier.NoHeaders = e.tableOptions.NoHeaders
}
result, err := json.Marshal(identifier)
if err != nil {
klog.Fatalf("Failed marshaling identifier for watchEmbeddedEncoder: %v", err)
}
return runtime.Identifier(result)
}
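The Encode/Identifier pair exists so a runtime.CacheableObject can cache its serialized bytes keyed by the encoder's identifier: as long as the identifier is stable the cached form is reused, and any option change (such as flipping NoHeaders above) produces a new identifier and forces a fresh encode. A toy, stdlib-only illustration of identifier-keyed caching (this is not the CacheableObject API itself, just the idea behind it):

```go
package main

import "fmt"

// cachingObject keeps one serialized form per encoder identifier,
// analogous to what CacheEncode enables for watch payloads.
type cachingObject struct {
	value string
	cache map[string][]byte // identifier -> serialized bytes
}

func (c *cachingObject) encode(identifier string, doEncode func(string) []byte) []byte {
	if c.cache == nil {
		c.cache = map[string][]byte{}
	}
	if b, ok := c.cache[identifier]; ok {
		return b // same identifier: reuse the cached serialization
	}
	b := doEncode(c.value)
	c.cache[identifier] = b
	return b
}

func main() {
	obj := &cachingObject{value: "pod-1"}
	encode := func(v string) []byte { return []byte(fmt.Sprintf("{%q:%q}", "name", v)) }
	id := `{"name":"watch-embedded","noHeaders":false}`
	fmt.Println(string(obj.encode(id, encode))) // encoded
	fmt.Println(string(obj.encode(id, encode))) // served from the cache
}
```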
// watchEncoder performs encoding of the watch events.
//
// NOTE: watchEncoder is NOT thread-safe.
type watchEncoder struct {
ctx context.Context
kind schema.GroupVersionKind
embeddedEncoder runtime.Encoder
encoder runtime.Encoder
framer io.Writer
buffer runtime.Splice
eventBuffer runtime.Splice
currentEmbeddedIdentifier runtime.Identifier
identifiers map[watch.EventType]runtime.Identifier
}
func newWatchEncoder(ctx context.Context, kind schema.GroupVersionKind, embeddedEncoder runtime.Encoder, encoder runtime.Encoder, framer io.Writer) *watchEncoder {
return &watchEncoder{
ctx: ctx,
kind: kind,
embeddedEncoder: embeddedEncoder,
encoder: encoder,
framer: framer,
buffer: runtime.NewSpliceBuffer(),
eventBuffer: runtime.NewSpliceBuffer(),
}
}
// Encode encodes a given watch event.
// NOTE: if the event's object implements the CacheableObject interface,
// the serialized version is cached in that object [not the event itself].
func (e *watchEncoder) Encode(event watch.Event) error {
encodeFunc := func(obj runtime.Object, w io.Writer) error {
return e.doEncode(obj, event, w)
}
if co, ok := event.Object.(runtime.CacheableObject); ok {
return co.CacheEncode(e.identifier(event.Type), encodeFunc, e.framer)
}
return encodeFunc(event.Object, e.framer)
}
func (e *watchEncoder) doEncode(obj runtime.Object, event watch.Event, w io.Writer) error {
defer e.buffer.Reset()
if err := e.embeddedEncoder.Encode(obj, e.buffer); err != nil {
return fmt.Errorf("unable to encode watch object %T: %v", obj, err)
}
// ContentType is not required here because we are defaulting to the serializer type.
outEvent := &metav1.WatchEvent{
Type: string(event.Type),
Object: runtime.RawExtension{Raw: e.buffer.Bytes()},
}
metrics.WatchEventsSizes.WithContext(e.ctx).WithLabelValues(e.kind.Group, e.kind.Version, e.kind.Kind).Observe(float64(len(outEvent.Object.Raw)))
defer e.eventBuffer.Reset()
if err := e.encoder.Encode(outEvent, e.eventBuffer); err != nil {
return fmt.Errorf("unable to encode watch object %T: %v (%#v)", outEvent, err, e)
}
_, err := w.Write(e.eventBuffer.Bytes())
return err
}
type watchEncoderIdentifier struct {
Name string `json:"name,omitempty"`
EmbeddedEncoder string `json:"embeddedEncoder,omitempty"`
Encoder string `json:"encoder,omitempty"`
EventType string `json:"eventType,omitempty"`
}
func (e *watchEncoder) identifier(eventType watch.EventType) runtime.Identifier {
// We need to take into account that if the embeddedEncoder includes a
// table transformer, its identifier is dynamic. As a result, whenever
// the identifier of embeddedEncoder changes, we need to invalidate the
// whole identifiers cache.
// TODO(wojtek-t): Can we optimize it somehow?
if e.currentEmbeddedIdentifier != e.embeddedEncoder.Identifier() {
e.currentEmbeddedIdentifier = e.embeddedEncoder.Identifier()
e.identifiers = map[watch.EventType]runtime.Identifier{}
}
if _, ok := e.identifiers[eventType]; !ok {
e.identifiers[eventType] = e.typeIdentifier(eventType)
}
return e.identifiers[eventType]
}
func (e *watchEncoder) typeIdentifier(eventType watch.EventType) runtime.Identifier {
// The eventType is a non-standard pattern. It comes from the fact that
// we're effectively serializing the whole watch event, but caching the
// result on the Object contained within the watch event.
identifier := watchEncoderIdentifier{
Name: "watch",
EmbeddedEncoder: string(e.embeddedEncoder.Identifier()),
Encoder: string(e.encoder.Identifier()),
EventType: string(eventType),
}
result, err := json.Marshal(identifier)
if err != nil {
klog.Fatalf("Failed marshaling identifier for watchEncoder: %v", err)
}
return runtime.Identifier(result)
}
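Since identifiers are plain marshaled JSON, the per-event-type cache keys built here are easy to inspect. A small self-contained reproduction of the struct (same JSON tags; the encoder names are placeholders):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Same shape and tags as watchEncoderIdentifier above.
type watchEncoderIdentifier struct {
	Name            string `json:"name,omitempty"`
	EmbeddedEncoder string `json:"embeddedEncoder,omitempty"`
	Encoder         string `json:"encoder,omitempty"`
	EventType       string `json:"eventType,omitempty"`
}

func main() {
	id, _ := json.Marshal(watchEncoderIdentifier{
		Name:            "watch",
		EmbeddedEncoder: "example-embedded-encoder",
		Encoder:         "example-json-encoder",
		EventType:       "ADDED",
	})
	fmt.Println(string(id))
	// {"name":"watch","embeddedEncoder":"example-embedded-encoder","encoder":"example-json-encoder","eventType":"ADDED"}
}
```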
// doTransformObject is used for handling all requests, including watch.
func doTransformObject(ctx context.Context, obj runtime.Object, opts interface{}, target *schema.GroupVersionKind, scope *RequestScope) (runtime.Object, error) {
if _, ok := obj.(*metav1.Status); ok {
return obj, nil
}
// ensure that for empty lists we don't return <nil> items.
// This is safe to modify without deep-copying the object, as
// List objects themselves are never cached.
if meta.IsListType(obj) && meta.LenList(obj) == 0 {
if err := meta.SetList(obj, []runtime.Object{}); err != nil {
return nil, err
}
}
switch target := mediaType.Convert; {
switch {
case target == nil:
// If we ever change that from a no-op, the identifier of
// the watchEmbeddedEncoder has to be adjusted accordingly.
return obj, nil
case target.Kind == "PartialObjectMetadata":
@ -128,6 +311,7 @@ func targetEncodingForTransform(scope *RequestScope, mediaType negotiation.Media
// transformResponseObject takes an object loaded from storage and performs any necessary transformations.
// Will write the complete response object.
// transformResponseObject is used only for handling non-streaming requests.
func transformResponseObject(ctx context.Context, scope *RequestScope, req *http.Request, w http.ResponseWriter, statusCode int, mediaType negotiation.MediaTypeOptions, result runtime.Object) {
options, err := optionsForTransform(mediaType, req)
if err != nil {
@ -135,9 +319,19 @@ func transformResponseObject(ctx context.Context, scope *RequestScope, req *http
return
}
// ensure that for empty lists we don't return <nil> items.
// This is safe to modify without deep-copying the object, as
// List objects themselves are never cached.
if meta.IsListType(result) && meta.LenList(result) == 0 {
if err := meta.SetList(result, []runtime.Object{}); err != nil {
scope.err(err, w, req)
return
}
}
var obj runtime.Object
do := func() {
obj, err = transformObject(ctx, result, options, mediaType, scope, req)
obj, err = doTransformObject(ctx, result, options, mediaType.Convert, scope)
}
endpointsrequest.TrackTransformResponseObjectLatency(ctx, do)
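The empty-list normalization added in both doTransformObject and transformResponseObject matters because nil and empty slices serialize differently: a nil Items slice becomes "items": null, which some clients do not handle, while an empty slice yields "items": []. A stdlib-only illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type list struct {
	Items []string `json:"items"`
}

func main() {
	nilItems, _ := json.Marshal(list{})                    // nil slice
	emptyItems, _ := json.Marshal(list{Items: []string{}}) // empty slice
	fmt.Println(string(nilItems))   // {"items":null}
	fmt.Println(string(emptyItems)) // {"items":[]}
}
```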

View File

@ -27,6 +27,11 @@ func traceFields(req *http.Request) []attribute.KeyValue {
attribute.Stringer("accept", &lazyAccept{req: req}),
attribute.Stringer("audit-id", &lazyAuditID{req: req}),
attribute.Stringer("client", &lazyClientIP{req: req}),
attribute.Stringer("api-group", &lazyAPIGroup{req: req}),
attribute.Stringer("api-version", &lazyAPIVersion{req: req}),
attribute.Stringer("name", &lazyName{req: req}),
attribute.Stringer("subresource", &lazySubresource{req: req}),
attribute.Stringer("namespace", &lazyNamespace{req: req}),
attribute.String("protocol", req.Proto),
attribute.Stringer("resource", &lazyResource{req: req}),
attribute.Stringer("scope", &lazyScope{req: req}),

View File

@ -19,9 +19,7 @@ package handlers
import (
"bytes"
"fmt"
"io"
"net/http"
"reflect"
"time"
"golang.org/x/net/websocket"
@ -29,13 +27,15 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
"k8s.io/apimachinery/pkg/util/httpstream/wsstream"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
"k8s.io/apiserver/pkg/endpoints/metrics"
apirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/storage"
utilfeature "k8s.io/apiserver/pkg/util/feature"
)
// nothing will ever be sent down this channel
@ -63,7 +63,7 @@ func (w *realTimeoutFactory) TimeoutCh() (<-chan time.Time, func() bool) {
// serveWatch will serve a watch response.
// TODO: the functionality in this method and in WatchServer.Serve is not cleanly decoupled.
func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions negotiation.MediaTypeOptions, req *http.Request, w http.ResponseWriter, timeout time.Duration) {
func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions negotiation.MediaTypeOptions, req *http.Request, w http.ResponseWriter, timeout time.Duration, metricsScope string) {
defer watcher.Stop()
options, err := optionsForTransform(mediaTypeOptions, req)
@ -92,6 +92,8 @@ func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions n
mediaType += ";stream=watch"
}
ctx := req.Context()
// locate the appropriate embedded encoder based on the transform
var embeddedEncoder runtime.Encoder
contentKind, contentSerializer, transform := targetEncodingForTransform(scope, mediaTypeOptions, req)
@ -106,13 +108,41 @@ func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions n
embeddedEncoder = scope.Serializer.EncoderForVersion(serializer.Serializer, contentKind.GroupVersion())
}
var memoryAllocator runtime.MemoryAllocator
if encoderWithAllocator, supportsAllocator := embeddedEncoder.(runtime.EncoderWithAllocator); supportsAllocator {
// don't construct the allocator inside the per-object encode call as that would allocate memory on every call.
// instead, we allocate the buffer for the entire watch session and release it when we close the connection.
memoryAllocator = runtime.AllocatorPool.Get().(*runtime.Allocator)
defer runtime.AllocatorPool.Put(memoryAllocator)
embeddedEncoder = runtime.NewEncoderWithAllocator(encoderWithAllocator, memoryAllocator)
}
var tableOptions *metav1.TableOptions
if options != nil {
if passedOptions, ok := options.(*metav1.TableOptions); ok {
tableOptions = passedOptions
} else {
scope.err(fmt.Errorf("unexpected options type: %T", options), w, req)
return
}
}
embeddedEncoder = newWatchEmbeddedEncoder(ctx, embeddedEncoder, mediaTypeOptions.Convert, tableOptions, scope)
if encoderWithAllocator, supportsAllocator := encoder.(runtime.EncoderWithAllocator); supportsAllocator {
if memoryAllocator == nil {
// don't construct the allocator inside the per-object encode call as that would allocate memory on every call.
// instead, we allocate the buffer for the entire watch session and release it when we close the connection.
memoryAllocator = runtime.AllocatorPool.Get().(*runtime.Allocator)
defer runtime.AllocatorPool.Put(memoryAllocator)
}
encoder = runtime.NewEncoderWithAllocator(encoderWithAllocator, memoryAllocator)
}
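Both allocator blocks follow the rule their comments describe: borrow one reusable buffer from the pool for the entire watch session and return it when the connection closes, instead of allocating inside every encode. A generic sync.Pool sketch of that per-session lifetime (toy serveSession function, not the apiserver's allocator types):

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufferPool = sync.Pool{
	New: func() any { return new(bytes.Buffer) },
}

// serveSession borrows one buffer for the whole session and returns it on
// exit, mirroring how the watch handler scopes its runtime.Allocator to the
// lifetime of the connection rather than to a single event.
func serveSession(events []string) {
	buf := bufferPool.Get().(*bytes.Buffer)
	defer bufferPool.Put(buf)

	for _, e := range events {
		buf.Reset() // reuse the same backing array for every event
		buf.WriteString(e)
		fmt.Println(buf.Len(), "bytes encoded")
	}
}

func main() {
	serveSession([]string{"ADDED pod-1", "MODIFIED pod-1", "DELETED pod-1"})
}
```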
var serverShuttingDownCh <-chan struct{}
if signals := apirequest.ServerShutdownSignalFrom(req.Context()); signals != nil {
serverShuttingDownCh = signals.ShuttingDown()
}
ctx := req.Context()
server := &WatchServer{
Watching: watcher,
Scope: scope,
@ -123,23 +153,10 @@ func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions n
Encoder: encoder,
EmbeddedEncoder: embeddedEncoder,
Fixup: func(obj runtime.Object) runtime.Object {
result, err := transformObject(ctx, obj, options, mediaTypeOptions, scope, req)
if err != nil {
utilruntime.HandleError(fmt.Errorf("failed to transform object %v: %v", reflect.TypeOf(obj), err))
return obj
}
// When we are transformed to a table, use the table options as the state for whether we
// should print headers - on watch, we only want to print table headers on the first object
// and omit them on subsequent events.
if tableOptions, ok := options.(*metav1.TableOptions); ok {
tableOptions.NoHeaders = true
}
return result
},
TimeoutFactory: &realTimeoutFactory{timeout},
ServerShuttingDownCh: serverShuttingDownCh,
metricsScope: metricsScope,
}
server.ServeHTTP(w, req)
@ -160,11 +177,11 @@ type WatchServer struct {
Encoder runtime.Encoder
// used to encode the nested object in the watch stream
EmbeddedEncoder runtime.Encoder
// used to correct the object before we send it to the serializer
Fixup func(runtime.Object) runtime.Object
TimeoutFactory TimeoutFactory
ServerShuttingDownCh <-chan struct{}
metricsScope string
}
// ServeHTTP serves a series of encoded events via HTTP with Transfer-Encoding: chunked
@ -195,17 +212,6 @@ func (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
return
}
var e streaming.Encoder
var memoryAllocator runtime.MemoryAllocator
if encoder, supportsAllocator := s.Encoder.(runtime.EncoderWithAllocator); supportsAllocator {
memoryAllocator = runtime.AllocatorPool.Get().(*runtime.Allocator)
defer runtime.AllocatorPool.Put(memoryAllocator)
e = streaming.NewEncoderWithAllocator(framer, encoder, memoryAllocator)
} else {
e = streaming.NewEncoder(framer, s.Encoder)
}
// ensure the connection times out
timeoutCh, cleanup := s.TimeoutFactory.TimeoutCh()
defer cleanup()
@ -216,26 +222,10 @@ func (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
w.WriteHeader(http.StatusOK)
flusher.Flush()
var unknown runtime.Unknown
internalEvent := &metav1.InternalEvent{}
outEvent := &metav1.WatchEvent{}
buf := runtime.NewSpliceBuffer()
watchEncoder := newWatchEncoder(req.Context(), kind, s.EmbeddedEncoder, s.Encoder, framer)
ch := s.Watching.ResultChan()
done := req.Context().Done()
embeddedEncodeFn := s.EmbeddedEncoder.Encode
if encoder, supportsAllocator := s.EmbeddedEncoder.(runtime.EncoderWithAllocator); supportsAllocator {
if memoryAllocator == nil {
// don't put the allocator inside the embeddedEncodeFn as that would allocate memory on every call.
// instead, we allocate the buffer for the entire watch session and release it when we close the connection.
memoryAllocator = runtime.AllocatorPool.Get().(*runtime.Allocator)
defer runtime.AllocatorPool.Put(memoryAllocator)
}
embeddedEncodeFn = func(obj runtime.Object, w io.Writer) error {
return encoder.EncodeWithAllocator(obj, w, memoryAllocator)
}
}
for {
select {
case <-s.ServerShuttingDownCh:
@ -257,42 +247,20 @@ func (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
return
}
metrics.WatchEvents.WithContext(req.Context()).WithLabelValues(kind.Group, kind.Version, kind.Kind).Inc()
isWatchListLatencyRecordingRequired := shouldRecordWatchListLatency(event)
obj := s.Fixup(event.Object)
if err := embeddedEncodeFn(obj, buf); err != nil {
// unexpected error
utilruntime.HandleError(fmt.Errorf("unable to encode watch object %T: %v", obj, err))
return
}
// ContentType is not required here because we are defaulting to the serializer
// type
unknown.Raw = buf.Bytes()
event.Object = &unknown
metrics.WatchEventsSizes.WithContext(req.Context()).WithLabelValues(kind.Group, kind.Version, kind.Kind).Observe(float64(len(unknown.Raw)))
*outEvent = metav1.WatchEvent{}
// create the external type directly and encode it. Clients will only recognize the serialization we provide.
// The internal event is being reused, not reallocated, so it's just a few extra assignments to do it this way
// and we get the benefit of using conversion functions which already have to stay in sync
*internalEvent = metav1.InternalEvent(event)
err := metav1.Convert_v1_InternalEvent_To_v1_WatchEvent(internalEvent, outEvent, nil)
if err != nil {
utilruntime.HandleError(fmt.Errorf("unable to convert watch object: %v", err))
// client disconnect.
return
}
if err := e.Encode(outEvent); err != nil {
utilruntime.HandleError(fmt.Errorf("unable to encode watch object %T: %v (%#v)", outEvent, err, e))
if err := watchEncoder.Encode(event); err != nil {
utilruntime.HandleError(err)
// client disconnect.
return
}
if len(ch) == 0 {
flusher.Flush()
}
buf.Reset()
if isWatchListLatencyRecordingRequired {
metrics.RecordWatchListLatency(req.Context(), s.Scope.Resource, s.metricsScope)
}
}
}
}
@ -326,10 +294,10 @@ func (s *WatchServer) HandleWS(ws *websocket.Conn) {
// End of results.
return
}
obj := s.Fixup(event.Object)
if err := s.EmbeddedEncoder.Encode(obj, buf); err != nil {
if err := s.EmbeddedEncoder.Encode(event.Object, buf); err != nil {
// unexpected error
utilruntime.HandleError(fmt.Errorf("unable to encode watch object %T: %v", obj, err))
utilruntime.HandleError(fmt.Errorf("unable to encode watch object %T: %v", event.Object, err))
return
}
@ -371,3 +339,19 @@ func (s *WatchServer) HandleWS(ws *websocket.Conn) {
}
}
}
func shouldRecordWatchListLatency(event watch.Event) bool {
if event.Type != watch.Bookmark || !utilfeature.DefaultFeatureGate.Enabled(features.WatchList) {
return false
}
// as of today the initial-events-end annotation is added only to a single event
// by the watch cache and only when certain conditions are met
//
// for more please read https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list
hasAnnotation, err := storage.HasInitialEventsEndBookmarkAnnotation(event.Object)
if err != nil {
utilruntime.HandleError(fmt.Errorf("unable to determine if the obj has the required annotation for measuring watchlist latency, obj %T: %v", event.Object, err))
return false
}
return hasAnnotation
}
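shouldRecordWatchListLatency only fires for the single bookmark the watch cache annotates as the end of the initial event stream, and only with the WatchList feature gate enabled. For context, a hedged client-side sketch of the kind of streaming-list request that produces such a stream (the ListOptions fields come from metav1; the surrounding client wiring is an assumption, not part of this change):

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumed wiring: build a client from the local kubeconfig.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	sendInitialEvents := true
	// Streaming list: the initial state arrives as synthetic watch events and is
	// terminated by the annotated bookmark that the latency metric keys on.
	w, err := client.CoreV1().Pods("default").Watch(context.TODO(), metav1.ListOptions{
		SendInitialEvents:    &sendInitialEvents,
		ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
		AllowWatchBookmarks:  true,
	})
	if err != nil {
		panic(err)
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Println(ev.Type)
	}
}
```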