rebase: update K8s packages to v0.32.1

Update K8s packages in go.mod to v0.32.1

Signed-off-by: Praveen M <m.praveen@ibm.com>
Author: Praveen M
Date: 2025-01-16 09:41:46 +05:30
Committed by: mergify[bot]
Parent: 5aef21ea4e
Commit: 7eb99fc6c9
2442 changed files with 273386 additions and 47788 deletions
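For reference, a bump like this typically lands as a coordinated version change in the go.mod require block, roughly of this shape (illustrative only; the repository's actual require block lists more modules than shown):

require (
    k8s.io/api v0.32.1
    k8s.io/apimachinery v0.32.1
    k8s.io/apiserver v0.32.1
    k8s.io/client-go v0.32.1
)

followed by go mod tidy and, for a vendored repository, go mod vendor — which would account for the large number of changed files.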

View File

@ -17,6 +17,7 @@ limitations under the License.
package discovery
import (
"context"
"net/http"
"sync"
@ -33,12 +34,21 @@ import (
// GroupManager is an interface that allows dynamic mutation of the existing webservice to handle
// API groups being added or removed.
type GroupManager interface {
GroupLister
AddGroup(apiGroup metav1.APIGroup)
RemoveGroup(groupName string)
ServeHTTP(resp http.ResponseWriter, req *http.Request)
WebService() *restful.WebService
}
// GroupLister knows how to list APIGroups for discovery.
type GroupLister interface {
// Groups returns APIGroups for discovery, filling in ServerAddressByClientCIDRs
// based on data in req.
Groups(ctx context.Context, req *http.Request) ([]metav1.APIGroup, error)
}
// rootAPIsHandler creates a webservice serving api group discovery.
// The list of APIGroups may change while the server is running because additional resources
// are registered or removed. It is not safe to cache the values.
@ -94,24 +104,40 @@ func (s *rootAPIsHandler) RemoveGroup(groupName string) {
}
}
func (s *rootAPIsHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
func (s *rootAPIsHandler) Groups(ctx context.Context, req *http.Request) ([]metav1.APIGroup, error) {
s.lock.RLock()
defer s.lock.RUnlock()
return s.groupsLocked(ctx, req), nil
}
// groupsLocked returns the APIGroupList discovery information for this handler.
// The caller must hold the lock before invoking this method to avoid data races.
func (s *rootAPIsHandler) groupsLocked(ctx context.Context, req *http.Request) []metav1.APIGroup {
clientIP := utilnet.GetClientIP(req)
serverCIDR := s.addresses.ServerAddressByClientCIDRs(clientIP)
orderedGroups := []metav1.APIGroup{}
for _, groupName := range s.apiGroupNames {
orderedGroups = append(orderedGroups, s.apiGroups[groupName])
}
clientIP := utilnet.GetClientIP(req)
serverCIDR := s.addresses.ServerAddressByClientCIDRs(clientIP)
groups := make([]metav1.APIGroup, len(orderedGroups))
for i := range orderedGroups {
groups[i] = orderedGroups[i]
groups[i].ServerAddressByClientCIDRs = serverCIDR
}
responsewriters.WriteObjectNegotiated(s.serializer, negotiation.DefaultEndpointRestrictions, schema.GroupVersion{}, resp, req, http.StatusOK, &metav1.APIGroupList{Groups: groups}, false)
return groups
}
func (s *rootAPIsHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
s.lock.RLock()
defer s.lock.RUnlock()
groupList := metav1.APIGroupList{Groups: s.groupsLocked(req.Context(), req)}
responsewriters.WriteObjectNegotiated(s.serializer, negotiation.DefaultEndpointRestrictions, schema.GroupVersion{}, resp, req, http.StatusOK, &groupList, false)
}
func (s *rootAPIsHandler) restfulHandle(req *restful.Request, resp *restful.Response) {
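For context, a minimal sketch (hypothetical, not part of this change) of how the new GroupLister interface can be consumed outside of the HTTP handler, for example to reuse the root handler's data when building a discovery response:

package example

import (
    "context"
    "net/http"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apiserver/pkg/endpoints/discovery"
)

// buildGroupList assembles a discovery APIGroupList from any GroupLister
// implementation, independent of ServeHTTP.
func buildGroupList(ctx context.Context, lister discovery.GroupLister, req *http.Request) (*metav1.APIGroupList, error) {
    groups, err := lister.Groups(ctx, req)
    if err != nil {
        return nil, err
    }
    return &metav1.APIGroupList{Groups: groups}, nil
}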

View File

@ -54,6 +54,7 @@ func withAuthentication(handler http.Handler, auth authenticator.Request, failed
}
standardRequestHeaderConfig := &authenticatorfactory.RequestHeaderConfig{
UsernameHeaders: headerrequest.StaticStringSlice{"X-Remote-User"},
UIDHeaders: headerrequest.StaticStringSlice{"X-Remote-Uid"},
GroupHeaders: headerrequest.StaticStringSlice{"X-Remote-Group"},
ExtraHeaderPrefixes: headerrequest.StaticStringSlice{"X-Remote-Extra-"},
}
@ -90,6 +91,7 @@ func withAuthentication(handler http.Handler, auth authenticator.Request, failed
headerrequest.ClearAuthenticationHeaders(
req.Header,
standardRequestHeaderConfig.UsernameHeaders,
standardRequestHeaderConfig.UIDHeaders,
standardRequestHeaderConfig.GroupHeaders,
standardRequestHeaderConfig.ExtraHeaderPrefixes,
)
@ -99,6 +101,7 @@ func withAuthentication(handler http.Handler, auth authenticator.Request, failed
headerrequest.ClearAuthenticationHeaders(
req.Header,
requestHeaderConfig.UsernameHeaders,
requestHeaderConfig.UIDHeaders,
requestHeaderConfig.GroupHeaders,
requestHeaderConfig.ExtraHeaderPrefixes,
)

View File

@ -28,7 +28,6 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/managedfields"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/apiserver/pkg/endpoints/discovery"
@ -71,9 +70,6 @@ type APIGroupVersion struct {
// version (for when the inevitable meta/v2 group emerges).
MetaGroupVersion *schema.GroupVersion
// RootScopedKinds are the root scoped kinds for the primary GroupVersion
RootScopedKinds sets.String
// Serializer is used to determine how to convert responses from API methods into bytes to send over
// the wire.
Serializer runtime.NegotiatedSerializer

View File

@ -55,6 +55,7 @@ func createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Int
ctx := req.Context()
// For performance tracking purposes.
ctx, span := tracing.Start(ctx, "Create", traceFields(req)...)
req = req.WithContext(ctx)
defer span.End(500 * time.Millisecond)
namespace, name, err := scope.Namer.Name(req)

View File

@ -30,19 +30,27 @@ import (
metainternalversionvalidation "k8s.io/apimachinery/pkg/apis/meta/internalversion/validation"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/validation"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/audit"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/apiserver/pkg/endpoints/handlers/finisher"
requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics"
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/apiserver/pkg/util/apihelpers"
"k8s.io/apiserver/pkg/util/dryrun"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/tracing"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"
)
// DeleteResource returns a function that will handle a resource deletion
@ -52,6 +60,7 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestSc
ctx := req.Context()
// For performance tracking purposes.
ctx, span := tracing.Start(ctx, "Delete", traceFields(req)...)
req = req.WithContext(ctx)
defer span.End(500 * time.Millisecond)
namespace, name, err := scope.Namer.Name(req)
@ -84,7 +93,7 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestSc
}
span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body)))
if len(body) > 0 {
s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversionscheme.Codecs)
s, err := negotiation.NegotiateInputSerializer(req, false, apihelpers.GetMetaInternalVersionCodecs())
if err != nil {
scope.err(err, w, req)
return
@ -92,7 +101,7 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestSc
// For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions
// It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions
defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions")
obj, gvk, err := metainternalversionscheme.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options)
obj, gvk, err := apihelpers.GetMetaInternalVersionCodecs().DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options)
if err != nil {
scope.err(err, w, req)
return
@ -104,7 +113,7 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestSc
span.AddEvent("Decoded delete options")
objGV := gvk.GroupVersion()
audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, metainternalversionscheme.Codecs)
audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, apihelpers.GetMetaInternalVersionCodecs())
span.AddEvent("Recorded the audit event")
} else {
if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil {
@ -114,6 +123,9 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestSc
}
}
}
if !utilfeature.DefaultFeatureGate.Enabled(features.AllowUnsafeMalformedObjectDeletion) && options != nil {
options.IgnoreStoreReadErrorWithClusterBreakingPotential = nil
}
if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 {
err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs)
scope.err(err, w, req)
@ -121,10 +133,36 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestSc
}
options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions"))
span.AddEvent("About to delete object from database")
wasDeleted := true
userInfo, _ := request.UserFrom(ctx)
staticAdmissionAttrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo)
if utilfeature.DefaultFeatureGate.Enabled(features.AllowUnsafeMalformedObjectDeletion) {
if options != nil && ptr.Deref(options.IgnoreStoreReadErrorWithClusterBreakingPotential, false) {
// let's make sure that the audit will reflect that this delete request
// was tried with ignoreStoreReadErrorWithClusterBreakingPotential enabled
audit.AddAuditAnnotation(ctx, "apiserver.k8s.io/unsafe-delete-ignore-read-error", "")
p, ok := r.(rest.CorruptObjectDeleterProvider)
if !ok || p.GetCorruptObjDeleter() == nil {
// this is a developer error
scope.err(errors.NewInternalError(fmt.Errorf("no unsafe deleter provided, can not honor ignoreStoreReadErrorWithClusterBreakingPotential")), w, req)
return
}
if scope.Authorizer == nil {
scope.err(errors.NewInternalError(fmt.Errorf("no authorizer provided, unable to authorize unsafe delete")), w, req)
return
}
if err := authorizeUnsafeDelete(ctx, staticAdmissionAttrs, scope.Authorizer); err != nil {
scope.err(err, w, req)
return
}
r = p.GetCorruptObjDeleter()
}
}
span.AddEvent("About to delete object from database")
wasDeleted := true
result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) {
obj, deleted, err := r.Delete(ctx, name, rest.AdmissionToValidateObjectDeleteFunc(admit, staticAdmissionAttrs, scope), options)
wasDeleted = deleted
@ -172,6 +210,7 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestSc
return func(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
ctx, span := tracing.Start(ctx, "Delete", traceFields(req)...)
req = req.WithContext(ctx)
defer span.End(500 * time.Millisecond)
namespace, err := scope.Namer.Namespace(req)
@ -229,7 +268,7 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestSc
}
span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body)))
if len(body) > 0 {
s, err := negotiation.NegotiateInputSerializer(req, false, metainternalversionscheme.Codecs)
s, err := negotiation.NegotiateInputSerializer(req, false, apihelpers.GetMetaInternalVersionCodecs())
if err != nil {
scope.err(err, w, req)
return
@ -237,7 +276,7 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestSc
// For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions
// It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions
defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions")
obj, gvk, err := metainternalversionscheme.Codecs.DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options)
obj, gvk, err := apihelpers.GetMetaInternalVersionCodecs().DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options)
if err != nil {
scope.err(err, w, req)
return
@ -248,7 +287,7 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestSc
}
objGV := gvk.GroupVersion()
audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, metainternalversionscheme.Codecs)
audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, apihelpers.GetMetaInternalVersionCodecs())
} else {
if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil {
err = errors.NewBadRequest(err.Error())
@ -257,11 +296,26 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestSc
}
}
}
if !utilfeature.DefaultFeatureGate.Enabled(features.AllowUnsafeMalformedObjectDeletion) && options != nil {
options.IgnoreStoreReadErrorWithClusterBreakingPotential = nil
}
if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 {
err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs)
scope.err(err, w, req)
return
}
if utilfeature.DefaultFeatureGate.Enabled(features.AllowUnsafeMalformedObjectDeletion) {
if options != nil && ptr.Deref(options.IgnoreStoreReadErrorWithClusterBreakingPotential, false) {
fieldErrList := field.ErrorList{
field.Invalid(field.NewPath("ignoreStoreReadErrorWithClusterBreakingPotential"), true, "is not allowed with DELETECOLLECTION, try again after removing the option"),
}
err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", fieldErrList)
scope.err(err, w, req)
return
}
}
options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions"))
admit = admission.WithAudit(admit)
@ -292,3 +346,77 @@ func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestSc
transformResponseObject(ctx, scope, req, w, http.StatusOK, outputMediaType, result)
}
}
// authorizeUnsafeDelete ensures that the user has permission to do
// 'unsafe-delete-ignore-read-errors' on the resource being deleted when
// ignoreStoreReadErrorWithClusterBreakingPotential is enabled
func authorizeUnsafeDelete(ctx context.Context, attr admission.Attributes, authz authorizer.Authorizer) (err error) {
if attr.GetOperation() != admission.Delete || attr.GetOperationOptions() == nil {
return nil
}
options, ok := attr.GetOperationOptions().(*metav1.DeleteOptions)
if !ok {
return errors.NewInternalError(fmt.Errorf("expected an option of type: %T, but got: %T", &metav1.DeleteOptions{}, attr.GetOperationOptions()))
}
if !ptr.Deref(options.IgnoreStoreReadErrorWithClusterBreakingPotential, false) {
return nil
}
requestInfo, found := request.RequestInfoFrom(ctx)
if !found {
return admission.NewForbidden(attr, fmt.Errorf("no RequestInfo found in the context"))
}
if !requestInfo.IsResourceRequest || len(attr.GetSubresource()) > 0 {
return admission.NewForbidden(attr, fmt.Errorf("ignoreStoreReadErrorWithClusterBreakingPotential delete option is not allowed on a subresource or non-resource request"))
}
// if we are here, IgnoreStoreReadErrorWithClusterBreakingPotential
// is set to true in the delete options, so the user must have permission
// to do 'unsafe-delete-ignore-read-errors' on the given resource.
record := authorizer.AttributesRecord{
User: attr.GetUserInfo(),
Verb: "unsafe-delete-ignore-read-errors",
Namespace: attr.GetNamespace(),
Name: attr.GetName(),
APIGroup: attr.GetResource().Group,
APIVersion: attr.GetResource().Version,
Resource: attr.GetResource().Resource,
ResourceRequest: true,
}
// TODO: can't use ResourceAttributesFrom from k8s.io/kubernetes/pkg/registry/authorization/util
// because that would introduce a staging --> k8s.io/kubernetes dependency
if utilfeature.DefaultFeatureGate.Enabled(features.AuthorizeWithSelectors) {
if len(requestInfo.FieldSelector) > 0 {
fieldSelector, err := fields.ParseSelector(requestInfo.FieldSelector)
if err != nil {
record.FieldSelectorRequirements, record.FieldSelectorParsingErr = nil, err
} else {
if requirements := fieldSelector.Requirements(); len(requirements) > 0 {
record.FieldSelectorRequirements, record.FieldSelectorParsingErr = fieldSelector.Requirements(), nil
}
}
}
if len(requestInfo.LabelSelector) > 0 {
labelSelector, err := labels.Parse(requestInfo.LabelSelector)
if err != nil {
record.LabelSelectorRequirements, record.LabelSelectorParsingErr = nil, err
} else {
if requirements, _ /*selectable*/ := labelSelector.Requirements(); len(requirements) > 0 {
record.LabelSelectorRequirements, record.LabelSelectorParsingErr = requirements, nil
}
}
}
}
decision, reason, err := authz.Authorize(ctx, record)
if err != nil {
err = fmt.Errorf("error while checking permission for %q, %w", record.Verb, err)
klog.FromContext(ctx).V(1).Error(err, "failed to authorize")
return admission.NewForbidden(attr, err)
}
if decision == authorizer.DecisionAllow {
return nil
}
return admission.NewForbidden(attr, fmt.Errorf("not permitted to do %q, reason: %s", record.Verb, reason))
}
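For illustration (hypothetical, not part of this change), a client opting in to the unsafe-delete path guarded by authorizeUnsafeDelete might look like this, assuming client-go, the AllowUnsafeMalformedObjectDeletion gate enabled on the server, and RBAC granting the unsafe-delete-ignore-read-errors verb:

package example

import (
    "context"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/utils/ptr"
)

// unsafeDeleteConfigMap asks the server to delete a ConfigMap even if its
// stored object can no longer be read (e.g. it fails to decrypt or decode),
// which is cluster-breaking if misused.
func unsafeDeleteConfigMap(ctx context.Context, client kubernetes.Interface, namespace, name string) error {
    return client.CoreV1().ConfigMaps(namespace).Delete(ctx, name, metav1.DeleteOptions{
        IgnoreStoreReadErrorWithClusterBreakingPotential: ptr.To(true),
    })
}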

View File

@ -45,6 +45,7 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/tracing"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"
)
// getterFunc performs a get request with the given context and object name. The request
@ -57,6 +58,7 @@ func getResourceHandler(scope *RequestScope, getter getterFunc) http.HandlerFunc
return func(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
ctx, span := tracing.Start(ctx, "Get", traceFields(req)...)
req = req.WithContext(ctx)
defer span.End(500 * time.Millisecond)
namespace, name, err := scope.Namer.Name(req)
@ -171,6 +173,7 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatc
ctx := req.Context()
// For performance tracking purposes.
ctx, span := tracing.Start(ctx, "List", traceFields(req)...)
req = req.WithContext(ctx)
namespace, err := scope.Namer.Namespace(req)
if err != nil {
@ -185,15 +188,8 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatc
if err != nil {
hasName = false
}
ctx = request.WithNamespace(ctx, namespace)
outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)
if err != nil {
scope.err(err, w, req)
return
}
opts := metainternalversion.ListOptions{}
if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &opts); err != nil {
err = errors.NewBadRequest(err.Error())
@ -208,6 +204,17 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatc
return
}
var restrictions negotiation.EndpointRestrictions
restrictions = scope
if isListWatchRequest(opts) {
restrictions = &watchListEndpointRestrictions{scope}
}
outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, restrictions)
if err != nil {
scope.err(err, w, req)
return
}
// transform fields
// TODO: DecodeParametersInto should do this.
if opts.FieldSelector != nil {
@ -258,6 +265,16 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatc
if timeout == 0 && minRequestTimeout > 0 {
timeout = time.Duration(float64(minRequestTimeout) * (rand.Float64() + 1.0))
}
var emptyVersionedList runtime.Object
if isListWatchRequest(opts) {
emptyVersionedList, err = scope.Convertor.ConvertToVersion(r.NewList(), scope.Kind.GroupVersion())
if err != nil {
scope.err(errors.NewInternalError(err), w, req)
return
}
}
klog.V(3).InfoS("Starting watch", "path", req.URL.Path, "resourceVersion", opts.ResourceVersion, "labels", opts.LabelSelector, "fields", opts.FieldSelector, "timeout", timeout)
ctx, cancel := context.WithTimeout(ctx, timeout)
defer func() { cancel() }()
@ -266,7 +283,7 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatc
scope.err(err, w, req)
return
}
handler, err := serveWatchHandler(watcher, scope, outputMediaType, req, w, timeout, metrics.CleanListScope(ctx, &opts))
handler, err := serveWatchHandler(watcher, scope, outputMediaType, req, w, timeout, metrics.CleanListScope(ctx, &opts), emptyVersionedList)
if err != nil {
scope.err(err, w, req)
return
@ -307,3 +324,18 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatc
transformResponseObject(ctx, scope, req, w, http.StatusOK, outputMediaType, result)
}
}
type watchListEndpointRestrictions struct {
negotiation.EndpointRestrictions
}
func (e *watchListEndpointRestrictions) AllowsMediaTypeTransform(mimeType, mimeSubType string, target *schema.GroupVersionKind) bool {
if target != nil && target.Kind == "Table" {
return false
}
return e.EndpointRestrictions.AllowsMediaTypeTransform(mimeType, mimeSubType, target)
}
func isListWatchRequest(opts metainternalversion.ListOptions) bool {
return utilfeature.DefaultFeatureGate.Enabled(features.WatchList) && ptr.Deref(opts.SendInitialEvents, false) && opts.AllowWatchBookmarks
}
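For context, a hypothetical client-go call that satisfies isListWatchRequest and therefore takes the watch-list negotiation path above (requires the WatchList feature on the server):

package example

import (
    "context"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/kubernetes"
    "k8s.io/utils/ptr"
)

// watchListPods issues a watch that first streams the current state as
// synthetic ADDED events, ends the initial list with a bookmark, and only
// afterwards behaves like a regular watch.
func watchListPods(ctx context.Context, client kubernetes.Interface, namespace string) (watch.Interface, error) {
    return client.CoreV1().Pods(namespace).Watch(ctx, metav1.ListOptions{
        SendInitialEvents:    ptr.To(true),
        AllowWatchBookmarks:  true,
        ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
    })
}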

View File

@ -26,6 +26,8 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/features"
utilfeature "k8s.io/apiserver/pkg/util/feature"
)
// MediaTypesForSerializer returns a list of media and stream media types for the server.
@ -33,6 +35,10 @@ func MediaTypesForSerializer(ns runtime.NegotiatedSerializer) (mediaTypes, strea
for _, info := range ns.SupportedMediaTypes() {
mediaTypes = append(mediaTypes, info.MediaType)
if info.StreamSerializer != nil {
if utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) && info.MediaType == runtime.ContentTypeCBOR {
streamMediaTypes = append(streamMediaTypes, runtime.ContentTypeCBORSequence)
continue
}
// stream=watch is the existing mime-type parameter for watch
streamMediaTypes = append(streamMediaTypes, info.MediaType+";stream=watch")
}

View File

@ -35,9 +35,11 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/validation"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/managedfields"
"k8s.io/apimachinery/pkg/util/mergepatch"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/validation/field"
@ -50,8 +52,10 @@ import (
requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics"
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/apiserver/pkg/util/dryrun"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/tracing"
)
@ -66,6 +70,7 @@ func PatchResource(r rest.Patcher, scope *RequestScope, admit admission.Interfac
ctx := req.Context()
// For performance tracking purposes.
ctx, span := tracing.Start(ctx, "Patch", traceFields(req)...)
req = req.WithContext(ctx)
defer span.End(500 * time.Millisecond)
// Do this first, otherwise name extraction can fail for unrecognized content types
@ -128,10 +133,25 @@ func PatchResource(r rest.Patcher, scope *RequestScope, admit admission.Interfac
audit.LogRequestPatch(req.Context(), patchBytes)
span.AddEvent("Recorded the audit event")
baseContentType := runtime.ContentTypeJSON
if patchType == types.ApplyPatchType {
var baseContentType string
switch patchType {
case types.ApplyYAMLPatchType:
baseContentType = runtime.ContentTypeYAML
case types.ApplyCBORPatchType:
if !utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) {
// This request should have already been rejected by the
// Content-Type allowlist check. Return 500 because assumptions are
// already broken and the feature is not GA.
utilruntime.HandleErrorWithContext(req.Context(), nil, "The patch content-type allowlist check should have made this unreachable.")
scope.err(errors.NewInternalError(fmt.Errorf("unexpected patch type: %v", patchType)), w, req)
return
}
baseContentType = runtime.ContentTypeCBOR
default:
baseContentType = runtime.ContentTypeJSON
}
s, ok := runtime.SerializerInfoForMediaType(scope.Serializer.SupportedMediaTypes(), baseContentType)
if !ok {
scope.err(fmt.Errorf("no serializer defined for %v", baseContentType), w, req)
@ -451,6 +471,20 @@ func (p *smpPatcher) createNewObject(_ context.Context) (runtime.Object, error)
return nil, errors.NewNotFound(p.resource.GroupResource(), p.name)
}
func newApplyPatcher(p *patcher, fieldManager *managedfields.FieldManager, unmarshalFn, unmarshalStrictFn func([]byte, interface{}) error) *applyPatcher {
return &applyPatcher{
fieldManager: fieldManager,
patch: p.patchBytes,
options: p.options,
creater: p.creater,
kind: p.kind,
userAgent: p.userAgent,
validationDirective: p.validationDirective,
unmarshalFn: unmarshalFn,
unmarshalStrictFn: unmarshalStrictFn,
}
}
type applyPatcher struct {
patch []byte
options *metav1.PatchOptions
@ -459,6 +493,8 @@ type applyPatcher struct {
fieldManager *managedfields.FieldManager
userAgent string
validationDirective string
unmarshalFn func(data []byte, v interface{}) error
unmarshalStrictFn func(data []byte, v interface{}) error
}
func (p *applyPatcher) applyPatchToCurrentObject(requestContext context.Context, obj runtime.Object) (runtime.Object, error) {
@ -471,7 +507,7 @@ func (p *applyPatcher) applyPatchToCurrentObject(requestContext context.Context,
}
patchObj := &unstructured.Unstructured{Object: map[string]interface{}{}}
if err := yaml.Unmarshal(p.patch, &patchObj.Object); err != nil {
if err := p.unmarshalFn(p.patch, &patchObj.Object); err != nil {
return nil, errors.NewBadRequest(fmt.Sprintf("error decoding YAML: %v", err))
}
@ -483,7 +519,7 @@ func (p *applyPatcher) applyPatchToCurrentObject(requestContext context.Context,
// TODO: spawn something to track deciding whether a fieldValidation=Strict
// fatal error should return before an error from the apply operation
if p.validationDirective == metav1.FieldValidationStrict || p.validationDirective == metav1.FieldValidationWarn {
if err := yaml.UnmarshalStrict(p.patch, &map[string]interface{}{}); err != nil {
if err := p.unmarshalStrictFn(p.patch, &map[string]interface{}{}); err != nil {
if p.validationDirective == metav1.FieldValidationStrict {
return nil, errors.NewBadRequest(fmt.Sprintf("error strict decoding YAML: %v", err))
}
@ -633,16 +669,21 @@ func (p *patcher) patchResource(ctx context.Context, scope *RequestScope) (runti
fieldManager: scope.FieldManager,
}
// this case is unreachable if ServerSideApply is not enabled because we will have already rejected the content type
case types.ApplyPatchType:
p.mechanism = &applyPatcher{
fieldManager: scope.FieldManager,
patch: p.patchBytes,
options: p.options,
creater: p.creater,
kind: p.kind,
userAgent: p.userAgent,
validationDirective: p.validationDirective,
case types.ApplyYAMLPatchType:
p.mechanism = newApplyPatcher(p, scope.FieldManager, yaml.Unmarshal, yaml.UnmarshalStrict)
p.forceAllowCreate = true
case types.ApplyCBORPatchType:
if !utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) {
utilruntime.HandleErrorWithContext(context.TODO(), nil, "CBOR apply requests should be rejected before reaching this point unless the feature gate is enabled.")
return nil, false, fmt.Errorf("%v: unimplemented patch type", p.patchType)
}
// The strict and non-strict funcs are the same here because any CBOR map with
// duplicate keys is invalid and always rejected outright regardless of strictness
// mode, and unknown field errors can't occur in practice because the type of the
// destination value for unmarshaling an apply configuration is always
// "unstructured".
p.mechanism = newApplyPatcher(p, scope.FieldManager, cbor.Unmarshal, cbor.Unmarshal)
p.forceAllowCreate = true
default:
return nil, false, fmt.Errorf("%v: unimplemented patch type", p.patchType)
@ -669,7 +710,7 @@ func (p *patcher) patchResource(ctx context.Context, scope *RequestScope) (runti
result, err := requestFunc()
// If the object wasn't committed to storage because its serialized size was too large,
// it is safe to remove managedFields (which can be large) and try again.
if isTooLargeError(err) && p.patchType != types.ApplyPatchType {
if isTooLargeError(err) && p.patchType != types.ApplyYAMLPatchType && p.patchType != types.ApplyCBORPatchType {
if _, accessorErr := meta.Accessor(p.restPatcher.New()); accessorErr == nil {
p.updatedObjectInfo = rest.DefaultUpdatedObjectInfo(nil,
p.applyPatch,

View File

@ -18,6 +18,7 @@ package handlers
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
@ -38,8 +39,9 @@ import (
"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
"k8s.io/apiserver/pkg/endpoints/metrics"
endpointsrequest "k8s.io/apiserver/pkg/endpoints/request"
klog "k8s.io/klog/v2"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/util/apihelpers"
"k8s.io/klog/v2"
)
// watchEmbeddedEncoder performs encoding of the embedded object.
@ -147,6 +149,8 @@ type watchEncoder struct {
encoder runtime.Encoder
framer io.Writer
watchListTransformerFn watchListTransformerFunction
buffer runtime.Splice
eventBuffer runtime.Splice
@ -154,15 +158,16 @@ type watchEncoder struct {
identifiers map[watch.EventType]runtime.Identifier
}
func newWatchEncoder(ctx context.Context, kind schema.GroupVersionKind, embeddedEncoder runtime.Encoder, encoder runtime.Encoder, framer io.Writer) *watchEncoder {
func newWatchEncoder(ctx context.Context, kind schema.GroupVersionKind, embeddedEncoder runtime.Encoder, encoder runtime.Encoder, framer io.Writer, watchListTransformerFn watchListTransformerFunction) *watchEncoder {
return &watchEncoder{
ctx: ctx,
kind: kind,
embeddedEncoder: embeddedEncoder,
encoder: encoder,
framer: framer,
buffer: runtime.NewSpliceBuffer(),
eventBuffer: runtime.NewSpliceBuffer(),
ctx: ctx,
kind: kind,
embeddedEncoder: embeddedEncoder,
encoder: encoder,
framer: framer,
watchListTransformerFn: watchListTransformerFn,
buffer: runtime.NewSpliceBuffer(),
eventBuffer: runtime.NewSpliceBuffer(),
}
}
@ -174,6 +179,12 @@ func (e *watchEncoder) Encode(event watch.Event) error {
encodeFunc := func(obj runtime.Object, w io.Writer) error {
return e.doEncode(obj, event, w)
}
if event.Type == watch.Bookmark {
// Bookmark objects are small, and we don't yet support serialization for them.
// They also need an additional transformation to support the watch-list feature.
event = e.watchListTransformerFn(event)
return encodeFunc(event.Object, e.framer)
}
if co, ok := event.Object.(runtime.CacheableObject); ok {
return co.CacheEncode(e.identifier(event.Type), encodeFunc, e.framer)
}
@ -270,7 +281,7 @@ func doTransformObject(ctx context.Context, obj runtime.Object, opts interface{}
return asTable(ctx, obj, options, scope, target.GroupVersion())
default:
accepted, _ := negotiation.MediaTypesForSerializer(metainternalversionscheme.Codecs)
accepted, _ := negotiation.MediaTypesForSerializer(apihelpers.GetMetaInternalVersionCodecs())
err := negotiation.NewNotAcceptableError(accepted)
return nil, err
}
@ -304,7 +315,7 @@ func targetEncodingForTransform(scope *RequestScope, mediaType negotiation.Media
case target == nil:
case (target.Kind == "PartialObjectMetadata" || target.Kind == "PartialObjectMetadataList" || target.Kind == "Table") &&
(target.GroupVersion() == metav1beta1.SchemeGroupVersion || target.GroupVersion() == metav1.SchemeGroupVersion):
return *target, metainternalversionscheme.Codecs, true
return *target, apihelpers.GetMetaInternalVersionCodecs(), true
}
return scope.Kind, scope.Serializer, false
}
@ -479,3 +490,94 @@ func asPartialObjectMetadataList(result runtime.Object, groupVersion schema.Grou
return nil, newNotAcceptableError(fmt.Sprintf("no PartialObjectMetadataList exists in group version %s", groupVersion))
}
}
// watchListTransformerFunction is an optional function
// applied to watchlist bookmark events that transforms
// the embedded object before sending it to a client.
type watchListTransformerFunction func(watch.Event) watch.Event
// watchListTransformer performs transformation of
// a special watchList bookmark event.
//
// The bookmark is annotated with InitialEventsListBlueprintAnnotationKey
// and contains an empty, versioned list that we must encode in the requested format
// (e.g., protobuf, JSON, CBOR) and then store as a base64-encoded string.
type watchListTransformer struct {
initialEventsListBlueprint runtime.Object
targetGVK *schema.GroupVersionKind
negotiatedEncoder runtime.Encoder
buffer runtime.Splice
}
// newWatchListTransformer returns a transformer for watchlist bookmark events.
func newWatchListTransformer(initialEventsListBlueprint runtime.Object, targetGVK *schema.GroupVersionKind, negotiatedEncoder runtime.Encoder) *watchListTransformer {
return &watchListTransformer{
initialEventsListBlueprint: initialEventsListBlueprint,
targetGVK: targetGVK,
negotiatedEncoder: negotiatedEncoder,
buffer: runtime.NewSpliceBuffer(),
}
}
func (e *watchListTransformer) transform(event watch.Event) watch.Event {
if e.initialEventsListBlueprint == nil {
return event
}
hasAnnotation, err := storage.HasInitialEventsEndBookmarkAnnotation(event.Object)
if err != nil {
return newWatchEventErrorFor(err)
}
if !hasAnnotation {
return event
}
if err = e.encodeInitialEventsListBlueprint(event.Object); err != nil {
return newWatchEventErrorFor(err)
}
return event
}
func (e *watchListTransformer) encodeInitialEventsListBlueprint(object runtime.Object) error {
initialEventsListBlueprint, err := e.transformInitialEventsListBlueprint()
if err != nil {
return err
}
defer e.buffer.Reset()
if err = e.negotiatedEncoder.Encode(initialEventsListBlueprint, e.buffer); err != nil {
return err
}
encodedInitialEventsListBlueprint := e.buffer.Bytes()
// the storage layer creates a deep copy of the obj before modifying it.
// since the object has the annotation, we can modify it directly.
objectMeta, err := meta.Accessor(object)
if err != nil {
return err
}
annotations := objectMeta.GetAnnotations()
annotations[metav1.InitialEventsListBlueprintAnnotationKey] = base64.StdEncoding.EncodeToString(encodedInitialEventsListBlueprint)
objectMeta.SetAnnotations(annotations)
return nil
}
func (e *watchListTransformer) transformInitialEventsListBlueprint() (runtime.Object, error) {
if e.targetGVK != nil && e.targetGVK.Kind == "PartialObjectMetadata" {
return asPartialObjectMetadataList(e.initialEventsListBlueprint, e.targetGVK.GroupVersion())
}
return e.initialEventsListBlueprint, nil
}
func newWatchEventErrorFor(err error) watch.Event {
return watch.Event{
Type: watch.Error,
Object: &metav1.Status{
Status: metav1.StatusFailure,
Message: err.Error(),
Reason: metav1.StatusReasonInternalError,
Code: http.StatusInternalServerError,
},
}
}
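As a hypothetical illustration of what the transformer above produces, a client could recover the embedded empty list from the final bookmark event roughly like this (a sketch, assuming a JSON-encoded watch response for Pods):

package example

import (
    "encoding/base64"
    "encoding/json"
    "fmt"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// decodeListBlueprint recovers the versioned, empty list object that the
// watch-list "initial events end" bookmark carries as a base64 annotation.
func decodeListBlueprint(bookmark *corev1.Pod) (*corev1.PodList, error) {
    raw, ok := bookmark.Annotations[metav1.InitialEventsListBlueprintAnnotationKey]
    if !ok {
        return nil, fmt.Errorf("bookmark carries no list blueprint annotation")
    }
    data, err := base64.StdEncoding.DecodeString(raw)
    if err != nil {
        return nil, err
    }
    list := &corev1.PodList{}
    if err := json.Unmarshal(data, list); err != nil {
        return nil, err
    }
    return list, nil
}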

View File

@ -34,18 +34,24 @@ var sanitizer = strings.NewReplacer(`&`, "&amp;", `<`, "&lt;", `>`, "&gt;")
// Forbidden renders a simple forbidden error
func Forbidden(ctx context.Context, attributes authorizer.Attributes, w http.ResponseWriter, req *http.Request, reason string, s runtime.NegotiatedSerializer) {
msg := sanitizer.Replace(forbiddenMessage(attributes))
w.Header().Set("X-Content-Type-Options", "nosniff")
var errMsg string
if len(reason) == 0 {
errMsg = fmt.Sprintf("%s", msg)
} else {
errMsg = fmt.Sprintf("%s: %s", msg, reason)
}
gv := schema.GroupVersion{Group: attributes.GetAPIGroup(), Version: attributes.GetAPIVersion()}
ErrorNegotiated(ForbiddenStatusError(attributes, reason), s, gv, w, req)
}
func ForbiddenStatusError(attributes authorizer.Attributes, reason string) *apierrors.StatusError {
msg := sanitizer.Replace(forbiddenMessage(attributes))
var errMsg error
if len(reason) == 0 {
errMsg = fmt.Errorf("%s", msg)
} else {
errMsg = fmt.Errorf("%s: %s", msg, reason)
}
gr := schema.GroupResource{Group: attributes.GetAPIGroup(), Resource: attributes.GetResource()}
ErrorNegotiated(apierrors.NewForbidden(gr, attributes.GetName(), fmt.Errorf(errMsg)), s, gv, w, req)
return apierrors.NewForbidden(gr, attributes.GetName(), errMsg)
}
func forbiddenMessage(attributes authorizer.Attributes) string {

View File

@ -98,6 +98,7 @@ func SerializeObject(mediaType string, encoder runtime.Encoder, hw http.Response
attribute.String("protocol", req.Proto),
attribute.String("mediaType", mediaType),
attribute.String("encoder", string(encoder.Identifier())))
req = req.WithContext(ctx)
defer span.End(5 * time.Second)
w := &deferredResponseWriter{
@ -284,7 +285,12 @@ func WriteObjectNegotiated(s runtime.NegotiatedSerializer, restrictions negotiat
audit.LogResponseObject(req.Context(), object, gv, s)
encoder := s.EncoderForVersion(serializer.Serializer, gv)
var encoder runtime.Encoder
if utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) {
encoder = s.EncoderForVersion(runtime.UseNondeterministicEncoding(serializer.Serializer), gv)
} else {
encoder = s.EncoderForVersion(serializer.Serializer, gv)
}
request.TrackSerializeResponseObjectLatency(req.Context(), func() {
if listGVKInContentType {
SerializeObject(generateMediaTypeWithGVK(serializer.MediaType, mediaType.Convert), encoder, w, req, statusCode, object)

View File

@ -39,6 +39,7 @@ import (
"k8s.io/apiserver/pkg/endpoints/handlers/finisher"
requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics"
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/apiserver/pkg/util/dryrun"
@ -52,6 +53,7 @@ func UpdateResource(r rest.Updater, scope *RequestScope, admit admission.Interfa
ctx := req.Context()
// For performance tracking purposes.
ctx, span := tracing.Start(ctx, "Update", traceFields(req)...)
req = req.WithContext(ctx)
defer span.End(500 * time.Millisecond)
namespace, name, err := scope.Namer.Name(req)
@ -275,13 +277,7 @@ func withAuthorization(validate rest.ValidateObjectFunc, a authorizer.Authorizer
}
// The user is not authorized to perform this action, so we need to build the error response
gr := schema.GroupResource{
Group: attributes.GetAPIGroup(),
Resource: attributes.GetResource(),
}
name := attributes.GetName()
err := fmt.Errorf("%v", authorizerReason)
return errors.NewForbidden(gr, name, err)
return responsewriters.ForbiddenStatusError(attributes, authorizerReason)
}
}

View File

@ -64,7 +64,7 @@ func (w *realTimeoutFactory) TimeoutCh() (<-chan time.Time, func() bool) {
// serveWatchHandler returns a handler to serve a watch response.
// TODO: the functionality in this method and in WatchServer.Serve is not cleanly decoupled.
func serveWatchHandler(watcher watch.Interface, scope *RequestScope, mediaTypeOptions negotiation.MediaTypeOptions, req *http.Request, w http.ResponseWriter, timeout time.Duration, metricsScope string) (http.Handler, error) {
func serveWatchHandler(watcher watch.Interface, scope *RequestScope, mediaTypeOptions negotiation.MediaTypeOptions, req *http.Request, w http.ResponseWriter, timeout time.Duration, metricsScope string, initialEventsListBlueprint runtime.Object) (http.Handler, error) {
options, err := optionsForTransform(mediaTypeOptions, req)
if err != nil {
return nil, err
@ -76,40 +76,62 @@ func serveWatchHandler(watcher watch.Interface, scope *RequestScope, mediaTypeOp
return nil, err
}
framer := serializer.StreamSerializer.Framer
streamSerializer := serializer.StreamSerializer.Serializer
encoder := scope.Serializer.EncoderForVersion(streamSerializer, scope.Kind.GroupVersion())
var encoder runtime.Encoder
if utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) {
encoder = scope.Serializer.EncoderForVersion(runtime.UseNondeterministicEncoding(serializer.StreamSerializer.Serializer), scope.Kind.GroupVersion())
} else {
encoder = scope.Serializer.EncoderForVersion(serializer.StreamSerializer.Serializer, scope.Kind.GroupVersion())
}
useTextFraming := serializer.EncodesAsText
if framer == nil {
return nil, fmt.Errorf("no framer defined for %q available for embedded encoding", serializer.MediaType)
}
// TODO: next step, get back mediaTypeOptions from negotiate and return the exact value here
mediaType := serializer.MediaType
if mediaType != runtime.ContentTypeJSON {
switch mediaType {
case runtime.ContentTypeJSON:
// as-is
case runtime.ContentTypeCBOR:
// If a client indicated it accepts application/cbor (exactly one data item) on a
// watch request, set the conformant application/cbor-seq media type on the watch
// response. RFC 9110 allows an origin server to deviate from the indicated
// preference rather than send a 406 (Not Acceptable) response (see
// https://www.rfc-editor.org/rfc/rfc9110.html#section-12.1-5).
mediaType = runtime.ContentTypeCBORSequence
default:
mediaType += ";stream=watch"
}
ctx := req.Context()
// locate the appropriate embedded encoder based on the transform
var embeddedEncoder runtime.Encoder
var negotiatedEncoder runtime.Encoder
contentKind, contentSerializer, transform := targetEncodingForTransform(scope, mediaTypeOptions, req)
if transform {
info, ok := runtime.SerializerInfoForMediaType(contentSerializer.SupportedMediaTypes(), serializer.MediaType)
if !ok {
return nil, fmt.Errorf("no encoder for %q exists in the requested target %#v", serializer.MediaType, contentSerializer)
}
embeddedEncoder = contentSerializer.EncoderForVersion(info.Serializer, contentKind.GroupVersion())
if utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) {
negotiatedEncoder = contentSerializer.EncoderForVersion(runtime.UseNondeterministicEncoding(info.Serializer), contentKind.GroupVersion())
} else {
negotiatedEncoder = contentSerializer.EncoderForVersion(info.Serializer, contentKind.GroupVersion())
}
} else {
embeddedEncoder = scope.Serializer.EncoderForVersion(serializer.Serializer, contentKind.GroupVersion())
if utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) {
negotiatedEncoder = scope.Serializer.EncoderForVersion(runtime.UseNondeterministicEncoding(serializer.Serializer), contentKind.GroupVersion())
} else {
negotiatedEncoder = scope.Serializer.EncoderForVersion(serializer.Serializer, contentKind.GroupVersion())
}
}
var memoryAllocator runtime.MemoryAllocator
if encoderWithAllocator, supportsAllocator := embeddedEncoder.(runtime.EncoderWithAllocator); supportsAllocator {
if encoderWithAllocator, supportsAllocator := negotiatedEncoder.(runtime.EncoderWithAllocator); supportsAllocator {
// don't put the allocator inside the embeddedEncodeFn as that would allocate memory on every call.
// instead, we allocate the buffer for the entire watch session and release it when we close the connection.
memoryAllocator = runtime.AllocatorPool.Get().(*runtime.Allocator)
embeddedEncoder = runtime.NewEncoderWithAllocator(encoderWithAllocator, memoryAllocator)
negotiatedEncoder = runtime.NewEncoderWithAllocator(encoderWithAllocator, memoryAllocator)
}
var tableOptions *metav1.TableOptions
if options != nil {
@ -119,7 +141,7 @@ func serveWatchHandler(watcher watch.Interface, scope *RequestScope, mediaTypeOp
return nil, fmt.Errorf("unexpected options type: %T", options)
}
}
embeddedEncoder = newWatchEmbeddedEncoder(ctx, embeddedEncoder, mediaTypeOptions.Convert, tableOptions, scope)
embeddedEncoder := newWatchEmbeddedEncoder(ctx, negotiatedEncoder, mediaTypeOptions.Convert, tableOptions, scope)
if encoderWithAllocator, supportsAllocator := encoder.(runtime.EncoderWithAllocator); supportsAllocator {
if memoryAllocator == nil {
@ -145,6 +167,8 @@ func serveWatchHandler(watcher watch.Interface, scope *RequestScope, mediaTypeOp
Encoder: encoder,
EmbeddedEncoder: embeddedEncoder,
watchListTransformerFn: newWatchListTransformer(initialEventsListBlueprint, mediaTypeOptions.Convert, negotiatedEncoder).transform,
MemoryAllocator: memoryAllocator,
TimeoutFactory: &realTimeoutFactory{timeout},
ServerShuttingDownCh: serverShuttingDownCh,
@ -174,6 +198,10 @@ type WatchServer struct {
Encoder runtime.Encoder
// used to encode the nested object in the watch stream
EmbeddedEncoder runtime.Encoder
// watchListTransformerFn is a function applied
// to watchlist bookmark events that transforms
// the embedded object before sending it to a client.
watchListTransformerFn watchListTransformerFunction
MemoryAllocator runtime.MemoryAllocator
TimeoutFactory TimeoutFactory
@ -219,7 +247,7 @@ func (s *WatchServer) HandleHTTP(w http.ResponseWriter, req *http.Request) {
flusher.Flush()
kind := s.Scope.Kind
watchEncoder := newWatchEncoder(req.Context(), kind, s.EmbeddedEncoder, s.Encoder, framer)
watchEncoder := newWatchEncoder(req.Context(), kind, s.EmbeddedEncoder, s.Encoder, framer, s.watchListTransformerFn)
ch := s.Watching.ResultChan()
done := req.Context().Done()
@ -288,7 +316,7 @@ func (s *WatchServer) HandleWS(ws *websocket.Conn) {
framer := newWebsocketFramer(ws, s.UseTextFraming)
kind := s.Scope.Kind
watchEncoder := newWatchEncoder(context.TODO(), kind, s.EmbeddedEncoder, s.Encoder, framer)
watchEncoder := newWatchEncoder(context.TODO(), kind, s.EmbeddedEncoder, s.Encoder, framer, s.watchListTransformerFn)
ch := s.Watching.ResultChan()
for {

View File

@ -685,9 +685,27 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag
reqScope.MetaGroupVersion = *a.group.MetaGroupVersion
}
var resetFields map[fieldpath.APIVersion]*fieldpath.Set
if resetFieldsStrategy, isResetFieldsStrategy := storage.(rest.ResetFieldsStrategy); isResetFieldsStrategy {
resetFields = resetFieldsStrategy.GetResetFields()
// Strategies may ignore changes to some fields by resetting the field values.
//
// For instance, spec resource strategies should reset the status, and status subresource
// strategies should reset the spec.
//
// Strategies that reset fields must report to the field manager which fields are
// reset by implementing either the ResetFieldsStrategy or the ResetFieldsFilterStrategy
// interface.
//
// For subresources that provide write access to only specific nested fields,
// fieldpath.NewPatternFilter can help create a filter to reset all other fields.
var resetFieldsFilter map[fieldpath.APIVersion]fieldpath.Filter
resetFieldsStrategy, isResetFieldsStrategy := storage.(rest.ResetFieldsStrategy)
if isResetFieldsStrategy {
resetFieldsFilter = fieldpath.NewExcludeFilterSetMap(resetFieldsStrategy.GetResetFields())
}
if resetFieldsStrategy, isResetFieldsFilterStrategy := storage.(rest.ResetFieldsFilterStrategy); isResetFieldsFilterStrategy {
if isResetFieldsStrategy {
return nil, nil, fmt.Errorf("may not implement both ResetFieldsStrategy and ResetFieldsFilterStrategy")
}
resetFieldsFilter = resetFieldsStrategy.GetResetFieldsFilter()
}
reqScope.FieldManager, err = managedfields.NewDefaultFieldManager(
@ -698,7 +716,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag
fqKindToRegister,
reqScope.HubGroupVersion,
subresource,
resetFields,
resetFieldsFilter,
)
if err != nil {
return nil, nil, fmt.Errorf("failed to create field manager: %v", err)
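Tying back to the reset-fields comment above, a hypothetical status-subresource strategy could report its reset fields like this (a sketch only; real strategies implement the full rest.ResetFieldsStrategy contract elsewhere):

package example

import (
    "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

// widgetStatusStrategy is a hypothetical status-subresource strategy.
type widgetStatusStrategy struct{}

// GetResetFields tells the field manager that writes through the status
// subresource never own anything under .spec.
func (widgetStatusStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
    return map[fieldpath.APIVersion]*fieldpath.Set{
        "example.io/v1": fieldpath.NewSet(fieldpath.MakePathOrDie("spec")),
    }
}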
@ -875,7 +893,10 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag
string(types.JSONPatchType),
string(types.MergePatchType),
string(types.StrategicMergePatchType),
string(types.ApplyPatchType),
string(types.ApplyYAMLPatchType),
}
if utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) {
supportedTypes = append(supportedTypes, string(types.ApplyCBORPatchType))
}
handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, deprecated, removedRelease, restfulPatchResource(patcher, reqScope, admit, supportedTypes))
handler = utilwarning.AddWarningsHandler(handler, warnings)
@ -1195,6 +1216,8 @@ func typeToJSON(typeName string) string {
return "string"
case "v1.IncludeObjectPolicy", "*v1.IncludeObjectPolicy":
return "string"
case "*string":
return "string"
// TODO: Fix these when go-restful supports a way to specify an array query param:
// https://github.com/emicklei/go-restful/issues/225

View File

@ -416,6 +416,33 @@ func Reset() {
}
}
// ResetLabelAllowLists resets the label allow lists for all metrics.
// NOTE: This is only used for testing.
func ResetLabelAllowLists() {
for _, metric := range metrics {
if counterVec, ok := metric.(*compbasemetrics.CounterVec); ok {
counterVec.ResetLabelAllowLists()
continue
}
if gaugeVec, ok := metric.(*compbasemetrics.GaugeVec); ok {
gaugeVec.ResetLabelAllowLists()
continue
}
if histogramVec, ok := metric.(*compbasemetrics.HistogramVec); ok {
histogramVec.ResetLabelAllowLists()
continue
}
if summaryVec, ok := metric.(*compbasemetrics.SummaryVec); ok {
summaryVec.ResetLabelAllowLists()
continue
}
if timingHistogramVec, ok := metric.(*compbasemetrics.TimingHistogramVec); ok {
timingHistogramVec.ResetLabelAllowLists()
continue
}
}
}
// UpdateInflightRequestMetrics reports concurrency metrics classified by
// mutating vs Readonly.
func UpdateInflightRequestMetrics(phase string, nonmutating, mutating int) {