Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 02:33:34 +00:00)

rebase: update kubernetes to 1.30

Updating kubernetes to the 1.30 release.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Committed by: mergify[bot]
Parent: 62ddcf715b
Commit: e727bd351e
vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/etag.go (generated, vendored; 4 lines changed)

@@ -24,6 +24,7 @@ import (
     "strconv"

     "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
 )

@@ -39,6 +40,7 @@ import (
 func ServeHTTPWithETag(
     object runtime.Object,
     hash string,
+    targetGV schema.GroupVersion,
     serializer runtime.NegotiatedSerializer,
     w http.ResponseWriter,
     req *http.Request,

@@ -64,7 +66,7 @@ func ServeHTTPWithETag(
     responsewriters.WriteObjectNegotiated(
         serializer,
         DiscoveryEndpointRestrictions,
-        AggregatedDiscoveryGV,
+        targetGV,
         w,
         req,
         http.StatusOK,
vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/fake.go (generated, vendored; 6 lines changed)

@@ -26,7 +26,7 @@ import (

     "github.com/emicklei/go-restful/v3"
     "github.com/google/go-cmp/cmp"
-    apidiscoveryv2beta1 "k8s.io/api/apidiscovery/v2beta1"
+    apidiscoveryv2 "k8s.io/api/apidiscovery/v2"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/wait"
 )

@@ -122,7 +122,7 @@ func (f *recorderResourceManager) SetGroupVersionPriority(gv metav1.GroupVersion
     })
 }

-func (f *recorderResourceManager) AddGroupVersion(groupName string, value apidiscoveryv2beta1.APIVersionDiscovery) {
+func (f *recorderResourceManager) AddGroupVersion(groupName string, value apidiscoveryv2.APIVersionDiscovery) {
     f.lock.Lock()
     defer f.lock.Unlock()

@@ -153,7 +153,7 @@ func (f *recorderResourceManager) RemoveGroupVersion(gv metav1.GroupVersion) {
     })

 }
-func (f *recorderResourceManager) SetGroups(values []apidiscoveryv2beta1.APIGroupDiscovery) {
+func (f *recorderResourceManager) SetGroups(values []apidiscoveryv2.APIGroupDiscovery) {
     f.lock.Lock()
     defer f.lock.Unlock()
vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/handler.go (generated, vendored; 72 lines changed)

@@ -17,15 +17,22 @@ limitations under the License.
 package aggregated

 import (
+    "fmt"
     "net/http"
     "reflect"
     "sort"
     "sync"

+    apidiscoveryv2 "k8s.io/api/apidiscovery/v2"
     apidiscoveryv2beta1 "k8s.io/api/apidiscovery/v2beta1"
+    "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/apimachinery/pkg/runtime/serializer"
     "k8s.io/apimachinery/pkg/version"
+    apidiscoveryv2conversion "k8s.io/apiserver/pkg/apis/apidiscovery/v2"

     "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"

+    "k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
     "k8s.io/apiserver/pkg/endpoints/metrics"

     "sync/atomic"

@@ -51,7 +58,7 @@ type ResourceManager interface {
     // Adds knowledge of the given groupversion to the discovery document
     // If it was already being tracked, updates the stored APIVersionDiscovery
     // Thread-safe
-    AddGroupVersion(groupName string, value apidiscoveryv2beta1.APIVersionDiscovery)
+    AddGroupVersion(groupName string, value apidiscoveryv2.APIVersionDiscovery)

     // Sets a priority to be used while sorting a specific group and
     // group-version. If two versions report different priorities for

@@ -72,7 +79,7 @@ type ResourceManager interface {
     // Resets the manager's known list of group-versions and replaces them
     // with the given groups
     // Thread-Safe
-    SetGroups([]apidiscoveryv2beta1.APIGroupDiscovery)
+    SetGroups([]apidiscoveryv2.APIGroupDiscovery)

     // Returns the same resource manager using a different source
     // The source is used to decide how to de-duplicate groups.

@@ -87,7 +94,7 @@ type resourceManager struct {
     *resourceDiscoveryManager
 }

-func (rm resourceManager) AddGroupVersion(groupName string, value apidiscoveryv2beta1.APIVersionDiscovery) {
+func (rm resourceManager) AddGroupVersion(groupName string, value apidiscoveryv2.APIVersionDiscovery) {
     rm.resourceDiscoveryManager.AddGroupVersion(rm.source, groupName, value)
 }
 func (rm resourceManager) SetGroupVersionPriority(gv metav1.GroupVersion, grouppriority, versionpriority int) {

@@ -99,7 +106,7 @@ func (rm resourceManager) RemoveGroup(groupName string) {
 func (rm resourceManager) RemoveGroupVersion(gv metav1.GroupVersion) {
     rm.resourceDiscoveryManager.RemoveGroupVersion(rm.source, gv)
 }
-func (rm resourceManager) SetGroups(groups []apidiscoveryv2beta1.APIGroupDiscovery) {
+func (rm resourceManager) SetGroups(groups []apidiscoveryv2.APIGroupDiscovery) {
     rm.resourceDiscoveryManager.SetGroups(rm.source, groups)
 }

@@ -133,7 +140,7 @@ type resourceDiscoveryManager struct {
     // Writes protected by the lock.
     // List of all apigroups & resources indexed by the resource manager
     lock              sync.RWMutex
-    apiGroups         map[groupKey]*apidiscoveryv2beta1.APIGroupDiscovery
+    apiGroups         map[groupKey]*apidiscoveryv2.APIGroupDiscovery
     versionPriorities map[groupVersionKey]priorityInfo
 }

@@ -144,8 +151,12 @@ type priorityInfo struct {

 func NewResourceManager(path string) ResourceManager {
     scheme := runtime.NewScheme()
-    codecs := serializer.NewCodecFactory(scheme)
+    utilruntime.Must(apidiscoveryv2.AddToScheme(scheme))
     utilruntime.Must(apidiscoveryv2beta1.AddToScheme(scheme))
+    // Register conversion for apidiscovery
+    utilruntime.Must(apidiscoveryv2conversion.RegisterConversions(scheme))
+
+    codecs := serializer.NewCodecFactory(scheme)
     rdm := &resourceDiscoveryManager{
         serializer:        codecs,
         versionPriorities: make(map[groupVersionKey]priorityInfo),

@@ -181,7 +192,7 @@ func (rdm *resourceDiscoveryManager) SetGroupVersionPriority(source Source, gv m
     rdm.cache.Store(nil)
 }

-func (rdm *resourceDiscoveryManager) SetGroups(source Source, groups []apidiscoveryv2beta1.APIGroupDiscovery) {
+func (rdm *resourceDiscoveryManager) SetGroups(source Source, groups []apidiscoveryv2.APIGroupDiscovery) {
     rdm.lock.Lock()
     defer rdm.lock.Unlock()

@@ -221,17 +232,17 @@ func (rdm *resourceDiscoveryManager) SetGroups(source Source, groups []apidiscov
         }
     }

-func (rdm *resourceDiscoveryManager) AddGroupVersion(source Source, groupName string, value apidiscoveryv2beta1.APIVersionDiscovery) {
+func (rdm *resourceDiscoveryManager) AddGroupVersion(source Source, groupName string, value apidiscoveryv2.APIVersionDiscovery) {
     rdm.lock.Lock()
     defer rdm.lock.Unlock()

     rdm.addGroupVersionLocked(source, groupName, value)
 }

-func (rdm *resourceDiscoveryManager) addGroupVersionLocked(source Source, groupName string, value apidiscoveryv2beta1.APIVersionDiscovery) {
+func (rdm *resourceDiscoveryManager) addGroupVersionLocked(source Source, groupName string, value apidiscoveryv2.APIVersionDiscovery) {

     if rdm.apiGroups == nil {
-        rdm.apiGroups = make(map[groupKey]*apidiscoveryv2beta1.APIGroupDiscovery)
+        rdm.apiGroups = make(map[groupKey]*apidiscoveryv2.APIGroupDiscovery)
     }

     key := groupKey{

@@ -264,11 +275,11 @@ func (rdm *resourceDiscoveryManager) addGroupVersionLocked(source Source, groupN
         }

     } else {
-        group := &apidiscoveryv2beta1.APIGroupDiscovery{
+        group := &apidiscoveryv2.APIGroupDiscovery{
             ObjectMeta: metav1.ObjectMeta{
                 Name: groupName,
             },
-            Versions: []apidiscoveryv2beta1.APIVersionDiscovery{value},
+            Versions: []apidiscoveryv2.APIVersionDiscovery{value},
         }
         rdm.apiGroups[key] = group
     }

@@ -354,12 +365,12 @@ func (rdm *resourceDiscoveryManager) RemoveGroup(source Source, groupName string

 // Prepares the api group list for serving by converting them from map into
 // list and sorting them according to insertion order
-func (rdm *resourceDiscoveryManager) calculateAPIGroupsLocked() []apidiscoveryv2beta1.APIGroupDiscovery {
+func (rdm *resourceDiscoveryManager) calculateAPIGroupsLocked() []apidiscoveryv2.APIGroupDiscovery {
     regenerationCounter.Inc()
     // Re-order the apiGroups by their priority.
-    groups := []apidiscoveryv2beta1.APIGroupDiscovery{}
+    groups := []apidiscoveryv2.APIGroupDiscovery{}

-    groupsToUse := map[string]apidiscoveryv2beta1.APIGroupDiscovery{}
+    groupsToUse := map[string]apidiscoveryv2.APIGroupDiscovery{}
     sourcesUsed := map[metav1.GroupVersion]Source{}

     for key, group := range rdm.apiGroups {

@@ -475,7 +486,7 @@ func (rdm *resourceDiscoveryManager) fetchFromCache() *cachedGroupList {
     if cacheLoad != nil {
         return cacheLoad
     }
-    response := apidiscoveryv2beta1.APIGroupDiscoveryList{
+    response := apidiscoveryv2.APIGroupDiscoveryList{
         Items: rdm.calculateAPIGroupsLocked(),
     }
     etag, err := calculateETag(response)

@@ -492,7 +503,13 @@ func (rdm *resourceDiscoveryManager) fetchFromCache() *cachedGroupList {
 }

 type cachedGroupList struct {
-    cachedResponse apidiscoveryv2beta1.APIGroupDiscoveryList
+    cachedResponse apidiscoveryv2.APIGroupDiscoveryList
+    // etag is calculated based on a SHA hash of only the JSON object.
+    // A response via different Accept encodings (eg: protobuf, json) will
+    // yield the same etag. This is okay because Accept is part of the Vary header.
+    // Per RFC7231 a client must only cache a response etag pair if the header field
+    // matches as indicated by the Vary field. Thus, protobuf and json and other Accept
+    // encodings will not be cached as the same response despite having the same etag.
     cachedResponseETag string
 }

@@ -505,11 +522,30 @@ func (rdm *resourceDiscoveryManager) serveHTTP(resp http.ResponseWriter, req *ht
     response := cache.cachedResponse
     etag := cache.cachedResponseETag

+    mediaType, _, err := negotiation.NegotiateOutputMediaType(req, rdm.serializer, DiscoveryEndpointRestrictions)
+    if err != nil {
+        // Should never happen. wrapper.go will only proxy requests to this
+        // handler if the media type passes DiscoveryEndpointRestrictions
+        utilruntime.HandleError(err)
+        resp.WriteHeader(http.StatusInternalServerError)
+        return
+    }
+    var targetGV schema.GroupVersion
+    if mediaType.Convert == nil ||
+        (mediaType.Convert.GroupVersion() != apidiscoveryv2.SchemeGroupVersion &&
+            mediaType.Convert.GroupVersion() != apidiscoveryv2beta1.SchemeGroupVersion) {
+        utilruntime.HandleError(fmt.Errorf("expected aggregated discovery group version, got group: %s, version %s", mediaType.Convert.Group, mediaType.Convert.Version))
+        resp.WriteHeader(http.StatusInternalServerError)
+        return
+    }
+    targetGV = mediaType.Convert.GroupVersion()
+
     if len(etag) > 0 {
         // Use proper e-tag headers if one is available
         ServeHTTPWithETag(
             &response,
             etag,
+            targetGV,
             rdm.serializer,
             resp,
             req,

@@ -520,7 +556,7 @@ func (rdm *resourceDiscoveryManager) serveHTTP(resp http.ResponseWriter, req *ht
     responsewriters.WriteObjectNegotiated(
         rdm.serializer,
         DiscoveryEndpointRestrictions,
-        AggregatedDiscoveryGV,
+        targetGV,
         resp,
         req,
         http.StatusOK,
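For orientation, a hedged sketch of how a caller might feed the updated ResourceManager interface now that AddGroupVersion takes the apidiscovery v2 types. NewResourceManager, AddGroupVersion and the ScopeNamespace/Resource/ResponseKind/Verbs names appear in this commit's hunks; the group, version and resource values below are illustrative, and the Version/Resources field names are as recalled from k8s.io/api/apidiscovery/v2 rather than taken from this diff:

    package main

    import (
        apidiscoveryv2 "k8s.io/api/apidiscovery/v2"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apiserver/pkg/endpoints/discovery/aggregated"
    )

    func main() {
        // Register one illustrative group-version with the aggregated discovery manager.
        rm := aggregated.NewResourceManager("apis")
        rm.AddGroupVersion("apps", apidiscoveryv2.APIVersionDiscovery{
            Version: "v1",
            Resources: []apidiscoveryv2.APIResourceDiscovery{{
                Resource:     "deployments",
                Scope:        apidiscoveryv2.ScopeNamespace,
                ResponseKind: &metav1.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"},
                Verbs:        []string{"get", "list", "watch"},
            }},
        })
        _ = rm // the manager normally backs the aggregated /apis discovery endpoint (see serveHTTP above)
    }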
vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/negotiation.go (generated, vendored; 4 lines changed)

@@ -20,8 +20,6 @@ import (
     "k8s.io/apimachinery/pkg/runtime/schema"
 )

-var AggregatedDiscoveryGV = schema.GroupVersion{Group: "apidiscovery.k8s.io", Version: "v2beta1"}
-
 // Interface is from "k8s.io/apiserver/pkg/endpoints/handlers/negotiation"

 // DiscoveryEndpointRestrictions allows requests to /apis to provide a Content Negotiation GVK for aggregated discovery.

@@ -39,7 +37,7 @@ func (discoveryEndpointRestrictions) AllowsStreamSchema(s string) bool { return
 // IsAggregatedDiscoveryGVK checks if a provided GVK is the GVK for serving aggregated discovery.
 func IsAggregatedDiscoveryGVK(gvk *schema.GroupVersionKind) bool {
     if gvk != nil {
-        return gvk.Group == "apidiscovery.k8s.io" && gvk.Version == "v2beta1" && gvk.Kind == "APIGroupDiscoveryList"
+        return gvk.Group == "apidiscovery.k8s.io" && (gvk.Version == "v2beta1" || gvk.Version == "v2") && gvk.Kind == "APIGroupDiscoveryList"
     }
     return false
 }
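A small illustration of the widened check above; the surrounding program is made up, but the GVKs and the exported IsAggregatedDiscoveryGVK helper come straight from this file:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/runtime/schema"
        "k8s.io/apiserver/pkg/endpoints/discovery/aggregated"
    )

    func main() {
        v2 := schema.GroupVersionKind{Group: "apidiscovery.k8s.io", Version: "v2", Kind: "APIGroupDiscoveryList"}
        v2beta1 := schema.GroupVersionKind{Group: "apidiscovery.k8s.io", Version: "v2beta1", Kind: "APIGroupDiscoveryList"}

        // After this change both versions are accepted as aggregated-discovery GVKs.
        fmt.Println(aggregated.IsAggregatedDiscoveryGVK(&v2))      // true
        fmt.Println(aggregated.IsAggregatedDiscoveryGVK(&v2beta1)) // true
    }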
vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/wrapper.go (generated, vendored; 7 lines changed)

@@ -19,8 +19,10 @@ package aggregated
 import (
     "net/http"

+    apidiscoveryv2 "k8s.io/api/apidiscovery/v2"
     apidiscoveryv2beta1 "k8s.io/api/apidiscovery/v2beta1"
     "k8s.io/apimachinery/pkg/runtime/serializer"
+    utilruntime "k8s.io/apimachinery/pkg/util/runtime"

     "github.com/emicklei/go-restful/v3"
     "k8s.io/apimachinery/pkg/runtime"

@@ -69,10 +71,11 @@ func (wrapped *WrappedHandler) GenerateWebService(prefix string, returnType inte
 // WrapAggregatedDiscoveryToHandler wraps a handler with an option to
 // emit the aggregated discovery by passing in the aggregated
 // discovery type in content negotiation headers: eg: (Accept:
-// application/json;v=v2beta1;g=apidiscovery.k8s.io;as=APIGroupDiscoveryList)
+// application/json;v=v2;g=apidiscovery.k8s.io;as=APIGroupDiscoveryList)
 func WrapAggregatedDiscoveryToHandler(handler http.Handler, aggHandler http.Handler) *WrappedHandler {
     scheme := runtime.NewScheme()
-    apidiscoveryv2beta1.AddToScheme(scheme)
+    utilruntime.Must(apidiscoveryv2.AddToScheme(scheme))
+    utilruntime.Must(apidiscoveryv2beta1.AddToScheme(scheme))
     codecs := serializer.NewCodecFactory(scheme)
     return &WrappedHandler{codecs, handler, aggHandler}
 }
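The comment above spells out the content-negotiation header clients use to request the aggregated document in its v2 form. A hedged, client-side sketch of issuing such a request; the server URL is a placeholder and a real cluster additionally needs TLS configuration and credentials:

    package main

    import (
        "fmt"
        "io"
        "net/http"
    )

    func main() {
        // Ask /apis for the aggregated discovery document, using the Accept
        // parameters quoted in the comment above. The URL is illustrative.
        req, err := http.NewRequest(http.MethodGet, "https://kubernetes.default.svc/apis", nil)
        if err != nil {
            panic(err)
        }
        req.Header.Set("Accept", "application/json;v=v2;g=apidiscovery.k8s.io;as=APIGroupDiscoveryList")

        resp, err := http.DefaultClient.Do(req) // real clusters need CA bundle and bearer token setup
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        body, _ := io.ReadAll(resp.Body)
        fmt.Println(resp.Status, len(body), "bytes")
    }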
vendor/k8s.io/apiserver/pkg/endpoints/filters/request_deadline.go (generated, vendored; 14 lines changed)

@@ -58,7 +58,7 @@ func withRequestDeadline(handler http.Handler, sink audit.Sink, policy audit.Pol

     requestInfo, ok := request.RequestInfoFrom(ctx)
     if !ok {
-        handleError(w, req, http.StatusInternalServerError, fmt.Errorf("no RequestInfo found in context, handler chain must be wrong"))
+        handleError(w, req, http.StatusInternalServerError, nil, "no RequestInfo found in context, handler chain must be wrong")
         return
     }
     if longRunning(req, requestInfo) {

@@ -166,8 +166,12 @@ func parseTimeout(req *http.Request) (time.Duration, bool, error) {
     return timeout, true, nil
 }

-func handleError(w http.ResponseWriter, r *http.Request, code int, err error) {
-    errorMsg := fmt.Sprintf("Error - %s: %#v", err.Error(), r.RequestURI)
-    http.Error(w, errorMsg, code)
-    klog.Errorf(errorMsg)
+// handleError does the following:
+// a) it writes the specified error code, and msg to the ResponseWriter
+// object, it does not print the given innerErr into the ResponseWriter object.
+// b) additionally, it prints the given msg, and innerErr to the log with other
+// request scoped data that helps identify the given request.
+func handleError(w http.ResponseWriter, r *http.Request, code int, innerErr error, msg string) {
+    http.Error(w, msg, code)
+    klog.ErrorSDepth(1, innerErr, msg, "method", r.Method, "URI", r.RequestURI, "auditID", audit.GetAuditIDTruncated(r.Context()))
 }
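The reworked handleError keeps the internal error out of the HTTP body and logs it with request-scoped fields instead. A self-contained sketch of the same pattern, with the standard library's log/slog standing in for klog's structured logging; only the handleError signature is taken from the diff above:

    package main

    import (
        "errors"
        "log/slog"
        "net/http"
        "net/http/httptest"
    )

    // handleError mirrors the new signature above: the client sees only msg and
    // the status code, while innerErr plus request-scoped fields go to the log.
    func handleError(w http.ResponseWriter, r *http.Request, code int, innerErr error, msg string) {
        http.Error(w, msg, code)
        slog.Error(msg, "err", innerErr, "method", r.Method, "URI", r.RequestURI)
    }

    func main() {
        req := httptest.NewRequest(http.MethodGet, "/apis/apps/v1/deployments", nil)
        rec := httptest.NewRecorder()
        handleError(rec, req, http.StatusInternalServerError,
            errors.New("no RequestInfo found in context"),
            "no RequestInfo found in context, handler chain must be wrong")
        // rec.Body now contains only the short message, never the inner error.
    }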
vendor/k8s.io/apiserver/pkg/endpoints/filters/webhook_duration.go (generated, vendored; 3 lines changed)

@@ -18,7 +18,6 @@ package filters

 import (
     "context"
-    "fmt"
     "net/http"
     "time"

@@ -39,7 +38,7 @@ func WithLatencyTrackers(handler http.Handler) http.Handler {
     ctx := req.Context()
     requestInfo, ok := request.RequestInfoFrom(ctx)
     if !ok {
-        handleError(w, req, http.StatusInternalServerError, fmt.Errorf("no RequestInfo found in context, handler chain must be wrong"))
+        handleError(w, req, http.StatusInternalServerError, nil, "no RequestInfo found in context, handler chain must be wrong")
         return
     }
vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go (generated, vendored; 4 lines changed)

@@ -22,7 +22,7 @@ import (

     restful "github.com/emicklei/go-restful/v3"

-    apidiscoveryv2beta1 "k8s.io/api/apidiscovery/v2beta1"
+    apidiscoveryv2 "k8s.io/api/apidiscovery/v2"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/runtime/schema"

@@ -107,7 +107,7 @@ type APIGroupVersion struct {
 // InstallREST registers the REST handlers (storage, watch, proxy and redirect) into a restful Container.
 // It is expected that the provided path root prefix will serve all operations. Root MUST NOT end
 // in a slash.
-func (g *APIGroupVersion) InstallREST(container *restful.Container) ([]apidiscoveryv2beta1.APIResourceDiscovery, []*storageversion.ResourceInfo, error) {
+func (g *APIGroupVersion) InstallREST(container *restful.Container) ([]apidiscoveryv2.APIResourceDiscovery, []*storageversion.ResourceInfo, error) {
     prefix := path.Join(g.Root, g.GroupVersion.Group, g.GroupVersion.Version)
     installer := &APIInstaller{
         group: g,
vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/node.yaml (generated, vendored; 2 lines changed)

@@ -120,7 +120,7 @@ status:
     type: PIDPressure
   - lastHeartbeatTime: "2019-09-20T19:32:50Z"
     lastTransitionTime: "2019-07-09T16:17:49Z"
-    message: kubelet is posting ready status. AppArmor enabled
+    message: kubelet is posting ready status
     reason: KubeletReady
     status: "True"
     type: Ready
vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go (generated, vendored; 32 lines changed)

@@ -41,6 +41,7 @@ import (
     "k8s.io/apiserver/pkg/endpoints/request"
     "k8s.io/apiserver/pkg/features"
     "k8s.io/apiserver/pkg/registry/rest"
+    genericfilters "k8s.io/apiserver/pkg/server/filters"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     "k8s.io/component-base/tracing"
     "k8s.io/klog/v2"

@@ -259,16 +260,37 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatc
     }
     klog.V(3).InfoS("Starting watch", "path", req.URL.Path, "resourceVersion", opts.ResourceVersion, "labels", opts.LabelSelector, "fields", opts.FieldSelector, "timeout", timeout)
     ctx, cancel := context.WithTimeout(ctx, timeout)
-    defer cancel()
+    defer func() { cancel() }()
     watcher, err := rw.Watch(ctx, &opts)
     if err != nil {
         scope.err(err, w, req)
         return
     }
-    requestInfo, _ := request.RequestInfoFrom(ctx)
-    metrics.RecordLongRunning(req, requestInfo, metrics.APIServerComponent, func() {
-        serveWatch(watcher, scope, outputMediaType, req, w, timeout, metrics.CleanListScope(ctx, &opts))
-    })
+    handler, err := serveWatchHandler(watcher, scope, outputMediaType, req, w, timeout, metrics.CleanListScope(ctx, &opts))
+    if err != nil {
+        scope.err(err, w, req)
+        return
+    }
+    // Invalidate cancel() to defer until serve() is complete.
+    deferredCancel := cancel
+    cancel = func() {}
+
+    serve := func() {
+        defer deferredCancel()
+        requestInfo, _ := request.RequestInfoFrom(ctx)
+        metrics.RecordLongRunning(req, requestInfo, metrics.APIServerComponent, func() {
+            defer watcher.Stop()
+            handler.ServeHTTP(w, req)
+        })
+    }
+
+    // Run watch serving in a separate goroutine to allow freeing current stack memory
+    t := genericfilters.TaskFrom(req.Context())
+    if t != nil {
+        t.Func = serve
+    } else {
+        serve()
+    }
     return
 }
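The subtle part of this hunk is the cancel handoff: "defer cancel()" becomes "defer func() { cancel() }()" so the deferred call reads whatever cancel points to at return time, letting the code swap in a no-op and hand the real cancel to the asynchronous serve. A minimal, self-contained sketch of that idiom; the timed work stands in for serving the watch:

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
        // Deferring a closure (not cancel itself) means the value of cancel is
        // looked up when main returns, so it can be swapped out below.
        defer func() { cancel() }()

        // Hand ownership of the real cancel to the async work; the deferred
        // call becomes a no-op and no longer kills the context early.
        deferredCancel := cancel
        cancel = func() {}

        done := make(chan struct{})
        go func() {
            defer close(done)
            defer deferredCancel() // context is released only when the work finishes
            select {
            case <-time.After(100 * time.Millisecond): // stand-in for handler.ServeHTTP
                fmt.Println("served; context still alive:", ctx.Err() == nil)
            case <-ctx.Done():
                fmt.Println("timed out:", ctx.Err())
            }
        }()
        <-done
    }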
vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go (generated, vendored; 141 lines changed)

@@ -17,8 +17,9 @@ limitations under the License.
 package handlers

 import (
     "bytes"
     "context"
     "fmt"
+    "io"
     "net/http"
     "time"

@@ -61,30 +62,25 @@ func (w *realTimeoutFactory) TimeoutCh() (<-chan time.Time, func() bool) {
     return t.C, t.Stop
 }

-// serveWatch will serve a watch response.
+// serveWatchHandler returns a handle to serve a watch response.
 // TODO: the functionality in this method and in WatchServer.Serve is not cleanly decoupled.
-func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions negotiation.MediaTypeOptions, req *http.Request, w http.ResponseWriter, timeout time.Duration, metricsScope string) {
-    defer watcher.Stop()
-
+func serveWatchHandler(watcher watch.Interface, scope *RequestScope, mediaTypeOptions negotiation.MediaTypeOptions, req *http.Request, w http.ResponseWriter, timeout time.Duration, metricsScope string) (http.Handler, error) {
     options, err := optionsForTransform(mediaTypeOptions, req)
     if err != nil {
-        scope.err(err, w, req)
-        return
+        return nil, err
     }

     // negotiate for the stream serializer from the scope's serializer
     serializer, err := negotiation.NegotiateOutputMediaTypeStream(req, scope.Serializer, scope)
     if err != nil {
-        scope.err(err, w, req)
-        return
+        return nil, err
     }
     framer := serializer.StreamSerializer.Framer
     streamSerializer := serializer.StreamSerializer.Serializer
     encoder := scope.Serializer.EncoderForVersion(streamSerializer, scope.Kind.GroupVersion())
     useTextFraming := serializer.EncodesAsText
     if framer == nil {
-        scope.err(fmt.Errorf("no framer defined for %q available for embedded encoding", serializer.MediaType), w, req)
-        return
+        return nil, fmt.Errorf("no framer defined for %q available for embedded encoding", serializer.MediaType)
     }
     // TODO: next step, get back mediaTypeOptions from negotiate and return the exact value here
     mediaType := serializer.MediaType

@@ -100,8 +96,7 @@ func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions n
     if transform {
         info, ok := runtime.SerializerInfoForMediaType(contentSerializer.SupportedMediaTypes(), serializer.MediaType)
         if !ok {
-            scope.err(fmt.Errorf("no encoder for %q exists in the requested target %#v", serializer.MediaType, contentSerializer), w, req)
-            return
+            return nil, fmt.Errorf("no encoder for %q exists in the requested target %#v", serializer.MediaType, contentSerializer)
         }
         embeddedEncoder = contentSerializer.EncoderForVersion(info.Serializer, contentKind.GroupVersion())
     } else {

@@ -114,7 +109,6 @@ func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions n
         // don't put the allocator inside the embeddedEncodeFn as that would allocate memory on every call.
         // instead, we allocate the buffer for the entire watch session and release it when we close the connection.
         memoryAllocator = runtime.AllocatorPool.Get().(*runtime.Allocator)
-        defer runtime.AllocatorPool.Put(memoryAllocator)
         embeddedEncoder = runtime.NewEncoderWithAllocator(encoderWithAllocator, memoryAllocator)
     }
     var tableOptions *metav1.TableOptions

@@ -122,8 +116,7 @@ func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions n
         if passedOptions, ok := options.(*metav1.TableOptions); ok {
             tableOptions = passedOptions
         } else {
-            scope.err(fmt.Errorf("unexpected options type: %T", options), w, req)
-            return
+            return nil, fmt.Errorf("unexpected options type: %T", options)
         }
     }
     embeddedEncoder = newWatchEmbeddedEncoder(ctx, embeddedEncoder, mediaTypeOptions.Convert, tableOptions, scope)

@@ -133,7 +126,6 @@ func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions n
         // don't put the allocator inside the embeddedEncodeFn as that would allocate memory on every call.
         // instead, we allocate the buffer for the entire watch session and release it when we close the connection.
         memoryAllocator = runtime.AllocatorPool.Get().(*runtime.Allocator)
-        defer runtime.AllocatorPool.Put(memoryAllocator)
     }
     encoder = runtime.NewEncoderWithAllocator(encoderWithAllocator, memoryAllocator)
 }

@@ -153,13 +145,18 @@ func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions n
     Encoder:         encoder,
     EmbeddedEncoder: embeddedEncoder,

+    MemoryAllocator:      memoryAllocator,
     TimeoutFactory:       &realTimeoutFactory{timeout},
     ServerShuttingDownCh: serverShuttingDownCh,

     metricsScope: metricsScope,
 }

-    server.ServeHTTP(w, req)
+    if wsstream.IsWebSocketRequest(req) {
+        w.Header().Set("Content-Type", server.MediaType)
+        return websocket.Handler(server.HandleWS), nil
+    }
+    return http.HandlerFunc(server.HandleHTTP), nil
 }

@@ -178,22 +175,21 @@ type WatchServer struct {
     // used to encode the nested object in the watch stream
     EmbeddedEncoder runtime.Encoder

+    MemoryAllocator      runtime.MemoryAllocator
     TimeoutFactory       TimeoutFactory
     ServerShuttingDownCh <-chan struct{}

     metricsScope string
 }

-// ServeHTTP serves a series of encoded events via HTTP with Transfer-Encoding: chunked
+// HandleHTTP serves a series of encoded events via HTTP with Transfer-Encoding: chunked.
 // or over a websocket connection.
-func (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
-    kind := s.Scope.Kind
-
-    if wsstream.IsWebSocketRequest(req) {
-        w.Header().Set("Content-Type", s.MediaType)
-        websocket.Handler(s.HandleWS).ServeHTTP(w, req)
-        return
-    }
+func (s *WatchServer) HandleHTTP(w http.ResponseWriter, req *http.Request) {
+    defer func() {
+        if s.MemoryAllocator != nil {
+            runtime.AllocatorPool.Put(s.MemoryAllocator)
+        }
+    }()

     flusher, ok := w.(http.Flusher)
     if !ok {

@@ -222,6 +218,7 @@ func (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
     w.WriteHeader(http.StatusOK)
     flusher.Flush()

+    kind := s.Scope.Kind
     watchEncoder := newWatchEncoder(req.Context(), kind, s.EmbeddedEncoder, s.Encoder, framer)
     ch := s.Watching.ResultChan()
     done := req.Context().Done()

@@ -265,10 +262,19 @@ func (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
     }
 }

-// HandleWS implements a websocket handler.
+// HandleWS serves a series of encoded events over a websocket connection.
 func (s *WatchServer) HandleWS(ws *websocket.Conn) {
+    defer func() {
+        if s.MemoryAllocator != nil {
+            runtime.AllocatorPool.Put(s.MemoryAllocator)
+        }
+    }()
+
     defer ws.Close()
     done := make(chan struct{})
     // ensure the connection times out
     timeoutCh, cleanup := s.TimeoutFactory.TimeoutCh()
     defer cleanup()

     go func() {
         defer utilruntime.HandleCrash()

@@ -279,67 +285,62 @@ func (s *WatchServer) HandleWS(ws *websocket.Conn) {
         close(done)
     }()

-    var unknown runtime.Unknown
-    internalEvent := &metav1.InternalEvent{}
-    buf := &bytes.Buffer{}
-    streamBuf := &bytes.Buffer{}
+    framer := newWebsocketFramer(ws, s.UseTextFraming)

+    kind := s.Scope.Kind
+    watchEncoder := newWatchEncoder(context.TODO(), kind, s.EmbeddedEncoder, s.Encoder, framer)
     ch := s.Watching.ResultChan()

     for {
         select {
         case <-done:
             return
         case <-timeoutCh:
             return
         case event, ok := <-ch:
             if !ok {
                 // End of results.
                 return
             }

-            if err := s.EmbeddedEncoder.Encode(event.Object, buf); err != nil {
-                // unexpected error
-                utilruntime.HandleError(fmt.Errorf("unable to encode watch object %T: %v", event.Object, err))
-                return
-            }
-
-            // ContentType is not required here because we are defaulting to the serializer
-            // type
-            unknown.Raw = buf.Bytes()
-            event.Object = &unknown
-
-            // the internal event will be versioned by the encoder
-            // create the external type directly and encode it. Clients will only recognize the serialization we provide.
-            // The internal event is being reused, not reallocated so its just a few extra assignments to do it this way
-            // and we get the benefit of using conversion functions which already have to stay in sync
-            outEvent := &metav1.WatchEvent{}
-            *internalEvent = metav1.InternalEvent(event)
-            err := metav1.Convert_v1_InternalEvent_To_v1_WatchEvent(internalEvent, outEvent, nil)
-            if err != nil {
-                utilruntime.HandleError(fmt.Errorf("unable to convert watch object: %v", err))
+            if err := watchEncoder.Encode(event); err != nil {
+                utilruntime.HandleError(err)
+                // client disconnect.
                 return
             }
-            if err := s.Encoder.Encode(outEvent, streamBuf); err != nil {
-                // encoding error
-                utilruntime.HandleError(fmt.Errorf("unable to encode event: %v", err))
-                return
-            }
-            if s.UseTextFraming {
-                if err := websocket.Message.Send(ws, streamBuf.String()); err != nil {
-                    // Client disconnect.
-                    return
-                }
-            } else {
-                if err := websocket.Message.Send(ws, streamBuf.Bytes()); err != nil {
-                    // Client disconnect.
-                    return
-                }
-            }
-            buf.Reset()
-            streamBuf.Reset()
         }
     }
 }

+type websocketFramer struct {
+    ws             *websocket.Conn
+    useTextFraming bool
+}
+
+func newWebsocketFramer(ws *websocket.Conn, useTextFraming bool) io.Writer {
+    return &websocketFramer{
+        ws:             ws,
+        useTextFraming: useTextFraming,
+    }
+}
+
+func (w *websocketFramer) Write(p []byte) (int, error) {
+    if w.useTextFraming {
+        // bytes.Buffer::String() has a special handling of nil value, but given
+        // we're writing serialized watch events, this will never happen here.
+        if err := websocket.Message.Send(w.ws, string(p)); err != nil {
+            return 0, err
+        }
+        return len(p), nil
+    }
+    if err := websocket.Message.Send(w.ws, p); err != nil {
+        return 0, err
+    }
+    return len(p), nil
+}
+
+var _ io.Writer = &websocketFramer{}
+
 func shouldRecordWatchListLatency(event watch.Event) bool {
     if event.Type != watch.Bookmark || !utilfeature.DefaultFeatureGate.Enabled(features.WatchList) {
         return false
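newWebsocketFramer above adapts websocket's message-oriented Send call into the io.Writer that the shared watchEncoder expects, so the HTTP and websocket paths reuse one encoding pipeline. A dependency-free sketch of the same adapter shape, with a send callback standing in for *websocket.Conn:

    package main

    import (
        "fmt"
        "io"
    )

    // messageFramer turns a message-oriented send function into an io.Writer,
    // mirroring the websocketFramer added above: every Write becomes one frame.
    type messageFramer struct {
        send func([]byte) error
    }

    func (m *messageFramer) Write(p []byte) (int, error) {
        if err := m.send(p); err != nil {
            return 0, err
        }
        return len(p), nil
    }

    var _ io.Writer = &messageFramer{}

    func main() {
        framer := &messageFramer{send: func(frame []byte) error {
            fmt.Printf("frame: %q\n", frame) // stand-in for websocket.Message.Send
            return nil
        }}
        // An encoder writing to framer now emits one message per encoded event.
        fmt.Fprint(framer, `{"type":"ADDED"}`)
    }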
vendor/k8s.io/apiserver/pkg/endpoints/installer.go (generated, vendored; 36 lines changed)

@@ -26,7 +26,9 @@ import (
     "unicode"

     restful "github.com/emicklei/go-restful/v3"
-    apidiscoveryv2beta1 "k8s.io/api/apidiscovery/v2beta1"
+    "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+
+    apidiscoveryv2 "k8s.io/api/apidiscovery/v2"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/conversion"
     "k8s.io/apimachinery/pkg/runtime"

@@ -46,12 +48,12 @@
     "k8s.io/apiserver/pkg/storageversion"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     versioninfo "k8s.io/component-base/version"
-    "sigs.k8s.io/structured-merge-diff/v4/fieldpath"
 )

 const (
-    ROUTE_META_GVK    = "x-kubernetes-group-version-kind"
-    ROUTE_META_ACTION = "x-kubernetes-action"
+    RouteMetaGVK              = "x-kubernetes-group-version-kind"
+    RouteMetaSelectableFields = "x-kubernetes-selectable-fields"
+    RouteMetaAction           = "x-kubernetes-action"
 )

 type APIInstaller struct {

@@ -69,8 +71,8 @@ type action struct {
     AllNamespaces bool // true iff the action is namespaced but works on aggregate result for all namespaces
 }

-func ConvertGroupVersionIntoToDiscovery(list []metav1.APIResource) ([]apidiscoveryv2beta1.APIResourceDiscovery, error) {
-    var apiResourceList []apidiscoveryv2beta1.APIResourceDiscovery
+func ConvertGroupVersionIntoToDiscovery(list []metav1.APIResource) ([]apidiscoveryv2.APIResourceDiscovery, error) {
+    var apiResourceList []apidiscoveryv2.APIResourceDiscovery
     parentResources := make(map[string]int)

     // Loop through all top-level resources

@@ -80,14 +82,14 @@ func ConvertGroupVersionIntoToDiscovery(list []metav1.APIResource) ([]apidiscove
         continue
     }

-    var scope apidiscoveryv2beta1.ResourceScope
+    var scope apidiscoveryv2.ResourceScope
     if r.Namespaced {
-        scope = apidiscoveryv2beta1.ScopeNamespace
+        scope = apidiscoveryv2.ScopeNamespace
     } else {
-        scope = apidiscoveryv2beta1.ScopeCluster
+        scope = apidiscoveryv2.ScopeCluster
     }

-    resource := apidiscoveryv2beta1.APIResourceDiscovery{
+    resource := apidiscoveryv2.APIResourceDiscovery{
         Resource: r.Name,
         Scope:    scope,
         ResponseKind: &metav1.GroupVersionKind{

@@ -114,17 +116,17 @@ func ConvertGroupVersionIntoToDiscovery(list []metav1.APIResource) ([]apidiscove
         continue
     }

-    var scope apidiscoveryv2beta1.ResourceScope
+    var scope apidiscoveryv2.ResourceScope
     if r.Namespaced {
-        scope = apidiscoveryv2beta1.ScopeNamespace
+        scope = apidiscoveryv2.ScopeNamespace
     } else {
-        scope = apidiscoveryv2beta1.ScopeCluster
+        scope = apidiscoveryv2.ScopeCluster
     }

     parentidx, exists := parentResources[split[0]]
     if !exists {
         // If a subresource exists without a parent, create a parent
-        apiResourceList = append(apiResourceList, apidiscoveryv2beta1.APIResourceDiscovery{
+        apiResourceList = append(apiResourceList, apidiscoveryv2.APIResourceDiscovery{
             Resource: split[0],
             Scope:    scope,
             // avoid nil panics in v0.26.0-v0.26.3 client-go clients

@@ -140,7 +142,7 @@ func ConvertGroupVersionIntoToDiscovery(list []metav1.APIResource) ([]apidiscove
         //
     }

-    subresource := apidiscoveryv2beta1.APISubresourceDiscovery{
+    subresource := apidiscoveryv2.APISubresourceDiscovery{
         Subresource: split[1],
         Verbs:       r.Verbs,
         // avoid nil panics in v0.26.0-v0.26.3 client-go clients

@@ -1059,12 +1061,12 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag
         return nil, nil, fmt.Errorf("unrecognized action verb: %s", action.Verb)
     }
     for _, route := range routes {
-        route.Metadata(ROUTE_META_GVK, metav1.GroupVersionKind{
+        route.Metadata(RouteMetaGVK, metav1.GroupVersionKind{
             Group:   reqScope.Kind.Group,
             Version: reqScope.Kind.Version,
             Kind:    reqScope.Kind.Kind,
         })
-        route.Metadata(ROUTE_META_ACTION, strings.ToLower(action.Verb))
+        route.Metadata(RouteMetaAction, strings.ToLower(action.Verb))
         ws.Route(route)
     }
     // Note: update GetAuthorizerAttributes() when adding a custom handler.
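A hedged example of calling ConvertGroupVersionIntoToDiscovery, whose signature above now returns the v2 discovery types; the resource list is illustrative, and the folding of subresources under their parents follows the loop shown in the hunks:

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apiserver/pkg/endpoints"
    )

    func main() {
        resources := []metav1.APIResource{
            {Name: "deployments", Namespaced: true, Kind: "Deployment",
                Verbs: metav1.Verbs{"get", "list", "watch", "create", "update", "delete"}},
            {Name: "deployments/status", Namespaced: true, Kind: "Deployment",
                Verbs: metav1.Verbs{"get", "update", "patch"}},
        }

        discovery, err := endpoints.ConvertGroupVersionIntoToDiscovery(resources)
        if err != nil {
            panic(err)
        }
        // deployments/status is folded under its parent entry, so one
        // top-level APIResourceDiscovery comes back for this list.
        fmt.Printf("%d top-level resources; first is %q\n", len(discovery), discovery[0].Resource)
    }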
vendor/k8s.io/apiserver/pkg/endpoints/request/webhook_duration.go (generated, vendored; 23 lines changed)

@@ -152,6 +152,13 @@ type LatencyTrackers struct {
     // The Write method can be invoked multiple times, so we use a
     // latency tracker that sums up the duration from each call.
     ResponseWriteTracker DurationTracker
+
+    // DecodeTracker is used to track latency incurred inside the function
+    // that takes an object returned from the underlying storage layer
+    // (etcd) and performs decoding of the response object.
+    // When called multiple times, the latency incurred inside to
+    // decode func each time will be summed up.
+    DecodeTracker DurationTracker
 }

 type latencyTrackersKeyType int

@@ -177,6 +184,7 @@ func WithLatencyTrackersAndCustomClock(parent context.Context, c clock.Clock) co
     TransformTracker:     newSumLatencyTracker(c),
     SerializationTracker: newSumLatencyTracker(c),
     ResponseWriteTracker: newSumLatencyTracker(c),
+    DecodeTracker:        newSumLatencyTracker(c),
 })
 }

@@ -243,6 +251,17 @@ func TrackAPFQueueWaitLatency(ctx context.Context, d time.Duration) {
     }
 }

+// TrackDecodeLatency is used to track latency incurred inside the function
+// that takes an object returned from the underlying storage layer
+// (etcd) and performs decoding of the response object.
+// When called multiple times, the latency incurred inside to
+// decode func each time will be summed up.
+func TrackDecodeLatency(ctx context.Context, d time.Duration) {
+    if tracker, ok := LatencyTrackersFrom(ctx); ok {
+        tracker.DecodeTracker.TrackDuration(d)
+    }
+}
+
 // AuditAnnotationsFromLatencyTrackers will inspect each latency tracker
 // associated with the request context and return a set of audit
 // annotations that can be added to the API audit entry.

@@ -254,6 +273,7 @@ func AuditAnnotationsFromLatencyTrackers(ctx context.Context) map[string]string
     responseWriteLatencyKey     = "apiserver.latency.k8s.io/response-write"
     mutatingWebhookLatencyKey   = "apiserver.latency.k8s.io/mutating-webhook"
     validatingWebhookLatencyKey = "apiserver.latency.k8s.io/validating-webhook"
+    decodeLatencyKey            = "apiserver.latency.k8s.io/decode-response-object"
 )

 tracker, ok := LatencyTrackersFrom(ctx)

@@ -280,6 +300,9 @@ func AuditAnnotationsFromLatencyTrackers(ctx context.Context) map[string]string
     if latency := tracker.ValidatingWebhookTracker.GetLatency(); latency != 0 {
         annotations[validatingWebhookLatencyKey] = latency.String()
     }
+    if latency := tracker.DecodeTracker.GetLatency(); latency != 0 {
+        annotations[decodeLatencyKey] = latency.String()
+    }

     return annotations
 }
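The new DecodeTracker follows the same summing behaviour as the other trackers and ultimately surfaces as the apiserver.latency.k8s.io/decode-response-object audit annotation. A dependency-free sketch of that summing-tracker idea; the types here are illustrative, not the apiserver's own:

    package main

    import (
        "fmt"
        "time"
    )

    // sumTracker mimics the "sum latency tracker" behaviour described above:
    // repeated calls accumulate, and GetLatency reports the total.
    type sumTracker struct{ total time.Duration }

    func (t *sumTracker) TrackDuration(d time.Duration) { t.total += d }
    func (t *sumTracker) GetLatency() time.Duration     { return t.total }

    func main() {
        decode := &sumTracker{}

        // Two decode passes on the same request are summed, matching the comment
        // "the latency incurred inside to decode func each time will be summed up".
        decode.TrackDuration(3 * time.Millisecond)
        decode.TrackDuration(2 * time.Millisecond)

        annotations := map[string]string{}
        if latency := decode.GetLatency(); latency != 0 {
            annotations["apiserver.latency.k8s.io/decode-response-object"] = latency.String()
        }
        fmt.Println(annotations)
    }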