Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 02:33:34 +00:00
build: move e2e dependencies into e2e/go.mod
Several packages are only used while running the e2e suite. These packages are less important to update, as they cannot influence the final executable that is part of the Ceph-CSI container-image. By moving these dependencies out of the main Ceph-CSI go.mod, it is easier to identify whether a reported CVE affects Ceph-CSI or only the testing (like most of the Kubernetes CVEs).

Signed-off-by: Niels de Vos <ndevos@ibm.com>
Committed by: mergify[bot]
Parent: 15da101b1b
Commit: bec6090996
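For context on the layout this commit describes, below is a minimal, hypothetical sketch of a nested test-only module; the module path, Go version, and require list are illustrative assumptions, not the actual contents of e2e/go.mod in this commit.

	// e2e/go.mod — illustrative sketch only (module path and versions are assumed)
	module github.com/ceph/ceph-csi/e2e

	go 1.22

	require (
		github.com/onsi/ginkgo/v2 v2.0.0 // e2e test framework (placeholder version)
		k8s.io/apiserver v0.0.0          // test-only dependency, vendored below (placeholder version)
	)

With such a split, dependency and CVE scans against the root go.mod should only report packages that can actually reach the container image, while the e2e-only packages (like the vendored files below) live in the nested module.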
2  e2e/vendor/k8s.io/apiserver/pkg/endpoints/OWNERS  generated  vendored  Normal file
@@ -0,0 +1,2 @@
approvers:
- apelisse
134  e2e/vendor/k8s.io/apiserver/pkg/endpoints/deprecation/deprecation.go  generated  vendored  Normal file
@@ -0,0 +1,134 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package deprecation

import (
	"fmt"
	"regexp"
	"strconv"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/version"
)

type apiLifecycleDeprecated interface {
	APILifecycleDeprecated() (major, minor int)
}

type apiLifecycleRemoved interface {
	APILifecycleRemoved() (major, minor int)
}

type apiLifecycleReplacement interface {
	APILifecycleReplacement() schema.GroupVersionKind
}

// extract all digits at the beginning of the string
var leadingDigits = regexp.MustCompile(`^(\d+)`)

// MajorMinor parses a numeric major/minor version from the provided version info.
// The minor version drops all characters after the first non-digit character:
//
//	version.Info{Major:"1", Minor:"2+"} -> 1,2
//	version.Info{Major:"1", Minor:"2.3-build4"} -> 1,2
func MajorMinor(v version.Info) (int, int, error) {
	major, err := strconv.Atoi(v.Major)
	if err != nil {
		return 0, 0, err
	}
	minor, err := strconv.Atoi(leadingDigits.FindString(v.Minor))
	if err != nil {
		return 0, 0, err
	}
	return major, minor, nil
}

// IsDeprecated returns true if obj implements APILifecycleDeprecated() and returns
// a major/minor version that is non-zero and is <= the specified current major/minor version.
func IsDeprecated(obj runtime.Object, currentMajor, currentMinor int) bool {
	deprecated, isDeprecated := obj.(apiLifecycleDeprecated)
	if !isDeprecated {
		return false
	}

	deprecatedMajor, deprecatedMinor := deprecated.APILifecycleDeprecated()
	// no deprecation version expressed
	if deprecatedMajor == 0 && deprecatedMinor == 0 {
		return false
	}
	// no current version info available
	if currentMajor == 0 && currentMinor == 0 {
		return true
	}
	// compare deprecation version to current version
	if deprecatedMajor > currentMajor {
		return false
	}
	if deprecatedMajor == currentMajor && deprecatedMinor > currentMinor {
		return false
	}
	return true
}

// RemovedRelease returns the major/minor version in which the given object is unavailable (in the form "<major>.<minor>")
// if the object implements APILifecycleRemoved() to indicate a non-zero removal version, and returns an empty string otherwise.
func RemovedRelease(obj runtime.Object) string {
	if removed, hasRemovalInfo := obj.(apiLifecycleRemoved); hasRemovalInfo {
		removedMajor, removedMinor := removed.APILifecycleRemoved()
		if removedMajor != 0 || removedMinor != 0 {
			return fmt.Sprintf("%d.%d", removedMajor, removedMinor)
		}
	}
	return ""
}

// WarningMessage returns a human-readable deprecation warning if the object implements APILifecycleDeprecated()
// to indicate a non-zero deprecated major/minor version and has a populated GetObjectKind().GroupVersionKind().
func WarningMessage(obj runtime.Object) string {
	deprecated, isDeprecated := obj.(apiLifecycleDeprecated)
	if !isDeprecated {
		return ""
	}

	deprecatedMajor, deprecatedMinor := deprecated.APILifecycleDeprecated()
	if deprecatedMajor == 0 && deprecatedMinor == 0 {
		return ""
	}

	gvk := obj.GetObjectKind().GroupVersionKind()
	if gvk.Empty() {
		return ""
	}
	deprecationWarning := fmt.Sprintf("%s %s is deprecated in v%d.%d+", gvk.GroupVersion().String(), gvk.Kind, deprecatedMajor, deprecatedMinor)

	if removed, hasRemovalInfo := obj.(apiLifecycleRemoved); hasRemovalInfo {
		removedMajor, removedMinor := removed.APILifecycleRemoved()
		if removedMajor != 0 || removedMinor != 0 {
			deprecationWarning = deprecationWarning + fmt.Sprintf(", unavailable in v%d.%d+", removedMajor, removedMinor)
		}
	}

	if replaced, hasReplacement := obj.(apiLifecycleReplacement); hasReplacement {
		replacement := replaced.APILifecycleReplacement()
		if !replacement.Empty() {
			deprecationWarning = deprecationWarning + fmt.Sprintf("; use %s %s", replacement.GroupVersion().String(), replacement.Kind)
		}
	}

	return deprecationWarning
}
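As a hedged illustration (not part of this commit), the sketch below shows how the helpers in deprecation.go are typically combined: parse the server version with MajorMinor, then gate WarningMessage on IsDeprecated. The wrapper function and the example version values are made up for this note.

	package main

	import (
		"fmt"

		"k8s.io/apimachinery/pkg/runtime"
		"k8s.io/apimachinery/pkg/version"
		"k8s.io/apiserver/pkg/endpoints/deprecation"
	)

	// warnIfDeprecated is a hypothetical helper: it resolves the current
	// major/minor release and returns a warning string only for objects that
	// report a non-zero deprecation version at or before that release.
	func warnIfDeprecated(obj runtime.Object, info version.Info) string {
		major, minor, err := deprecation.MajorMinor(info) // e.g. {Major:"1", Minor:"29+"} -> 1, 29
		if err != nil {
			return ""
		}
		if deprecation.IsDeprecated(obj, major, minor) {
			return deprecation.WarningMessage(obj)
		}
		return ""
	}

	func main() {
		// A nil object implements none of the lifecycle interfaces, so no
		// warning is produced; real callers pass a typed API object here.
		fmt.Printf("%q\n", warnIfDeprecated(nil, version.Info{Major: "1", Minor: "29+"}))
	}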
5  e2e/vendor/k8s.io/apiserver/pkg/endpoints/discovery/OWNERS  generated  vendored  Normal file
@@ -0,0 +1,5 @@
# See the OWNERS docs at https://go.k8s.io/owners

reviewers:
- alexzielenski
- jefftree
72  e2e/vendor/k8s.io/apiserver/pkg/endpoints/discovery/addresses.go  generated  vendored  Normal file
@@ -0,0 +1,72 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package discovery

import (
	"net"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type Addresses interface {
	ServerAddressByClientCIDRs(net.IP) []metav1.ServerAddressByClientCIDR
}

// DefaultAddresses is a default implementation of Addresses that will work in most cases
type DefaultAddresses struct {
	// CIDRRules is a list of CIDRs and Addresses to use if a client is in the range
	CIDRRules []CIDRRule

	// DefaultAddress is the address (hostname or IP and port) that should be used in
	// if no CIDR matches more specifically.
	DefaultAddress string
}

// CIDRRule is a rule for adding an alternate path to the master based on matching CIDR
type CIDRRule struct {
	IPRange net.IPNet

	// Address is the address (hostname or IP and port) that should be used in
	// if this CIDR matches
	Address string
}

func (d DefaultAddresses) ServerAddressByClientCIDRs(clientIP net.IP) []metav1.ServerAddressByClientCIDR {
	addressCIDRMap := []metav1.ServerAddressByClientCIDR{
		{
			ClientCIDR:    "0.0.0.0/0",
			ServerAddress: d.DefaultAddress,
		},
	}

	for _, rule := range d.CIDRRules {
		addressCIDRMap = append(addressCIDRMap, rule.ServerAddressByClientCIDRs(clientIP)...)
	}
	return addressCIDRMap
}

func (d CIDRRule) ServerAddressByClientCIDRs(clientIP net.IP) []metav1.ServerAddressByClientCIDR {
	addressCIDRMap := []metav1.ServerAddressByClientCIDR{}

	if d.IPRange.Contains(clientIP) {
		addressCIDRMap = append(addressCIDRMap, metav1.ServerAddressByClientCIDR{
			ClientCIDR:    d.IPRange.String(),
			ServerAddress: d.Address,
		})
	}
	return addressCIDRMap
}
85  e2e/vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/etag.go  generated  vendored  Normal file
@@ -0,0 +1,85 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aggregated

import (
	"crypto/sha512"
	"encoding/json"
	"fmt"
	"net/http"
	"strconv"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
)

// This file exposes helper functions used for calculating the E-Tag header
// used in discovery endpoint responses

// Attaches Cache-Busting functionality to an endpoint
//   - Sets ETag header to provided hash
//   - Replies with 304 Not Modified, if If-None-Match header matches hash
//
// hash should be the value of calculateETag on object. If hash is empty, then
// the object is simply serialized without E-Tag functionality
func ServeHTTPWithETag(
	object runtime.Object,
	hash string,
	targetGV schema.GroupVersion,
	serializer runtime.NegotiatedSerializer,
	w http.ResponseWriter,
	req *http.Request,
) {
	// ETag must be enclosed in double quotes:
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag
	quotedHash := strconv.Quote(hash)
	w.Header().Set("ETag", quotedHash)
	w.Header().Set("Vary", "Accept")
	w.Header().Set("Cache-Control", "public")

	// If Request includes If-None-Match and matches hash, reply with 304
	// Otherwise, we delegate to the handler for actual content
	//
	// According to documentation, An Etag within an If-None-Match
	// header will be enclosed within double quotes:
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/If-None-Match#directives
	if clientCachedHash := req.Header.Get("If-None-Match"); quotedHash == clientCachedHash {
		w.WriteHeader(http.StatusNotModified)
		return
	}

	responsewriters.WriteObjectNegotiated(
		serializer,
		DiscoveryEndpointRestrictions,
		targetGV,
		w,
		req,
		http.StatusOK,
		object,
		true,
	)
}

func calculateETag(resources interface{}) (string, error) {
	serialized, err := json.Marshal(resources)
	if err != nil {
		return "", err
	}

	return fmt.Sprintf("%X", sha512.Sum512(serialized)), nil
}
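For orientation only, here is a hypothetical client-side counterpart to the contract above: resend the quoted ETag in If-None-Match and treat a 304 response as "reuse the cached document". The endpoint URL and ETag value are placeholders, not values produced by this code.

	package main

	import (
		"fmt"
		"net/http"
	)

	// fetchIfChanged is an illustrative sketch of the conditional-GET flow that
	// ServeHTTPWithETag implements on the server side.
	func fetchIfChanged(url, cachedETag string) error {
		req, err := http.NewRequest(http.MethodGet, url, nil)
		if err != nil {
			return err
		}
		if cachedETag != "" {
			// The server emits the ETag already wrapped in double quotes and
			// compares If-None-Match against that quoted form.
			req.Header.Set("If-None-Match", cachedETag)
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		defer resp.Body.Close()

		switch resp.StatusCode {
		case http.StatusNotModified:
			fmt.Println("discovery document unchanged, reuse cached copy")
		case http.StatusOK:
			fmt.Println("new document, next ETag:", resp.Header.Get("ETag"))
		}
		return nil
	}

	func main() {
		_ = fetchIfChanged("https://example.invalid/apis", `"placeholder-hash"`) // placeholder endpoint and hash
	}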
175  e2e/vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/fake.go  generated  vendored  Normal file
@@ -0,0 +1,175 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aggregated

import (
	"context"
	"errors"
	"net/http"
	"reflect"
	"sync"
	"time"

	"github.com/emicklei/go-restful/v3"
	"github.com/google/go-cmp/cmp"
	apidiscoveryv2 "k8s.io/api/apidiscovery/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
)

type FakeResourceManager interface {
	ResourceManager
	Expect() ResourceManager

	HasExpectedNumberActions() bool
	Validate() error
	WaitForActions(ctx context.Context, timeout time.Duration) error
}

func NewFakeResourceManager() FakeResourceManager {
	return &fakeResourceManager{}
}

// a resource manager with helper functions for checking the actions
// match expected. For use in tests
type fakeResourceManager struct {
	recorderResourceManager
	expect recorderResourceManager
}

// a resource manager which instead of managing a discovery document,
// simply records the calls to its interface functions for testing
type recorderResourceManager struct {
	lock    sync.RWMutex
	Actions []recorderResourceManagerAction
}

var _ ResourceManager = &fakeResourceManager{}
var _ ResourceManager = &recorderResourceManager{}

// Storage type for a call to the resource manager
type recorderResourceManagerAction struct {
	Type    string
	Group   string
	Version string
	Value   interface{}
}

func (f *fakeResourceManager) Expect() ResourceManager {
	return &f.expect
}

func (f *fakeResourceManager) HasExpectedNumberActions() bool {
	f.lock.RLock()
	defer f.lock.RUnlock()

	f.expect.lock.RLock()
	defer f.expect.lock.RUnlock()

	return len(f.Actions) >= len(f.expect.Actions)
}

func (f *fakeResourceManager) Validate() error {
	f.lock.RLock()
	defer f.lock.RUnlock()

	f.expect.lock.RLock()
	defer f.expect.lock.RUnlock()

	if !reflect.DeepEqual(f.expect.Actions, f.Actions) {
		return errors.New(cmp.Diff(f.expect.Actions, f.Actions))
	}
	return nil
}

func (f *fakeResourceManager) WaitForActions(ctx context.Context, timeout time.Duration) error {
	err := wait.PollImmediateWithContext(
		ctx,
		100*time.Millisecond, // try every 100ms
		timeout,              // timeout after timeout
		func(ctx context.Context) (done bool, err error) {
			if f.HasExpectedNumberActions() {
				return true, f.Validate()
			}
			return false, nil
		})
	return err
}

func (f *recorderResourceManager) SetGroupVersionPriority(gv metav1.GroupVersion, grouppriority, versionpriority int) {
	f.lock.Lock()
	defer f.lock.Unlock()

	f.Actions = append(f.Actions, recorderResourceManagerAction{
		Type:    "SetGroupVersionPriority",
		Group:   gv.Group,
		Version: gv.Version,
		Value:   versionpriority,
	})
}

func (f *recorderResourceManager) AddGroupVersion(groupName string, value apidiscoveryv2.APIVersionDiscovery) {
	f.lock.Lock()
	defer f.lock.Unlock()

	f.Actions = append(f.Actions, recorderResourceManagerAction{
		Type:  "AddGroupVersion",
		Group: groupName,
		Value: value,
	})
}
func (f *recorderResourceManager) RemoveGroup(groupName string) {
	f.lock.Lock()
	defer f.lock.Unlock()

	f.Actions = append(f.Actions, recorderResourceManagerAction{
		Type:  "RemoveGroup",
		Group: groupName,
	})

}
func (f *recorderResourceManager) RemoveGroupVersion(gv metav1.GroupVersion) {
	f.lock.Lock()
	defer f.lock.Unlock()

	f.Actions = append(f.Actions, recorderResourceManagerAction{
		Type:    "RemoveGroupVersion",
		Group:   gv.Group,
		Version: gv.Version,
	})

}
func (f *recorderResourceManager) SetGroups(values []apidiscoveryv2.APIGroupDiscovery) {
	f.lock.Lock()
	defer f.lock.Unlock()

	f.Actions = append(f.Actions, recorderResourceManagerAction{
		Type:  "SetGroups",
		Value: values,
	})
}
func (f *recorderResourceManager) WebService() *restful.WebService {
	panic("unimplemented")
}

func (f *recorderResourceManager) ServeHTTP(http.ResponseWriter, *http.Request) {
	panic("unimplemented")
}

func (f *recorderResourceManager) WithSource(source Source) ResourceManager {
	panic("unimplemented")
}
567  e2e/vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/handler.go  generated  vendored  Normal file
@@ -0,0 +1,567 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aggregated

import (
	"fmt"
	"net/http"
	"reflect"
	"sort"
	"sync"

	apidiscoveryv2 "k8s.io/api/apidiscovery/v2"
	apidiscoveryv2beta1 "k8s.io/api/apidiscovery/v2beta1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/apimachinery/pkg/version"
	apidiscoveryv2conversion "k8s.io/apiserver/pkg/apis/apidiscovery/v2"

	"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"

	"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
	"k8s.io/apiserver/pkg/endpoints/metrics"

	"sync/atomic"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/klog/v2"
)

type Source uint

// The GroupVersion from the lowest Source takes precedence
const (
	AggregatorSource Source = 0
	BuiltinSource    Source = 100
	CRDSource        Source = 200
)

// This handler serves the /apis endpoint for an aggregated list of
// api resources indexed by their group version.
type ResourceManager interface {
	// Adds knowledge of the given groupversion to the discovery document
	// If it was already being tracked, updates the stored APIVersionDiscovery
	// Thread-safe
	AddGroupVersion(groupName string, value apidiscoveryv2.APIVersionDiscovery)

	// Sets a priority to be used while sorting a specific group and
	// group-version. If two versions report different priorities for
	// the group, the higher one will be used. If the group is not
	// known, the priority is ignored. The priority for this version
	// is forgotten once the group-version is forgotten
	SetGroupVersionPriority(gv metav1.GroupVersion, grouppriority, versionpriority int)

	// Removes all group versions for a given group
	// Thread-safe
	RemoveGroup(groupName string)

	// Removes a specific groupversion. If all versions of a group have been
	// removed, then the entire group is unlisted.
	// Thread-safe
	RemoveGroupVersion(gv metav1.GroupVersion)

	// Resets the manager's known list of group-versions and replaces them
	// with the given groups
	// Thread-Safe
	SetGroups([]apidiscoveryv2.APIGroupDiscovery)

	// Returns the same resource manager using a different source
	// The source is used to decide how to de-duplicate groups.
	// The group from the least-numbered source is used
	WithSource(source Source) ResourceManager

	http.Handler
}

type resourceManager struct {
	source Source
	*resourceDiscoveryManager
}

func (rm resourceManager) AddGroupVersion(groupName string, value apidiscoveryv2.APIVersionDiscovery) {
	rm.resourceDiscoveryManager.AddGroupVersion(rm.source, groupName, value)
}
func (rm resourceManager) SetGroupVersionPriority(gv metav1.GroupVersion, grouppriority, versionpriority int) {
	rm.resourceDiscoveryManager.SetGroupVersionPriority(rm.source, gv, grouppriority, versionpriority)
}
func (rm resourceManager) RemoveGroup(groupName string) {
	rm.resourceDiscoveryManager.RemoveGroup(rm.source, groupName)
}
func (rm resourceManager) RemoveGroupVersion(gv metav1.GroupVersion) {
	rm.resourceDiscoveryManager.RemoveGroupVersion(rm.source, gv)
}
func (rm resourceManager) SetGroups(groups []apidiscoveryv2.APIGroupDiscovery) {
	rm.resourceDiscoveryManager.SetGroups(rm.source, groups)
}

func (rm resourceManager) WithSource(source Source) ResourceManager {
	return resourceManager{
		source:                   source,
		resourceDiscoveryManager: rm.resourceDiscoveryManager,
	}
}

type groupKey struct {
	name string

	// Source identifies where this group came from and dictates which group
	// among duplicates is chosen to be used for discovery.
	source Source
}

type groupVersionKey struct {
	metav1.GroupVersion
	source Source
}

type resourceDiscoveryManager struct {
	serializer runtime.NegotiatedSerializer
	// cache is an atomic pointer to avoid the use of locks
	cache atomic.Pointer[cachedGroupList]

	serveHTTPFunc http.HandlerFunc

	// Writes protected by the lock.
	// List of all apigroups & resources indexed by the resource manager
	lock              sync.RWMutex
	apiGroups         map[groupKey]*apidiscoveryv2.APIGroupDiscovery
	versionPriorities map[groupVersionKey]priorityInfo
}

type priorityInfo struct {
	GroupPriorityMinimum int
	VersionPriority      int
}

func NewResourceManager(path string) ResourceManager {
	scheme := runtime.NewScheme()
	utilruntime.Must(apidiscoveryv2.AddToScheme(scheme))
	utilruntime.Must(apidiscoveryv2beta1.AddToScheme(scheme))
	// Register conversion for apidiscovery
	utilruntime.Must(apidiscoveryv2conversion.RegisterConversions(scheme))

	codecs := serializer.NewCodecFactory(scheme)
	rdm := &resourceDiscoveryManager{
		serializer:        codecs,
		versionPriorities: make(map[groupVersionKey]priorityInfo),
	}
	rdm.serveHTTPFunc = metrics.InstrumentHandlerFunc("GET",
		/* group = */ "",
		/* version = */ "",
		/* resource = */ "",
		/* subresource = */ path,
		/* scope = */ "",
		/* component = */ metrics.APIServerComponent,
		/* deprecated */ false,
		/* removedRelease */ "",
		rdm.serveHTTP)
	return resourceManager{
		source:                   BuiltinSource,
		resourceDiscoveryManager: rdm,
	}
}

func (rdm *resourceDiscoveryManager) SetGroupVersionPriority(source Source, gv metav1.GroupVersion, groupPriorityMinimum, versionPriority int) {
	rdm.lock.Lock()
	defer rdm.lock.Unlock()

	key := groupVersionKey{
		GroupVersion: gv,
		source:       source,
	}
	rdm.versionPriorities[key] = priorityInfo{
		GroupPriorityMinimum: groupPriorityMinimum,
		VersionPriority:      versionPriority,
	}
	rdm.cache.Store(nil)
}

func (rdm *resourceDiscoveryManager) SetGroups(source Source, groups []apidiscoveryv2.APIGroupDiscovery) {
	rdm.lock.Lock()
	defer rdm.lock.Unlock()

	rdm.apiGroups = nil
	rdm.cache.Store(nil)

	for _, group := range groups {
		for _, version := range group.Versions {
			rdm.addGroupVersionLocked(source, group.Name, version)
		}
	}

	// Filter out unused priority entries
	for gv := range rdm.versionPriorities {
		key := groupKey{
			source: source,
			name:   gv.Group,
		}
		entry, exists := rdm.apiGroups[key]
		if !exists {
			delete(rdm.versionPriorities, gv)
			continue
		}

		containsVersion := false

		for _, v := range entry.Versions {
			if v.Version == gv.Version {
				containsVersion = true
				break
			}
		}

		if !containsVersion {
			delete(rdm.versionPriorities, gv)
		}
	}
}

func (rdm *resourceDiscoveryManager) AddGroupVersion(source Source, groupName string, value apidiscoveryv2.APIVersionDiscovery) {
	rdm.lock.Lock()
	defer rdm.lock.Unlock()

	rdm.addGroupVersionLocked(source, groupName, value)
}

func (rdm *resourceDiscoveryManager) addGroupVersionLocked(source Source, groupName string, value apidiscoveryv2.APIVersionDiscovery) {

	if rdm.apiGroups == nil {
		rdm.apiGroups = make(map[groupKey]*apidiscoveryv2.APIGroupDiscovery)
	}

	key := groupKey{
		source: source,
		name:   groupName,
	}

	if existing, groupExists := rdm.apiGroups[key]; groupExists {
		// If this version already exists, replace it
		versionExists := false

		// Not very efficient, but in practice there are generally not many versions
		for i := range existing.Versions {
			if existing.Versions[i].Version == value.Version {
				// The new gv is the exact same as what is already in
				// the map. This is a noop and cache should not be
				// invalidated.
				if reflect.DeepEqual(existing.Versions[i], value) {
					return
				}

				existing.Versions[i] = value
				versionExists = true
				break
			}
		}

		if !versionExists {
			existing.Versions = append(existing.Versions, value)
		}

	} else {
		group := &apidiscoveryv2.APIGroupDiscovery{
			ObjectMeta: metav1.ObjectMeta{
				Name: groupName,
			},
			Versions: []apidiscoveryv2.APIVersionDiscovery{value},
		}
		rdm.apiGroups[key] = group
	}
	klog.Infof("Adding GroupVersion %s %s to ResourceManager", groupName, value.Version)

	gv := metav1.GroupVersion{Group: groupName, Version: value.Version}
	gvKey := groupVersionKey{
		GroupVersion: gv,
		source:       source,
	}
	if _, ok := rdm.versionPriorities[gvKey]; !ok {
		rdm.versionPriorities[gvKey] = priorityInfo{
			GroupPriorityMinimum: 1000,
			VersionPriority:      15,
		}
	}

	// Reset response document so it is recreated lazily
	rdm.cache.Store(nil)
}

func (rdm *resourceDiscoveryManager) RemoveGroupVersion(source Source, apiGroup metav1.GroupVersion) {
	rdm.lock.Lock()
	defer rdm.lock.Unlock()

	key := groupKey{
		source: source,
		name:   apiGroup.Group,
	}

	group, exists := rdm.apiGroups[key]
	if !exists {
		return
	}

	modified := false
	for i := range group.Versions {
		if group.Versions[i].Version == apiGroup.Version {
			group.Versions = append(group.Versions[:i], group.Versions[i+1:]...)
			modified = true
			break
		}
	}
	// If no modification was done, cache does not need to be cleared
	if !modified {
		return
	}

	gvKey := groupVersionKey{
		GroupVersion: apiGroup,
		source:       source,
	}

	delete(rdm.versionPriorities, gvKey)
	if len(group.Versions) == 0 {
		delete(rdm.apiGroups, key)
	}

	// Reset response document so it is recreated lazily
	rdm.cache.Store(nil)
}

func (rdm *resourceDiscoveryManager) RemoveGroup(source Source, groupName string) {
	rdm.lock.Lock()
	defer rdm.lock.Unlock()

	key := groupKey{
		source: source,
		name:   groupName,
	}

	delete(rdm.apiGroups, key)

	for k := range rdm.versionPriorities {
		if k.Group == groupName && k.source == source {
			delete(rdm.versionPriorities, k)
		}
	}

	// Reset response document so it is recreated lazily
	rdm.cache.Store(nil)
}

// Prepares the api group list for serving by converting them from map into
// list and sorting them according to insertion order
func (rdm *resourceDiscoveryManager) calculateAPIGroupsLocked() []apidiscoveryv2.APIGroupDiscovery {
	regenerationCounter.Inc()
	// Re-order the apiGroups by their priority.
	groups := []apidiscoveryv2.APIGroupDiscovery{}

	groupsToUse := map[string]apidiscoveryv2.APIGroupDiscovery{}
	sourcesUsed := map[metav1.GroupVersion]Source{}

	for key, group := range rdm.apiGroups {
		if existing, ok := groupsToUse[key.name]; ok {
			for _, v := range group.Versions {
				gv := metav1.GroupVersion{Group: key.name, Version: v.Version}

				// Skip groupversions we've already seen before. Only DefaultSource
				// takes precedence
				if usedSource, seen := sourcesUsed[gv]; seen && key.source >= usedSource {
					continue
				} else if seen {
					// Find the index of the duplicate version and replace
					for i := 0; i < len(existing.Versions); i++ {
						if existing.Versions[i].Version == v.Version {
							existing.Versions[i] = v
							break
						}
					}

				} else {
					// New group-version, just append
					existing.Versions = append(existing.Versions, v)
				}

				sourcesUsed[gv] = key.source
				groupsToUse[key.name] = existing
			}
			// Check to see if we have overlapping versions. If we do, take the one
			// with highest source precedence
		} else {
			groupsToUse[key.name] = *group.DeepCopy()
			for _, v := range group.Versions {
				gv := metav1.GroupVersion{Group: key.name, Version: v.Version}
				sourcesUsed[gv] = key.source
			}
		}
	}

	for _, group := range groupsToUse {

		// Re-order versions based on their priority. Use kube-aware string
		// comparison as a tie breaker
		sort.SliceStable(group.Versions, func(i, j int) bool {
			iVersion := group.Versions[i].Version
			jVersion := group.Versions[j].Version

			iGV := metav1.GroupVersion{Group: group.Name, Version: iVersion}
			jGV := metav1.GroupVersion{Group: group.Name, Version: jVersion}

			iSource := sourcesUsed[iGV]
			jSource := sourcesUsed[jGV]

			iPriority := rdm.versionPriorities[groupVersionKey{iGV, iSource}].VersionPriority
			jPriority := rdm.versionPriorities[groupVersionKey{jGV, jSource}].VersionPriority

			// Sort by version string comparator if priority is equal
			if iPriority == jPriority {
				return version.CompareKubeAwareVersionStrings(iVersion, jVersion) > 0
			}

			// i sorts before j if it has a higher priority
			return iPriority > jPriority
		})

		groups = append(groups, group)
	}

	// For each group, determine the highest minimum group priority and use that
	priorities := map[string]int{}
	for gv, info := range rdm.versionPriorities {
		if source := sourcesUsed[gv.GroupVersion]; source != gv.source {
			continue
		}

		if existing, exists := priorities[gv.Group]; exists {
			if existing < info.GroupPriorityMinimum {
				priorities[gv.Group] = info.GroupPriorityMinimum
			}
		} else {
			priorities[gv.Group] = info.GroupPriorityMinimum
		}
	}

	sort.SliceStable(groups, func(i, j int) bool {
		iName := groups[i].Name
		jName := groups[j].Name

		// Default to 0 priority by default
		iPriority := priorities[iName]
		jPriority := priorities[jName]

		// Sort discovery based on apiservice priority.
		// Duplicated from staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/helpers.go
		if iPriority == jPriority {
			// Equal priority uses name to break ties
			return iName < jName
		}

		// i sorts before j if it has a higher priority
		return iPriority > jPriority
	})

	return groups
}

// Fetches from cache if it exists. If cache is empty, create it.
func (rdm *resourceDiscoveryManager) fetchFromCache() *cachedGroupList {
	rdm.lock.RLock()
	defer rdm.lock.RUnlock()

	cacheLoad := rdm.cache.Load()
	if cacheLoad != nil {
		return cacheLoad
	}
	response := apidiscoveryv2.APIGroupDiscoveryList{
		Items: rdm.calculateAPIGroupsLocked(),
	}
	etag, err := calculateETag(response)
	if err != nil {
		klog.Errorf("failed to calculate etag for discovery document: %s", etag)
		etag = ""
	}
	cached := &cachedGroupList{
		cachedResponse:     response,
		cachedResponseETag: etag,
	}
	rdm.cache.Store(cached)
	return cached
}

type cachedGroupList struct {
	cachedResponse apidiscoveryv2.APIGroupDiscoveryList
	// etag is calculated based on a SHA hash of only the JSON object.
	// A response via different Accept encodings (eg: protobuf, json) will
	// yield the same etag. This is okay because Accept is part of the Vary header.
	// Per RFC7231 a client must only cache a response etag pair if the header field
	// matches as indicated by the Vary field. Thus, protobuf and json and other Accept
	// encodings will not be cached as the same response despite having the same etag.
	cachedResponseETag string
}

func (rdm *resourceDiscoveryManager) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
	rdm.serveHTTPFunc(resp, req)
}

func (rdm *resourceDiscoveryManager) serveHTTP(resp http.ResponseWriter, req *http.Request) {
	cache := rdm.fetchFromCache()
	response := cache.cachedResponse
	etag := cache.cachedResponseETag

	mediaType, _, err := negotiation.NegotiateOutputMediaType(req, rdm.serializer, DiscoveryEndpointRestrictions)
	if err != nil {
		// Should never happen. wrapper.go will only proxy requests to this
		// handler if the media type passes DiscoveryEndpointRestrictions
		utilruntime.HandleError(err)
		resp.WriteHeader(http.StatusInternalServerError)
		return
	}
	var targetGV schema.GroupVersion
	if mediaType.Convert == nil ||
		(mediaType.Convert.GroupVersion() != apidiscoveryv2.SchemeGroupVersion &&
			mediaType.Convert.GroupVersion() != apidiscoveryv2beta1.SchemeGroupVersion) {
		utilruntime.HandleError(fmt.Errorf("expected aggregated discovery group version, got group: %s, version %s", mediaType.Convert.Group, mediaType.Convert.Version))
		resp.WriteHeader(http.StatusInternalServerError)
		return
	}
	targetGV = mediaType.Convert.GroupVersion()

	if len(etag) > 0 {
		// Use proper e-tag headers if one is available
		ServeHTTPWithETag(
			&response,
			etag,
			targetGV,
			rdm.serializer,
			resp,
			req,
		)
	} else {
		// Default to normal response in rare case etag is
		// not cached with the object for some reason.
		responsewriters.WriteObjectNegotiated(
			rdm.serializer,
			DiscoveryEndpointRestrictions,
			targetGV,
			resp,
			req,
			http.StatusOK,
			&response,
			true,
		)
	}
}
36  e2e/vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/metrics.go  generated  vendored  Normal file
@@ -0,0 +1,36 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aggregated

import (
	"k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"
)

var (
	regenerationCounter = metrics.NewCounter(
		&metrics.CounterOpts{
			Name:           "aggregator_discovery_aggregation_count_total",
			Help:           "Counter of number of times discovery was aggregated",
			StabilityLevel: metrics.ALPHA,
		},
	)
)

func init() {
	legacyregistry.MustRegister(regenerationCounter)
}
43  e2e/vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/negotiation.go  generated  vendored  Normal file
@@ -0,0 +1,43 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aggregated

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// Interface is from "k8s.io/apiserver/pkg/endpoints/handlers/negotiation"

// DiscoveryEndpointRestrictions allows requests to /apis to provide a Content Negotiation GVK for aggregated discovery.
var DiscoveryEndpointRestrictions = discoveryEndpointRestrictions{}

type discoveryEndpointRestrictions struct{}

func (discoveryEndpointRestrictions) AllowsMediaTypeTransform(mimeType string, mimeSubType string, gvk *schema.GroupVersionKind) bool {
	return IsAggregatedDiscoveryGVK(gvk)
}

func (discoveryEndpointRestrictions) AllowsServerVersion(string) bool  { return false }
func (discoveryEndpointRestrictions) AllowsStreamSchema(s string) bool { return s == "watch" }

// IsAggregatedDiscoveryGVK checks if a provided GVK is the GVK for serving aggregated discovery.
func IsAggregatedDiscoveryGVK(gvk *schema.GroupVersionKind) bool {
	if gvk != nil {
		return gvk.Group == "apidiscovery.k8s.io" && (gvk.Version == "v2beta1" || gvk.Version == "v2") && gvk.Kind == "APIGroupDiscoveryList"
	}
	return false
}
81  e2e/vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/wrapper.go  generated  vendored  Normal file
@@ -0,0 +1,81 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aggregated

import (
	"net/http"

	apidiscoveryv2 "k8s.io/api/apidiscovery/v2"
	apidiscoveryv2beta1 "k8s.io/api/apidiscovery/v2beta1"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"

	"github.com/emicklei/go-restful/v3"
	"k8s.io/apimachinery/pkg/runtime"

	"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
	genericfeatures "k8s.io/apiserver/pkg/features"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
)

type WrappedHandler struct {
	s          runtime.NegotiatedSerializer
	handler    http.Handler
	aggHandler http.Handler
}

func (wrapped *WrappedHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
	if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AggregatedDiscoveryEndpoint) {
		mediaType, _ := negotiation.NegotiateMediaTypeOptions(req.Header.Get("Accept"), wrapped.s.SupportedMediaTypes(), DiscoveryEndpointRestrictions)
		// mediaType.Convert looks at the request accept headers and is used to control whether the discovery document will be aggregated.
		if IsAggregatedDiscoveryGVK(mediaType.Convert) {
			wrapped.aggHandler.ServeHTTP(resp, req)
			return
		}
	}
	wrapped.handler.ServeHTTP(resp, req)
}

func (wrapped *WrappedHandler) restfulHandle(req *restful.Request, resp *restful.Response) {
	wrapped.ServeHTTP(resp.ResponseWriter, req.Request)
}

func (wrapped *WrappedHandler) GenerateWebService(prefix string, returnType interface{}) *restful.WebService {
	mediaTypes, _ := negotiation.MediaTypesForSerializer(wrapped.s)
	ws := new(restful.WebService)
	ws.Path(prefix)
	ws.Doc("get available API versions")
	ws.Route(ws.GET("/").To(wrapped.restfulHandle).
		Doc("get available API versions").
		Operation("getAPIVersions").
		Produces(mediaTypes...).
		Consumes(mediaTypes...).
		Writes(returnType))
	return ws
}

// WrapAggregatedDiscoveryToHandler wraps a handler with an option to
// emit the aggregated discovery by passing in the aggregated
// discovery type in content negotiation headers: eg: (Accept:
// application/json;v=v2;g=apidiscovery.k8s.io;as=APIGroupDiscoveryList)
func WrapAggregatedDiscoveryToHandler(handler http.Handler, aggHandler http.Handler) *WrappedHandler {
	scheme := runtime.NewScheme()
	utilruntime.Must(apidiscoveryv2.AddToScheme(scheme))
	utilruntime.Must(apidiscoveryv2beta1.AddToScheme(scheme))
	codecs := serializer.NewCodecFactory(scheme)
	return &WrappedHandler{codecs, handler, aggHandler}
}
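The Accept header documented on WrapAggregatedDiscoveryToHandler is what flips a request over to the aggregated handler. A minimal, hypothetical client request that opts in is sketched below; the server address is a placeholder.

	package main

	import (
		"fmt"
		"net/http"
	)

	func main() {
		req, err := http.NewRequest(http.MethodGet, "https://example.invalid/apis", nil)
		if err != nil {
			panic(err)
		}
		// These content-negotiation parameters match IsAggregatedDiscoveryGVK:
		// group apidiscovery.k8s.io, version v2, kind APIGroupDiscoveryList.
		req.Header.Set("Accept", "application/json;v=v2;g=apidiscovery.k8s.io;as=APIGroupDiscoveryList")
		fmt.Println("requesting aggregated discovery with:", req.Header.Get("Accept"))
	}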
73  e2e/vendor/k8s.io/apiserver/pkg/endpoints/discovery/group.go  generated  vendored  Normal file
@@ -0,0 +1,73 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package discovery

import (
	"net/http"

	"github.com/emicklei/go-restful/v3"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
	"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
)

// APIGroupHandler creates a webservice serving the supported versions, preferred version, and name
// of a group. E.g., such a web service will be registered at /apis/extensions.
type APIGroupHandler struct {
	serializer runtime.NegotiatedSerializer
	group      metav1.APIGroup
}

func NewAPIGroupHandler(serializer runtime.NegotiatedSerializer, group metav1.APIGroup) *APIGroupHandler {
	if keepUnversioned(group.Name) {
		// Because in release 1.1, /apis/extensions returns response with empty
		// APIVersion, we use stripVersionNegotiatedSerializer to keep the
		// response backwards compatible.
		serializer = stripVersionNegotiatedSerializer{serializer}
	}

	return &APIGroupHandler{
		serializer: serializer,
		group:      group,
	}
}

func (s *APIGroupHandler) WebService() *restful.WebService {
	mediaTypes, _ := negotiation.MediaTypesForSerializer(s.serializer)
	ws := new(restful.WebService)
	ws.Path(APIGroupPrefix + "/" + s.group.Name)
	ws.Doc("get information of a group")
	ws.Route(ws.GET("/").To(s.handle).
		Doc("get information of a group").
		Operation("getAPIGroup").
		Produces(mediaTypes...).
		Consumes(mediaTypes...).
		Writes(metav1.APIGroup{}))
	return ws
}

// handle returns a handler which will return the api.GroupAndVersion of the group.
func (s *APIGroupHandler) handle(req *restful.Request, resp *restful.Response) {
	s.ServeHTTP(resp.ResponseWriter, req.Request)
}

func (s *APIGroupHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	responsewriters.WriteObjectNegotiated(s.serializer, negotiation.DefaultEndpointRestrictions, schema.GroupVersion{}, w, req, http.StatusOK, &s.group, false)
}
80  e2e/vendor/k8s.io/apiserver/pkg/endpoints/discovery/legacy.go  generated  vendored  Normal file
@@ -0,0 +1,80 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package discovery

import (
	"net/http"

	"github.com/emicklei/go-restful/v3"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	utilnet "k8s.io/apimachinery/pkg/util/net"
	"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
	"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
)

// legacyRootAPIHandler creates a webservice serving api group discovery.
type legacyRootAPIHandler struct {
	// addresses is used to build cluster IPs for discovery.
	addresses  Addresses
	apiPrefix  string
	serializer runtime.NegotiatedSerializer
}

func NewLegacyRootAPIHandler(addresses Addresses, serializer runtime.NegotiatedSerializer, apiPrefix string) *legacyRootAPIHandler {
	// Because in release 1.1, /apis returns response with empty APIVersion, we
	// use stripVersionNegotiatedSerializer to keep the response backwards
	// compatible.
	serializer = stripVersionNegotiatedSerializer{serializer}

	return &legacyRootAPIHandler{
		addresses:  addresses,
		apiPrefix:  apiPrefix,
		serializer: serializer,
	}
}

// AddApiWebService adds a service to return the supported api versions at the legacy /api.
func (s *legacyRootAPIHandler) WebService() *restful.WebService {
	mediaTypes, _ := negotiation.MediaTypesForSerializer(s.serializer)
	ws := new(restful.WebService)
	ws.Path(s.apiPrefix)
	ws.Doc("get available API versions")
	ws.Route(ws.GET("/").To(s.restfulHandle).
		Doc("get available API versions").
		Operation("getAPIVersions").
		Produces(mediaTypes...).
		Consumes(mediaTypes...).
		Writes(metav1.APIVersions{}))
	return ws
}

func (s *legacyRootAPIHandler) restfulHandle(req *restful.Request, resp *restful.Response) {
	s.ServeHTTP(resp.ResponseWriter, req.Request)
}

func (s *legacyRootAPIHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
	clientIP := utilnet.GetClientIP(req)
	apiVersions := &metav1.APIVersions{
		ServerAddressByClientCIDRs: s.addresses.ServerAddressByClientCIDRs(clientIP),
		Versions:                   []string{"v1"},
	}

	responsewriters.WriteObjectNegotiated(s.serializer, negotiation.DefaultEndpointRestrictions, schema.GroupVersion{}, resp, req, http.StatusOK, apiVersions, false)
}
161  e2e/vendor/k8s.io/apiserver/pkg/endpoints/discovery/root.go  generated  vendored  Normal file
@@ -0,0 +1,161 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package discovery

import (
	"context"
	"net/http"
	"sync"

	restful "github.com/emicklei/go-restful/v3"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	utilnet "k8s.io/apimachinery/pkg/util/net"
	"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
	"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
)

// GroupManager is an interface that allows dynamic mutation of the existing webservice to handle
// API groups being added or removed.
type GroupManager interface {
	GroupLister

	AddGroup(apiGroup metav1.APIGroup)
	RemoveGroup(groupName string)
	ServeHTTP(resp http.ResponseWriter, req *http.Request)
	WebService() *restful.WebService
}

// GroupLister knows how to list APIGroups for discovery.
type GroupLister interface {
	// Groups returns APIGroups for discovery, filling in ServerAddressByClientCIDRs
	// based on data in req.
	Groups(ctx context.Context, req *http.Request) ([]metav1.APIGroup, error)
}

// rootAPIsHandler creates a webservice serving api group discovery.
// The list of APIGroups may change while the server is running because additional resources
// are registered or removed. It is not safe to cache the values.
type rootAPIsHandler struct {
	// addresses is used to build cluster IPs for discovery.
	addresses Addresses

	serializer runtime.NegotiatedSerializer

	// Map storing information about all groups to be exposed in discovery response.
	// The map is from name to the group.
	lock      sync.RWMutex
	apiGroups map[string]metav1.APIGroup
	// apiGroupNames preserves insertion order
	apiGroupNames []string
}

func NewRootAPIsHandler(addresses Addresses, serializer runtime.NegotiatedSerializer) *rootAPIsHandler {
	// Because in release 1.1, /apis returns response with empty APIVersion, we
	// use stripVersionNegotiatedSerializer to keep the response backwards
	// compatible.
	serializer = stripVersionNegotiatedSerializer{serializer}

	return &rootAPIsHandler{
		addresses:  addresses,
		serializer: serializer,
		apiGroups:  map[string]metav1.APIGroup{},
	}
}

func (s *rootAPIsHandler) AddGroup(apiGroup metav1.APIGroup) {
	s.lock.Lock()
	defer s.lock.Unlock()

	_, alreadyExists := s.apiGroups[apiGroup.Name]

	s.apiGroups[apiGroup.Name] = apiGroup
	if !alreadyExists {
		s.apiGroupNames = append(s.apiGroupNames, apiGroup.Name)
	}
}

func (s *rootAPIsHandler) RemoveGroup(groupName string) {
	s.lock.Lock()
	defer s.lock.Unlock()

	delete(s.apiGroups, groupName)
	for i := range s.apiGroupNames {
		if s.apiGroupNames[i] == groupName {
			s.apiGroupNames = append(s.apiGroupNames[:i], s.apiGroupNames[i+1:]...)
			break
		}
	}
}

func (s *rootAPIsHandler) Groups(ctx context.Context, req *http.Request) ([]metav1.APIGroup, error) {
	s.lock.RLock()
	defer s.lock.RUnlock()

	return s.groupsLocked(ctx, req), nil
}

// groupsLocked returns the APIGroupList discovery information for this handler.
// The caller must hold the lock before invoking this method to avoid data races.
func (s *rootAPIsHandler) groupsLocked(ctx context.Context, req *http.Request) []metav1.APIGroup {
	clientIP := utilnet.GetClientIP(req)
	serverCIDR := s.addresses.ServerAddressByClientCIDRs(clientIP)

	orderedGroups := []metav1.APIGroup{}
	for _, groupName := range s.apiGroupNames {
		orderedGroups = append(orderedGroups, s.apiGroups[groupName])
	}

	groups := make([]metav1.APIGroup, len(orderedGroups))
	for i := range orderedGroups {
		groups[i] = orderedGroups[i]
		groups[i].ServerAddressByClientCIDRs = serverCIDR
	}

	return groups
}

func (s *rootAPIsHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
	s.lock.RLock()
	defer s.lock.RUnlock()

	groupList := metav1.APIGroupList{Groups: s.groupsLocked(req.Context(), req)}

	responsewriters.WriteObjectNegotiated(s.serializer, negotiation.DefaultEndpointRestrictions, schema.GroupVersion{}, resp, req, http.StatusOK, &groupList, false)
}

func (s *rootAPIsHandler) restfulHandle(req *restful.Request, resp *restful.Response) {
	s.ServeHTTP(resp.ResponseWriter, req.Request)
}

// WebService returns a webservice serving api group discovery.
// Note: during the server runtime apiGroups might change.
func (s *rootAPIsHandler) WebService() *restful.WebService {
	mediaTypes, _ := negotiation.MediaTypesForSerializer(s.serializer)
	ws := new(restful.WebService)
	ws.Path(APIGroupPrefix)
	ws.Doc("get available API versions")
	ws.Route(ws.GET("/").To(s.restfulHandle).
		Doc("get available API versions").
		Operation("getAPIVersions").
		Produces(mediaTypes...).
		Consumes(mediaTypes...).
		Writes(metav1.APIGroupList{}))
	return ws
}
36 e2e/vendor/k8s.io/apiserver/pkg/endpoints/discovery/storageversionhash.go generated vendored Normal file
@@ -0,0 +1,36 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package discovery

import (
	"crypto/sha256"
	"encoding/base64"
)

// StorageVersionHash calculates the storage version hash for a
// <group/version/kind> tuple.
// WARNING: this function is subject to change. Clients shouldn't depend on
// this function.
func StorageVersionHash(group, version, kind string) string {
	gvk := group + "/" + version + "/" + kind
	bytes := sha256.Sum256([]byte(gvk))
	// Assuming there are N kinds in the cluster, and the hash is X-byte long,
	// the chance of colliding hash P(N,X) approximates to 1-e^(-(N^2)/2^(8X+1)).
	// P(10,000, 8) ~= 2.7*10^(-12), which is low enough.
	// See https://en.wikipedia.org/wiki/Birthday_problem#Approximations.
	return base64.StdEncoding.EncodeToString(bytes[:8])
}
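The vendored StorageVersionHash helper above is self-contained; the following minimal standalone sketch (not part of the vendored file, example group/version/kind chosen for illustration) reproduces the same sha256-then-base64 truncation:

// storagehash_example.go - illustrative only; mirrors the vendored helper.
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func main() {
	// Example tuple; any <group/version/kind> is hashed the same way.
	gvk := "apps" + "/" + "v1" + "/" + "Deployment"
	sum := sha256.Sum256([]byte(gvk))
	// Only the first 8 bytes of the digest are kept, then base64-encoded.
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:8]))
}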
110 e2e/vendor/k8s.io/apiserver/pkg/endpoints/discovery/util.go generated vendored Normal file
@@ -0,0 +1,110 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package discovery

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/klog/v2"
)

const APIGroupPrefix = "/apis"

func keepUnversioned(group string) bool {
	return group == "" || group == "extensions"
}

// stripVersionEncoder strips APIVersion field from the encoding output. It's
// used to keep the responses at the discovery endpoints backward compatible
// with release-1.1, when the responses have empty APIVersion.
type stripVersionEncoder struct {
	encoder    runtime.Encoder
	serializer runtime.Serializer
	identifier runtime.Identifier
}

func newStripVersionEncoder(e runtime.Encoder, s runtime.Serializer) runtime.Encoder {
	return stripVersionEncoder{
		encoder:    e,
		serializer: s,
		identifier: identifier(e),
	}
}

func identifier(e runtime.Encoder) runtime.Identifier {
	result := map[string]string{
		"name": "stripVersion",
	}
	if e != nil {
		result["encoder"] = string(e.Identifier())
	}
	identifier, err := json.Marshal(result)
	if err != nil {
		klog.Fatalf("Failed marshaling identifier for stripVersionEncoder: %v", err)
	}
	return runtime.Identifier(identifier)
}

func (c stripVersionEncoder) Encode(obj runtime.Object, w io.Writer) error {
	if co, ok := obj.(runtime.CacheableObject); ok {
		return co.CacheEncode(c.Identifier(), c.doEncode, w)
	}
	return c.doEncode(obj, w)
}

func (c stripVersionEncoder) doEncode(obj runtime.Object, w io.Writer) error {
	buf := bytes.NewBuffer([]byte{})
	err := c.encoder.Encode(obj, buf)
	if err != nil {
		return err
	}
	roundTrippedObj, gvk, err := c.serializer.Decode(buf.Bytes(), nil, nil)
	if err != nil {
		return err
	}
	gvk.Group = ""
	gvk.Version = ""
	roundTrippedObj.GetObjectKind().SetGroupVersionKind(*gvk)
	return c.serializer.Encode(roundTrippedObj, w)
}

// Identifier implements runtime.Encoder interface.
func (c stripVersionEncoder) Identifier() runtime.Identifier {
	return c.identifier
}

// stripVersionNegotiatedSerializer will return stripVersionEncoder when
// EncoderForVersion is called. See comments for stripVersionEncoder.
type stripVersionNegotiatedSerializer struct {
	runtime.NegotiatedSerializer
}

func (n stripVersionNegotiatedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder {
	serializer, ok := encoder.(runtime.Serializer)
	if !ok {
		// The stripVersionEncoder needs both an encoder and decoder, but is called from a context that doesn't have access to the
		// decoder. We do a best effort cast here (since this code path is only for backwards compatibility) to get access to the caller's
		// decoder.
		panic(fmt.Sprintf("Unable to extract serializer from %#v", encoder))
	}
	versioned := n.NegotiatedSerializer.EncoderForVersion(encoder, gv)
	return newStripVersionEncoder(versioned, serializer)
}
83 e2e/vendor/k8s.io/apiserver/pkg/endpoints/discovery/version.go generated vendored Normal file
@@ -0,0 +1,83 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package discovery

import (
	"net/http"

	restful "github.com/emicklei/go-restful/v3"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
	"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
)

type APIResourceLister interface {
	ListAPIResources() []metav1.APIResource
}

type APIResourceListerFunc func() []metav1.APIResource

func (f APIResourceListerFunc) ListAPIResources() []metav1.APIResource {
	return f()
}

// APIVersionHandler creates a webservice serving the supported resources for the version
// E.g., such a web service will be registered at /apis/extensions/v1beta1.
type APIVersionHandler struct {
	serializer runtime.NegotiatedSerializer

	groupVersion      schema.GroupVersion
	apiResourceLister APIResourceLister
}

func NewAPIVersionHandler(serializer runtime.NegotiatedSerializer, groupVersion schema.GroupVersion, apiResourceLister APIResourceLister) *APIVersionHandler {
	if keepUnversioned(groupVersion.Group) {
		// Because in release 1.1, /apis/extensions returns response with empty
		// APIVersion, we use stripVersionNegotiatedSerializer to keep the
		// response backwards compatible.
		serializer = stripVersionNegotiatedSerializer{serializer}
	}

	return &APIVersionHandler{
		serializer:        serializer,
		groupVersion:      groupVersion,
		apiResourceLister: apiResourceLister,
	}
}

func (s *APIVersionHandler) AddToWebService(ws *restful.WebService) {
	mediaTypes, _ := negotiation.MediaTypesForSerializer(s.serializer)
	ws.Route(ws.GET("/").To(s.handle).
		Doc("get available resources").
		Operation("getAPIResources").
		Produces(mediaTypes...).
		Consumes(mediaTypes...).
		Writes(metav1.APIResourceList{}))
}

// handle returns a handler which will return the api.VersionAndVersion of the group.
func (s *APIVersionHandler) handle(req *restful.Request, resp *restful.Response) {
	s.ServeHTTP(resp.ResponseWriter, req.Request)
}

func (s *APIVersionHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	responsewriters.WriteObjectNegotiated(s.serializer, negotiation.DefaultEndpointRestrictions, schema.GroupVersion{}, w, req, http.StatusOK,
		&metav1.APIResourceList{GroupVersion: s.groupVersion.String(), APIResources: s.apiResourceLister.ListAPIResources()}, false)
}
18 e2e/vendor/k8s.io/apiserver/pkg/endpoints/doc.go generated vendored Normal file
@@ -0,0 +1,18 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package endpoints contains the generic code that provides a RESTful Kubernetes-style API service.
package endpoints // import "k8s.io/apiserver/pkg/endpoints"
111 e2e/vendor/k8s.io/apiserver/pkg/endpoints/filterlatency/filterlatency.go generated vendored Normal file
@@ -0,0 +1,111 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package filterlatency

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"go.opentelemetry.io/otel/trace"

	"k8s.io/apiserver/pkg/endpoints/metrics"
	apirequest "k8s.io/apiserver/pkg/endpoints/request"
	"k8s.io/apiserver/pkg/server/httplog"
	"k8s.io/klog/v2"
	"k8s.io/utils/clock"
)

type requestFilterRecordKeyType int

// requestFilterRecordKey is the context key for a request filter record struct.
const requestFilterRecordKey requestFilterRecordKeyType = iota

const minFilterLatencyToLog = 100 * time.Millisecond

type requestFilterRecord struct {
	name             string
	startedTimestamp time.Time
}

// withRequestFilterRecord attaches the given request filter record to the parent context.
func withRequestFilterRecord(parent context.Context, fr *requestFilterRecord) context.Context {
	return apirequest.WithValue(parent, requestFilterRecordKey, fr)
}

// requestFilterRecordFrom returns the request filter record from the given context.
func requestFilterRecordFrom(ctx context.Context) *requestFilterRecord {
	fr, _ := ctx.Value(requestFilterRecordKey).(*requestFilterRecord)
	return fr
}

// TrackStarted measures the timestamp the given handler has started execution
// by attaching a handler to the chain.
func TrackStarted(handler http.Handler, tp trace.TracerProvider, name string) http.Handler {
	return trackStarted(handler, tp, name, clock.RealClock{})
}

// TrackCompleted measures the timestamp the given handler has completed execution and then
// it updates the corresponding metric with the filter latency duration.
func TrackCompleted(handler http.Handler) http.Handler {
	return trackCompleted(handler, clock.RealClock{}, func(ctx context.Context, fr *requestFilterRecord, completedAt time.Time) {
		latency := completedAt.Sub(fr.startedTimestamp)
		metrics.RecordFilterLatency(ctx, fr.name, latency)
		if klog.V(3).Enabled() && latency > minFilterLatencyToLog {
			httplog.AddKeyValue(ctx, fmt.Sprintf("fl_%s", fr.name), latency.String())
		}
	})
}

func trackStarted(handler http.Handler, tp trace.TracerProvider, name string, clock clock.PassiveClock) http.Handler {
	// This is a noop if the tracing is disabled, since tp will be a NoopTracerProvider
	tracer := tp.Tracer("k8s.op/apiserver/pkg/endpoints/filterlatency")
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		if fr := requestFilterRecordFrom(ctx); fr != nil {
			fr.name = name
			fr.startedTimestamp = clock.Now()

			handler.ServeHTTP(w, r)
			return
		}

		fr := &requestFilterRecord{
			name:             name,
			startedTimestamp: clock.Now(),
		}
		ctx, _ = tracer.Start(ctx, name)
		r = r.WithContext(withRequestFilterRecord(ctx, fr))
		handler.ServeHTTP(w, r)
	})
}

func trackCompleted(handler http.Handler, clock clock.PassiveClock, action func(context.Context, *requestFilterRecord, time.Time)) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// The previous filter has just completed.
		completedAt := clock.Now()

		defer handler.ServeHTTP(w, r)

		ctx := r.Context()
		if fr := requestFilterRecordFrom(ctx); fr != nil {
			action(ctx, fr, completedAt)
		}
		trace.SpanFromContext(ctx).End()
	})
}
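The vendored TrackStarted/TrackCompleted pair brackets a filter in the chain: the outer wrapper stamps a start time on the request context, and the wrapper placed after the measured filter computes the elapsed time when the request reaches it. The following stdlib-only sketch (hypothetical names, not the vendored API, and without the metrics/tracing integration) shows the same bracketing pattern:

// filterlatency_sketch.go - stdlib-only illustration of the start/complete bracketing.
package main

import (
	"context"
	"log"
	"net/http"
	"net/http/httptest"
	"time"
)

type startKey struct{}

// trackStart records when the bracketed filter begins executing.
func trackStart(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := context.WithValue(r.Context(), startKey{}, time.Now())
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}

// trackComplete computes the elapsed time once the bracketed filter has run.
func trackComplete(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer next.ServeHTTP(w, r)
		if started, ok := r.Context().Value(startKey{}).(time.Time); ok {
			log.Printf("filter latency: %v", time.Since(started))
		}
	})
}

// dummyFilter stands in for the filter whose latency is being measured.
func dummyFilter(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		time.Sleep(10 * time.Millisecond) // stand-in for real filter work
		next.ServeHTTP(w, r)
	})
}

func main() {
	final := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) })
	// Chain order mirrors the vendored pattern: started -> measured filter -> completed -> rest.
	chain := trackStart(dummyFilter(trackComplete(final)))
	chain.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest(http.MethodGet, "/healthz", nil))
}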
6 e2e/vendor/k8s.io/apiserver/pkg/endpoints/filters/OWNERS generated vendored Normal file
@@ -0,0 +1,6 @@
# See the OWNERS docs at https://go.k8s.io/owners

reviewers:
  - deads2k
  - sttts
  - soltysh
263 e2e/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go generated vendored Normal file
@@ -0,0 +1,263 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package filters

import (
	"bufio"
	"context"
	"errors"
	"fmt"
	"net"
	"net/http"
	"sync"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	auditinternal "k8s.io/apiserver/pkg/apis/audit"
	"k8s.io/apiserver/pkg/audit"
	"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
	"k8s.io/apiserver/pkg/endpoints/request"
	"k8s.io/apiserver/pkg/endpoints/responsewriter"
)

// WithAudit decorates a http.Handler with audit logging information for all the
// requests coming to the server. Audit level is decided according to requests'
// attributes and audit policy. Logs are emitted to the audit sink to
// process events. If sink or audit policy is nil, no decoration takes place.
func WithAudit(handler http.Handler, sink audit.Sink, policy audit.PolicyRuleEvaluator, longRunningCheck request.LongRunningRequestCheck) http.Handler {
	if sink == nil || policy == nil {
		return handler
	}
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		ac, err := evaluatePolicyAndCreateAuditEvent(req, policy)
		if err != nil {
			utilruntime.HandleError(fmt.Errorf("failed to create audit event: %v", err))
			responsewriters.InternalError(w, req, errors.New("failed to create audit event"))
			return
		}

		if !ac.Enabled() {
			handler.ServeHTTP(w, req)
			return
		}
		ev := &ac.Event

		ctx := req.Context()
		omitStages := ac.RequestAuditConfig.OmitStages

		ev.Stage = auditinternal.StageRequestReceived
		if processed := processAuditEvent(ctx, sink, ev, omitStages); !processed {
			audit.ApiserverAuditDroppedCounter.WithContext(ctx).Inc()
			responsewriters.InternalError(w, req, errors.New("failed to store audit event"))
			return
		}

		// intercept the status code
		var longRunningSink audit.Sink
		if longRunningCheck != nil {
			ri, _ := request.RequestInfoFrom(ctx)
			if longRunningCheck(req, ri) {
				longRunningSink = sink
			}
		}
		respWriter := decorateResponseWriter(ctx, w, ev, longRunningSink, omitStages)

		// send audit event when we leave this func, either via a panic or cleanly. In the case of long
		// running requests, this will be the second audit event.
		defer func() {
			if r := recover(); r != nil {
				defer panic(r)
				ev.Stage = auditinternal.StagePanic
				ev.ResponseStatus = &metav1.Status{
					Code:    http.StatusInternalServerError,
					Status:  metav1.StatusFailure,
					Reason:  metav1.StatusReasonInternalError,
					Message: fmt.Sprintf("APIServer panic'd: %v", r),
				}
				processAuditEvent(ctx, sink, ev, omitStages)
				return
			}

			// if no StageResponseStarted event was sent b/c neither a status code nor a body was sent, fake it here
			// But Audit-Id http header will only be sent when http.ResponseWriter.WriteHeader is called.
			fakedSuccessStatus := &metav1.Status{
				Code:    http.StatusOK,
				Status:  metav1.StatusSuccess,
				Message: "Connection closed early",
			}
			if ev.ResponseStatus == nil && longRunningSink != nil {
				ev.ResponseStatus = fakedSuccessStatus
				ev.Stage = auditinternal.StageResponseStarted
				processAuditEvent(ctx, longRunningSink, ev, omitStages)
			}

			ev.Stage = auditinternal.StageResponseComplete
			if ev.ResponseStatus == nil {
				ev.ResponseStatus = fakedSuccessStatus
			}
			processAuditEvent(ctx, sink, ev, omitStages)
		}()
		handler.ServeHTTP(respWriter, req)
	})
}

// evaluatePolicyAndCreateAuditEvent is responsible for evaluating the audit
// policy configuration applicable to the request and create a new audit
// event that will be written to the API audit log.
// - error if anything bad happened
func evaluatePolicyAndCreateAuditEvent(req *http.Request, policy audit.PolicyRuleEvaluator) (*audit.AuditContext, error) {
	ctx := req.Context()
	ac := audit.AuditContextFrom(ctx)
	if ac == nil {
		// Auditing not configured.
		return nil, nil
	}

	attribs, err := GetAuthorizerAttributes(ctx)
	if err != nil {
		return ac, fmt.Errorf("failed to GetAuthorizerAttributes: %v", err)
	}

	rac := policy.EvaluatePolicyRule(attribs)
	audit.ObservePolicyLevel(ctx, rac.Level)
	ac.RequestAuditConfig = rac
	if rac.Level == auditinternal.LevelNone {
		// Don't audit.
		return ac, nil
	}

	requestReceivedTimestamp, ok := request.ReceivedTimestampFrom(ctx)
	if !ok {
		requestReceivedTimestamp = time.Now()
	}
	audit.LogRequestMetadata(ctx, req, requestReceivedTimestamp, rac.Level, attribs)

	return ac, nil
}

// writeLatencyToAnnotation writes the latency incurred in different
// layers of the apiserver to the annotations of the audit object.
// it should be invoked after ev.StageTimestamp has been set appropriately.
func writeLatencyToAnnotation(ctx context.Context, ev *auditinternal.Event) {
	// we will track latency in annotation only when the total latency
	// of the given request exceeds 500ms, this is in keeping with the
	// traces in rest/handlers for create, delete, update,
	// get, list, and deletecollection.
	const threshold = 500 * time.Millisecond
	latency := ev.StageTimestamp.Time.Sub(ev.RequestReceivedTimestamp.Time)
	if latency <= threshold {
		return
	}

	// if we are tracking latency incurred inside different layers within
	// the apiserver, add these as annotation to the audit event object.
	layerLatencies := request.AuditAnnotationsFromLatencyTrackers(ctx)
	if len(layerLatencies) == 0 {
		// latency tracking is not enabled for this request
		return
	}

	// record the total latency for this request, for convenience.
	layerLatencies["apiserver.latency.k8s.io/total"] = latency.String()
	audit.AddAuditAnnotationsMap(ctx, layerLatencies)
}

func processAuditEvent(ctx context.Context, sink audit.Sink, ev *auditinternal.Event, omitStages []auditinternal.Stage) bool {
	for _, stage := range omitStages {
		if ev.Stage == stage {
			return true
		}
	}

	switch {
	case ev.Stage == auditinternal.StageRequestReceived:
		ev.StageTimestamp = metav1.NewMicroTime(ev.RequestReceivedTimestamp.Time)
	case ev.Stage == auditinternal.StageResponseComplete:
		ev.StageTimestamp = metav1.NewMicroTime(time.Now())
		writeLatencyToAnnotation(ctx, ev)
	default:
		ev.StageTimestamp = metav1.NewMicroTime(time.Now())
	}

	audit.ObserveEvent(ctx)
	return sink.ProcessEvents(ev)
}

func decorateResponseWriter(ctx context.Context, responseWriter http.ResponseWriter, ev *auditinternal.Event, sink audit.Sink, omitStages []auditinternal.Stage) http.ResponseWriter {
	delegate := &auditResponseWriter{
		ctx:            ctx,
		ResponseWriter: responseWriter,
		event:          ev,
		sink:           sink,
		omitStages:     omitStages,
	}

	return responsewriter.WrapForHTTP1Or2(delegate)
}

var _ http.ResponseWriter = &auditResponseWriter{}
var _ responsewriter.UserProvidedDecorator = &auditResponseWriter{}

// auditResponseWriter intercepts WriteHeader, sets it in the event. If the sink is set, it will
// create immediately an event (for long running requests).
type auditResponseWriter struct {
	http.ResponseWriter
	ctx        context.Context
	event      *auditinternal.Event
	once       sync.Once
	sink       audit.Sink
	omitStages []auditinternal.Stage
}

func (a *auditResponseWriter) Unwrap() http.ResponseWriter {
	return a.ResponseWriter
}

func (a *auditResponseWriter) processCode(code int) {
	a.once.Do(func() {
		if a.event.ResponseStatus == nil {
			a.event.ResponseStatus = &metav1.Status{}
		}
		a.event.ResponseStatus.Code = int32(code)
		a.event.Stage = auditinternal.StageResponseStarted

		if a.sink != nil {
			processAuditEvent(a.ctx, a.sink, a.event, a.omitStages)
		}
	})
}

func (a *auditResponseWriter) Write(bs []byte) (int, error) {
	// the Go library calls WriteHeader internally if no code was written yet. But this will go unnoticed for us
	a.processCode(http.StatusOK)
	return a.ResponseWriter.Write(bs)
}

func (a *auditResponseWriter) WriteHeader(code int) {
	a.processCode(code)
	a.ResponseWriter.WriteHeader(code)
}

func (a *auditResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	// fake a response status before protocol switch happens
	a.processCode(http.StatusSwitchingProtocols)

	// the outer ResponseWriter object returned by WrapForHTTP1Or2 implements
	// http.Hijacker if the inner object (a.ResponseWriter) implements http.Hijacker.
	return a.ResponseWriter.(http.Hijacker).Hijack()
}
65 e2e/vendor/k8s.io/apiserver/pkg/endpoints/filters/audit_init.go generated vendored Normal file
@@ -0,0 +1,65 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package filters

import (
	"net/http"

	"k8s.io/apimachinery/pkg/types"
	auditinternal "k8s.io/apiserver/pkg/apis/audit"
	"k8s.io/apiserver/pkg/audit"

	"github.com/google/uuid"
)

// WithAuditInit initializes the audit context and attaches the Audit-ID associated with a request.
//
// a. If the caller does not specify a value for Audit-ID in the request header, we generate a new audit ID
// b. We echo the Audit-ID value to the caller via the response Header 'Audit-ID'.
func WithAuditInit(handler http.Handler) http.Handler {
	return withAuditInit(handler, func() string {
		return uuid.New().String()
	})
}

func withAuditInit(handler http.Handler, newAuditIDFunc func() string) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := audit.WithAuditContext(r.Context())
		r = r.WithContext(ctx)

		auditID := r.Header.Get(auditinternal.HeaderAuditID)
		if len(auditID) == 0 {
			auditID = newAuditIDFunc()
		}

		// Note: we save the user specified value of the Audit-ID header as is, no truncation is performed.
		audit.WithAuditID(ctx, types.UID(auditID))

		// We echo the Audit-ID in to the response header.
		// It's not guaranteed Audit-ID http header is sent for all requests.
		// For example, when user run "kubectl exec", apiserver uses a proxy handler
		// to deal with the request, users can only get http headers returned by kubelet node.
		//
		// This filter will also be used by other aggregated api server(s). For an aggregated API
		// we don't want to see the same audit ID appearing more than once.
		if value := w.Header().Get(auditinternal.HeaderAuditID); len(value) == 0 {
			w.Header().Set(auditinternal.HeaderAuditID, auditID)
		}

		handler.ServeHTTP(w, r)
	})
}
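WithAuditInit above follows a common "reuse or mint, then echo" request-ID pattern: keep an incoming Audit-ID if present, otherwise generate one, and only echo it to the client if no upstream component already set it. A stdlib-only sketch of the same pattern (hypothetical names; the vendored code uses github.com/google/uuid and stores the ID in the audit context rather than generating a hex string):

// auditid_sketch.go - stdlib-only illustration of the "reuse or mint, then echo" ID pattern.
package main

import (
	"crypto/rand"
	"encoding/hex"
	"net/http"
)

const headerAuditID = "Audit-ID"

func withRequestID(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		id := r.Header.Get(headerAuditID)
		if id == "" {
			buf := make([]byte, 16)
			rand.Read(buf) // error ignored for brevity in this sketch
			id = hex.EncodeToString(buf)
		}
		// Echo the ID only if an upstream proxy has not already set it on the response.
		if w.Header().Get(headerAuditID) == "" {
			w.Header().Set(headerAuditID, id)
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	ok := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) })
	http.ListenAndServe(":8080", withRequestID(ok))
}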
170 e2e/vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go generated vendored Normal file
@@ -0,0 +1,170 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package filters

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/authentication/authenticator"
	"k8s.io/apiserver/pkg/authentication/authenticatorfactory"
	"k8s.io/apiserver/pkg/authentication/request/headerrequest"
	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
	genericfeatures "k8s.io/apiserver/pkg/features"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/klog/v2"
)

type authenticationRecordMetricsFunc func(context.Context, *authenticator.Response, bool, error, authenticator.Audiences, time.Time, time.Time)

// WithAuthentication creates an http handler that tries to authenticate the given request as a user, and then
// stores any such user found onto the provided context for the request. If authentication fails or returns an error
// the failed handler is used. On success, "Authorization" header is removed from the request and handler
// is invoked to serve the request.
func WithAuthentication(handler http.Handler, auth authenticator.Request, failed http.Handler, apiAuds authenticator.Audiences, requestHeaderConfig *authenticatorfactory.RequestHeaderConfig) http.Handler {
	return withAuthentication(handler, auth, failed, apiAuds, requestHeaderConfig, recordAuthenticationMetrics)
}

func withAuthentication(handler http.Handler, auth authenticator.Request, failed http.Handler, apiAuds authenticator.Audiences, requestHeaderConfig *authenticatorfactory.RequestHeaderConfig, metrics authenticationRecordMetricsFunc) http.Handler {
	if auth == nil {
		klog.Warning("Authentication is disabled")
		return handler
	}
	standardRequestHeaderConfig := &authenticatorfactory.RequestHeaderConfig{
		UsernameHeaders:     headerrequest.StaticStringSlice{"X-Remote-User"},
		UIDHeaders:          headerrequest.StaticStringSlice{"X-Remote-Uid"},
		GroupHeaders:        headerrequest.StaticStringSlice{"X-Remote-Group"},
		ExtraHeaderPrefixes: headerrequest.StaticStringSlice{"X-Remote-Extra-"},
	}
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		authenticationStart := time.Now()

		if len(apiAuds) > 0 {
			req = req.WithContext(authenticator.WithAudiences(req.Context(), apiAuds))
		}
		resp, ok, err := auth.AuthenticateRequest(req)
		authenticationFinish := time.Now()
		defer func() {
			metrics(req.Context(), resp, ok, err, apiAuds, authenticationStart, authenticationFinish)
		}()
		if err != nil || !ok {
			if err != nil {
				klog.ErrorS(err, "Unable to authenticate the request")
			}
			failed.ServeHTTP(w, req)
			return
		}

		if !audiencesAreAcceptable(apiAuds, resp.Audiences) {
			err = fmt.Errorf("unable to match the audience: %v , accepted: %v", resp.Audiences, apiAuds)
			klog.Error(err)
			failed.ServeHTTP(w, req)
			return
		}

		// authorization header is not required anymore in case of a successful authentication.
		req.Header.Del("Authorization")

		// delete standard front proxy headers
		headerrequest.ClearAuthenticationHeaders(
			req.Header,
			standardRequestHeaderConfig.UsernameHeaders,
			standardRequestHeaderConfig.UIDHeaders,
			standardRequestHeaderConfig.GroupHeaders,
			standardRequestHeaderConfig.ExtraHeaderPrefixes,
		)

		// also delete any custom front proxy headers
		if requestHeaderConfig != nil {
			headerrequest.ClearAuthenticationHeaders(
				req.Header,
				requestHeaderConfig.UsernameHeaders,
				requestHeaderConfig.UIDHeaders,
				requestHeaderConfig.GroupHeaders,
				requestHeaderConfig.ExtraHeaderPrefixes,
			)
		}

		// http2 is an expensive protocol that is prone to abuse,
		// see CVE-2023-44487 and CVE-2023-39325 for an example.
		// Do not allow unauthenticated clients to keep these
		// connections open (i.e. basically degrade them to the
		// performance of http1 with keep-alive disabled).
		if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.UnauthenticatedHTTP2DOSMitigation) && req.ProtoMajor == 2 && isAnonymousUser(resp.User) {
			// limit this connection to just this request,
			// and then send a GOAWAY and tear down the TCP connection
			// https://github.com/golang/net/commit/97aa3a539ec716117a9d15a4659a911f50d13c3c
			w.Header().Set("Connection", "close")
		}

		req = req.WithContext(genericapirequest.WithUser(req.Context(), resp.User))
		handler.ServeHTTP(w, req)
	})
}

func Unauthorized(s runtime.NegotiatedSerializer) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		// http2 is an expensive protocol that is prone to abuse,
		// see CVE-2023-44487 and CVE-2023-39325 for an example.
		// Do not allow unauthenticated clients to keep these
		// connections open (i.e. basically degrade them to the
		// performance of http1 with keep-alive disabled).
		if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.UnauthenticatedHTTP2DOSMitigation) && req.ProtoMajor == 2 {
			// limit this connection to just this request,
			// and then send a GOAWAY and tear down the TCP connection
			// https://github.com/golang/net/commit/97aa3a539ec716117a9d15a4659a911f50d13c3c
			w.Header().Set("Connection", "close")
		}
		ctx := req.Context()
		requestInfo, found := genericapirequest.RequestInfoFrom(ctx)
		if !found {
			responsewriters.InternalError(w, req, errors.New("no RequestInfo found in the context"))
			return
		}

		gv := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion}
		responsewriters.ErrorNegotiated(apierrors.NewUnauthorized("Unauthorized"), s, gv, w, req)
	})
}

func audiencesAreAcceptable(apiAuds, responseAudiences authenticator.Audiences) bool {
	if len(apiAuds) == 0 || len(responseAudiences) == 0 {
		return true
	}

	return len(apiAuds.Intersect(responseAudiences)) > 0
}

func isAnonymousUser(u user.Info) bool {
	if u.GetName() == user.Anonymous {
		return true
	}
	for _, group := range u.GetGroups() {
		if group == user.AllUnauthenticated {
			return true
		}
	}
	return false
}
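The audience check above accepts a request when either side declares no audiences or the two sets intersect. A small standalone sketch of that acceptance rule over plain string slices (illustrative only, not the vendored authenticator.Audiences type, example audience values made up):

// audiences_sketch.go - plain-slice version of the acceptance rule.
package main

import "fmt"

func audiencesAcceptable(apiAuds, respAuds []string) bool {
	// Empty on either side means "no audience restriction", so accept.
	if len(apiAuds) == 0 || len(respAuds) == 0 {
		return true
	}
	seen := make(map[string]bool, len(apiAuds))
	for _, a := range apiAuds {
		seen[a] = true
	}
	for _, a := range respAuds {
		if seen[a] {
			return true // any overlap is enough
		}
	}
	return false
}

func main() {
	fmt.Println(audiencesAcceptable([]string{"https://kubernetes.default.svc"}, []string{"https://kubernetes.default.svc"})) // true
	fmt.Println(audiencesAcceptable([]string{"aud-a"}, []string{"aud-b"}))                                                   // false
	fmt.Println(audiencesAcceptable(nil, []string{"aud-b"}))                                                                 // true
}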
87 e2e/vendor/k8s.io/apiserver/pkg/endpoints/filters/authn_audit.go generated vendored Normal file
@@ -0,0 +1,87 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package filters

import (
	"errors"
	"fmt"
	"net/http"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	auditinternal "k8s.io/apiserver/pkg/apis/audit"
	"k8s.io/apiserver/pkg/audit"
	"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
)

// WithFailedAuthenticationAudit decorates a failed http.Handler used in WithAuthentication handler.
// It is meant to log only failed authentication requests.
func WithFailedAuthenticationAudit(failedHandler http.Handler, sink audit.Sink, policy audit.PolicyRuleEvaluator) http.Handler {
	if sink == nil || policy == nil {
		return failedHandler
	}
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		ac, err := evaluatePolicyAndCreateAuditEvent(req, policy)
		if err != nil {
			utilruntime.HandleError(fmt.Errorf("failed to create audit event: %v", err))
			responsewriters.InternalError(w, req, errors.New("failed to create audit event"))
			return
		}

		if !ac.Enabled() {
			failedHandler.ServeHTTP(w, req)
			return
		}
		ev := &ac.Event

		ev.ResponseStatus = &metav1.Status{}
		ev.ResponseStatus.Message = getAuthMethods(req)
		ev.Stage = auditinternal.StageResponseStarted

		rw := decorateResponseWriter(req.Context(), w, ev, sink, ac.RequestAuditConfig.OmitStages)
		failedHandler.ServeHTTP(rw, req)
	})
}

func getAuthMethods(req *http.Request) string {
	authMethods := []string{}

	if _, _, ok := req.BasicAuth(); ok {
		authMethods = append(authMethods, "basic")
	}

	auth := strings.TrimSpace(req.Header.Get("Authorization"))
	parts := strings.Split(auth, " ")
	if len(parts) > 1 && strings.ToLower(parts[0]) == "bearer" {
		authMethods = append(authMethods, "bearer")
	}

	token := strings.TrimSpace(req.URL.Query().Get("access_token"))
	if len(token) > 0 {
		authMethods = append(authMethods, "access_token")
	}

	if req.TLS != nil && len(req.TLS.PeerCertificates) > 0 {
		authMethods = append(authMethods, "x509")
	}

	if len(authMethods) > 0 {
		return fmt.Sprintf("Authentication failed, attempted: %s", strings.Join(authMethods, ", "))
	}
	return "Authentication failed, no credentials provided"
}
152 e2e/vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go generated vendored Normal file
@@ -0,0 +1,152 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package filters

import (
	"context"
	"errors"
	"net/http"
	"time"

	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	genericfeatures "k8s.io/apiserver/pkg/features"
	utilfeature "k8s.io/apiserver/pkg/util/feature"

	"k8s.io/klog/v2"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/audit"
	"k8s.io/apiserver/pkg/authorization/authorizer"
	"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
	"k8s.io/apiserver/pkg/endpoints/request"
)

const (
	// Annotation key names set in advanced audit
	decisionAnnotationKey = "authorization.k8s.io/decision"
	reasonAnnotationKey   = "authorization.k8s.io/reason"

	// Annotation values set in advanced audit
	decisionAllow  = "allow"
	decisionForbid = "forbid"
	reasonError    = "internal error"
)

type recordAuthorizationMetricsFunc func(ctx context.Context, authorized authorizer.Decision, err error, authStart time.Time, authFinish time.Time)

// WithAuthorization passes all authorized requests on to handler, and returns a forbidden error otherwise.
func WithAuthorization(hhandler http.Handler, auth authorizer.Authorizer, s runtime.NegotiatedSerializer) http.Handler {
	return withAuthorization(hhandler, auth, s, recordAuthorizationMetrics)
}

func withAuthorization(handler http.Handler, a authorizer.Authorizer, s runtime.NegotiatedSerializer, metrics recordAuthorizationMetricsFunc) http.Handler {
	if a == nil {
		klog.Warning("Authorization is disabled")
		return handler
	}
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		ctx := req.Context()
		authorizationStart := time.Now()

		attributes, err := GetAuthorizerAttributes(ctx)
		if err != nil {
			responsewriters.InternalError(w, req, err)
			return
		}
		authorized, reason, err := a.Authorize(ctx, attributes)

		authorizationFinish := time.Now()
		defer func() {
			metrics(ctx, authorized, err, authorizationStart, authorizationFinish)
		}()

		// an authorizer like RBAC could encounter evaluation errors and still allow the request, so authorizer decision is checked before error here.
		if authorized == authorizer.DecisionAllow {
			audit.AddAuditAnnotations(ctx,
				decisionAnnotationKey, decisionAllow,
				reasonAnnotationKey, reason)
			handler.ServeHTTP(w, req)
			return
		}
		if err != nil {
			audit.AddAuditAnnotation(ctx, reasonAnnotationKey, reasonError)
			responsewriters.InternalError(w, req, err)
			return
		}

		klog.V(4).InfoS("Forbidden", "URI", req.RequestURI, "reason", reason)
		audit.AddAuditAnnotations(ctx,
			decisionAnnotationKey, decisionForbid,
			reasonAnnotationKey, reason)
		responsewriters.Forbidden(ctx, attributes, w, req, reason, s)
	})
}

func GetAuthorizerAttributes(ctx context.Context) (authorizer.Attributes, error) {
	attribs := authorizer.AttributesRecord{}

	user, ok := request.UserFrom(ctx)
	if ok {
		attribs.User = user
	}

	requestInfo, found := request.RequestInfoFrom(ctx)
	if !found {
		return nil, errors.New("no RequestInfo found in the context")
	}

	// Start with common attributes that apply to resource and non-resource requests
	attribs.ResourceRequest = requestInfo.IsResourceRequest
	attribs.Path = requestInfo.Path
	attribs.Verb = requestInfo.Verb

	attribs.APIGroup = requestInfo.APIGroup
	attribs.APIVersion = requestInfo.APIVersion
	attribs.Resource = requestInfo.Resource
	attribs.Subresource = requestInfo.Subresource
	attribs.Namespace = requestInfo.Namespace
	attribs.Name = requestInfo.Name

	if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors) {
		// parsing here makes it easy to keep the AttributesRecord type value-only and avoids any mutex copies when
		// doing shallow copies in other steps.
		if len(requestInfo.FieldSelector) > 0 {
			fieldSelector, err := fields.ParseSelector(requestInfo.FieldSelector)
			if err != nil {
				attribs.FieldSelectorRequirements, attribs.FieldSelectorParsingErr = nil, err
			} else {
				if requirements := fieldSelector.Requirements(); len(requirements) > 0 {
					attribs.FieldSelectorRequirements, attribs.FieldSelectorParsingErr = fieldSelector.Requirements(), nil
				}
			}
		}

		if len(requestInfo.LabelSelector) > 0 {
			labelSelector, err := labels.Parse(requestInfo.LabelSelector)
			if err != nil {
				attribs.LabelSelectorRequirements, attribs.LabelSelectorParsingErr = nil, err
			} else {
				if requirements, _ /*selectable*/ := labelSelector.Requirements(); len(requirements) > 0 {
					attribs.LabelSelectorRequirements, attribs.LabelSelectorParsingErr = requirements, nil
				}
			}
		}
	}

	return &attribs, nil
}
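withAuthorization above deliberately inspects the allow decision before the evaluation error, so an authorizer that reports an error but still allows the request does not fail it. A stdlib-only sketch of that ordering with a toy decision type (hypothetical names and values, not the vendored authorizer package):

// authz_order_sketch.go - shows why the allow decision is inspected before the error.
package main

import (
	"errors"
	"fmt"
)

type decision int

const (
	decisionDeny decision = iota
	decisionAllow
)

// authorize stands in for authorizer.Authorizer.Authorize: it may return both
// a decision and an evaluation error (e.g. one policy rule failed to evaluate).
func authorize() (decision, string, error) {
	return decisionAllow, "matched role binding", errors.New("one policy rule failed to evaluate")
}

func main() {
	d, reason, err := authorize()
	switch {
	case d == decisionAllow: // checked first, mirroring the vendored filter
		fmt.Println("allowed:", reason)
	case err != nil:
		fmt.Println("internal error:", err)
	default:
		fmt.Println("forbidden:", reason)
	}
}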
33 e2e/vendor/k8s.io/apiserver/pkg/endpoints/filters/cachecontrol.go generated vendored Normal file
@@ -0,0 +1,33 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package filters

import (
	"net/http"
)

// WithCacheControl sets the Cache-Control header to "no-cache, private" because all servers are protected by authn/authz.
// see https://developers.google.com/web/fundamentals/performance/optimizing-content-efficiency/http-caching#defining_optimal_cache-control_policy
func WithCacheControl(handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		// Set the cache-control header if it is not already set
		if _, ok := w.Header()["Cache-Control"]; !ok {
			w.Header().Set("Cache-Control", "no-cache, private")
		}
		handler.ServeHTTP(w, req)
	})
}
21 e2e/vendor/k8s.io/apiserver/pkg/endpoints/filters/doc.go generated vendored Normal file
@@ -0,0 +1,21 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package filters contains all the http handler chain filters which
// _are_ api related, i.e. which are prerequisite for the API services
// to work (in contrast to the filters in the server package which are
// not part of the API contract).
package filters // import "k8s.io/apiserver/pkg/endpoints/filters"
274 e2e/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go generated vendored Normal file
@@ -0,0 +1,274 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package filters

import (
	"errors"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"k8s.io/klog/v2"

	authenticationv1 "k8s.io/api/authentication/v1"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/audit"
	"k8s.io/apiserver/pkg/authentication/serviceaccount"
	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/apiserver/pkg/authorization/authorizer"
	"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
	"k8s.io/apiserver/pkg/endpoints/request"
	"k8s.io/apiserver/pkg/server/httplog"
)

// WithImpersonation is a filter that will inspect and check requests that attempt to change the user.Info for their requests
func WithImpersonation(handler http.Handler, a authorizer.Authorizer, s runtime.NegotiatedSerializer) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		impersonationRequests, err := buildImpersonationRequests(req.Header)
		if err != nil {
			klog.V(4).Infof("%v", err)
			responsewriters.InternalError(w, req, err)
			return
		}
		if len(impersonationRequests) == 0 {
			handler.ServeHTTP(w, req)
			return
		}

		ctx := req.Context()
		requestor, exists := request.UserFrom(ctx)
		if !exists {
			responsewriters.InternalError(w, req, errors.New("no user found for request"))
			return
		}

		// if groups are not specified, then we need to look them up differently depending on the type of user
		// if they are specified, then they are the authority (including the inclusion of system:authenticated/system:unauthenticated groups)
		groupsSpecified := len(req.Header[authenticationv1.ImpersonateGroupHeader]) > 0

		// make sure we're allowed to impersonate each thing we're requesting. While we're iterating through, start building username
		// and group information
		username := ""
		groups := []string{}
		userExtra := map[string][]string{}
		uid := ""
		for _, impersonationRequest := range impersonationRequests {
			gvk := impersonationRequest.GetObjectKind().GroupVersionKind()
			actingAsAttributes := &authorizer.AttributesRecord{
				User:            requestor,
				Verb:            "impersonate",
				APIGroup:        gvk.Group,
				APIVersion:      gvk.Version,
				Namespace:       impersonationRequest.Namespace,
				Name:            impersonationRequest.Name,
				ResourceRequest: true,
			}

			switch gvk.GroupKind() {
			case v1.SchemeGroupVersion.WithKind("ServiceAccount").GroupKind():
				actingAsAttributes.Resource = "serviceaccounts"
				username = serviceaccount.MakeUsername(impersonationRequest.Namespace, impersonationRequest.Name)
				if !groupsSpecified {
					// if groups aren't specified for a service account, we know the groups because its a fixed mapping. Add them
					groups = serviceaccount.MakeGroupNames(impersonationRequest.Namespace)
				}

			case v1.SchemeGroupVersion.WithKind("User").GroupKind():
				actingAsAttributes.Resource = "users"
				username = impersonationRequest.Name

			case v1.SchemeGroupVersion.WithKind("Group").GroupKind():
				actingAsAttributes.Resource = "groups"
				groups = append(groups, impersonationRequest.Name)

			case authenticationv1.SchemeGroupVersion.WithKind("UserExtra").GroupKind():
				extraKey := impersonationRequest.FieldPath
				extraValue := impersonationRequest.Name
				actingAsAttributes.Resource = "userextras"
				actingAsAttributes.Subresource = extraKey
				userExtra[extraKey] = append(userExtra[extraKey], extraValue)

			case authenticationv1.SchemeGroupVersion.WithKind("UID").GroupKind():
				uid = string(impersonationRequest.Name)
				actingAsAttributes.Resource = "uids"

			default:
				klog.V(4).InfoS("unknown impersonation request type", "request", impersonationRequest)
				responsewriters.Forbidden(ctx, actingAsAttributes, w, req, fmt.Sprintf("unknown impersonation request type: %v", impersonationRequest), s)
				return
			}

			decision, reason, err := a.Authorize(ctx, actingAsAttributes)
			if err != nil || decision != authorizer.DecisionAllow {
				klog.V(4).InfoS("Forbidden", "URI", req.RequestURI, "reason", reason, "err", err)
				responsewriters.Forbidden(ctx, actingAsAttributes, w, req, reason, s)
				return
			}
		}

		if username != user.Anonymous {
			// When impersonating a non-anonymous user, include the 'system:authenticated' group
			// in the impersonated user info:
			// - if no groups were specified
			// - if a group has been specified other than 'system:authenticated'
			//
			// If 'system:unauthenticated' group has been specified we should not include
			// the 'system:authenticated' group.
			addAuthenticated := true
			for _, group := range groups {
				if group == user.AllAuthenticated || group == user.AllUnauthenticated {
					addAuthenticated = false
					break
				}
			}

			if addAuthenticated {
				groups = append(groups, user.AllAuthenticated)
			}
		} else {
			addUnauthenticated := true
			for _, group := range groups {
				if group == user.AllUnauthenticated {
					addUnauthenticated = false
					break
				}
			}

			if addUnauthenticated {
				groups = append(groups, user.AllUnauthenticated)
			}
		}

		newUser := &user.DefaultInfo{
			Name:   username,
			Groups: groups,
			Extra:  userExtra,
			UID:    uid,
		}
		req = req.WithContext(request.WithUser(ctx, newUser))

		oldUser, _ := request.UserFrom(ctx)
		httplog.LogOf(req, w).Addf("%v is impersonating %v", userString(oldUser), userString(newUser))

		ae := audit.AuditEventFrom(ctx)
		audit.LogImpersonatedUser(ae, newUser)

		// clear all the impersonation headers from the request
		req.Header.Del(authenticationv1.ImpersonateUserHeader)
		req.Header.Del(authenticationv1.ImpersonateGroupHeader)
		req.Header.Del(authenticationv1.ImpersonateUIDHeader)
		for headerName := range req.Header {
			if strings.HasPrefix(headerName, authenticationv1.ImpersonateUserExtraHeaderPrefix) {
				req.Header.Del(headerName)
			}
		}

		handler.ServeHTTP(w, req)
	})
}

func userString(u user.Info) string {
	if u == nil {
		return "<none>"
	}
	b := strings.Builder{}
	if name := u.GetName(); name == "" {
		b.WriteString("<empty>")
	} else {
		b.WriteString(name)
	}
	if groups := u.GetGroups(); len(groups) > 0 {
		b.WriteString("[")
		b.WriteString(strings.Join(groups, ","))
		b.WriteString("]")
	}
	return b.String()
}

func unescapeExtraKey(encodedKey string) string {
	key, err := url.PathUnescape(encodedKey) // Decode %-encoded bytes.
	if err != nil {
		return encodedKey // Always record extra strings, even if malformed/unencoded.
	}
	return key
}

// buildImpersonationRequests returns a list of objectreferences that represent the different things we're requesting to impersonate.
// Also includes a map[string][]string representing user.Info.Extra
// Each request must be authorized against the current user before switching contexts.
func buildImpersonationRequests(headers http.Header) ([]v1.ObjectReference, error) {
	impersonationRequests := []v1.ObjectReference{}

	requestedUser := headers.Get(authenticationv1.ImpersonateUserHeader)
	hasUser := len(requestedUser) > 0
	if hasUser {
		if namespace, name, err := serviceaccount.SplitUsername(requestedUser); err == nil {
			impersonationRequests = append(impersonationRequests, v1.ObjectReference{Kind: "ServiceAccount", Namespace: namespace, Name: name})
		} else {
			impersonationRequests = append(impersonationRequests, v1.ObjectReference{Kind: "User", Name: requestedUser})
		}
	}

	hasGroups := false
	for _, group := range headers[authenticationv1.ImpersonateGroupHeader] {
		hasGroups = true
		impersonationRequests = append(impersonationRequests, v1.ObjectReference{Kind: "Group", Name: group})
	}

	hasUserExtra := false
|
||||
for headerName, values := range headers {
|
||||
if !strings.HasPrefix(headerName, authenticationv1.ImpersonateUserExtraHeaderPrefix) {
|
||||
continue
|
||||
}
|
||||
|
||||
hasUserExtra = true
|
||||
extraKey := unescapeExtraKey(strings.ToLower(headerName[len(authenticationv1.ImpersonateUserExtraHeaderPrefix):]))
|
||||
|
||||
// make a separate request for each extra value they're trying to set
|
||||
for _, value := range values {
|
||||
impersonationRequests = append(impersonationRequests,
|
||||
v1.ObjectReference{
|
||||
Kind: "UserExtra",
|
||||
// we only parse out a group above, but the parsing will fail if there isn't SOME version
|
||||
// using the internal version will help us fail if anyone starts using it
|
||||
APIVersion: authenticationv1.SchemeGroupVersion.String(),
|
||||
Name: value,
|
||||
// ObjectReference doesn't have a subresource field. FieldPath is close and available, so we'll use that
|
||||
// TODO fight the good fight for ObjectReference to refer to resources and subresources
|
||||
FieldPath: extraKey,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
requestedUID := headers.Get(authenticationv1.ImpersonateUIDHeader)
|
||||
hasUID := len(requestedUID) > 0
|
||||
if hasUID {
|
||||
impersonationRequests = append(impersonationRequests, v1.ObjectReference{
|
||||
Kind: "UID",
|
||||
Name: requestedUID,
|
||||
APIVersion: authenticationv1.SchemeGroupVersion.String(),
|
||||
})
|
||||
}
|
||||
|
||||
if (hasGroups || hasUserExtra || hasUID) && !hasUser {
|
||||
return nil, fmt.Errorf("requested %v without impersonating a user", impersonationRequests)
|
||||
}
|
||||
|
||||
return impersonationRequests, nil
|
||||
}
|
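Illustrative aside, not part of the vendored file: a minimal client-side sketch of the impersonation headers that buildImpersonationRequests parses. The endpoint URL, user name, group, UID and extra key are placeholders; the header names mirror the authenticationv1 constants referenced above.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest(http.MethodGet, "https://apiserver.example/api/v1/namespaces", nil)
	if err != nil {
		panic(err)
	}
	// One header per thing being impersonated; the filter above builds one
	// ObjectReference per value and authorizes each against the requestor.
	req.Header.Set("Impersonate-User", "jane.doe")      // authenticationv1.ImpersonateUserHeader
	req.Header.Add("Impersonate-Group", "developers")   // authenticationv1.ImpersonateGroupHeader
	req.Header.Set("Impersonate-Uid", "1234-abcd")      // authenticationv1.ImpersonateUIDHeader
	req.Header.Set("Impersonate-Extra-Scopes", "view")  // authenticationv1.ImpersonateUserExtraHeaderPrefix + key

	for name, values := range req.Header {
		fmt.Println(name, values)
	}
}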
161
e2e/vendor/k8s.io/apiserver/pkg/endpoints/filters/metrics.go
generated
vendored
Normal file
@ -0,0 +1,161 @@
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package filters
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
|
||||
"k8s.io/apiserver/pkg/authentication/authenticator"
|
||||
"k8s.io/component-base/metrics"
|
||||
"k8s.io/component-base/metrics/legacyregistry"
|
||||
)
|
||||
|
||||
/*
|
||||
* By default, all the following metrics are defined as falling under
|
||||
* ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/1209-metrics-stability/kubernetes-control-plane-metrics-stability.md#stability-classes)
|
||||
*
|
||||
* Promoting the stability level of the metric is a responsibility of the component owner, since it
|
||||
* involves explicitly acknowledging support for the metric across multiple releases, in accordance with
|
||||
* the metric stability policy.
|
||||
*/
|
||||
const (
|
||||
successLabel = "success"
|
||||
failureLabel = "failure"
|
||||
errorLabel = "error"
|
||||
|
||||
allowedLabel = "allowed"
|
||||
deniedLabel = "denied"
|
||||
noOpinionLabel = "no-opinion"
|
||||
)
|
||||
|
||||
var (
|
||||
authenticatedUserCounter = metrics.NewCounterVec(
|
||||
&metrics.CounterOpts{
|
||||
Name: "authenticated_user_requests",
|
||||
Help: "Counter of authenticated requests broken out by username.",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
[]string{"username"},
|
||||
)
|
||||
|
||||
authenticatedAttemptsCounter = metrics.NewCounterVec(
|
||||
&metrics.CounterOpts{
|
||||
Name: "authentication_attempts",
|
||||
Help: "Counter of authenticated attempts.",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
[]string{"result"},
|
||||
)
|
||||
|
||||
authenticationLatency = metrics.NewHistogramVec(
|
||||
&metrics.HistogramOpts{
|
||||
Name: "authentication_duration_seconds",
|
||||
Help: "Authentication duration in seconds broken out by result.",
|
||||
Buckets: metrics.ExponentialBuckets(0.001, 2, 15),
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
[]string{"result"},
|
||||
)
|
||||
|
||||
authorizationAttemptsCounter = metrics.NewCounterVec(
|
||||
&metrics.CounterOpts{
|
||||
Name: "authorization_attempts_total",
|
||||
Help: "Counter of authorization attempts broken down by result. It can be either 'allowed', 'denied', 'no-opinion' or 'error'.",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
[]string{"result"},
|
||||
)
|
||||
|
||||
authorizationLatency = metrics.NewHistogramVec(
|
||||
&metrics.HistogramOpts{
|
||||
Name: "authorization_duration_seconds",
|
||||
Help: "Authorization duration in seconds broken out by result.",
|
||||
Buckets: metrics.ExponentialBuckets(0.001, 2, 15),
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
[]string{"result"},
|
||||
)
|
||||
)
|
||||
|
||||
func init() {
|
||||
legacyregistry.MustRegister(authenticatedUserCounter)
|
||||
legacyregistry.MustRegister(authenticatedAttemptsCounter)
|
||||
legacyregistry.MustRegister(authenticationLatency)
|
||||
legacyregistry.MustRegister(authorizationAttemptsCounter)
|
||||
legacyregistry.MustRegister(authorizationLatency)
|
||||
}
|
||||
|
||||
func recordAuthorizationMetrics(ctx context.Context, authorized authorizer.Decision, err error, authStart time.Time, authFinish time.Time) {
|
||||
var resultLabel string
|
||||
|
||||
switch {
|
||||
case authorized == authorizer.DecisionAllow:
|
||||
resultLabel = allowedLabel
|
||||
case err != nil:
|
||||
resultLabel = errorLabel
|
||||
case authorized == authorizer.DecisionDeny:
|
||||
resultLabel = deniedLabel
|
||||
case authorized == authorizer.DecisionNoOpinion:
|
||||
resultLabel = noOpinionLabel
|
||||
}
|
||||
|
||||
authorizationAttemptsCounter.WithContext(ctx).WithLabelValues(resultLabel).Inc()
|
||||
authorizationLatency.WithContext(ctx).WithLabelValues(resultLabel).Observe(authFinish.Sub(authStart).Seconds())
|
||||
}
|
||||
|
||||
func recordAuthenticationMetrics(ctx context.Context, resp *authenticator.Response, ok bool, err error, apiAudiences authenticator.Audiences, authStart time.Time, authFinish time.Time) {
|
||||
var resultLabel string
|
||||
|
||||
switch {
|
||||
case err != nil || (resp != nil && !audiencesAreAcceptable(apiAudiences, resp.Audiences)):
|
||||
resultLabel = errorLabel
|
||||
case !ok:
|
||||
resultLabel = failureLabel
|
||||
default:
|
||||
resultLabel = successLabel
|
||||
authenticatedUserCounter.WithContext(ctx).WithLabelValues(compressUsername(resp.User.GetName())).Inc()
|
||||
}
|
||||
|
||||
authenticatedAttemptsCounter.WithContext(ctx).WithLabelValues(resultLabel).Inc()
|
||||
authenticationLatency.WithContext(ctx).WithLabelValues(resultLabel).Observe(authFinish.Sub(authStart).Seconds())
|
||||
}
|
||||
|
||||
// compressUsername maps all possible usernames onto a small set of categories
|
||||
// of usernames. This is done both to limit the cardinality of the
|
||||
// authenticated_user_requests metric, and to avoid pushing actual usernames in the
|
||||
// metric.
|
||||
func compressUsername(username string) string {
|
||||
switch {
|
||||
// Known internal identities.
|
||||
case username == "admin" ||
|
||||
username == "client" ||
|
||||
username == "kube_proxy" ||
|
||||
username == "kubelet" ||
|
||||
username == "system:serviceaccount:kube-system:default":
|
||||
return username
|
||||
// Probably an email address.
|
||||
case strings.Contains(username, "@"):
|
||||
return "email_id"
|
||||
// Anything else (custom service accounts, custom external identities, etc.)
|
||||
default:
|
||||
return "other"
|
||||
}
|
||||
}
|
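Aside, not part of the vendored file: a small standalone sketch of the histogram bucket layout configured above, assuming metrics.ExponentialBuckets(0.001, 2, 15) yields 15 upper bounds starting at 1ms and doubling each step.

package main

import "fmt"

func main() {
	// 15 upper bounds: 0.001s, 0.002s, ..., 16.384s.
	buckets := make([]float64, 0, 15)
	upper := 0.001
	for i := 0; i < 15; i++ {
		buckets = append(buckets, upper)
		upper *= 2
	}
	fmt.Println(buckets)
}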
64
e2e/vendor/k8s.io/apiserver/pkg/endpoints/filters/mux_discovery_complete.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
||||
/*
|
||||
Copyright 2021 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package filters
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
type muxAndDiscoveryIncompleteKeyType int
|
||||
|
||||
const (
|
||||
// muxAndDiscoveryIncompleteKey is the key under which a protection signal is stored in the request's context for requests made before the server has installed all known HTTP paths
|
||||
muxAndDiscoveryIncompleteKey muxAndDiscoveryIncompleteKeyType = iota
|
||||
)
|
||||
|
||||
// NoMuxAndDiscoveryIncompleteKey checks if the context contains muxAndDiscoveryIncompleteKey.
|
||||
// The presence of the key indicates the request has been made when the HTTP paths weren't installed.
|
||||
func NoMuxAndDiscoveryIncompleteKey(ctx context.Context) bool {
|
||||
muxAndDiscoveryCompleteProtectionKeyValue, _ := ctx.Value(muxAndDiscoveryIncompleteKey).(string)
|
||||
return len(muxAndDiscoveryCompleteProtectionKeyValue) == 0
|
||||
}
|
||||
|
||||
// WithMuxAndDiscoveryComplete puts the muxAndDiscoveryIncompleteKey in the context if a request has been made before muxAndDiscoveryCompleteSignal has been ready.
|
||||
// Putting the key in place protects us from returning a 404 response instead of a 503.
|
||||
// It is especially important for controllers like GC and NS since they act on 404s.
|
||||
//
|
||||
// The presence of the key is checked in the NotFoundHandler (staging/src/k8s.io/apiserver/pkg/util/notfoundhandler/not_found_handler.go)
|
||||
//
|
||||
// The primary reason this filter exists is to protect from a potential race between the client's requests reaching the NotFoundHandler and the server becoming ready.
|
||||
// Without the protection key a request could still get a 404 response when the registered signals changed their status just slightly before reaching the new handler.
|
||||
// In that case, the presence of the key will make the handler return a 503 instead of a 404.
|
||||
func WithMuxAndDiscoveryComplete(handler http.Handler, muxAndDiscoveryCompleteSignal <-chan struct{}) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
if muxAndDiscoveryCompleteSignal != nil && !isClosed(muxAndDiscoveryCompleteSignal) {
|
||||
req = req.WithContext(context.WithValue(req.Context(), muxAndDiscoveryIncompleteKey, "MuxAndDiscoveryInstallationNotComplete"))
|
||||
}
|
||||
handler.ServeHTTP(w, req)
|
||||
})
|
||||
}
|
||||
|
||||
// isClosed is a convenience function that simply checks whether the given channel has been closed
|
||||
func isClosed(ch <-chan struct{}) bool {
|
||||
select {
|
||||
case <-ch:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
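Aside, not part of the vendored file: a dependency-free sketch of the closed-channel signal pattern that isClosed and WithMuxAndDiscoveryComplete rely on.

package main

import "fmt"

func isClosed(ch <-chan struct{}) bool {
	select {
	case <-ch:
		return true
	default:
		return false
	}
}

func main() {
	ready := make(chan struct{})
	fmt.Println(isClosed(ready)) // false: paths not installed yet, requests get the protection key

	close(ready) // signalled once all known HTTP paths are installed
	fmt.Println(isClosed(ready)) // true: requests no longer get the protection key
}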
177
e2e/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_deadline.go
generated
vendored
Normal file
@ -0,0 +1,177 @@
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package filters
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
auditinternal "k8s.io/apiserver/pkg/apis/audit"
|
||||
"k8s.io/apiserver/pkg/audit"
|
||||
"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
|
||||
"k8s.io/apiserver/pkg/endpoints/request"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/utils/clock"
|
||||
)
|
||||
|
||||
const (
|
||||
// The 'timeout' query parameter in the request URL has an invalid duration specifier
|
||||
invalidTimeoutInURL = "invalid timeout specified in the request URL"
|
||||
)
|
||||
|
||||
// WithRequestDeadline determines the timeout duration applicable to the given request and sets a new context
|
||||
// with the appropriate deadline.
|
||||
// auditWrapper provides an http.Handler that audits a failed request.
|
||||
// longRunning returns true if the given request is a long-running request.
|
||||
// requestTimeoutMaximum specifies the default request timeout value.
|
||||
func WithRequestDeadline(handler http.Handler, sink audit.Sink, policy audit.PolicyRuleEvaluator, longRunning request.LongRunningRequestCheck,
|
||||
negotiatedSerializer runtime.NegotiatedSerializer, requestTimeoutMaximum time.Duration) http.Handler {
|
||||
return withRequestDeadline(handler, sink, policy, longRunning, negotiatedSerializer, requestTimeoutMaximum, clock.RealClock{})
|
||||
}
|
||||
|
||||
func withRequestDeadline(handler http.Handler, sink audit.Sink, policy audit.PolicyRuleEvaluator, longRunning request.LongRunningRequestCheck,
|
||||
negotiatedSerializer runtime.NegotiatedSerializer, requestTimeoutMaximum time.Duration, clock clock.PassiveClock) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
ctx := req.Context()
|
||||
|
||||
requestInfo, ok := request.RequestInfoFrom(ctx)
|
||||
if !ok {
|
||||
handleError(w, req, http.StatusInternalServerError, nil, "no RequestInfo found in context, handler chain must be wrong")
|
||||
return
|
||||
}
|
||||
if longRunning(req, requestInfo) {
|
||||
handler.ServeHTTP(w, req)
|
||||
return
|
||||
}
|
||||
|
||||
userSpecifiedTimeout, ok, err := parseTimeout(req)
|
||||
if err != nil {
|
||||
statusErr := apierrors.NewBadRequest(err.Error())
|
||||
|
||||
klog.Errorf("Error - %s: %#v", err.Error(), req.RequestURI)
|
||||
|
||||
failed := failedErrorHandler(negotiatedSerializer, statusErr)
|
||||
failWithAudit := withFailedRequestAudit(failed, statusErr, sink, policy)
|
||||
failWithAudit.ServeHTTP(w, req)
|
||||
return
|
||||
}
|
||||
|
||||
timeout := requestTimeoutMaximum
|
||||
if ok {
|
||||
// we use the default timeout enforced by the apiserver:
|
||||
// - if the user has specified a timeout of 0s, this implies no timeout on the user's part.
|
||||
// - if the user has specified a timeout that exceeds the maximum deadline allowed by the apiserver.
|
||||
if userSpecifiedTimeout > 0 && userSpecifiedTimeout < requestTimeoutMaximum {
|
||||
timeout = userSpecifiedTimeout
|
||||
}
|
||||
}
|
||||
|
||||
started := clock.Now()
|
||||
if requestStartedTimestamp, ok := request.ReceivedTimestampFrom(ctx); ok {
|
||||
started = requestStartedTimestamp
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithDeadline(ctx, started.Add(timeout))
|
||||
defer cancel()
|
||||
|
||||
req = req.WithContext(ctx)
|
||||
handler.ServeHTTP(w, req)
|
||||
})
|
||||
}
|
||||
|
||||
// withFailedRequestAudit decorates a failed http.Handler and is used to audit a failed request.
|
||||
// statusErr is used to populate the Message property of ResponseStatus.
|
||||
func withFailedRequestAudit(failedHandler http.Handler, statusErr *apierrors.StatusError, sink audit.Sink, policy audit.PolicyRuleEvaluator) http.Handler {
|
||||
if sink == nil || policy == nil {
|
||||
return failedHandler
|
||||
}
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
ac, err := evaluatePolicyAndCreateAuditEvent(req, policy)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("failed to create audit event: %v", err))
|
||||
responsewriters.InternalError(w, req, errors.New("failed to create audit event"))
|
||||
return
|
||||
}
|
||||
|
||||
if !ac.Enabled() {
|
||||
failedHandler.ServeHTTP(w, req)
|
||||
return
|
||||
}
|
||||
ev := &ac.Event
|
||||
|
||||
ev.ResponseStatus = &metav1.Status{}
|
||||
ev.Stage = auditinternal.StageResponseStarted
|
||||
if statusErr != nil {
|
||||
ev.ResponseStatus.Message = statusErr.Error()
|
||||
}
|
||||
|
||||
rw := decorateResponseWriter(req.Context(), w, ev, sink, ac.RequestAuditConfig.OmitStages)
|
||||
failedHandler.ServeHTTP(rw, req)
|
||||
})
|
||||
}
|
||||
|
||||
// failedErrorHandler returns an http.Handler that uses the specified StatusError object
|
||||
// to render an error response to the request.
|
||||
func failedErrorHandler(s runtime.NegotiatedSerializer, statusError *apierrors.StatusError) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
ctx := req.Context()
|
||||
requestInfo, found := request.RequestInfoFrom(ctx)
|
||||
if !found {
|
||||
responsewriters.InternalError(w, req, errors.New("no RequestInfo found in the context"))
|
||||
return
|
||||
}
|
||||
|
||||
gv := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion}
|
||||
responsewriters.ErrorNegotiated(statusError, s, gv, w, req)
|
||||
})
|
||||
}
|
||||
|
||||
// parseTimeout parses the given HTTP request URL and extracts the timeout query parameter
|
||||
// value if specified by the user.
|
||||
// If a timeout is not specified, the function returns false and err is nil.
|
||||
// If the specified value is malformed, the function returns false and a non-nil err.
|
||||
func parseTimeout(req *http.Request) (time.Duration, bool, error) {
|
||||
value := req.URL.Query().Get("timeout")
|
||||
if value == "" {
|
||||
return 0, false, nil
|
||||
}
|
||||
|
||||
timeout, err := time.ParseDuration(value)
|
||||
if err != nil {
|
||||
return 0, false, fmt.Errorf("%s - %s", invalidTimeoutInURL, err.Error())
|
||||
}
|
||||
|
||||
return timeout, true, nil
|
||||
}
|
||||
|
||||
// handleError does the following:
|
||||
// a) it writes the specified error code, and msg to the ResponseWriter
|
||||
// object, it does not print the given innerErr into the ResponseWriter object.
|
||||
// b) additionally, it prints the given msg, and innerErr to the log with other
|
||||
// request scoped data that helps identify the given request.
|
||||
func handleError(w http.ResponseWriter, r *http.Request, code int, innerErr error, msg string) {
|
||||
http.Error(w, msg, code)
|
||||
klog.ErrorSDepth(1, innerErr, msg, "method", r.Method, "URI", r.RequestURI, "auditID", audit.GetAuditIDTruncated(r.Context()))
|
||||
}
|
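Aside, not part of the vendored file: a standalone sketch of the deadline selection performed by withRequestDeadline, where the user-supplied timeout is honoured only when it is positive and below the server-side maximum.

package main

import (
	"fmt"
	"time"
)

// effectiveTimeout mirrors the selection logic in withRequestDeadline above.
func effectiveTimeout(userSpecified time.Duration, specified bool, maximum time.Duration) time.Duration {
	timeout := maximum
	if specified && userSpecified > 0 && userSpecified < maximum {
		timeout = userSpecified
	}
	return timeout
}

func main() {
	maximum := 60 * time.Second
	fmt.Println(effectiveTimeout(30*time.Second, true, maximum)) // 30s, within bounds
	fmt.Println(effectiveTimeout(0, true, maximum))              // 1m0s, "0s" means no user timeout
	fmt.Println(effectiveTimeout(5*time.Minute, true, maximum))  // 1m0s, clamped to the maximum
	fmt.Println(effectiveTimeout(0, false, maximum))             // 1m0s, parameter absent
}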
40
e2e/vendor/k8s.io/apiserver/pkg/endpoints/filters/request_received_time.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package filters
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"k8s.io/apiserver/pkg/endpoints/request"
|
||||
"k8s.io/utils/clock"
|
||||
)
|
||||
|
||||
// WithRequestReceivedTimestamp attaches the ReceivedTimestamp (the time the request reached
|
||||
// the apiserver) to the context.
|
||||
func WithRequestReceivedTimestamp(handler http.Handler) http.Handler {
|
||||
return withRequestReceivedTimestampWithClock(handler, clock.RealClock{})
|
||||
}
|
||||
|
||||
// The clock is passed as a parameter, handy for unit testing.
|
||||
func withRequestReceivedTimestampWithClock(handler http.Handler, clock clock.PassiveClock) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
ctx := req.Context()
|
||||
req = req.WithContext(request.WithReceivedTimestamp(ctx, clock.Now()))
|
||||
|
||||
handler.ServeHTTP(w, req)
|
||||
})
|
||||
}
|
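Aside, not part of the vendored file: a hypothetical test sketch (assumed to live in the same filters package) showing why the clock is injectable; a fake clock from k8s.io/utils/clock/testing pins the ReceivedTimestamp that later filters read back from the context.

package filters

import (
	"net/http"
	"net/http/httptest"
	"testing"
	"time"

	"k8s.io/apiserver/pkg/endpoints/request"
	clocktesting "k8s.io/utils/clock/testing"
)

func TestRequestReceivedTimestamp(t *testing.T) {
	receivedAt := time.Date(2024, 1, 2, 3, 4, 5, 0, time.UTC)

	var got time.Time
	handler := withRequestReceivedTimestampWithClock(
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// The deadline filter above reads the same value via ReceivedTimestampFrom.
			got, _ = request.ReceivedTimestampFrom(r.Context())
		}),
		clocktesting.NewFakeClock(receivedAt),
	)

	handler.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest(http.MethodGet, "/api/v1/pods", nil))
	if !got.Equal(receivedAt) {
		t.Fatalf("expected %v, got %v", receivedAt, got)
	}
}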
41
e2e/vendor/k8s.io/apiserver/pkg/endpoints/filters/requestinfo.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package filters
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
|
||||
"k8s.io/apiserver/pkg/endpoints/request"
|
||||
)
|
||||
|
||||
// WithRequestInfo attaches a RequestInfo to the context.
|
||||
func WithRequestInfo(handler http.Handler, resolver request.RequestInfoResolver) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
ctx := req.Context()
|
||||
info, err := resolver.NewRequestInfo(req)
|
||||
if err != nil {
|
||||
responsewriters.InternalError(w, req, fmt.Errorf("failed to create RequestInfo: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
req = req.WithContext(request.WithRequestInfo(ctx, info))
|
||||
|
||||
handler.ServeHTTP(w, req)
|
||||
})
|
||||
}
|
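Aside, not part of the vendored file: a hedged sketch of how the resolver passed to WithRequestInfo is commonly built; the "api" and "apis" prefixes below are assumptions matching the default Kubernetes URL layout, not something this filter requires.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apiserver/pkg/endpoints/request"
)

func main() {
	resolver := &request.RequestInfoFactory{
		APIPrefixes:          sets.NewString("api", "apis"),
		GrouplessAPIPrefixes: sets.NewString("api"),
	}

	req := httptest.NewRequest(http.MethodGet, "/apis/apps/v1/namespaces/default/deployments/web", nil)
	info, err := resolver.NewRequestInfo(req)
	if err != nil {
		panic(err)
	}
	// Expected: true apps deployments get
	fmt.Println(info.IsResourceRequest, info.APIGroup, info.Resource, info.Verb)
}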
121
e2e/vendor/k8s.io/apiserver/pkg/endpoints/filters/storageversion.go
generated
vendored
Normal file
@ -0,0 +1,121 @@
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package filters
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
|
||||
"k8s.io/apiserver/pkg/endpoints/request"
|
||||
"k8s.io/apiserver/pkg/storageversion"
|
||||
_ "k8s.io/component-base/metrics/prometheus/workqueue" // for workqueue metric registration
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// WithStorageVersionPrecondition checks if the storage version barrier has
|
||||
// completed, if not, it only passes the following API requests:
|
||||
// 1. non-resource requests,
|
||||
// 2. read requests,
|
||||
// 3. write requests to the storageversion API,
|
||||
// 4. create requests to the namespace API sent by apiserver itself,
|
||||
// 5. write requests to the lease API in kube-system namespace,
|
||||
// 6. resources whose StorageVersion is not pending update, including non-persisted resources.
|
||||
func WithStorageVersionPrecondition(handler http.Handler, svm storageversion.Manager, s runtime.NegotiatedSerializer) http.Handler {
|
||||
if svm == nil {
|
||||
// TODO(roycaihw): switch to warning after the feature graduates to beta/GA
|
||||
klog.V(2).Infof("Storage Version barrier is disabled")
|
||||
return handler
|
||||
}
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
if svm.Completed() {
|
||||
handler.ServeHTTP(w, req)
|
||||
return
|
||||
}
|
||||
ctx := req.Context()
|
||||
requestInfo, found := request.RequestInfoFrom(ctx)
|
||||
if !found {
|
||||
responsewriters.InternalError(w, req, errors.New("no RequestInfo found in the context"))
|
||||
return
|
||||
}
|
||||
// Allow non-resource requests
|
||||
if !requestInfo.IsResourceRequest {
|
||||
handler.ServeHTTP(w, req)
|
||||
return
|
||||
}
|
||||
// Allow read requests
|
||||
if requestInfo.Verb == "get" || requestInfo.Verb == "list" || requestInfo.Verb == "watch" {
|
||||
handler.ServeHTTP(w, req)
|
||||
return
|
||||
}
|
||||
// Allow writes to the storage version API
|
||||
if requestInfo.APIGroup == "internal.apiserver.k8s.io" && requestInfo.Resource == "storageversions" {
|
||||
handler.ServeHTTP(w, req)
|
||||
return
|
||||
}
|
||||
// The system namespace is required for apiserver-identity lease to exist. Allow the apiserver
|
||||
// itself to create namespaces.
|
||||
// NOTE: with this exception, if the bootstrap client writes namespaces with a new version,
|
||||
// and the upgraded apiserver dies before updating the StorageVersion for namespaces, the
|
||||
// storage migrator won't be able to tell these namespaces are stored in a different version in etcd.
|
||||
// Because the bootstrap client only creates system namespace and doesn't update them, this can
|
||||
// only happen if the upgraded apiserver is the first apiserver that kicks off namespace creation,
|
||||
// or if an upgraded server that joins an existing cluster has new system namespaces (other
|
||||
// than kube-system, kube-public, kube-node-lease) that need to be created.
|
||||
u, hasUser := request.UserFrom(ctx)
|
||||
if requestInfo.APIGroup == "" && requestInfo.Resource == "namespaces" &&
|
||||
requestInfo.Verb == "create" && hasUser &&
|
||||
u.GetName() == user.APIServerUser && contains(u.GetGroups(), user.SystemPrivilegedGroup) {
|
||||
handler.ServeHTTP(w, req)
|
||||
return
|
||||
}
|
||||
// Allow writes to the lease API in kube-system. The storage version API depends on the
|
||||
// apiserver-identity leases to operate. Leases in kube-system are either apiserver-identity
|
||||
// lease (which gets garbage collected when stale) or leader-election leases (which gets
|
||||
// periodically updated by system components). Both types of leases won't be stale in etcd.
|
||||
if requestInfo.APIGroup == "coordination.k8s.io" && requestInfo.Resource == "leases" &&
|
||||
requestInfo.Namespace == metav1.NamespaceSystem {
|
||||
handler.ServeHTTP(w, req)
|
||||
return
|
||||
}
|
||||
// If the resource's StorageVersion is not in the to-be-updated list, let it pass.
|
||||
// Non-persisted resources are not in the to-be-updated list, so they will pass.
|
||||
gr := schema.GroupResource{Group: requestInfo.APIGroup, Resource: requestInfo.Resource}
|
||||
if !svm.PendingUpdate(gr) {
|
||||
handler.ServeHTTP(w, req)
|
||||
return
|
||||
}
|
||||
|
||||
gv := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion}
|
||||
responsewriters.ErrorNegotiated(apierrors.NewServiceUnavailable(fmt.Sprintf("wait for storage version registration to complete for resource: %v, last seen error: %v", gr, svm.LastUpdateError(gr))), s, gv, w, req)
|
||||
})
|
||||
}
|
||||
|
||||
func contains(s []string, e string) bool {
|
||||
for _, a := range s {
|
||||
if a == e {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
75
e2e/vendor/k8s.io/apiserver/pkg/endpoints/filters/traces.go
generated
vendored
Normal file
@ -0,0 +1,75 @@
|
||||
/*
|
||||
Copyright 2021 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package filters
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"k8s.io/apiserver/pkg/endpoints/request"
|
||||
|
||||
tracing "k8s.io/component-base/tracing"
|
||||
)
|
||||
|
||||
// WithTracing adds tracing to requests if the incoming request is sampled
|
||||
func WithTracing(handler http.Handler, tp trace.TracerProvider) http.Handler {
|
||||
opts := []otelhttp.Option{
|
||||
otelhttp.WithPropagators(tracing.Propagators()),
|
||||
otelhttp.WithPublicEndpoint(),
|
||||
otelhttp.WithTracerProvider(tp),
|
||||
otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string {
|
||||
ctx := r.Context()
|
||||
info, exist := request.RequestInfoFrom(ctx)
|
||||
if !exist || !info.IsResourceRequest {
|
||||
return r.Method
|
||||
}
|
||||
return getSpanNameFromRequestInfo(info, r)
|
||||
}),
|
||||
}
|
||||
wrappedHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Add the http.target attribute to the otelhttp span
|
||||
// Workaround for https://github.com/open-telemetry/opentelemetry-go-contrib/issues/3743
|
||||
if r.URL != nil {
|
||||
trace.SpanFromContext(r.Context()).SetAttributes(semconv.HTTPTarget(r.URL.RequestURI()))
|
||||
}
|
||||
handler.ServeHTTP(w, r)
|
||||
})
|
||||
// With Noop TracerProvider, the otelhttp still handles context propagation.
|
||||
// See https://github.com/open-telemetry/opentelemetry-go/tree/main/example/passthrough
|
||||
return otelhttp.NewHandler(wrappedHandler, "KubernetesAPI", opts...)
|
||||
}
|
||||
|
||||
func getSpanNameFromRequestInfo(info *request.RequestInfo, r *http.Request) string {
|
||||
spanName := "/" + info.APIPrefix
|
||||
if info.APIGroup != "" {
|
||||
spanName += "/" + info.APIGroup
|
||||
}
|
||||
spanName += "/" + info.APIVersion
|
||||
if info.Namespace != "" {
|
||||
spanName += "/namespaces/{:namespace}"
|
||||
}
|
||||
spanName += "/" + info.Resource
|
||||
if info.Name != "" {
|
||||
spanName += "/" + "{:name}"
|
||||
}
|
||||
if info.Subresource != "" {
|
||||
spanName += "/" + info.Subresource
|
||||
}
|
||||
return r.Method + " " + spanName
|
||||
}
|
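Aside, not part of the vendored file: a tiny sketch of the span name produced by getSpanNameFromRequestInfo for a GET on a namespaced deployment; the variable path segments are replaced with {:namespace} and {:name} to keep span-name cardinality bounded.

package main

import "fmt"

func main() {
	// RequestInfo for GET /apis/apps/v1/namespaces/default/deployments/web
	method := "GET"
	spanName := "/apis" + "/apps" + "/v1" + "/namespaces/{:namespace}" + "/deployments" + "/{:name}"
	fmt.Println(method + " " + spanName) // GET /apis/apps/v1/namespaces/{:namespace}/deployments/{:name}
}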
133
e2e/vendor/k8s.io/apiserver/pkg/endpoints/filters/warning.go
generated
vendored
Normal file
@ -0,0 +1,133 @@
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package filters
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
"unicode/utf8"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/net"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apiserver/pkg/warning"
|
||||
)
|
||||
|
||||
// WithWarningRecorder attaches a deduplicating k8s.io/apiserver/pkg/warning#WarningRecorder to the request context.
|
||||
func WithWarningRecorder(handler http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
recorder := &recorder{writer: w}
|
||||
req = req.WithContext(warning.WithWarningRecorder(req.Context(), recorder))
|
||||
handler.ServeHTTP(w, req)
|
||||
})
|
||||
}
|
||||
|
||||
var (
|
||||
truncateAtTotalRunes = 4 * 1024
|
||||
truncateItemRunes = 256
|
||||
)
|
||||
|
||||
type recordedWarning struct {
|
||||
agent string
|
||||
text string
|
||||
}
|
||||
|
||||
type recorder struct {
|
||||
// lock guards calls to AddWarning from multiple threads
|
||||
lock sync.Mutex
|
||||
|
||||
// recorded tracks whether AddWarning was already called with a given text
|
||||
recorded map[string]bool
|
||||
|
||||
// ordered tracks warnings added so they can be replayed and truncated if needed
|
||||
ordered []recordedWarning
|
||||
|
||||
// written tracks how many runes of text have been added as warning headers
|
||||
written int
|
||||
|
||||
// truncating tracks if we have already exceeded truncateAtTotalRunes and are now truncating warning messages as we add them
|
||||
truncating bool
|
||||
|
||||
// writer is the response writer to add warning headers to
|
||||
writer http.ResponseWriter
|
||||
}
|
||||
|
||||
func (r *recorder) AddWarning(agent, text string) {
|
||||
if len(text) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
|
||||
// if we've already exceeded our limit and are already truncating, return early
|
||||
if r.written >= truncateAtTotalRunes && r.truncating {
|
||||
return
|
||||
}
|
||||
|
||||
// init if needed
|
||||
if r.recorded == nil {
|
||||
r.recorded = map[string]bool{}
|
||||
}
|
||||
|
||||
// dedupe if already warned
|
||||
if r.recorded[text] {
|
||||
return
|
||||
}
|
||||
r.recorded[text] = true
|
||||
r.ordered = append(r.ordered, recordedWarning{agent: agent, text: text})
|
||||
|
||||
// truncate on a rune boundary, if needed
|
||||
textRuneLength := utf8.RuneCountInString(text)
|
||||
if r.truncating && textRuneLength > truncateItemRunes {
|
||||
text = string([]rune(text)[:truncateItemRunes])
|
||||
textRuneLength = truncateItemRunes
|
||||
}
|
||||
|
||||
// compute the header
|
||||
header, err := net.NewWarningHeader(299, agent, text)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// if this fits within our limit, or we're already truncating, write and return
|
||||
if r.written+textRuneLength <= truncateAtTotalRunes || r.truncating {
|
||||
r.written += textRuneLength
|
||||
r.writer.Header().Add("Warning", header)
|
||||
return
|
||||
}
|
||||
|
||||
// otherwise, enable truncation, reset, and replay the existing items as truncated warnings
|
||||
r.truncating = true
|
||||
r.written = 0
|
||||
r.writer.Header().Del("Warning")
|
||||
utilruntime.HandleError(fmt.Errorf("exceeded max warning header size, truncating"))
|
||||
for _, w := range r.ordered {
|
||||
agent := w.agent
|
||||
text := w.text
|
||||
|
||||
textRuneLength := utf8.RuneCountInString(text)
|
||||
if textRuneLength > truncateItemRunes {
|
||||
text = string([]rune(text)[:truncateItemRunes])
|
||||
textRuneLength = truncateItemRunes
|
||||
}
|
||||
if header, err := net.NewWarningHeader(299, agent, text); err == nil {
|
||||
r.written += textRuneLength
|
||||
r.writer.Header().Add("Warning", header)
|
||||
}
|
||||
}
|
||||
}
|
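Aside, not part of the vendored file: a hypothetical handler showing the producer side of this filter. warning.AddWarning records through the recorder installed by WithWarningRecorder, which dedupes the text and emits it as a "Warning: 299 ..." response header; the deprecated Widget resource is a made-up example.

package main

import (
	"log"
	"net/http"

	"k8s.io/apiserver/pkg/endpoints/filters"
	"k8s.io/apiserver/pkg/warning"
)

func main() {
	inner := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		// An empty agent string is the common in-tree pattern.
		warning.AddWarning(req.Context(), "", "example.com/v1beta1 Widget is deprecated; use example.com/v1 Widget")
		w.WriteHeader(http.StatusOK)
	})
	// Install the deduplicating recorder defined above so AddWarning has a
	// place to record to; responses then carry Warning: 299 headers.
	log.Fatal(http.ListenAndServe(":8080", filters.WithWarningRecorder(inner)))
}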
79
e2e/vendor/k8s.io/apiserver/pkg/endpoints/filters/webhook_duration.go
generated
vendored
Normal file
@ -0,0 +1,79 @@
|
||||
/*
|
||||
Copyright 2021 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package filters
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apiserver/pkg/endpoints/request"
|
||||
"k8s.io/apiserver/pkg/endpoints/responsewriter"
|
||||
)
|
||||
|
||||
var (
|
||||
watchVerbs = sets.NewString("watch")
|
||||
)
|
||||
|
||||
// WithLatencyTrackers adds a LatencyTrackers instance to the
|
||||
// context associated with a request so that we can measure latency
|
||||
// incurred in various components within the apiserver.
|
||||
func WithLatencyTrackers(handler http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
ctx := req.Context()
|
||||
requestInfo, ok := request.RequestInfoFrom(ctx)
|
||||
if !ok {
|
||||
handleError(w, req, http.StatusInternalServerError, nil, "no RequestInfo found in context, handler chain must be wrong")
|
||||
return
|
||||
}
|
||||
|
||||
if watchVerbs.Has(requestInfo.Verb) {
|
||||
handler.ServeHTTP(w, req)
|
||||
return
|
||||
}
|
||||
|
||||
req = req.WithContext(request.WithLatencyTrackers(ctx))
|
||||
w = responsewriter.WrapForHTTP1Or2(&writeLatencyTracker{
|
||||
ResponseWriter: w,
|
||||
ctx: req.Context(),
|
||||
})
|
||||
|
||||
handler.ServeHTTP(w, req)
|
||||
})
|
||||
}
|
||||
|
||||
var _ http.ResponseWriter = &writeLatencyTracker{}
|
||||
var _ responsewriter.UserProvidedDecorator = &writeLatencyTracker{}
|
||||
|
||||
type writeLatencyTracker struct {
|
||||
http.ResponseWriter
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func (wt *writeLatencyTracker) Unwrap() http.ResponseWriter {
|
||||
return wt.ResponseWriter
|
||||
}
|
||||
|
||||
func (wt *writeLatencyTracker) Write(bs []byte) (int, error) {
|
||||
startedAt := time.Now()
|
||||
defer func() {
|
||||
request.TrackResponseWriteLatency(wt.ctx, time.Since(startedAt))
|
||||
}()
|
||||
|
||||
return wt.ResponseWriter.Write(bs)
|
||||
}
|
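Aside, not part of the vendored file: a simplified, stdlib-only sketch of the decorator idea behind writeLatencyTracker, timing each Write on a wrapped http.ResponseWriter. The real filter additionally goes through responsewriter.WrapForHTTP1Or2 so optional interfaces such as Flusher and Hijacker keep working, which this sketch skips.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"time"
)

// timingWriter wraps a ResponseWriter and measures how long each Write takes
// (here just printed instead of being tracked in the request context).
type timingWriter struct {
	http.ResponseWriter
}

func (t *timingWriter) Write(p []byte) (int, error) {
	start := time.Now()
	defer func() { fmt.Println("response write took", time.Since(start)) }()
	return t.ResponseWriter.Write(p)
}

func main() {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})
	rec := httptest.NewRecorder()
	handler.ServeHTTP(&timingWriter{ResponseWriter: rec}, httptest.NewRequest(http.MethodGet, "/", nil))
	fmt.Println(rec.Body.String()) // hello
}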
147
e2e/vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go
generated
vendored
Normal file
@ -0,0 +1,147 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package endpoints
|
||||
|
||||
import (
|
||||
"path"
|
||||
"time"
|
||||
|
||||
restful "github.com/emicklei/go-restful/v3"
|
||||
|
||||
apidiscoveryv2 "k8s.io/api/apidiscovery/v2"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/managedfields"
|
||||
"k8s.io/apiserver/pkg/admission"
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
"k8s.io/apiserver/pkg/endpoints/discovery"
|
||||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
"k8s.io/apiserver/pkg/storageversion"
|
||||
)
|
||||
|
||||
// ConvertabilityChecker indicates what versions a GroupKind is available in.
|
||||
type ConvertabilityChecker interface {
|
||||
// VersionsForGroupKind indicates what versions are available to convert a group kind. This determines
|
||||
// what our decoding abilities are.
|
||||
VersionsForGroupKind(gk schema.GroupKind) []schema.GroupVersion
|
||||
}
|
||||
|
||||
// APIGroupVersion is a helper for exposing rest.Storage objects as http.Handlers via go-restful
|
||||
// It handles URLs of the form:
|
||||
// /${storage_key}[/${object_name}]
|
||||
// Where 'storage_key' points to a rest.Storage object stored in storage.
|
||||
// This object should contain all parameterization necessary for running a particular API version
|
||||
type APIGroupVersion struct {
|
||||
Storage map[string]rest.Storage
|
||||
|
||||
Root string
|
||||
|
||||
// GroupVersion is the external group version
|
||||
GroupVersion schema.GroupVersion
|
||||
|
||||
// AllServedVersionsByResource is indexed by resource and maps to a list of versions that resource exists in.
|
||||
// This was created so that StorageVersion for APIs can include a list of all version that are served for each
|
||||
// GroupResource tuple.
|
||||
AllServedVersionsByResource map[string][]string
|
||||
|
||||
// OptionsExternalVersion controls the Kubernetes APIVersion used for common objects in the apiserver
|
||||
// schema like api.Status, api.DeleteOptions, and metav1.ListOptions. Other implementors may
|
||||
// define a version "v1beta1" but want to use the Kubernetes "v1" internal objects. If
|
||||
// empty, defaults to GroupVersion.
|
||||
OptionsExternalVersion *schema.GroupVersion
|
||||
// MetaGroupVersion defaults to "meta.k8s.io/v1" and is the scheme group version used to decode
|
||||
// common API implementations like ListOptions. Future changes will allow this to vary by group
|
||||
// version (for when the inevitable meta/v2 group emerges).
|
||||
MetaGroupVersion *schema.GroupVersion
|
||||
|
||||
// Serializer is used to determine how to convert responses from API methods into bytes to send over
|
||||
// the wire.
|
||||
Serializer runtime.NegotiatedSerializer
|
||||
ParameterCodec runtime.ParameterCodec
|
||||
|
||||
Typer runtime.ObjectTyper
|
||||
Creater runtime.ObjectCreater
|
||||
Convertor runtime.ObjectConvertor
|
||||
ConvertabilityChecker ConvertabilityChecker
|
||||
Defaulter runtime.ObjectDefaulter
|
||||
Namer runtime.Namer
|
||||
UnsafeConvertor runtime.ObjectConvertor
|
||||
TypeConverter managedfields.TypeConverter
|
||||
|
||||
EquivalentResourceRegistry runtime.EquivalentResourceRegistry
|
||||
|
||||
// Authorizer determines whether a user is allowed to make a certain request. The Handler does a preliminary
|
||||
// authorization check using the request URI but it may be necessary to make additional checks, such as in
|
||||
// the create-on-update case
|
||||
Authorizer authorizer.Authorizer
|
||||
|
||||
Admit admission.Interface
|
||||
|
||||
MinRequestTimeout time.Duration
|
||||
|
||||
// The limit on the request body size that would be accepted and decoded in a write request.
|
||||
// 0 means no limit.
|
||||
MaxRequestBodyBytes int64
|
||||
}
|
||||
|
||||
// InstallREST registers the REST handlers (storage, watch, proxy and redirect) into a restful Container.
|
||||
// It is expected that the provided path root prefix will serve all operations. Root MUST NOT end
|
||||
// in a slash.
|
||||
func (g *APIGroupVersion) InstallREST(container *restful.Container) ([]apidiscoveryv2.APIResourceDiscovery, []*storageversion.ResourceInfo, error) {
|
||||
prefix := path.Join(g.Root, g.GroupVersion.Group, g.GroupVersion.Version)
|
||||
installer := &APIInstaller{
|
||||
group: g,
|
||||
prefix: prefix,
|
||||
minRequestTimeout: g.MinRequestTimeout,
|
||||
}
|
||||
|
||||
apiResources, resourceInfos, ws, registrationErrors := installer.Install()
|
||||
versionDiscoveryHandler := discovery.NewAPIVersionHandler(g.Serializer, g.GroupVersion, staticLister{apiResources})
|
||||
versionDiscoveryHandler.AddToWebService(ws)
|
||||
container.Add(ws)
|
||||
aggregatedDiscoveryResources, err := ConvertGroupVersionIntoToDiscovery(apiResources)
|
||||
if err != nil {
|
||||
registrationErrors = append(registrationErrors, err)
|
||||
}
|
||||
return aggregatedDiscoveryResources, removeNonPersistedResources(resourceInfos), utilerrors.NewAggregate(registrationErrors)
|
||||
}
|
||||
|
||||
func removeNonPersistedResources(infos []*storageversion.ResourceInfo) []*storageversion.ResourceInfo {
|
||||
var filtered []*storageversion.ResourceInfo
|
||||
for _, info := range infos {
|
||||
// if EncodingVersion is empty, then the apiserver does not
|
||||
// need to register this resource via the storage version API,
|
||||
// thus we can remove it.
|
||||
if info != nil && len(info.EncodingVersion) > 0 {
|
||||
filtered = append(filtered, info)
|
||||
}
|
||||
}
|
||||
return filtered
|
||||
}
|
||||
|
||||
// staticLister implements the APIResourceLister interface
|
||||
type staticLister struct {
|
||||
list []metav1.APIResource
|
||||
}
|
||||
|
||||
func (s staticLister) ListAPIResources() []metav1.APIResource {
|
||||
return s.list
|
||||
}
|
||||
|
||||
var _ discovery.APIResourceLister = &staticLister{}
|
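Aside, not part of the vendored file: a tiny sketch of the URL prefix computed in InstallREST above; with root "/apis", group "apps" and version "v1", every route is registered under "/apis/apps/v1".

package main

import (
	"fmt"
	"path"
)

func main() {
	root, group, version := "/apis", "apps", "v1"
	fmt.Println(path.Join(root, group, version)) // /apis/apps/v1
}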
284
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go
generated
vendored
Normal file
@ -0,0 +1,284 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/validation"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apiserver/pkg/admission"
|
||||
"k8s.io/apiserver/pkg/audit"
|
||||
"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager"
|
||||
"k8s.io/apiserver/pkg/endpoints/handlers/finisher"
|
||||
requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics"
|
||||
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
|
||||
"k8s.io/apiserver/pkg/endpoints/request"
|
||||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
"k8s.io/apiserver/pkg/util/dryrun"
|
||||
"k8s.io/component-base/tracing"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
var namespaceGVR = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}
|
||||
|
||||
func createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Interface, includeName bool) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, req *http.Request) {
|
||||
ctx := req.Context()
|
||||
// For performance tracking purposes.
|
||||
ctx, span := tracing.Start(ctx, "Create", traceFields(req)...)
|
||||
req = req.WithContext(ctx)
|
||||
defer span.End(500 * time.Millisecond)
|
||||
|
||||
namespace, name, err := scope.Namer.Name(req)
|
||||
if err != nil {
|
||||
if includeName {
|
||||
// name was required, return
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
|
||||
// otherwise attempt to look up the namespace
|
||||
namespace, err = scope.Namer.Namespace(req)
|
||||
if err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided
|
||||
		// timeout inside the parent context is lower than requestTimeoutUpperBound.
		ctx, cancel := context.WithTimeout(ctx, requestTimeoutUpperBound)
		defer cancel()
		outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)
		if err != nil {
			scope.err(err, w, req)
			return
		}

		gv := scope.Kind.GroupVersion()
		s, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer)
		if err != nil {
			scope.err(err, w, req)
			return
		}

		body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.Create)
		if err != nil {
			span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error()))
			scope.err(err, w, req)
			return
		}
		span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body)))

		options := &metav1.CreateOptions{}
		values := req.URL.Query()
		if err := metainternalversionscheme.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, options); err != nil {
			err = errors.NewBadRequest(err.Error())
			scope.err(err, w, req)
			return
		}
		if errs := validation.ValidateCreateOptions(options); len(errs) > 0 {
			err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "CreateOptions"}, "", errs)
			scope.err(err, w, req)
			return
		}
		options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("CreateOptions"))

		defaultGVK := scope.Kind
		original := r.New()

		validationDirective := fieldValidation(options.FieldValidation)
		decodeSerializer := s.Serializer
		if validationDirective == metav1.FieldValidationWarn || validationDirective == metav1.FieldValidationStrict {
			decodeSerializer = s.StrictSerializer
		}

		decoder := scope.Serializer.DecoderToVersion(decodeSerializer, scope.HubGroupVersion)
		span.AddEvent("About to convert to expected version")
		obj, gvk, err := decoder.Decode(body, &defaultGVK, original)
		if err != nil {
			strictError, isStrictError := runtime.AsStrictDecodingError(err)
			switch {
			case isStrictError && obj != nil && validationDirective == metav1.FieldValidationWarn:
				addStrictDecodingWarnings(req.Context(), strictError.Errors())
			case isStrictError && validationDirective == metav1.FieldValidationIgnore:
				klog.Warningf("unexpected strict error when field validation is set to ignore")
				fallthrough
			default:
				err = transformDecodeError(scope.Typer, err, original, gvk, body)
				scope.err(err, w, req)
				return
			}
		}

		objGV := gvk.GroupVersion()
		if !scope.AcceptsGroupVersion(objGV) {
			err = errors.NewBadRequest(fmt.Sprintf("the API version in the data (%s) does not match the expected API version (%v)", objGV.String(), gv.String()))
			scope.err(err, w, req)
			return
		}
		span.AddEvent("Conversion done")

		// On create, get name from new object if unset
		if len(name) == 0 {
			_, name, _ = scope.Namer.ObjectName(obj)
		}
		if len(namespace) == 0 && scope.Resource == namespaceGVR {
			namespace = name
		}
		ctx = request.WithNamespace(ctx, namespace)

		admit = admission.WithAudit(admit)
		audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, scope.Serializer)

		userInfo, _ := request.UserFrom(ctx)

		if objectMeta, err := meta.Accessor(obj); err == nil {
			preserveObjectMetaSystemFields := false
			if c, ok := r.(rest.SubresourceObjectMetaPreserver); ok && len(scope.Subresource) > 0 {
				preserveObjectMetaSystemFields = c.PreserveRequestObjectMetaSystemFieldsOnSubresourceCreate()
			}
			if !preserveObjectMetaSystemFields {
				rest.WipeObjectMetaSystemFields(objectMeta)
			}

			// ensure namespace on the object is correct, or error if a conflicting namespace was set in the object
			if err := rest.EnsureObjectNamespaceMatchesRequestNamespace(rest.ExpectedNamespaceForResource(namespace, scope.Resource), objectMeta); err != nil {
				scope.err(err, w, req)
				return
			}
		}

		span.AddEvent("About to store object in database")
		admissionAttributes := admission.NewAttributesRecord(obj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, options, dryrun.IsDryRun(options.DryRun), userInfo)
		requestFunc := func() (runtime.Object, error) {
			return r.Create(
				ctx,
				name,
				obj,
				rest.AdmissionToValidateObjectFunc(admit, admissionAttributes, scope),
				options,
			)
		}
		// Dedup owner references before updating managed fields
		dedupOwnerReferencesAndAddWarning(obj, req.Context(), false)
		result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) {
			liveObj, err := scope.Creater.New(scope.Kind)
			if err != nil {
				return nil, fmt.Errorf("failed to create new object (Create for %v): %v", scope.Kind, err)
			}
			obj = scope.FieldManager.UpdateNoErrors(liveObj, obj, managerOrUserAgent(options.FieldManager, req.UserAgent()))
			admit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit)

			if mutatingAdmission, ok := admit.(admission.MutationInterface); ok && mutatingAdmission.Handles(admission.Create) {
				if err := mutatingAdmission.Admit(ctx, admissionAttributes, scope); err != nil {
					return nil, err
				}
			}
			// Dedup owner references again after mutating admission happens
			dedupOwnerReferencesAndAddWarning(obj, req.Context(), true)
			result, err := requestFunc()
			// If the object wasn't committed to storage because its serialized size was too large,
			// it is safe to remove managedFields (which can be large) and try again.
			if isTooLargeError(err) {
				if accessor, accessorErr := meta.Accessor(obj); accessorErr == nil {
					accessor.SetManagedFields(nil)
					result, err = requestFunc()
				}
			}
			return result, err
		})
		if err != nil {
			span.AddEvent("Write to database call failed", attribute.Int("len", len(body)), attribute.String("err", err.Error()))
			scope.err(err, w, req)
			return
		}
		span.AddEvent("Write to database call succeeded", attribute.Int("len", len(body)))

		code := http.StatusCreated
		status, ok := result.(*metav1.Status)
		if ok && status.Code == 0 {
			status.Code = int32(code)
		}

		span.AddEvent("About to write a response")
		defer span.AddEvent("Writing http response done")
		transformResponseObject(ctx, scope, req, w, code, outputMediaType, result)
	}
}

// CreateNamedResource returns a function that will handle a resource creation with name.
func CreateNamedResource(r rest.NamedCreater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {
	return createHandler(r, scope, admission, true)
}

// CreateResource returns a function that will handle a resource creation.
func CreateResource(r rest.Creater, scope *RequestScope, admission admission.Interface) http.HandlerFunc {
	return createHandler(&namedCreaterAdapter{r}, scope, admission, false)
}

type namedCreaterAdapter struct {
	rest.Creater
}

func (c *namedCreaterAdapter) Create(ctx context.Context, name string, obj runtime.Object, createValidatingAdmission rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {
	return c.Creater.Create(ctx, obj, createValidatingAdmission, options)
}

// manager is assumed to be already a valid value, we need to make
// userAgent into a valid value too.
func managerOrUserAgent(manager, userAgent string) string {
	if manager != "" {
		return manager
	}
	return prefixFromUserAgent(userAgent)
}

// prefixFromUserAgent takes the characters preceding the first /, drops
// unprintable characters, and then trims anything beyond the
// FieldManagerMaxLength limit.
func prefixFromUserAgent(u string) string {
	m := strings.Split(u, "/")[0]
	buf := bytes.NewBuffer(nil)
	for _, r := range m {
		// Ignore non-printable characters
		if !unicode.IsPrint(r) {
			continue
		}
		// Only append if we have room for it
		if buf.Len()+utf8.RuneLen(r) > validation.FieldManagerMaxLength {
			break
		}
		buf.WriteRune(r)
	}
	return buf.String()
}
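For reference, a minimal standalone sketch of the user-agent fallback above: when a request does not set the fieldManager option, the manager name is derived from the User-Agent prefix and capped at validation.FieldManagerMaxLength (128 in current releases). The derivedFieldManager helper, the constant name, and the sample User-Agent string below are illustrative only, not part of the vendored package.

package main

import (
	"fmt"
	"strings"
	"unicode"
	"unicode/utf8"
)

// fieldManagerMaxLength mirrors validation.FieldManagerMaxLength (128 in current releases).
const fieldManagerMaxLength = 128

// derivedFieldManager restates the prefixFromUserAgent logic above: keep everything before
// the first "/", drop unprintable runes, and cap the result at the field manager limit.
func derivedFieldManager(userAgent string) string {
	m := strings.Split(userAgent, "/")[0]
	var b strings.Builder
	for _, r := range m {
		if !unicode.IsPrint(r) {
			continue
		}
		if b.Len()+utf8.RuneLen(r) > fieldManagerMaxLength {
			break
		}
		b.WriteRune(r)
	}
	return b.String()
}

func main() {
	// A create request without an explicit fieldManager falls back to the User-Agent prefix.
	fmt.Println(derivedFieldManager("kubectl/v1.30.0 (linux/amd64) kubernetes/abcdef")) // "kubectl"
}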
422
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go
generated
vendored
Normal file
422
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go
generated
vendored
Normal file
@ -0,0 +1,422 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package handlers

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"go.opentelemetry.io/otel/attribute"

	"k8s.io/apimachinery/pkg/api/errors"
	metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
	metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme"
	metainternalversionvalidation "k8s.io/apimachinery/pkg/apis/meta/internalversion/validation"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/validation"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/apiserver/pkg/admission"
	"k8s.io/apiserver/pkg/audit"
	"k8s.io/apiserver/pkg/authorization/authorizer"
	"k8s.io/apiserver/pkg/endpoints/handlers/finisher"
	requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics"
	"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
	"k8s.io/apiserver/pkg/endpoints/request"
	"k8s.io/apiserver/pkg/features"
	"k8s.io/apiserver/pkg/registry/rest"
	"k8s.io/apiserver/pkg/util/apihelpers"
	"k8s.io/apiserver/pkg/util/dryrun"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/component-base/tracing"

	"k8s.io/klog/v2"
	"k8s.io/utils/ptr"
)

// DeleteResource returns a function that will handle a resource deletion
// TODO admission here becomes solely validating admission
func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		ctx := req.Context()
		// For performance tracking purposes.
		ctx, span := tracing.Start(ctx, "Delete", traceFields(req)...)
		req = req.WithContext(ctx)
		defer span.End(500 * time.Millisecond)

		namespace, name, err := scope.Namer.Name(req)
		if err != nil {
			scope.err(err, w, req)
			return
		}

		// enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided
		// timeout inside the parent context is lower than requestTimeoutUpperBound.
		ctx, cancel := context.WithTimeout(ctx, requestTimeoutUpperBound)
		defer cancel()

		ctx = request.WithNamespace(ctx, namespace)
		admit = admission.WithAudit(admit)

		outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)
		if err != nil {
			scope.err(err, w, req)
			return
		}

		options := &metav1.DeleteOptions{}
		if allowsOptions {
			body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.Delete)
			if err != nil {
				span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error()))
				scope.err(err, w, req)
				return
			}
			span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body)))
			if len(body) > 0 {
				s, err := negotiation.NegotiateInputSerializer(req, false, apihelpers.GetMetaInternalVersionCodecs())
				if err != nil {
					scope.err(err, w, req)
					return
				}
				// For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions
				// It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions
				defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions")
				obj, gvk, err := apihelpers.GetMetaInternalVersionCodecs().DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options)
				if err != nil {
					scope.err(err, w, req)
					return
				}
				if obj != options {
					scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), w, req)
					return
				}
				span.AddEvent("Decoded delete options")

				objGV := gvk.GroupVersion()
				audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, apihelpers.GetMetaInternalVersionCodecs())
				span.AddEvent("Recorded the audit event")
			} else {
				if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil {
					err = errors.NewBadRequest(err.Error())
					scope.err(err, w, req)
					return
				}
			}
		}
		if !utilfeature.DefaultFeatureGate.Enabled(features.AllowUnsafeMalformedObjectDeletion) && options != nil {
			options.IgnoreStoreReadErrorWithClusterBreakingPotential = nil
		}
		if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 {
			err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs)
			scope.err(err, w, req)
			return
		}
		options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions"))

		userInfo, _ := request.UserFrom(ctx)
		staticAdmissionAttrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo)

		if utilfeature.DefaultFeatureGate.Enabled(features.AllowUnsafeMalformedObjectDeletion) {
			if options != nil && ptr.Deref(options.IgnoreStoreReadErrorWithClusterBreakingPotential, false) {
				// let's make sure that the audit will reflect that this delete request
				// was tried with ignoreStoreReadErrorWithClusterBreakingPotential enabled
				audit.AddAuditAnnotation(ctx, "apiserver.k8s.io/unsafe-delete-ignore-read-error", "")

				p, ok := r.(rest.CorruptObjectDeleterProvider)
				if !ok || p.GetCorruptObjDeleter() == nil {
					// this is a developer error
					scope.err(errors.NewInternalError(fmt.Errorf("no unsafe deleter provided, can not honor ignoreStoreReadErrorWithClusterBreakingPotential")), w, req)
					return
				}
				if scope.Authorizer == nil {
					scope.err(errors.NewInternalError(fmt.Errorf("no authorizer provided, unable to authorize unsafe delete")), w, req)
					return
				}
				if err := authorizeUnsafeDelete(ctx, staticAdmissionAttrs, scope.Authorizer); err != nil {
					scope.err(err, w, req)
					return
				}

				r = p.GetCorruptObjDeleter()
			}
		}

		span.AddEvent("About to delete object from database")
		wasDeleted := true
		result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) {
			obj, deleted, err := r.Delete(ctx, name, rest.AdmissionToValidateObjectDeleteFunc(admit, staticAdmissionAttrs, scope), options)
			wasDeleted = deleted
			return obj, err
		})
		if err != nil {
			scope.err(err, w, req)
			return
		}
		span.AddEvent("Object deleted from database")

		status := http.StatusOK
		// Return http.StatusAccepted if the resource was not deleted immediately and
		// user requested cascading deletion by setting OrphanDependents=false.
		// Note: We want to do this always if resource was not deleted immediately, but
		// that will break existing clients.
		// Other cases where resource is not instantly deleted are: namespace deletion
		// and pod graceful deletion.
		//nolint:staticcheck // SA1019 backwards compatibility
		//nolint: staticcheck
		if !wasDeleted && options.OrphanDependents != nil && !*options.OrphanDependents {
			status = http.StatusAccepted
		}
		// if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid
		// object with the response.
		if result == nil {
			result = &metav1.Status{
				Status: metav1.StatusSuccess,
				Code:   int32(status),
				Details: &metav1.StatusDetails{
					Name: name,
					Kind: scope.Kind.Kind,
				},
			}
		}

		span.AddEvent("About to write a response")
		defer span.AddEvent("Writing http response done")
		transformResponseObject(ctx, scope, req, w, status, outputMediaType, result)
	}
}

// DeleteCollection returns a function that will handle a collection deletion
func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope *RequestScope, admit admission.Interface) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		ctx := req.Context()
		ctx, span := tracing.Start(ctx, "Delete", traceFields(req)...)
		req = req.WithContext(ctx)
		defer span.End(500 * time.Millisecond)

		namespace, err := scope.Namer.Namespace(req)
		if err != nil {
			scope.err(err, w, req)
			return
		}

		// DELETECOLLECTION can be a lengthy operation,
		// we should not impose any 34s timeout here.
		// NOTE: This is similar to LIST which does not enforce a 34s timeout.
		ctx = request.WithNamespace(ctx, namespace)

		outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)
		if err != nil {
			scope.err(err, w, req)
			return
		}

		listOptions := metainternalversion.ListOptions{}
		if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &listOptions); err != nil {
			err = errors.NewBadRequest(err.Error())
			scope.err(err, w, req)
			return
		}

		metainternalversion.SetListOptionsDefaults(&listOptions, utilfeature.DefaultFeatureGate.Enabled(features.WatchList))
		if errs := metainternalversionvalidation.ValidateListOptions(&listOptions, utilfeature.DefaultFeatureGate.Enabled(features.WatchList)); len(errs) > 0 {
			err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "ListOptions"}, "", errs)
			scope.err(err, w, req)
			return
		}

		// transform fields
		// TODO: DecodeParametersInto should do this.
		if listOptions.FieldSelector != nil {
			fn := func(label, value string) (newLabel, newValue string, err error) {
				return scope.Convertor.ConvertFieldLabel(scope.Kind, label, value)
			}
			if listOptions.FieldSelector, err = listOptions.FieldSelector.Transform(fn); err != nil {
				// TODO: allow bad request to set field causes based on query parameters
				err = errors.NewBadRequest(err.Error())
				scope.err(err, w, req)
				return
			}
		}

		options := &metav1.DeleteOptions{}
		if checkBody {
			body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.DeleteCollection)
			if err != nil {
				span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error()))
				scope.err(err, w, req)
				return
			}
			span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body)))
			if len(body) > 0 {
				s, err := negotiation.NegotiateInputSerializer(req, false, apihelpers.GetMetaInternalVersionCodecs())
				if err != nil {
					scope.err(err, w, req)
					return
				}
				// For backwards compatibility, we need to allow existing clients to submit per group DeleteOptions
				// It is also allowed to pass a body with meta.k8s.io/v1.DeleteOptions
				defaultGVK := scope.MetaGroupVersion.WithKind("DeleteOptions")
				obj, gvk, err := apihelpers.GetMetaInternalVersionCodecs().DecoderToVersion(s.Serializer, defaultGVK.GroupVersion()).Decode(body, &defaultGVK, options)
				if err != nil {
					scope.err(err, w, req)
					return
				}
				if obj != options {
					scope.err(fmt.Errorf("decoded object cannot be converted to DeleteOptions"), w, req)
					return
				}

				objGV := gvk.GroupVersion()
				audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, apihelpers.GetMetaInternalVersionCodecs())
			} else {
				if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil {
					err = errors.NewBadRequest(err.Error())
					scope.err(err, w, req)
					return
				}
			}
		}
		if !utilfeature.DefaultFeatureGate.Enabled(features.AllowUnsafeMalformedObjectDeletion) && options != nil {
			options.IgnoreStoreReadErrorWithClusterBreakingPotential = nil
		}
		if errs := validation.ValidateDeleteOptions(options); len(errs) > 0 {
			err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", errs)
			scope.err(err, w, req)
			return
		}

		if utilfeature.DefaultFeatureGate.Enabled(features.AllowUnsafeMalformedObjectDeletion) {
			if options != nil && ptr.Deref(options.IgnoreStoreReadErrorWithClusterBreakingPotential, false) {
				fieldErrList := field.ErrorList{
					field.Invalid(field.NewPath("ignoreStoreReadErrorWithClusterBreakingPotential"), true, "is not allowed with DELETECOLLECTION, try again after removing the option"),
				}
				err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "DeleteOptions"}, "", fieldErrList)
				scope.err(err, w, req)
				return
			}
		}

		options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("DeleteOptions"))

		admit = admission.WithAudit(admit)
		userInfo, _ := request.UserFrom(ctx)
		staticAdmissionAttrs := admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, "", scope.Resource, scope.Subresource, admission.Delete, options, dryrun.IsDryRun(options.DryRun), userInfo)
		result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) {
			return r.DeleteCollection(ctx, rest.AdmissionToValidateObjectDeleteFunc(admit, staticAdmissionAttrs, scope), options, &listOptions)
		})
		if err != nil {
			scope.err(err, w, req)
			return
		}

		// if the rest.Deleter returns a nil object, fill out a status. Callers may return a valid
		// object with the response.
		if result == nil {
			result = &metav1.Status{
				Status: metav1.StatusSuccess,
				Code:   http.StatusOK,
				Details: &metav1.StatusDetails{
					Kind: scope.Kind.Kind,
				},
			}
		}

		span.AddEvent("About to write a response")
		defer span.AddEvent("Writing http response done")
		transformResponseObject(ctx, scope, req, w, http.StatusOK, outputMediaType, result)
	}
}

// authorizeUnsafeDelete ensures that the user has permission to do
// 'unsafe-delete-ignore-read-errors' on the resource being deleted when
// ignoreStoreReadErrorWithClusterBreakingPotential is enabled
func authorizeUnsafeDelete(ctx context.Context, attr admission.Attributes, authz authorizer.Authorizer) (err error) {
	if attr.GetOperation() != admission.Delete || attr.GetOperationOptions() == nil {
		return nil
	}
	options, ok := attr.GetOperationOptions().(*metav1.DeleteOptions)
	if !ok {
		return errors.NewInternalError(fmt.Errorf("expected an option of type: %T, but got: %T", &metav1.DeleteOptions{}, attr.GetOperationOptions()))
	}
	if !ptr.Deref(options.IgnoreStoreReadErrorWithClusterBreakingPotential, false) {
		return nil
	}

	requestInfo, found := request.RequestInfoFrom(ctx)
	if !found {
		return admission.NewForbidden(attr, fmt.Errorf("no RequestInfo found in the context"))
	}
	if !requestInfo.IsResourceRequest || len(attr.GetSubresource()) > 0 {
		return admission.NewForbidden(attr, fmt.Errorf("ignoreStoreReadErrorWithClusterBreakingPotential delete option is not allowed on a subresource or non-resource request"))
	}

	// if we are here, IgnoreStoreReadErrorWithClusterBreakingPotential
	// is set to true in the delete options, the user must have permission
	// to do 'unsafe-delete-ignore-read-errors' on the given resource.
	record := authorizer.AttributesRecord{
		User:            attr.GetUserInfo(),
		Verb:            "unsafe-delete-ignore-read-errors",
		Namespace:       attr.GetNamespace(),
		Name:            attr.GetName(),
		APIGroup:        attr.GetResource().Group,
		APIVersion:      attr.GetResource().Version,
		Resource:        attr.GetResource().Resource,
		ResourceRequest: true,
	}
	// TODO: can't use ResourceAttributesFrom from k8s.io/kubernetes/pkg/registry/authorization/util
	// to avoid a staging --> k8s.io/kubernetes dep issue
	if utilfeature.DefaultFeatureGate.Enabled(features.AuthorizeWithSelectors) {
		if len(requestInfo.FieldSelector) > 0 {
			fieldSelector, err := fields.ParseSelector(requestInfo.FieldSelector)
			if err != nil {
				record.FieldSelectorRequirements, record.FieldSelectorParsingErr = nil, err
			} else {
				if requirements := fieldSelector.Requirements(); len(requirements) > 0 {
					record.FieldSelectorRequirements, record.FieldSelectorParsingErr = fieldSelector.Requirements(), nil
				}
			}
		}
		if len(requestInfo.LabelSelector) > 0 {
			labelSelector, err := labels.Parse(requestInfo.LabelSelector)
			if err != nil {
				record.LabelSelectorRequirements, record.LabelSelectorParsingErr = nil, err
			} else {
				if requirements, _ /*selectable*/ := labelSelector.Requirements(); len(requirements) > 0 {
					record.LabelSelectorRequirements, record.LabelSelectorParsingErr = requirements, nil
				}
			}
		}
	}

	decision, reason, err := authz.Authorize(ctx, record)
	if err != nil {
		err = fmt.Errorf("error while checking permission for %q, %w", record.Verb, err)
		klog.FromContext(ctx).V(1).Error(err, "failed to authorize")
		return admission.NewForbidden(attr, err)
	}
	if decision == authorizer.DecisionAllow {
		return nil
	}

	return admission.NewForbidden(attr, fmt.Errorf("not permitted to do %q, reason: %s", record.Verb, reason))
}
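A short sketch of what a caller opts into when the unsafe-delete path above is taken. It assumes an apimachinery release that defines the IgnoreStoreReadErrorWithClusterBreakingPotential field (the same one the handler reads); the server only honors it when the AllowUnsafeMalformedObjectDeletion feature gate is enabled, and authorizeUnsafeDelete additionally requires the caller to be allowed the non-standard "unsafe-delete-ignore-read-errors" verb on the target resource.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

func main() {
	// Opting in to unsafe deletion of a corrupt (undecodable) object.
	// DELETECOLLECTION rejects this option, as the handler above shows.
	opts := metav1.DeleteOptions{
		IgnoreStoreReadErrorWithClusterBreakingPotential: ptr.To(true),
	}
	fmt.Println(*opts.IgnoreStoreReadErrorWithClusterBreakingPotential) // true
}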
18
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/doc.go
generated
vendored
Normal file
18
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/doc.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package handlers contains HTTP handlers to implement the apiserver APIs.
package handlers // import "k8s.io/apiserver/pkg/endpoints/handlers"
6
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/OWNERS
generated
vendored
Normal file
6
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/OWNERS
generated
vendored
Normal file
@ -0,0 +1,6 @@
approvers:
- apelisse
reviewers:
- kwiesmueller
emeritus_approvers:
- jennybuckley
90
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/admission.go
generated
vendored
Normal file
90
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/admission.go
generated
vendored
Normal file
@ -0,0 +1,90 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fieldmanager

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/util/managedfields"
	"k8s.io/apiserver/pkg/admission"
	"k8s.io/apiserver/pkg/warning"
)

// InvalidManagedFieldsAfterMutatingAdmissionWarningFormat is the warning that a client receives
// when a create/update/patch request results in invalid managedFields after going through the admission chain.
const InvalidManagedFieldsAfterMutatingAdmissionWarningFormat = ".metadata.managedFields was in an invalid state after admission; this could be caused by an outdated mutating admission controller; please fix your requests: %v"

// NewManagedFieldsValidatingAdmissionController validates the managedFields after calling
// the provided admission and resets them to their original state if they got changed to an invalid value
func NewManagedFieldsValidatingAdmissionController(wrap admission.Interface) admission.Interface {
	if wrap == nil {
		return nil
	}
	return &managedFieldsValidatingAdmissionController{wrap: wrap}
}

type managedFieldsValidatingAdmissionController struct {
	wrap admission.Interface
}

var _ admission.Interface = &managedFieldsValidatingAdmissionController{}
var _ admission.MutationInterface = &managedFieldsValidatingAdmissionController{}
var _ admission.ValidationInterface = &managedFieldsValidatingAdmissionController{}

// Handles calls the wrapped admission.Interface if applicable
func (admit *managedFieldsValidatingAdmissionController) Handles(operation admission.Operation) bool {
	return admit.wrap.Handles(operation)
}

// Admit calls the wrapped admission.Interface if applicable and resets the managedFields to their state before admission if they
// got modified in an invalid way
func (admit *managedFieldsValidatingAdmissionController) Admit(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) (err error) {
	mutationInterface, isMutationInterface := admit.wrap.(admission.MutationInterface)
	if !isMutationInterface {
		return nil
	}
	objectMeta, err := meta.Accessor(a.GetObject())
	if err != nil {
		// the object we are dealing with doesn't have object metadata defined
		// in that case we don't have to keep track of the managedField
		// just call the wrapped admission
		return mutationInterface.Admit(ctx, a, o)
	}
	managedFieldsBeforeAdmission := objectMeta.GetManagedFields()
	if err := mutationInterface.Admit(ctx, a, o); err != nil {
		return err
	}
	managedFieldsAfterAdmission := objectMeta.GetManagedFields()
	if err := managedfields.ValidateManagedFields(managedFieldsAfterAdmission); err != nil {
		objectMeta.SetManagedFields(managedFieldsBeforeAdmission)
		warning.AddWarning(ctx, "",
			fmt.Sprintf(InvalidManagedFieldsAfterMutatingAdmissionWarningFormat,
				err.Error()),
		)
	}
	return nil
}

// Validate calls the wrapped admission.Interface if applicable
func (admit *managedFieldsValidatingAdmissionController) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) (err error) {
	if validationInterface, isValidationInterface := admit.wrap.(admission.ValidationInterface); isValidationInterface {
		return validationInterface.Validate(ctx, a, o)
	}
	return nil
}
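A minimal sketch of how a handler wires this wrapper around an existing admission chain, as createHandler does above before invoking mutating admission. The wrapAdmission helper name is illustrative only; the import path is the package defined in this file.

package main

import (
	"k8s.io/apiserver/pkg/admission"
	"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager"
)

// wrapAdmission wraps a mutating admission chain so that invalid .metadata.managedFields
// produced by an outdated mutating webhook are rolled back and surfaced as a warning
// instead of being persisted.
func wrapAdmission(chain admission.Interface) admission.Interface {
	return fieldmanager.NewManagedFieldsValidatingAdmissionController(chain)
}

func main() {
	var chain admission.Interface // e.g. the configured plugin chain; nil stays nil here
	_ = wrapAdmission(chain)
}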
7018
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/endpoints.yaml
generated
vendored
Normal file
7018
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/endpoints.yaml
generated
vendored
Normal file
File diff suppressed because it is too large
226
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/equality.go
generated
vendored
Normal file
226
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/equality.go
generated
vendored
Normal file
@ -0,0 +1,226 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fieldmanager

import (
	"context"
	"fmt"
	"os"
	"reflect"
	"strconv"
	"sync"
	"time"

	"k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/conversion"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/endpoints/metrics"
	"k8s.io/klog/v2"
)

var (
	avoidTimestampEqualities     conversion.Equalities
	initAvoidTimestampEqualities sync.Once
)

func getAvoidTimestampEqualities() conversion.Equalities {
	initAvoidTimestampEqualities.Do(func() {
		if avoidNoopTimestampUpdatesString, exists := os.LookupEnv("KUBE_APISERVER_AVOID_NOOP_SSA_TIMESTAMP_UPDATES"); exists {
			if ret, err := strconv.ParseBool(avoidNoopTimestampUpdatesString); err == nil && !ret {
				// leave avoidTimestampEqualities empty.
				return
			} else {
				klog.Errorf("failed to parse envar KUBE_APISERVER_AVOID_NOOP_SSA_TIMESTAMP_UPDATES: %v", err)
			}
		}

		var eqs = equality.Semantic.Copy()
		err := eqs.AddFuncs(
			func(a, b metav1.ManagedFieldsEntry) bool {
				// Two objects' managed fields are equivalent if, ignoring timestamp,
				// the objects are deeply equal.
				a.Time = nil
				b.Time = nil
				return reflect.DeepEqual(a, b)
			},
			func(a, b unstructured.Unstructured) bool {
				// Check if the managed fields are equal by converting to structured types and leveraging the above
				// function, then, ignoring the managed fields, equality check the rest of the unstructured data.
				if !avoidTimestampEqualities.DeepEqual(a.GetManagedFields(), b.GetManagedFields()) {
					return false
				}
				return equalIgnoringValueAtPath(a.Object, b.Object, []string{"metadata", "managedFields"})
			},
		)

		if err != nil {
			panic(fmt.Errorf("failed to instantiate semantic equalities: %w", err))
		}

		avoidTimestampEqualities = eqs
	})
	return avoidTimestampEqualities
}

func equalIgnoringValueAtPath(a, b any, path []string) bool {
	if len(path) == 0 { // found the value to ignore
		return true
	}
	aMap, aOk := a.(map[string]any)
	bMap, bOk := b.(map[string]any)
	if !aOk || !bOk {
		// Can't traverse into non-maps, ignore
		return true
	}
	if len(aMap) != len(bMap) {
		return false
	}
	pathHead := path[0]
	for k, aVal := range aMap {
		bVal, ok := bMap[k]
		if !ok {
			return false
		}
		if k == pathHead {
			if !equalIgnoringValueAtPath(aVal, bVal, path[1:]) {
				return false
			}
		} else if !avoidTimestampEqualities.DeepEqual(aVal, bVal) {
			return false
		}
	}
	return true
}

// IgnoreManagedFieldsTimestampsTransformer reverts timestamp updates
// if the non-managed parts of the object are equivalent
func IgnoreManagedFieldsTimestampsTransformer(
	_ context.Context,
	newObj runtime.Object,
	oldObj runtime.Object,
) (res runtime.Object, err error) {
	equalities := getAvoidTimestampEqualities()
	if len(equalities.Equalities) == 0 {
		return newObj, nil
	}

	outcome := "unequal_objects_fast"
	start := time.Now()
	err = nil
	res = nil

	defer func() {
		if err != nil {
			outcome = "error"
		}

		metrics.RecordTimestampComparisonLatency(outcome, time.Since(start))
	}()

	// If managedFields modulo timestamps are unchanged
	// and
	// rest of object is unchanged
	// then
	// revert any changes to timestamps in managed fields
	// (to prevent spurious ResourceVersion bump)
	//
	// Procedure:
	// Do a quicker check to see if just managed fields modulo timestamps are
	// unchanged. If so, then do the full, slower check.
	//
	// In most cases which actually update the object, the managed fields modulo
	// timestamp check will fail, and we will be able to return early.
	//
	// In other cases, the managed fields may be exactly the same,
	// except for timestamp, but the objects are different. This is the
	// slow path which checks the full object.
	oldAccessor, err := meta.Accessor(oldObj)
	if err != nil {
		return nil, fmt.Errorf("failed to acquire accessor for oldObj: %v", err)
	}

	accessor, err := meta.Accessor(newObj)
	if err != nil {
		return nil, fmt.Errorf("failed to acquire accessor for newObj: %v", err)
	}

	oldManagedFields := oldAccessor.GetManagedFields()
	newManagedFields := accessor.GetManagedFields()

	if len(oldManagedFields) != len(newManagedFields) {
		// Return early if any managed fields entry was added/removed.
		// We want to retain user expectation that even if they write to a field
		// whose value did not change, they will still result as the field
		// manager at the end.
		return newObj, nil
	} else if len(newManagedFields) == 0 {
		// This transformation only makes sense when managedFields are
		// non-empty
		return newObj, nil
	}

	// This transformation only makes sense if the managed fields have at least one
	// changed timestamp; and are otherwise equal. Return early if there are no
	// changed timestamps.
	allTimesUnchanged := true
	for i, e := range newManagedFields {
		if !e.Time.Equal(oldManagedFields[i].Time) {
			allTimesUnchanged = false
			break
		}
	}

	if allTimesUnchanged {
		return newObj, nil
	}

	eqFn := equalities.DeepEqual
	if _, ok := newObj.(*unstructured.Unstructured); ok {
		// Use strict equality with unstructured
		eqFn = equalities.DeepEqualWithNilDifferentFromEmpty
	}

	// This condition ensures the managed fields are always compared first. If
	// this check fails, the if statement will short circuit. If the check
	// succeeds the slow path is taken which compares entire objects.
	if !eqFn(oldManagedFields, newManagedFields) {
		return newObj, nil
	}

	if eqFn(newObj, oldObj) {
		// Remove any changed timestamps, so that timestamp is not the only
		// change seen by etcd.
		//
		// newManagedFields is known to be exactly pairwise equal to
		// oldManagedFields except for timestamps.
		//
		// Simply replace possibly changed new timestamps with their old values.
		for idx := 0; idx < len(oldManagedFields); idx++ {
			newManagedFields[idx].Time = oldManagedFields[idx].Time
		}

		accessor.SetManagedFields(newManagedFields)
		outcome = "equal_objects"
		return newObj, nil
	}

	outcome = "unequal_objects_slow"
	return newObj, nil
}
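A minimal sketch of the transformer's effect, assuming the default setting of KUBE_APISERVER_AVOID_NOOP_SSA_TIMESTAMP_UPDATES (so the equalities are populated): when two objects differ only in their managedFields timestamps, the old timestamps are restored and the update becomes a no-op for etcd. The configMapWithTime helper and the sample data are illustrative only.

package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager"
)

// configMapWithTime builds a ConfigMap whose only varying part is the managedFields timestamp.
func configMapWithTime(t time.Time) *corev1.ConfigMap {
	return &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name: "demo",
			ManagedFields: []metav1.ManagedFieldsEntry{{
				Manager:   "kubectl",
				Operation: metav1.ManagedFieldsOperationApply,
				Time:      &metav1.Time{Time: t},
			}},
		},
		Data: map[string]string{"k": "v"},
	}
}

func main() {
	old := configMapWithTime(time.Unix(1000, 0))
	updated := configMapWithTime(time.Unix(2000, 0)) // only the managedFields timestamp differs

	res, err := fieldmanager.IgnoreManagedFieldsTimestampsTransformer(context.Background(), updated, old)
	if err != nil {
		panic(err)
	}
	cm := res.(*corev1.ConfigMap)
	// The changed timestamp has been reverted, so etcd would see a no-op update.
	fmt.Println(cm.ManagedFields[0].Time.Time.Equal(time.Unix(1000, 0))) // true
}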
261
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/node.yaml
generated
vendored
Normal file
261
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/node.yaml
generated
vendored
Normal file
@ -0,0 +1,261 @@
|
||||
apiVersion: v1
|
||||
kind: Node
|
||||
metadata:
|
||||
annotations:
|
||||
container.googleapis.com/instance_id: "123456789321654789"
|
||||
node.alpha.kubernetes.io/ttl: "0"
|
||||
volumes.kubernetes.io/controller-managed-attach-detach: "true"
|
||||
creationTimestamp: "2019-07-09T16:17:29Z"
|
||||
labels:
|
||||
kubernetes.io/arch: amd64
|
||||
beta.kubernetes.io/fluentd-ds-ready: "true"
|
||||
beta.kubernetes.io/instance-type: n1-standard-4
|
||||
kubernetes.io/os: linux
|
||||
cloud.google.com/gke-nodepool: default-pool
|
||||
cloud.google.com/gke-os-distribution: cos
|
||||
failure-domain.beta.kubernetes.io/region: us-central1
|
||||
failure-domain.beta.kubernetes.io/zone: us-central1-b
|
||||
topology.kubernetes.io/region: us-central1
|
||||
topology.kubernetes.io/zone: us-central1-b
|
||||
kubernetes.io/hostname: node-default-pool-something
|
||||
name: node-default-pool-something
|
||||
resourceVersion: "211582541"
|
||||
selfLink: /api/v1/nodes/node-default-pool-something
|
||||
uid: 0c24d0e1-a265-11e9-abe4-42010a80026b
|
||||
spec:
|
||||
podCIDR: 10.0.0.1/24
|
||||
providerID: some-provider-id-of-some-sort
|
||||
status:
|
||||
addresses:
|
||||
- address: 10.0.0.1
|
||||
type: InternalIP
|
||||
- address: 192.168.0.1
|
||||
type: ExternalIP
|
||||
- address: node-default-pool-something
|
||||
type: Hostname
|
||||
allocatable:
|
||||
cpu: 3920m
|
||||
ephemeral-storage: "104638878617"
|
||||
hugepages-2Mi: "0"
|
||||
memory: 12700100Ki
|
||||
pods: "110"
|
||||
capacity:
|
||||
cpu: "4"
|
||||
ephemeral-storage: 202086868Ki
|
||||
hugepages-2Mi: "0"
|
||||
memory: 15399364Ki
|
||||
pods: "110"
|
||||
conditions:
|
||||
- lastHeartbeatTime: "2019-09-20T19:32:08Z"
|
||||
lastTransitionTime: "2019-07-09T16:22:08Z"
|
||||
message: containerd is functioning properly
|
||||
reason: FrequentContainerdRestart
|
||||
status: "False"
|
||||
type: FrequentContainerdRestart
|
||||
- lastHeartbeatTime: "2019-09-20T19:32:08Z"
|
||||
lastTransitionTime: "2019-07-09T16:22:06Z"
|
||||
message: docker overlay2 is functioning properly
|
||||
reason: CorruptDockerOverlay2
|
||||
status: "False"
|
||||
type: CorruptDockerOverlay2
|
||||
- lastHeartbeatTime: "2019-09-20T19:32:08Z"
|
||||
lastTransitionTime: "2019-07-09T16:22:06Z"
|
||||
message: node is functioning properly
|
||||
reason: UnregisterNetDevice
|
||||
status: "False"
|
||||
type: FrequentUnregisterNetDevice
|
||||
- lastHeartbeatTime: "2019-09-20T19:32:08Z"
|
||||
lastTransitionTime: "2019-07-09T16:17:04Z"
|
||||
message: kernel has no deadlock
|
||||
reason: KernelHasNoDeadlock
|
||||
status: "False"
|
||||
type: KernelDeadlock
|
||||
- lastHeartbeatTime: "2019-09-20T19:32:08Z"
|
||||
lastTransitionTime: "2019-07-09T16:17:04Z"
|
||||
message: Filesystem is not read-only
|
||||
reason: FilesystemIsNotReadOnly
|
||||
status: "False"
|
||||
type: ReadonlyFilesystem
|
||||
- lastHeartbeatTime: "2019-09-20T19:32:08Z"
|
||||
lastTransitionTime: "2019-07-09T16:22:05Z"
|
||||
message: kubelet is functioning properly
|
||||
reason: FrequentKubeletRestart
|
||||
status: "False"
|
||||
type: FrequentKubeletRestart
|
||||
- lastHeartbeatTime: "2019-09-20T19:32:08Z"
|
||||
lastTransitionTime: "2019-07-09T16:22:06Z"
|
||||
message: docker is functioning properly
|
||||
reason: FrequentDockerRestart
|
||||
status: "False"
|
||||
type: FrequentDockerRestart
|
||||
- lastHeartbeatTime: "2019-07-09T16:17:47Z"
|
||||
lastTransitionTime: "2019-07-09T16:17:47Z"
|
||||
message: RouteController created a route
|
||||
reason: RouteCreated
|
||||
status: "False"
|
||||
type: NetworkUnavailable
|
||||
- lastHeartbeatTime: "2019-09-20T19:32:50Z"
|
||||
lastTransitionTime: "2019-07-09T16:17:29Z"
|
||||
message: kubelet has sufficient disk space available
|
||||
reason: KubeletHasSufficientDisk
|
||||
status: "False"
|
||||
type: OutOfDisk
|
||||
- lastHeartbeatTime: "2019-09-20T19:32:50Z"
|
||||
lastTransitionTime: "2019-07-09T16:17:29Z"
|
||||
message: kubelet has sufficient memory available
|
||||
reason: KubeletHasSufficientMemory
|
||||
status: "False"
|
||||
type: MemoryPressure
|
||||
- lastHeartbeatTime: "2019-09-20T19:32:50Z"
|
||||
lastTransitionTime: "2019-07-09T16:17:29Z"
|
||||
message: kubelet has no disk pressure
|
||||
reason: KubeletHasNoDiskPressure
|
||||
status: "False"
|
||||
type: DiskPressure
|
||||
- lastHeartbeatTime: "2019-09-20T19:32:50Z"
|
||||
lastTransitionTime: "2019-07-09T16:17:29Z"
|
||||
message: kubelet has sufficient PID available
|
||||
reason: KubeletHasSufficientPID
|
||||
status: "False"
|
||||
type: PIDPressure
|
||||
- lastHeartbeatTime: "2019-09-20T19:32:50Z"
|
||||
lastTransitionTime: "2019-07-09T16:17:49Z"
|
||||
message: kubelet is posting ready status
|
||||
reason: KubeletReady
|
||||
status: "True"
|
||||
type: Ready
|
||||
daemonEndpoints:
|
||||
kubeletEndpoint:
|
||||
Port: 10250
|
||||
images:
|
||||
- names:
|
||||
- grafana/grafana@sha256:80e5e113a984d74836aa16f5b4524012099436b1a50df293f00ac6377fb512c8
|
||||
- grafana/grafana:4.4.2
|
||||
sizeBytes: 287008013
|
||||
- names:
|
||||
- registry.k8s.io/node-problem-detector@sha256:f95cab985c26b2f46e9bd43283e0bfa88860c14e0fb0649266babe8b65e9eb2b
|
||||
- registry.k8s.io/node-problem-detector:v0.4.1
|
||||
sizeBytes: 286572743
|
||||
- names:
|
||||
- grafana/grafana@sha256:7ff7f9b2501a5d55b55ce3f58d21771b1c5af1f2a4ab7dbf11bef7142aae7033
|
||||
- grafana/grafana:4.2.0
|
||||
sizeBytes: 277940263
|
||||
- names:
|
||||
- influxdb@sha256:7dddf03376348876ed4bdf33d6dfa3326f45a2bae0930dbd80781a374eb519bc
|
||||
- influxdb:1.2.2
|
||||
sizeBytes: 223948571
|
||||
- names:
|
||||
- gcr.io/stackdriver-agents/stackdriver-logging-agent@sha256:f8d5231b67b9c53f60068b535a11811d29d1b3efd53d2b79f2a2591ea338e4f2
|
||||
- gcr.io/stackdriver-agents/stackdriver-logging-agent:0.6-1.6.0-1
|
||||
sizeBytes: 223242132
|
||||
- names:
|
||||
- nginx@sha256:35779791c05d119df4fe476db8f47c0bee5943c83eba5656a15fc046db48178b
|
||||
- nginx:1.10.1
|
||||
sizeBytes: 180708613
|
||||
- names:
|
||||
- registry.k8s.io/fluentd-elasticsearch@sha256:b8c94527b489fb61d3d81ce5ad7f3ddbb7be71e9620a3a36e2bede2f2e487d73
|
||||
- registry.k8s.io/fluentd-elasticsearch:v2.0.4
|
||||
sizeBytes: 135716379
|
||||
- names:
|
||||
- nginx@sha256:00be67d6ba53d5318cd91c57771530f5251cfbe028b7be2c4b70526f988cfc9f
|
||||
- nginx:latest
|
||||
sizeBytes: 109357355
|
||||
- names:
|
||||
- registry.k8s.io/kubernetes-dashboard-amd64@sha256:dc4026c1b595435ef5527ca598e1e9c4343076926d7d62b365c44831395adbd0
|
||||
- registry.k8s.io/kubernetes-dashboard-amd64:v1.8.3
|
||||
sizeBytes: 102319441
|
||||
- names:
|
||||
- gcr.io/google_containers/kube-proxy:v1.11.10-gke.5
|
||||
- registry.k8s.io/kube-proxy:v1.11.10-gke.5
|
||||
sizeBytes: 102279340
|
||||
- names:
|
||||
- registry.k8s.io/event-exporter@sha256:7f9cd7cb04d6959b0aa960727d04fa86759008048c785397b7b0d9dff0007516
|
||||
- registry.k8s.io/event-exporter:v0.2.3
|
||||
sizeBytes: 94171943
|
||||
- names:
|
||||
- registry.k8s.io/prometheus-to-sd@sha256:6c0c742475363d537ff059136e5d5e4ab1f512ee0fd9b7ca42ea48bc309d1662
|
||||
- registry.k8s.io/prometheus-to-sd:v0.3.1
|
||||
sizeBytes: 88077694
|
||||
- names:
|
||||
- registry.k8s.io/fluentd-gcp-scaler@sha256:a5ace7506d393c4ed65eb2cbb6312c64ab357fcea16dff76b9055bc6e498e5ff
|
||||
- registry.k8s.io/fluentd-gcp-scaler:0.5.1
|
||||
sizeBytes: 86637208
|
||||
- names:
|
||||
- registry.k8s.io/heapster-amd64@sha256:9fae0af136ce0cf4f88393b3670f7139ffc464692060c374d2ae748e13144521
|
||||
- registry.k8s.io/heapster-amd64:v1.6.0-beta.1
|
||||
sizeBytes: 76016169
|
||||
- names:
|
||||
- registry.k8s.io/ingress-glbc-amd64@sha256:31d36bbd9c44caffa135fc78cf0737266fcf25e3cf0cd1c2fcbfbc4f7309cc52
|
||||
- registry.k8s.io/ingress-glbc-amd64:v1.1.1
|
||||
sizeBytes: 67801919
|
||||
- names:
|
||||
- registry.k8s.io/kube-addon-manager@sha256:d53486c3a0b49ebee019932878dc44232735d5622a51dbbdcec7124199020d09
|
||||
- registry.k8s.io/kube-addon-manager:v8.7
|
||||
sizeBytes: 63322109
|
||||
- names:
|
||||
- nginx@sha256:4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315
|
||||
- nginx:1.10-alpine
|
||||
sizeBytes: 54042627
|
||||
- names:
|
||||
- registry.k8s.io/cpvpa-amd64@sha256:cfe7b0a11c9c8e18c87b1eb34fef9a7cbb8480a8da11fc2657f78dbf4739f869
|
||||
- registry.k8s.io/cpvpa-amd64:v0.6.0
|
||||
sizeBytes: 51785854
|
||||
- names:
|
||||
- registry.k8s.io/cluster-proportional-autoscaler-amd64@sha256:003f98d9f411ddfa6ff6d539196355e03ddd69fa4ed38c7ffb8fec6f729afe2d
|
||||
- registry.k8s.io/cluster-proportional-autoscaler-amd64:1.1.2-r2
|
||||
sizeBytes: 49648481
|
||||
- names:
|
||||
- registry.k8s.io/ip-masq-agent-amd64@sha256:1ffda57d87901bc01324c82ceb2145fe6a0448d3f0dd9cb65aa76a867cd62103
|
||||
- registry.k8s.io/ip-masq-agent-amd64:v2.1.1
|
||||
sizeBytes: 49612505
|
||||
- names:
|
||||
- registry.k8s.io/k8s-dns-kube-dns-amd64@sha256:b99fc3eee2a9f052f7eb4cc00f15eb12fc405fa41019baa2d6b79847ae7284a8
|
||||
- registry.k8s.io/k8s-dns-kube-dns-amd64:1.14.10
|
||||
sizeBytes: 49549457
|
||||
- names:
|
||||
- registry.k8s.io/rescheduler@sha256:156cfbfd05a5a815206fd2eeb6cbdaf1596d71ea4b415d3a6c43071dd7b99450
|
||||
- registry.k8s.io/rescheduler:v0.4.0
|
||||
sizeBytes: 48973149
|
||||
- names:
|
||||
- registry.k8s.io/event-exporter@sha256:16ca66e2b5dc7a1ce6a5aafcb21d0885828b75cdfc08135430480f7ad2364adc
|
||||
- registry.k8s.io/event-exporter:v0.2.4
|
||||
sizeBytes: 47261019
|
||||
- names:
|
||||
- registry.k8s.io/coredns@sha256:db2bf53126ed1c761d5a41f24a1b82a461c85f736ff6e90542e9522be4757848
|
||||
- registry.k8s.io/coredns:1.1.3
|
||||
sizeBytes: 45587362
|
||||
- names:
|
||||
- prom/prometheus@sha256:483f4c9d7733699ba79facca9f8bcce1cef1af43dfc3e7c5a1882aa85f53cb74
|
||||
- prom/prometheus:v1.1.3
|
||||
sizeBytes: 45493941
|
||||
nodeInfo:
|
||||
architecture: amd64
|
||||
bootID: a32eca78-4ad4-4b76-9252-f143d6c2ae61
|
||||
containerRuntimeVersion: docker://17.3.2
|
||||
kernelVersion: 4.14.127+
|
||||
kubeProxyVersion: v1.11.10-gke.5
|
||||
kubeletVersion: v1.11.10-gke.5
|
||||
machineID: 1739555e5b231057f0f9a0b5fa29511b
|
||||
operatingSystem: linux
|
||||
osImage: Container-Optimized OS from Google
|
||||
systemUUID: 1739555E-5B23-1057-F0F9-A0B5FA29511B
|
||||
volumesAttached:
|
||||
- devicePath: /dev/disk/by-id/b9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049
|
||||
name: kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049
|
||||
- devicePath: /dev/disk/by-id/b9772-pvc-8895a852-fd42-11e6-94d4-42010a800049
|
||||
name: kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049
|
||||
- devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049
|
||||
name: kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049
|
||||
- devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049
|
||||
name: kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049
|
||||
- devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049
|
||||
name: kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049
|
||||
- devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283
|
||||
name: kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283
|
||||
volumesInUse:
|
||||
- kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049
|
||||
- kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049
|
||||
- kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049
|
||||
- kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283
|
||||
- kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049
|
||||
- kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049
|
121
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/pod.yaml
generated
vendored
Normal file
121
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/pod.yaml
generated
vendored
Normal file
@ -0,0 +1,121 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
labels:
|
||||
app: some-app
|
||||
plugin1: some-value
|
||||
plugin2: some-value
|
||||
plugin3: some-value
|
||||
plugin4: some-value
|
||||
name: some-name
|
||||
namespace: default
|
||||
ownerReferences:
|
||||
- apiVersion: apps/v1
|
||||
blockOwnerDeletion: true
|
||||
controller: true
|
||||
kind: ReplicaSet
|
||||
name: some-name
|
||||
uid: 0a9d2b9e-779e-11e7-b422-42010a8001be
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- one
|
||||
- two
|
||||
- three
|
||||
- four
|
||||
- five
|
||||
- six
|
||||
- seven
|
||||
- eight
|
||||
- nine
|
||||
env:
|
||||
- name: VAR_3
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: some-other-key
|
||||
name: some-oher-name
|
||||
- name: VAR_2
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: other-key
|
||||
name: other-name
|
||||
- name: VAR_1
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: some-key
|
||||
name: some-name
|
||||
image: some-image-name
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: some-name
|
||||
resources:
|
||||
requests:
|
||||
cpu: '0'
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
volumeMounts:
|
||||
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
|
||||
name: default-token-hu5jz
|
||||
readOnly: true
|
||||
dnsPolicy: ClusterFirst
|
||||
nodeName: node-name
|
||||
priority: 0
|
||||
restartPolicy: Always
|
||||
schedulerName: default-scheduler
|
||||
securityContext: {}
|
||||
serviceAccount: default
|
||||
serviceAccountName: default
|
||||
terminationGracePeriodSeconds: 30
|
||||
tolerations:
|
||||
- effect: NoExecute
|
||||
key: node.kubernetes.io/not-ready
|
||||
operator: Exists
|
||||
tolerationSeconds: 300
|
||||
- effect: NoExecute
|
||||
key: node.kubernetes.io/unreachable
|
||||
operator: Exists
|
||||
tolerationSeconds: 300
|
||||
volumes:
|
||||
- name: default-token-hu5jz
|
||||
secret:
|
||||
defaultMode: 420
|
||||
secretName: default-token-hu5jz
|
||||
status:
|
||||
conditions:
|
||||
- lastProbeTime: null
|
||||
lastTransitionTime: '2019-07-08T09:31:18Z'
|
||||
status: 'True'
|
||||
type: Initialized
|
||||
- lastProbeTime: null
|
||||
lastTransitionTime: '2019-07-08T09:41:59Z'
|
||||
status: 'True'
|
||||
type: Ready
|
||||
- lastProbeTime: null
|
||||
lastTransitionTime: null
|
||||
status: 'True'
|
||||
type: ContainersReady
|
||||
- lastProbeTime: null
|
||||
lastTransitionTime: '2019-07-08T09:31:18Z'
|
||||
status: 'True'
|
||||
type: PodScheduled
|
||||
containerStatuses:
|
||||
- containerID: docker://885e82a1ed0b7356541bb410a0126921ac42439607c09875cd8097dd5d7b5376
|
||||
image: some-image-name
|
||||
imageID: docker-pullable://some-image-id
|
||||
lastState:
|
||||
terminated:
|
||||
containerID: docker://d57290f9e00fad626b20d2dd87a3cf69bbc22edae07985374f86a8b2b4e39565
|
||||
exitCode: 255
|
||||
finishedAt: '2019-07-08T09:39:09Z'
|
||||
reason: Error
|
||||
startedAt: '2019-07-08T09:38:54Z'
|
||||
name: name
|
||||
ready: true
|
||||
restartCount: 6
|
||||
state:
|
||||
running:
|
||||
startedAt: '2019-07-08T09:41:59Z'
|
||||
hostIP: 10.0.0.1
|
||||
phase: Running
|
||||
podIP: 10.0.0.1
|
||||
qosClass: BestEffort
|
||||
startTime: '2019-07-08T09:31:18Z'
|
176
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/finisher/finisher.go
generated
vendored
Normal file
176
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/finisher/finisher.go
generated
vendored
Normal file
@ -0,0 +1,176 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package finisher

import (
    "context"
    "fmt"
    "net/http"
    goruntime "runtime"
    "time"

    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apiserver/pkg/endpoints/metrics"
    "k8s.io/klog/v2"
)

// ResultFunc is a function that returns a rest result and can be run in a goroutine
type ResultFunc func() (runtime.Object, error)

// result stores the return values or panic from a ResultFunc function
type result struct {
    // object stores the response returned by the ResultFunc function
    object runtime.Object
    // err stores the error returned by the ResultFunc function
    err error
    // reason stores the reason from a panic thrown by the ResultFunc function
    reason interface{}
}

// Return processes the result returned by a ResultFunc function
func (r *result) Return() (runtime.Object, error) {
    switch {
    case r.reason != nil:
        // panic has higher precedence, the goroutine executing ResultFunc has panic'd,
        // so propagate a panic to the caller.
        panic(r.reason)
    case r.err != nil:
        return nil, r.err
    default:
        // if we are here, it means neither a panic, nor an error
        if status, ok := r.object.(*metav1.Status); ok {
            // An api.Status object with status != success is considered an "error",
            // which interrupts the normal response flow.
            if status.Status != metav1.StatusSuccess {
                return nil, errors.FromObject(status)
            }
        }
        return r.object, nil
    }
}

// PostTimeoutLoggerFunc is a function that can be used to log the result returned
// by a ResultFunc after the request had timed out.
// timedOutAt is the time the request had been timed out.
// r is the result returned by the child goroutine.
type PostTimeoutLoggerFunc func(timedOutAt time.Time, r *result)

const (
    // how much time the post-timeout receiver goroutine will wait for the sender
    // (child goroutine executing ResultFunc) to send a result after the request.
    // had timed out.
    postTimeoutLoggerWait = 5 * time.Minute
)

// FinishRequest makes a given ResultFunc asynchronous and handles errors returned by the response.
func FinishRequest(ctx context.Context, fn ResultFunc) (runtime.Object, error) {
    return finishRequest(ctx, fn, postTimeoutLoggerWait, logPostTimeoutResult)
}

func finishRequest(ctx context.Context, fn ResultFunc, postTimeoutWait time.Duration, postTimeoutLogger PostTimeoutLoggerFunc) (runtime.Object, error) {
    // the channel needs to be buffered since the post-timeout receiver goroutine
    // waits up to 5 minutes for the child goroutine to return.
    resultCh := make(chan *result, 1)

    go func() {
        result := &result{}

        // panics don't cross goroutine boundaries, so we have to handle ourselves
        defer func() {
            reason := recover()
            if reason != nil {
                // do not wrap the sentinel ErrAbortHandler panic value
                if reason != http.ErrAbortHandler {
                    // Same as stdlib http server code. Manually allocate stack
                    // trace buffer size to prevent excessively large logs
                    const size = 64 << 10
                    buf := make([]byte, size)
                    buf = buf[:goruntime.Stack(buf, false)]
                    reason = fmt.Sprintf("%v\n%s", reason, buf)
                }

                // store the panic reason into the result.
                result.reason = reason
            }

            // Propagate the result to the parent goroutine
            resultCh <- result
        }()

        if object, err := fn(); err != nil {
            result.err = err
        } else {
            result.object = object
        }
    }()

    select {
    case result := <-resultCh:
        return result.Return()
    case <-ctx.Done():
        // we are going to send a timeout response to the caller, but the asynchronous goroutine
        // (sender) is still executing the ResultFunc function.
        // kick off a goroutine (receiver) here to wait for the sender (goroutine executing ResultFunc)
        // to send the result and then log details of the result.
        defer func() {
            go func() {
                timedOutAt := time.Now()

                var result *result
                select {
                case result = <-resultCh:
                case <-time.After(postTimeoutWait):
                    // we will not wait forever, if we are here then we know that some sender
                    // goroutines are taking longer than postTimeoutWait.
                }
                postTimeoutLogger(timedOutAt, result)
            }()
        }()
        return nil, errors.NewTimeoutError(fmt.Sprintf("request did not complete within requested timeout - %s", ctx.Err()), 0)
    }
}

// logPostTimeoutResult logs a panic or an error from the result that the sender (goroutine that is
// executing the ResultFunc function) has sent to the receiver after the request had timed out.
// timedOutAt is the time the request had been timed out
func logPostTimeoutResult(timedOutAt time.Time, r *result) {
    if r == nil {
        // we are using r == nil to indicate that the child goroutine never returned a result.
        metrics.RecordRequestPostTimeout(metrics.PostTimeoutSourceRestHandler, metrics.PostTimeoutHandlerPending)
        klog.Errorf("FinishRequest: post-timeout activity, waited for %s, child goroutine has not returned yet", time.Since(timedOutAt))
        return
    }

    var status string
    switch {
    case r.reason != nil:
        // a non empty reason inside a result object indicates that there was a panic.
        status = metrics.PostTimeoutHandlerPanic
    case r.err != nil:
        status = metrics.PostTimeoutHandlerError
    default:
        status = metrics.PostTimeoutHandlerOK
    }

    metrics.RecordRequestPostTimeout(metrics.PostTimeoutSourceRestHandler, status)
    err := fmt.Errorf("FinishRequest: post-timeout activity - time-elapsed: %s, panicked: %t, err: %v, panic-reason: %v",
        time.Since(timedOutAt), r.reason != nil, r.err, r.reason)
    utilruntime.HandleError(err)
}
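Editor's note, not part of the vendored file above: a minimal sketch of how a handler-side caller might drive finisher.FinishRequest, assuming the package is imported from this vendor tree. The createWithTimeout wrapper, the 30-second bound, and the stubbed storage call are illustrative only.

package main

import (
    "context"
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apiserver/pkg/endpoints/handlers/finisher"
)

// createWithTimeout bounds a storage call and lets FinishRequest translate
// panics, errors and context expiry into the behaviour documented above.
func createWithTimeout(ctx context.Context, create func(context.Context) (runtime.Object, error)) (runtime.Object, error) {
    ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
    defer cancel()

    // The closure runs in a child goroutine; a panic inside it is recovered
    // and re-raised in the caller, and a late result is logged post-timeout.
    return finisher.FinishRequest(ctx, func() (runtime.Object, error) {
        return create(ctx)
    })
}

func main() {
    obj, err := createWithTimeout(context.Background(), func(ctx context.Context) (runtime.Object, error) {
        // Stand-in for a real rest.Storage call.
        return nil, fmt.Errorf("stub storage call")
    })
    fmt.Println(obj, err)
}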
341
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go
generated
vendored
Normal file
@ -0,0 +1,341 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
|
||||
metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme"
|
||||
metainternalversionvalidation "k8s.io/apimachinery/pkg/apis/meta/internalversion/validation"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
|
||||
"k8s.io/apiserver/pkg/endpoints/metrics"
|
||||
"k8s.io/apiserver/pkg/endpoints/request"
|
||||
"k8s.io/apiserver/pkg/features"
|
||||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
"k8s.io/apiserver/pkg/server/routine"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/component-base/tracing"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
// getterFunc performs a get request with the given context and object name. The request
|
||||
// may be used to deserialize an options object to pass to the getter.
|
||||
type getterFunc func(ctx context.Context, name string, req *http.Request) (runtime.Object, error)
|
||||
|
||||
// getResourceHandler is an HTTP handler function for get requests. It delegates to the
|
||||
// passed-in getterFunc to perform the actual get.
|
||||
func getResourceHandler(scope *RequestScope, getter getterFunc) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, req *http.Request) {
|
||||
ctx := req.Context()
|
||||
ctx, span := tracing.Start(ctx, "Get", traceFields(req)...)
|
||||
req = req.WithContext(ctx)
|
||||
defer span.End(500 * time.Millisecond)
|
||||
|
||||
namespace, name, err := scope.Namer.Name(req)
|
||||
if err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
ctx = request.WithNamespace(ctx, namespace)
|
||||
|
||||
outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)
|
||||
if err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
|
||||
result, err := getter(ctx, name, req)
|
||||
if err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
|
||||
span.AddEvent("About to write a response")
|
||||
defer span.AddEvent("Writing http response done")
|
||||
transformResponseObject(ctx, scope, req, w, http.StatusOK, outputMediaType, result)
|
||||
}
|
||||
}
|
||||
|
||||
// GetResource returns a function that handles retrieving a single resource from a rest.Storage object.
|
||||
func GetResource(r rest.Getter, scope *RequestScope) http.HandlerFunc {
|
||||
return getResourceHandler(scope,
|
||||
func(ctx context.Context, name string, req *http.Request) (runtime.Object, error) {
|
||||
// check for export
|
||||
options := metav1.GetOptions{}
|
||||
if values := req.URL.Query(); len(values) > 0 {
|
||||
if len(values["export"]) > 0 {
|
||||
exportBool := true
|
||||
exportStrings := values["export"]
|
||||
err := runtime.Convert_Slice_string_To_bool(&exportStrings, &exportBool, nil)
|
||||
if err != nil {
|
||||
return nil, errors.NewBadRequest(fmt.Sprintf("the export parameter cannot be parsed: %v", err))
|
||||
}
|
||||
if exportBool {
|
||||
return nil, errors.NewBadRequest("the export parameter, deprecated since v1.14, is no longer supported")
|
||||
}
|
||||
}
|
||||
if err := metainternalversionscheme.ParameterCodec.DecodeParameters(values, scope.MetaGroupVersion, &options); err != nil {
|
||||
err = errors.NewBadRequest(err.Error())
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
tracing.SpanFromContext(ctx).AddEvent("About to Get from storage")
|
||||
return r.Get(ctx, name, &options)
|
||||
})
|
||||
}
|
||||
|
||||
// GetResourceWithOptions returns a function that handles retrieving a single resource from a rest.Storage object.
|
||||
func GetResourceWithOptions(r rest.GetterWithOptions, scope *RequestScope, isSubresource bool) http.HandlerFunc {
|
||||
return getResourceHandler(scope,
|
||||
func(ctx context.Context, name string, req *http.Request) (runtime.Object, error) {
|
||||
opts, subpath, subpathKey := r.NewGetOptions()
|
||||
span := tracing.SpanFromContext(ctx)
|
||||
span.AddEvent("About to process Get options")
|
||||
if err := getRequestOptions(req, scope, opts, subpath, subpathKey, isSubresource); err != nil {
|
||||
err = errors.NewBadRequest(err.Error())
|
||||
return nil, err
|
||||
}
|
||||
span.AddEvent("About to Get from storage")
|
||||
return r.Get(ctx, name, opts)
|
||||
})
|
||||
}
|
||||
|
||||
// getRequestOptions parses out options and can include path information. The path information shouldn't include the subresource.
|
||||
func getRequestOptions(req *http.Request, scope *RequestScope, into runtime.Object, subpath bool, subpathKey string, isSubresource bool) error {
|
||||
if into == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
query := req.URL.Query()
|
||||
if subpath {
|
||||
newQuery := make(url.Values)
|
||||
for k, v := range query {
|
||||
newQuery[k] = v
|
||||
}
|
||||
|
||||
ctx := req.Context()
|
||||
requestInfo, _ := request.RequestInfoFrom(ctx)
|
||||
startingIndex := 2
|
||||
if isSubresource {
|
||||
startingIndex = 3
|
||||
}
|
||||
|
||||
p := strings.Join(requestInfo.Parts[startingIndex:], "/")
|
||||
|
||||
// ensure non-empty subpaths correctly reflect a leading slash
|
||||
if len(p) > 0 && !strings.HasPrefix(p, "/") {
|
||||
p = "/" + p
|
||||
}
|
||||
|
||||
// ensure subpaths correctly reflect the presence of a trailing slash on the original request
|
||||
if strings.HasSuffix(requestInfo.Path, "/") && !strings.HasSuffix(p, "/") {
|
||||
p += "/"
|
||||
}
|
||||
|
||||
newQuery[subpathKey] = []string{p}
|
||||
query = newQuery
|
||||
}
|
||||
return scope.ParameterCodec.DecodeParameters(query, scope.Kind.GroupVersion(), into)
|
||||
}
|
||||
|
||||
func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatch bool, minRequestTimeout time.Duration) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, req *http.Request) {
|
||||
ctx := req.Context()
|
||||
// For performance tracking purposes.
|
||||
ctx, span := tracing.Start(ctx, "List", traceFields(req)...)
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
namespace, err := scope.Namer.Namespace(req)
|
||||
if err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
|
||||
// Watches for single objects are routed to this function.
|
||||
// Treat a name parameter the same as a field selector entry.
|
||||
hasName := true
|
||||
_, name, err := scope.Namer.Name(req)
|
||||
if err != nil {
|
||||
hasName = false
|
||||
}
|
||||
ctx = request.WithNamespace(ctx, namespace)
|
||||
|
||||
opts := metainternalversion.ListOptions{}
|
||||
if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, &opts); err != nil {
|
||||
err = errors.NewBadRequest(err.Error())
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
|
||||
metainternalversion.SetListOptionsDefaults(&opts, utilfeature.DefaultFeatureGate.Enabled(features.WatchList))
|
||||
if errs := metainternalversionvalidation.ValidateListOptions(&opts, utilfeature.DefaultFeatureGate.Enabled(features.WatchList)); len(errs) > 0 {
|
||||
err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "ListOptions"}, "", errs)
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
|
||||
var restrictions negotiation.EndpointRestrictions
|
||||
restrictions = scope
|
||||
if isListWatchRequest(opts) {
|
||||
restrictions = &watchListEndpointRestrictions{scope}
|
||||
}
|
||||
outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, restrictions)
|
||||
if err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
|
||||
// transform fields
|
||||
// TODO: DecodeParametersInto should do this.
|
||||
if opts.FieldSelector != nil {
|
||||
fn := func(label, value string) (newLabel, newValue string, err error) {
|
||||
return scope.Convertor.ConvertFieldLabel(scope.Kind, label, value)
|
||||
}
|
||||
if opts.FieldSelector, err = opts.FieldSelector.Transform(fn); err != nil {
|
||||
// TODO: allow bad request to set field causes based on query parameters
|
||||
err = errors.NewBadRequest(err.Error())
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if hasName {
|
||||
// metadata.name is the canonical internal name.
|
||||
// SelectionPredicate will notice that this is a request for
|
||||
// a single object and optimize the storage query accordingly.
|
||||
nameSelector := fields.OneTermEqualSelector("metadata.name", name)
|
||||
|
||||
// Note that fieldSelector setting explicitly the "metadata.name"
|
||||
// will result in reaching this branch (as the value of that field
|
||||
// is propagated to requestInfo as the name parameter.
|
||||
// That said, the allowed field selectors in this branch are:
|
||||
// nil, fields.Everything and field selector matching metadata.name
|
||||
// for our name.
|
||||
if opts.FieldSelector != nil && !opts.FieldSelector.Empty() {
|
||||
selectedName, ok := opts.FieldSelector.RequiresExactMatch("metadata.name")
|
||||
if !ok || name != selectedName {
|
||||
scope.err(errors.NewBadRequest("fieldSelector metadata.name doesn't match requested name"), w, req)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
opts.FieldSelector = nameSelector
|
||||
}
|
||||
}
|
||||
|
||||
if opts.Watch || forceWatch {
|
||||
if rw == nil {
|
||||
scope.err(errors.NewMethodNotSupported(scope.Resource.GroupResource(), "watch"), w, req)
|
||||
return
|
||||
}
|
||||
// TODO: Currently we explicitly ignore ?timeout= and use only ?timeoutSeconds=.
|
||||
timeout := time.Duration(0)
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
if timeout == 0 && minRequestTimeout > 0 {
|
||||
timeout = time.Duration(float64(minRequestTimeout) * (rand.Float64() + 1.0))
|
||||
}
|
||||
|
||||
var emptyVersionedList runtime.Object
|
||||
if isListWatchRequest(opts) {
|
||||
emptyVersionedList, err = scope.Convertor.ConvertToVersion(r.NewList(), scope.Kind.GroupVersion())
|
||||
if err != nil {
|
||||
scope.err(errors.NewInternalError(err), w, req)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
klog.V(3).InfoS("Starting watch", "path", req.URL.Path, "resourceVersion", opts.ResourceVersion, "labels", opts.LabelSelector, "fields", opts.FieldSelector, "timeout", timeout)
|
||||
ctx, cancel := context.WithTimeout(ctx, timeout)
|
||||
defer func() { cancel() }()
|
||||
watcher, err := rw.Watch(ctx, &opts)
|
||||
if err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
handler, err := serveWatchHandler(watcher, scope, outputMediaType, req, w, timeout, metrics.CleanListScope(ctx, &opts), emptyVersionedList)
|
||||
if err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
// Invalidate cancel() to defer until serve() is complete.
|
||||
deferredCancel := cancel
|
||||
cancel = func() {}
|
||||
|
||||
serve := func() {
|
||||
defer deferredCancel()
|
||||
requestInfo, _ := request.RequestInfoFrom(ctx)
|
||||
metrics.RecordLongRunning(req, requestInfo, metrics.APIServerComponent, func() {
|
||||
defer watcher.Stop()
|
||||
handler.ServeHTTP(w, req)
|
||||
})
|
||||
}
|
||||
|
||||
// Run watch serving in a separate goroutine to allow freeing current stack memory
|
||||
t := routine.TaskFrom(req.Context())
|
||||
if t != nil {
|
||||
t.Func = serve
|
||||
} else {
|
||||
serve()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Log only long List requests (ignore Watch).
|
||||
defer span.End(500 * time.Millisecond)
|
||||
span.AddEvent("About to List from storage")
|
||||
result, err := r.List(ctx, &opts)
|
||||
if err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
span.AddEvent("Listing from storage done")
|
||||
defer span.AddEvent("Writing http response done", attribute.Int("count", meta.LenList(result)))
|
||||
transformResponseObject(ctx, scope, req, w, http.StatusOK, outputMediaType, result)
|
||||
}
|
||||
}
|
||||
|
||||
type watchListEndpointRestrictions struct {
|
||||
negotiation.EndpointRestrictions
|
||||
}
|
||||
|
||||
func (e *watchListEndpointRestrictions) AllowsMediaTypeTransform(mimeType, mimeSubType string, target *schema.GroupVersionKind) bool {
|
||||
if target != nil && target.Kind == "Table" {
|
||||
return false
|
||||
}
|
||||
return e.EndpointRestrictions.AllowsMediaTypeTransform(mimeType, mimeSubType, target)
|
||||
}
|
||||
|
||||
func isListWatchRequest(opts metainternalversion.ListOptions) bool {
|
||||
return utilfeature.DefaultFeatureGate.Enabled(features.WatchList) && ptr.Deref(opts.SendInitialEvents, false) && opts.AllowWatchBookmarks
|
||||
}
|
231
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go
generated
vendored
Normal file
@ -0,0 +1,231 @@
|
||||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
utilnet "k8s.io/apimachinery/pkg/util/net"
|
||||
"k8s.io/apiserver/pkg/audit"
|
||||
"k8s.io/apiserver/pkg/endpoints/metrics"
|
||||
apirequest "k8s.io/apiserver/pkg/endpoints/request"
|
||||
)
|
||||
|
||||
const (
|
||||
maxUserAgentLength = 1024
|
||||
userAgentTruncateSuffix = "...TRUNCATED"
|
||||
)
|
||||
|
||||
// lazyTruncatedUserAgent implements String() string and it will
|
||||
// return user-agent which may be truncated.
|
||||
type lazyTruncatedUserAgent struct {
|
||||
req *http.Request
|
||||
}
|
||||
|
||||
func (lazy *lazyTruncatedUserAgent) String() string {
|
||||
ua := "unknown"
|
||||
if lazy.req != nil {
|
||||
ua = utilnet.GetHTTPClient(lazy.req)
|
||||
if len(ua) > maxUserAgentLength {
|
||||
ua = ua[:maxUserAgentLength] + userAgentTruncateSuffix
|
||||
}
|
||||
}
|
||||
return ua
|
||||
}
|
||||
|
||||
// LazyClientIP implements String() string and it will
|
||||
// calls GetClientIP() lazily only when required.
|
||||
type lazyClientIP struct {
|
||||
req *http.Request
|
||||
}
|
||||
|
||||
func (lazy *lazyClientIP) String() string {
|
||||
if lazy.req != nil {
|
||||
if ip := utilnet.GetClientIP(lazy.req); ip != nil {
|
||||
return ip.String()
|
||||
}
|
||||
}
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
// lazyAccept implements String() string and it will
|
||||
// calls http.Request Header.Get() lazily only when required.
|
||||
type lazyAccept struct {
|
||||
req *http.Request
|
||||
}
|
||||
|
||||
func (lazy *lazyAccept) String() string {
|
||||
if lazy.req != nil {
|
||||
accept := lazy.req.Header.Get("Accept")
|
||||
return accept
|
||||
}
|
||||
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
// lazyAPIGroup implements String() string and it will
|
||||
// lazily get Group from request info.
|
||||
type lazyAPIGroup struct {
|
||||
req *http.Request
|
||||
}
|
||||
|
||||
func (lazy *lazyAPIGroup) String() string {
|
||||
if lazy.req != nil {
|
||||
ctx := lazy.req.Context()
|
||||
requestInfo, ok := apirequest.RequestInfoFrom(ctx)
|
||||
if ok {
|
||||
return requestInfo.APIGroup
|
||||
}
|
||||
}
|
||||
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
// lazyAPIVersion implements String() string and it will
|
||||
// lazily get Group from request info.
|
||||
type lazyAPIVersion struct {
|
||||
req *http.Request
|
||||
}
|
||||
|
||||
func (lazy *lazyAPIVersion) String() string {
|
||||
if lazy.req != nil {
|
||||
ctx := lazy.req.Context()
|
||||
requestInfo, ok := apirequest.RequestInfoFrom(ctx)
|
||||
if ok {
|
||||
return requestInfo.APIVersion
|
||||
}
|
||||
}
|
||||
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
// lazyName implements String() string and it will
|
||||
// lazily get Group from request info.
|
||||
type lazyName struct {
|
||||
req *http.Request
|
||||
}
|
||||
|
||||
func (lazy *lazyName) String() string {
|
||||
if lazy.req != nil {
|
||||
ctx := lazy.req.Context()
|
||||
requestInfo, ok := apirequest.RequestInfoFrom(ctx)
|
||||
if ok {
|
||||
return requestInfo.Name
|
||||
}
|
||||
}
|
||||
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
// lazySubresource implements String() string and it will
|
||||
// lazily get Group from request info.
|
||||
type lazySubresource struct {
|
||||
req *http.Request
|
||||
}
|
||||
|
||||
func (lazy *lazySubresource) String() string {
|
||||
if lazy.req != nil {
|
||||
ctx := lazy.req.Context()
|
||||
requestInfo, ok := apirequest.RequestInfoFrom(ctx)
|
||||
if ok {
|
||||
return requestInfo.Subresource
|
||||
}
|
||||
}
|
||||
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
// lazyNamespace implements String() string and it will
|
||||
// lazily get Group from request info.
|
||||
type lazyNamespace struct {
|
||||
req *http.Request
|
||||
}
|
||||
|
||||
func (lazy *lazyNamespace) String() string {
|
||||
if lazy.req != nil {
|
||||
ctx := lazy.req.Context()
|
||||
requestInfo, ok := apirequest.RequestInfoFrom(ctx)
|
||||
if ok {
|
||||
return requestInfo.Namespace
|
||||
}
|
||||
}
|
||||
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
// lazyAuditID implements Stringer interface to lazily retrieve
|
||||
// the audit ID associated with the request.
|
||||
type lazyAuditID struct {
|
||||
req *http.Request
|
||||
}
|
||||
|
||||
func (lazy *lazyAuditID) String() string {
|
||||
if lazy.req != nil {
|
||||
return audit.GetAuditIDTruncated(lazy.req.Context())
|
||||
}
|
||||
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
// lazyVerb implements String() string and it will
|
||||
// lazily get normalized Verb
|
||||
type lazyVerb struct {
|
||||
req *http.Request
|
||||
}
|
||||
|
||||
func (lazy *lazyVerb) String() string {
|
||||
if lazy.req == nil {
|
||||
return "unknown"
|
||||
}
|
||||
return metrics.NormalizedVerb(lazy.req)
|
||||
}
|
||||
|
||||
// lazyResource implements String() string and it will
|
||||
// lazily get Resource from request info
|
||||
type lazyResource struct {
|
||||
req *http.Request
|
||||
}
|
||||
|
||||
func (lazy *lazyResource) String() string {
|
||||
if lazy.req != nil {
|
||||
ctx := lazy.req.Context()
|
||||
requestInfo, ok := apirequest.RequestInfoFrom(ctx)
|
||||
if ok {
|
||||
return requestInfo.Resource
|
||||
}
|
||||
}
|
||||
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
// lazyScope implements String() string and it will
|
||||
// lazily get Scope from request info
|
||||
type lazyScope struct {
|
||||
req *http.Request
|
||||
}
|
||||
|
||||
func (lazy *lazyScope) String() string {
|
||||
if lazy.req != nil {
|
||||
ctx := lazy.req.Context()
|
||||
requestInfo, ok := apirequest.RequestInfoFrom(ctx)
|
||||
if ok {
|
||||
return metrics.CleanScope(requestInfo)
|
||||
}
|
||||
}
|
||||
|
||||
return "unknown"
|
||||
}
|
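Editor's note, not part of the vendored file above: helpers.go wraps request fields in unexported lazy fmt.Stringer types so that the (possibly expensive) formatting only happens when a log line is actually emitted. A sketch of the same pattern with a local, hypothetical lazyUserAgent type:

package main

import (
    "net/http"
    "net/http/httptest"

    "k8s.io/klog/v2"
)

// lazyUserAgent defers reading (and truncating) the User-Agent header until
// the log line is actually rendered.
type lazyUserAgent struct {
    req *http.Request
}

func (l lazyUserAgent) String() string {
    if l.req == nil {
        return "unknown"
    }
    ua := l.req.UserAgent()
    if len(ua) > 1024 {
        ua = ua[:1024] + "...TRUNCATED"
    }
    return ua
}

func main() {
    req := httptest.NewRequest(http.MethodGet, "/healthz", nil)
    req.Header.Set("User-Agent", "curl/8.5.0")

    // String() only runs if this verbosity level is enabled.
    klog.V(4).InfoS("handling request", "userAgent", lazyUserAgent{req: req})
}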
4
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/metrics/OWNERS
generated
vendored
Normal file
@ -0,0 +1,4 @@
# See the OWNERS docs at https://go.k8s.io/owners

approvers:
- logicalhan
63
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/metrics/metrics.go
generated
vendored
Normal file
@ -0,0 +1,63 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
    "context"
    "sync"

    "k8s.io/component-base/metrics"
    "k8s.io/component-base/metrics/legacyregistry"
)

type RequestBodyVerb string

const (
    Patch            RequestBodyVerb = "patch"
    Delete           RequestBodyVerb = "delete"
    Update           RequestBodyVerb = "update"
    Create           RequestBodyVerb = "create"
    DeleteCollection RequestBodyVerb = "delete_collection"
)

var (
    RequestBodySizes = metrics.NewHistogramVec(
        &metrics.HistogramOpts{
            Subsystem: "apiserver",
            Name:      "request_body_size_bytes",
            Help:      "Apiserver request body size in bytes broken out by resource and verb.",
            // we use 0.05 KB as the smallest bucket with 0.1 KB increments up to the
            // apiserver limit.
            Buckets:        metrics.LinearBuckets(50000, 100000, 31),
            StabilityLevel: metrics.ALPHA,
        },
        []string{"resource", "verb"},
    )
)

var registerMetrics sync.Once

// Register all metrics.
func Register() {
    registerMetrics.Do(func() {
        legacyregistry.MustRegister(RequestBodySizes)
    })
}

func RecordRequestBodySize(ctx context.Context, resource string, verb RequestBodyVerb, size int) {
    RequestBodySizes.WithContext(ctx).WithLabelValues(resource, string(verb)).Observe(float64(size))
}
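Editor's note, not part of the vendored file above: a small sketch of how callers are expected to use this metrics package, i.e. register once and then record observations. The resource name and body size below are made up for illustration.

package main

import (
    "context"

    requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics"
)

func main() {
    // Register is safe to call more than once; sync.Once guards registration.
    requestmetrics.Register()

    // Record a 2 KiB patch body for the "pods" resource.
    requestmetrics.RecordRequestBodySize(context.Background(), "pods", requestmetrics.Patch, 2048)
}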
85
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/namer.go
generated
vendored
Normal file
@ -0,0 +1,85 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package handlers

import (
    "fmt"
    "net/http"

    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apiserver/pkg/endpoints/request"
)

// ScopeNamer handles accessing names from requests and objects
type ScopeNamer interface {
    // Namespace returns the appropriate namespace value from the request (may be empty) or an
    // error.
    Namespace(req *http.Request) (namespace string, err error)
    // Name returns the name from the request, and an optional namespace value if this is a namespace
    // scoped call. An error is returned if the name is not available.
    Name(req *http.Request) (namespace, name string, err error)
    // ObjectName returns the namespace and name from an object if they exist, or an error if the object
    // does not support names.
    ObjectName(obj runtime.Object) (namespace, name string, err error)
}

type ContextBasedNaming struct {
    Namer         runtime.Namer
    ClusterScoped bool
}

// ContextBasedNaming implements ScopeNamer
var _ ScopeNamer = ContextBasedNaming{}

func (n ContextBasedNaming) Namespace(req *http.Request) (namespace string, err error) {
    requestInfo, ok := request.RequestInfoFrom(req.Context())
    if !ok {
        return "", fmt.Errorf("missing requestInfo")
    }
    return requestInfo.Namespace, nil
}

func (n ContextBasedNaming) Name(req *http.Request) (namespace, name string, err error) {
    requestInfo, ok := request.RequestInfoFrom(req.Context())
    if !ok {
        return "", "", fmt.Errorf("missing requestInfo")
    }

    if len(requestInfo.Name) == 0 {
        return "", "", errEmptyName
    }
    return requestInfo.Namespace, requestInfo.Name, nil
}

func (n ContextBasedNaming) ObjectName(obj runtime.Object) (namespace, name string, err error) {
    name, err = n.Namer.Name(obj)
    if err != nil {
        return "", "", err
    }
    if len(name) == 0 {
        return "", "", errEmptyName
    }
    namespace, err = n.Namer.Namespace(obj)
    if err != nil {
        return "", "", err
    }
    return namespace, name, err
}

// errEmptyName is returned when API requests do not fill the name section of the path.
var errEmptyName = errors.NewBadRequest("name must be provided")
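Editor's note, not part of the vendored file above: ContextBasedNaming only works when the request-info filter has already stored a RequestInfo on the request context. A sketch that populates that context by hand for illustration; the path, namespace and pod name are made up.

package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"

    "k8s.io/apiserver/pkg/endpoints/handlers"
    "k8s.io/apiserver/pkg/endpoints/request"
)

func main() {
    req := httptest.NewRequest(http.MethodGet, "/api/v1/namespaces/demo/pods/web-0", nil)

    // Normally the requestinfo filter populates this; done by hand here.
    info := &request.RequestInfo{Verb: "get", Resource: "pods", Namespace: "demo", Name: "web-0"}
    req = req.WithContext(request.WithRequestInfo(req.Context(), info))

    namer := handlers.ContextBasedNaming{ClusterScoped: false}
    ns, name, err := namer.Name(req)
    fmt.Println(ns, name, err) // demo web-0 <nil>
}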
18
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/doc.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package negotiation contains media type negotiation logic.
package negotiation // import "k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
99
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/errors.go
generated
vendored
Normal file
@ -0,0 +1,99 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package negotiation
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// errNotAcceptable indicates Accept negotiation has failed
|
||||
type errNotAcceptable struct {
|
||||
accepted []string
|
||||
}
|
||||
|
||||
// NewNotAcceptableError returns an error of NotAcceptable which contains specified string
|
||||
func NewNotAcceptableError(accepted []string) error {
|
||||
return errNotAcceptable{accepted}
|
||||
}
|
||||
|
||||
func (e errNotAcceptable) Error() string {
|
||||
return fmt.Sprintf("only the following media types are accepted: %v", strings.Join(e.accepted, ", "))
|
||||
}
|
||||
|
||||
func (e errNotAcceptable) Status() metav1.Status {
|
||||
return metav1.Status{
|
||||
Status: metav1.StatusFailure,
|
||||
Code: http.StatusNotAcceptable,
|
||||
Reason: metav1.StatusReasonNotAcceptable,
|
||||
Message: e.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
// errNotAcceptableConversion indicates Accept negotiation has failed specifically
|
||||
// for a conversion to a known type.
|
||||
type errNotAcceptableConversion struct {
|
||||
target string
|
||||
accepted []string
|
||||
}
|
||||
|
||||
// NewNotAcceptableConversionError returns an error indicating that the desired
|
||||
// API transformation to the target group version kind string is not accepted and
|
||||
// only the listed mime types are allowed. This is temporary while Table does not
|
||||
// yet support protobuf encoding.
|
||||
func NewNotAcceptableConversionError(target string, accepted []string) error {
|
||||
return errNotAcceptableConversion{target, accepted}
|
||||
}
|
||||
|
||||
func (e errNotAcceptableConversion) Error() string {
|
||||
return fmt.Sprintf("only the following media types are accepted when converting to %s: %v", e.target, strings.Join(e.accepted, ", "))
|
||||
}
|
||||
|
||||
func (e errNotAcceptableConversion) Status() metav1.Status {
|
||||
return metav1.Status{
|
||||
Status: metav1.StatusFailure,
|
||||
Code: http.StatusNotAcceptable,
|
||||
Reason: metav1.StatusReasonNotAcceptable,
|
||||
Message: e.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
// errUnsupportedMediaType indicates Content-Type is not recognized
|
||||
type errUnsupportedMediaType struct {
|
||||
accepted []string
|
||||
}
|
||||
|
||||
// NewUnsupportedMediaTypeError returns an error of UnsupportedMediaType which contains specified string
|
||||
func NewUnsupportedMediaTypeError(accepted []string) error {
|
||||
return errUnsupportedMediaType{accepted}
|
||||
}
|
||||
|
||||
func (e errUnsupportedMediaType) Error() string {
|
||||
return fmt.Sprintf("the body of the request was in an unknown format - accepted media types include: %v", strings.Join(e.accepted, ", "))
|
||||
}
|
||||
|
||||
func (e errUnsupportedMediaType) Status() metav1.Status {
|
||||
return metav1.Status{
|
||||
Status: metav1.StatusFailure,
|
||||
Code: http.StatusUnsupportedMediaType,
|
||||
Reason: metav1.StatusReasonUnsupportedMediaType,
|
||||
Message: e.Error(),
|
||||
}
|
||||
}
|
269
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go
generated
vendored
Normal file
@ -0,0 +1,269 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package negotiation
|
||||
|
||||
import (
|
||||
"mime"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/munnerz/goautoneg"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apiserver/pkg/features"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
)
|
||||
|
||||
// MediaTypesForSerializer returns a list of media and stream media types for the server.
|
||||
func MediaTypesForSerializer(ns runtime.NegotiatedSerializer) (mediaTypes, streamMediaTypes []string) {
|
||||
for _, info := range ns.SupportedMediaTypes() {
|
||||
mediaTypes = append(mediaTypes, info.MediaType)
|
||||
if info.StreamSerializer != nil {
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) && info.MediaType == runtime.ContentTypeCBOR {
|
||||
streamMediaTypes = append(streamMediaTypes, runtime.ContentTypeCBORSequence)
|
||||
continue
|
||||
}
|
||||
// stream=watch is the existing mime-type parameter for watch
|
||||
streamMediaTypes = append(streamMediaTypes, info.MediaType+";stream=watch")
|
||||
}
|
||||
}
|
||||
return mediaTypes, streamMediaTypes
|
||||
}
|
||||
|
||||
// NegotiateOutputMediaType negotiates the output structured media type and a serializer, or
|
||||
// returns an error.
|
||||
func NegotiateOutputMediaType(req *http.Request, ns runtime.NegotiatedSerializer, restrictions EndpointRestrictions) (MediaTypeOptions, runtime.SerializerInfo, error) {
|
||||
mediaType, ok := NegotiateMediaTypeOptions(req.Header.Get("Accept"), ns.SupportedMediaTypes(), restrictions)
|
||||
if !ok {
|
||||
supported, _ := MediaTypesForSerializer(ns)
|
||||
return mediaType, runtime.SerializerInfo{}, NewNotAcceptableError(supported)
|
||||
}
|
||||
// TODO: move into resthandler
|
||||
info := mediaType.Accepted
|
||||
if (mediaType.Pretty || isPrettyPrint(req)) && info.PrettySerializer != nil {
|
||||
info.Serializer = info.PrettySerializer
|
||||
}
|
||||
return mediaType, info, nil
|
||||
}
|
||||
|
||||
// NegotiateOutputMediaTypeStream returns a stream serializer for the given request.
|
||||
func NegotiateOutputMediaTypeStream(req *http.Request, ns runtime.NegotiatedSerializer, restrictions EndpointRestrictions) (runtime.SerializerInfo, error) {
|
||||
mediaType, ok := NegotiateMediaTypeOptions(req.Header.Get("Accept"), ns.SupportedMediaTypes(), restrictions)
|
||||
if !ok || mediaType.Accepted.StreamSerializer == nil {
|
||||
_, supported := MediaTypesForSerializer(ns)
|
||||
return runtime.SerializerInfo{}, NewNotAcceptableError(supported)
|
||||
}
|
||||
return mediaType.Accepted, nil
|
||||
}
|
||||
|
||||
// NegotiateInputSerializer returns the input serializer for the provided request.
|
||||
func NegotiateInputSerializer(req *http.Request, streaming bool, ns runtime.NegotiatedSerializer) (runtime.SerializerInfo, error) {
|
||||
mediaType := req.Header.Get("Content-Type")
|
||||
return NegotiateInputSerializerForMediaType(mediaType, streaming, ns)
|
||||
}
|
||||
|
||||
// NegotiateInputSerializerForMediaType returns the appropriate serializer for the given media type or an error.
|
||||
func NegotiateInputSerializerForMediaType(mediaType string, streaming bool, ns runtime.NegotiatedSerializer) (runtime.SerializerInfo, error) {
|
||||
mediaTypes := ns.SupportedMediaTypes()
|
||||
if len(mediaType) == 0 {
|
||||
mediaType = mediaTypes[0].MediaType
|
||||
}
|
||||
if mediaType, _, err := mime.ParseMediaType(mediaType); err == nil {
|
||||
if info, ok := runtime.SerializerInfoForMediaType(mediaTypes, mediaType); ok {
|
||||
return info, nil
|
||||
}
|
||||
}
|
||||
|
||||
supported, streamingSupported := MediaTypesForSerializer(ns)
|
||||
if streaming {
|
||||
return runtime.SerializerInfo{}, NewUnsupportedMediaTypeError(streamingSupported)
|
||||
}
|
||||
return runtime.SerializerInfo{}, NewUnsupportedMediaTypeError(supported)
|
||||
}
|
||||
|
||||
// isPrettyPrint returns true if the "pretty" query parameter is true or if the User-Agent
|
||||
// matches known "human" clients.
|
||||
func isPrettyPrint(req *http.Request) bool {
|
||||
// DEPRECATED: should be part of the content type
|
||||
if req.URL != nil {
|
||||
// avoid an allocation caused by parsing the URL query
|
||||
if strings.Contains(req.URL.RawQuery, "pretty") {
|
||||
pp := req.URL.Query().Get("pretty")
|
||||
if len(pp) > 0 {
|
||||
pretty, _ := strconv.ParseBool(pp)
|
||||
return pretty
|
||||
}
|
||||
}
|
||||
}
|
||||
userAgent := req.UserAgent()
|
||||
// This covers basic all browsers and cli http tools
|
||||
if strings.HasPrefix(userAgent, "curl") || strings.HasPrefix(userAgent, "Wget") || strings.HasPrefix(userAgent, "Mozilla/5.0") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// EndpointRestrictions is an interface that allows content-type negotiation
|
||||
// to verify server support for specific options
|
||||
type EndpointRestrictions interface {
|
||||
// AllowsMediaTypeTransform returns true if the endpoint allows either the requested mime type
|
||||
// or the requested transformation. If false, the caller should ignore this mime type. If the
|
||||
// target is nil, the client is not requesting a transformation.
|
||||
AllowsMediaTypeTransform(mimeType, mimeSubType string, target *schema.GroupVersionKind) bool
|
||||
// AllowsServerVersion should return true if the specified version is valid
|
||||
// for the server group.
|
||||
AllowsServerVersion(version string) bool
|
||||
// AllowsStreamSchema should return true if the specified stream schema is
|
||||
// valid for the server group.
|
||||
AllowsStreamSchema(schema string) bool
|
||||
}
|
||||
|
||||
// DefaultEndpointRestrictions is the default EndpointRestrictions which allows
|
||||
// content-type negotiation to verify server support for specific options
|
||||
var DefaultEndpointRestrictions = emptyEndpointRestrictions{}
|
||||
|
||||
type emptyEndpointRestrictions struct{}
|
||||
|
||||
func (emptyEndpointRestrictions) AllowsMediaTypeTransform(mimeType string, mimeSubType string, gvk *schema.GroupVersionKind) bool {
|
||||
return gvk == nil
|
||||
}
|
||||
func (emptyEndpointRestrictions) AllowsServerVersion(string) bool { return false }
|
||||
func (emptyEndpointRestrictions) AllowsStreamSchema(s string) bool { return s == "watch" }
|
||||
|
||||
// MediaTypeOptions describes information for a given media type that may alter
|
||||
// the server response
|
||||
type MediaTypeOptions struct {
|
||||
// pretty is true if the requested representation should be formatted for human
|
||||
// viewing
|
||||
Pretty bool
|
||||
|
||||
// stream, if set, indicates that a streaming protocol variant of this encoding
|
||||
// is desired. The only currently supported value is watch which returns versioned
|
||||
// events. In the future, this may refer to other stream protocols.
|
||||
Stream string
|
||||
|
||||
// convert is a request to alter the type of object returned by the server from the
|
||||
// normal response
|
||||
Convert *schema.GroupVersionKind
|
||||
// useServerVersion is an optional version for the server group
|
||||
UseServerVersion string
|
||||
|
||||
// export is true if the representation requested should exclude fields the server
|
||||
// has set
|
||||
Export bool
|
||||
|
||||
// unrecognized is a list of all unrecognized keys
|
||||
Unrecognized []string
|
||||
|
||||
// the accepted media type from the client
|
||||
Accepted runtime.SerializerInfo
|
||||
}
|
||||
|
||||
// acceptMediaTypeOptions returns an options object that matches the provided media type params. If
|
||||
// it returns false, the provided options are not allowed and the media type must be skipped. These
|
||||
// parameters are unversioned and may not be changed.
|
||||
func acceptMediaTypeOptions(params map[string]string, accepts *runtime.SerializerInfo, endpoint EndpointRestrictions) (MediaTypeOptions, bool) {
|
||||
var options MediaTypeOptions
|
||||
|
||||
// extract all known parameters
|
||||
for k, v := range params {
|
||||
switch k {
|
||||
|
||||
// controls transformation of the object when returned
|
||||
case "as":
|
||||
if options.Convert == nil {
|
||||
options.Convert = &schema.GroupVersionKind{}
|
||||
}
|
||||
options.Convert.Kind = v
|
||||
case "g":
|
||||
if options.Convert == nil {
|
||||
options.Convert = &schema.GroupVersionKind{}
|
||||
}
|
||||
options.Convert.Group = v
|
||||
case "v":
|
||||
if options.Convert == nil {
|
||||
options.Convert = &schema.GroupVersionKind{}
|
||||
}
|
||||
options.Convert.Version = v
|
||||
|
||||
// controls the streaming schema
|
||||
case "stream":
|
||||
if len(v) > 0 && (accepts.StreamSerializer == nil || !endpoint.AllowsStreamSchema(v)) {
|
||||
return MediaTypeOptions{}, false
|
||||
}
|
||||
options.Stream = v
|
||||
|
||||
// controls the version of the server API group used
|
||||
// for generic output
|
||||
case "sv":
|
||||
if len(v) > 0 && !endpoint.AllowsServerVersion(v) {
|
||||
return MediaTypeOptions{}, false
|
||||
}
|
||||
options.UseServerVersion = v
|
||||
|
||||
// if specified, the server should transform the returned
|
||||
// output and remove fields that are always server specified,
|
||||
// or which fit the default behavior.
|
||||
case "export":
|
||||
options.Export = v == "1"
|
||||
|
||||
// if specified, the pretty serializer will be used
|
||||
case "pretty":
|
||||
options.Pretty = v == "1"
|
||||
|
||||
default:
|
||||
options.Unrecognized = append(options.Unrecognized, k)
|
||||
}
|
||||
}
|
||||
|
||||
if !endpoint.AllowsMediaTypeTransform(accepts.MediaTypeType, accepts.MediaTypeSubType, options.Convert) {
|
||||
return MediaTypeOptions{}, false
|
||||
}
|
||||
|
||||
options.Accepted = *accepts
|
||||
return options, true
|
||||
}
|
||||
|
||||
// NegotiateMediaTypeOptions returns the most appropriate content type given the accept header and
|
||||
// a list of alternatives along with the accepted media type parameters.
|
||||
func NegotiateMediaTypeOptions(header string, accepted []runtime.SerializerInfo, endpoint EndpointRestrictions) (MediaTypeOptions, bool) {
|
||||
if len(header) == 0 && len(accepted) > 0 {
|
||||
return MediaTypeOptions{
|
||||
Accepted: accepted[0],
|
||||
}, true
|
||||
}
|
||||
|
||||
clauses := goautoneg.ParseAccept(header)
|
||||
for i := range clauses {
|
||||
clause := &clauses[i]
|
||||
for i := range accepted {
|
||||
accepts := &accepted[i]
|
||||
switch {
|
||||
case clause.Type == accepts.MediaTypeType && clause.SubType == accepts.MediaTypeSubType,
|
||||
clause.Type == accepts.MediaTypeType && clause.SubType == "*",
|
||||
clause.Type == "*" && clause.SubType == "*":
|
||||
if retVal, ret := acceptMediaTypeOptions(clause.Params, accepts, endpoint); ret {
|
||||
return retVal, true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return MediaTypeOptions{}, false
|
||||
}
|
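Editor's note, not part of the vendored file above: a minimal sketch of input-media-type negotiation, with a bare CodecFactory standing in as the NegotiatedSerializer; the empty Scheme is an assumption for illustration only.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/serializer"
    "k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
)

func main() {
    // CodecFactory implements runtime.NegotiatedSerializer and always offers
    // JSON/YAML/protobuf serializers, even over an empty scheme.
    codecs := serializer.NewCodecFactory(runtime.NewScheme())

    info, err := negotiation.NegotiateInputSerializerForMediaType("application/json", false, codecs)
    if err != nil {
        fmt.Println("unsupported media type:", err)
        return
    }
    fmt.Println("negotiated:", info.MediaType)
}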
827
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go
generated
vendored
Normal file
@ -0,0 +1,827 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
jsonpatch "gopkg.in/evanphx/json-patch.v4"
|
||||
kjson "sigs.k8s.io/json"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/validation"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/managedfields"
|
||||
"k8s.io/apimachinery/pkg/util/mergepatch"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/strategicpatch"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
"k8s.io/apiserver/pkg/admission"
|
||||
"k8s.io/apiserver/pkg/audit"
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager"
|
||||
"k8s.io/apiserver/pkg/endpoints/handlers/finisher"
|
||||
requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics"
|
||||
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
|
||||
"k8s.io/apiserver/pkg/endpoints/request"
|
||||
"k8s.io/apiserver/pkg/features"
|
||||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
"k8s.io/apiserver/pkg/util/dryrun"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/component-base/tracing"
|
||||
)
|
||||
|
||||
const (
|
||||
// maximum number of operations a single json patch may contain.
|
||||
maxJSONPatchOperations = 10000
|
||||
)
|
||||
|
||||
// PatchResource returns a function that will handle a resource patch.
|
||||
func PatchResource(r rest.Patcher, scope *RequestScope, admit admission.Interface, patchTypes []string) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, req *http.Request) {
|
||||
ctx := req.Context()
|
||||
// For performance tracking purposes.
|
||||
ctx, span := tracing.Start(ctx, "Patch", traceFields(req)...)
|
||||
req = req.WithContext(ctx)
|
||||
defer span.End(500 * time.Millisecond)
|
||||
|
||||
// Do this first, otherwise name extraction can fail for unrecognized content types
|
||||
// TODO: handle this in negotiation
|
||||
contentType := req.Header.Get("Content-Type")
|
||||
// Remove "; charset=" if included in header.
|
||||
if idx := strings.Index(contentType, ";"); idx > 0 {
|
||||
contentType = contentType[:idx]
|
||||
}
|
||||
patchType := types.PatchType(contentType)
|
||||
|
||||
// Ensure the patchType is one we support
|
||||
if !sets.NewString(patchTypes...).Has(contentType) {
|
||||
scope.err(negotiation.NewUnsupportedMediaTypeError(patchTypes), w, req)
|
||||
return
|
||||
}
|
||||
|
||||
namespace, name, err := scope.Namer.Name(req)
|
||||
if err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
|
||||
// enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided
|
||||
// timeout inside the parent context is lower than requestTimeoutUpperBound.
|
||||
ctx, cancel := context.WithTimeout(ctx, requestTimeoutUpperBound)
|
||||
defer cancel()
|
||||
|
||||
ctx = request.WithNamespace(ctx, namespace)
|
||||
|
||||
outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)
|
||||
if err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
|
||||
patchBytes, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.Patch)
|
||||
if err != nil {
|
||||
span.AddEvent("limitedReadBody failed", attribute.Int("len", len(patchBytes)), attribute.String("err", err.Error()))
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(patchBytes)))
|
||||
|
||||
options := &metav1.PatchOptions{}
|
||||
if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil {
|
||||
err = errors.NewBadRequest(err.Error())
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
if errs := validation.ValidatePatchOptions(options, patchType); len(errs) > 0 {
|
||||
err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "PatchOptions"}, "", errs)
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("PatchOptions"))
|
||||
|
||||
admit = admission.WithAudit(admit)
|
||||
|
||||
audit.LogRequestPatch(req.Context(), patchBytes)
|
||||
span.AddEvent("Recorded the audit event")
|
||||
|
||||
var baseContentType string
|
||||
switch patchType {
|
||||
case types.ApplyYAMLPatchType:
|
||||
baseContentType = runtime.ContentTypeYAML
|
||||
case types.ApplyCBORPatchType:
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) {
|
||||
// This request should have already been rejected by the
|
||||
// Content-Type allowlist check. Return 500 because assumptions are
|
||||
// already broken and the feature is not GA.
|
||||
utilruntime.HandleErrorWithContext(req.Context(), nil, "The patch content-type allowlist check should have made this unreachable.")
|
||||
scope.err(errors.NewInternalError(errors.NewInternalError(fmt.Errorf("unexpected patch type: %v", patchType))), w, req)
|
||||
return
|
||||
}
|
||||
|
||||
baseContentType = runtime.ContentTypeCBOR
|
||||
default:
|
||||
baseContentType = runtime.ContentTypeJSON
|
||||
}
|
||||
|
||||
s, ok := runtime.SerializerInfoForMediaType(scope.Serializer.SupportedMediaTypes(), baseContentType)
|
||||
if !ok {
|
||||
scope.err(fmt.Errorf("no serializer defined for %v", baseContentType), w, req)
|
||||
return
|
||||
}
|
||||
gv := scope.Kind.GroupVersion()
|
||||
|
||||
validationDirective := fieldValidation(options.FieldValidation)
|
||||
decodeSerializer := s.Serializer
|
||||
if validationDirective == metav1.FieldValidationWarn || validationDirective == metav1.FieldValidationStrict {
|
||||
decodeSerializer = s.StrictSerializer
|
||||
}
|
||||
|
||||
codec := runtime.NewCodec(
|
||||
scope.Serializer.EncoderForVersion(s.Serializer, gv),
|
||||
scope.Serializer.DecoderToVersion(decodeSerializer, scope.HubGroupVersion),
|
||||
)
|
||||
|
||||
userInfo, _ := request.UserFrom(ctx)
|
||||
staticCreateAttributes := admission.NewAttributesRecord(
|
||||
nil,
|
||||
nil,
|
||||
scope.Kind,
|
||||
namespace,
|
||||
name,
|
||||
scope.Resource,
|
||||
scope.Subresource,
|
||||
admission.Create,
|
||||
patchToCreateOptions(options),
|
||||
dryrun.IsDryRun(options.DryRun),
|
||||
userInfo)
|
||||
staticUpdateAttributes := admission.NewAttributesRecord(
|
||||
nil,
|
||||
nil,
|
||||
scope.Kind,
|
||||
namespace,
|
||||
name,
|
||||
scope.Resource,
|
||||
scope.Subresource,
|
||||
admission.Update,
|
||||
patchToUpdateOptions(options),
|
||||
dryrun.IsDryRun(options.DryRun),
|
||||
userInfo,
|
||||
)
|
||||
|
||||
admit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit)
|
||||
|
||||
mutatingAdmission, _ := admit.(admission.MutationInterface)
|
||||
createAuthorizerAttributes := authorizer.AttributesRecord{
|
||||
User: userInfo,
|
||||
ResourceRequest: true,
|
||||
Path: req.URL.Path,
|
||||
Verb: "create",
|
||||
APIGroup: scope.Resource.Group,
|
||||
APIVersion: scope.Resource.Version,
|
||||
Resource: scope.Resource.Resource,
|
||||
Subresource: scope.Subresource,
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
}
|
||||
|
||||
p := patcher{
|
||||
namer: scope.Namer,
|
||||
creater: scope.Creater,
|
||||
defaulter: scope.Defaulter,
|
||||
typer: scope.Typer,
|
||||
unsafeConvertor: scope.UnsafeConvertor,
|
||||
kind: scope.Kind,
|
||||
resource: scope.Resource,
|
||||
subresource: scope.Subresource,
|
||||
dryRun: dryrun.IsDryRun(options.DryRun),
|
||||
validationDirective: validationDirective,
|
||||
|
||||
objectInterfaces: scope,
|
||||
|
||||
hubGroupVersion: scope.HubGroupVersion,
|
||||
|
||||
createValidation: withAuthorization(rest.AdmissionToValidateObjectFunc(admit, staticCreateAttributes, scope), scope.Authorizer, createAuthorizerAttributes),
|
||||
updateValidation: rest.AdmissionToValidateObjectUpdateFunc(admit, staticUpdateAttributes, scope),
|
||||
admissionCheck: mutatingAdmission,
|
||||
|
||||
codec: codec,
|
||||
|
||||
options: options,
|
||||
|
||||
restPatcher: r,
|
||||
name: name,
|
||||
patchType: patchType,
|
||||
patchBytes: patchBytes,
|
||||
userAgent: req.UserAgent(),
|
||||
}
|
||||
|
||||
result, wasCreated, err := p.patchResource(ctx, scope)
|
||||
if err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
span.AddEvent("Object stored in database")
|
||||
|
||||
status := http.StatusOK
|
||||
if wasCreated {
|
||||
status = http.StatusCreated
|
||||
}
|
||||
|
||||
span.AddEvent("About to write a response")
|
||||
defer span.AddEvent("Writing http response done")
|
||||
transformResponseObject(ctx, scope, req, w, status, outputMediaType, result)
|
||||
}
|
||||
}
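// Illustrative sketch (editorial addition, not part of the vendored upstream
// file): the handler above is what ultimately serves a client-side Patch
// call such as the client-go example below. The clientset, namespace and
// object names are hypothetical; only the Patch signature and PatchOptions
// fields mirror the server-side options decoded above.
//
//	import (
//		"context"
//
//		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//		"k8s.io/apimachinery/pkg/types"
//		"k8s.io/client-go/kubernetes"
//	)
//
//	func patchReplicas(ctx context.Context, cs kubernetes.Interface) error {
//		// A strategic merge patch body; the server routes it to smpPatcher below.
//		patch := []byte(`{"spec":{"replicas":3}}`)
//		_, err := cs.AppsV1().Deployments("default").Patch(
//			ctx, "example", types.StrategicMergePatchType, patch,
//			metav1.PatchOptions{FieldManager: "example-controller"})
//		return err
//	}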
|
||||
|
||||
type mutateObjectUpdateFunc func(ctx context.Context, obj, old runtime.Object) error
|
||||
|
||||
// patcher breaks the process of patch application and retries into smaller
|
||||
// pieces of functionality.
|
||||
// TODO: Use builder pattern to construct this object?
|
||||
// TODO: As part of that effort, some aspects of PatchResource above could be
|
||||
// moved into this type.
|
||||
type patcher struct {
|
||||
// Pieces of RequestScope
|
||||
namer ScopeNamer
|
||||
creater runtime.ObjectCreater
|
||||
defaulter runtime.ObjectDefaulter
|
||||
typer runtime.ObjectTyper
|
||||
unsafeConvertor runtime.ObjectConvertor
|
||||
resource schema.GroupVersionResource
|
||||
kind schema.GroupVersionKind
|
||||
subresource string
|
||||
dryRun bool
|
||||
validationDirective string
|
||||
|
||||
objectInterfaces admission.ObjectInterfaces
|
||||
|
||||
hubGroupVersion schema.GroupVersion
|
||||
|
||||
// Validation functions
|
||||
createValidation rest.ValidateObjectFunc
|
||||
updateValidation rest.ValidateObjectUpdateFunc
|
||||
admissionCheck admission.MutationInterface
|
||||
|
||||
codec runtime.Codec
|
||||
|
||||
options *metav1.PatchOptions
|
||||
|
||||
// Operation information
|
||||
restPatcher rest.Patcher
|
||||
name string
|
||||
patchType types.PatchType
|
||||
patchBytes []byte
|
||||
userAgent string
|
||||
|
||||
// Set at invocation-time (by applyPatch) and immutable thereafter
|
||||
namespace string
|
||||
updatedObjectInfo rest.UpdatedObjectInfo
|
||||
mechanism patchMechanism
|
||||
forceAllowCreate bool
|
||||
}
|
||||
|
||||
type patchMechanism interface {
|
||||
applyPatchToCurrentObject(requestContext context.Context, currentObject runtime.Object) (runtime.Object, error)
|
||||
createNewObject(requestContext context.Context) (runtime.Object, error)
|
||||
}
|
||||
|
||||
type jsonPatcher struct {
|
||||
*patcher
|
||||
|
||||
fieldManager *managedfields.FieldManager
|
||||
}
|
||||
|
||||
func (p *jsonPatcher) applyPatchToCurrentObject(requestContext context.Context, currentObject runtime.Object) (runtime.Object, error) {
|
||||
// Encode will convert & return a versioned object in JSON.
|
||||
currentObjJS, err := runtime.Encode(p.codec, currentObject)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Apply the patch.
|
||||
patchedObjJS, appliedStrictErrs, err := p.applyJSPatch(currentObjJS)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Construct the resulting typed, unversioned object.
|
||||
objToUpdate := p.restPatcher.New()
|
||||
if err := runtime.DecodeInto(p.codec, patchedObjJS, objToUpdate); err != nil {
|
||||
strictError, isStrictError := runtime.AsStrictDecodingError(err)
|
||||
switch {
|
||||
case !isStrictError:
|
||||
// disregard any appliedStrictErrs, because it's an incomplete
|
||||
// list of strict errors given that we don't know what fields were
|
||||
// unknown because DecodeInto failed. Non-strict errors trump in this case.
|
||||
return nil, errors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{
|
||||
field.Invalid(field.NewPath("patch"), string(patchedObjJS), err.Error()),
|
||||
})
|
||||
case p.validationDirective == metav1.FieldValidationWarn:
|
||||
addStrictDecodingWarnings(requestContext, append(appliedStrictErrs, strictError.Errors()...))
|
||||
default:
|
||||
strictDecodingError := runtime.NewStrictDecodingError(append(appliedStrictErrs, strictError.Errors()...))
|
||||
return nil, errors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{
|
||||
field.Invalid(field.NewPath("patch"), string(patchedObjJS), strictDecodingError.Error()),
|
||||
})
|
||||
}
|
||||
} else if len(appliedStrictErrs) > 0 {
|
||||
switch {
|
||||
case p.validationDirective == metav1.FieldValidationWarn:
|
||||
addStrictDecodingWarnings(requestContext, appliedStrictErrs)
|
||||
default:
|
||||
return nil, errors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{
|
||||
field.Invalid(field.NewPath("patch"), string(patchedObjJS), runtime.NewStrictDecodingError(appliedStrictErrs).Error()),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if p.options == nil {
|
||||
// Provide a more informative error for the crash that would
|
||||
// happen on the next line
|
||||
panic("PatchOptions required but not provided")
|
||||
}
|
||||
objToUpdate = p.fieldManager.UpdateNoErrors(currentObject, objToUpdate, managerOrUserAgent(p.options.FieldManager, p.userAgent))
|
||||
return objToUpdate, nil
|
||||
}
|
||||
|
||||
func (p *jsonPatcher) createNewObject(_ context.Context) (runtime.Object, error) {
|
||||
return nil, errors.NewNotFound(p.resource.GroupResource(), p.name)
|
||||
}
|
||||
|
||||
type jsonPatchOp struct {
|
||||
Op string `json:"op"`
|
||||
Path string `json:"path"`
|
||||
From string `json:"from"`
|
||||
Value interface{} `json:"value"`
|
||||
}
|
||||
|
||||
// applyJSPatch applies the patch. Input and output objects must both have
|
||||
// the external version, since that is what the patch must have been constructed against.
|
||||
func (p *jsonPatcher) applyJSPatch(versionedJS []byte) (patchedJS []byte, strictErrors []error, retErr error) {
|
||||
switch p.patchType {
|
||||
case types.JSONPatchType:
|
||||
if p.validationDirective == metav1.FieldValidationStrict || p.validationDirective == metav1.FieldValidationWarn {
|
||||
var v []jsonPatchOp
|
||||
var err error
|
||||
if strictErrors, err = kjson.UnmarshalStrict(p.patchBytes, &v); err != nil {
|
||||
return nil, nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err))
|
||||
}
|
||||
for i, e := range strictErrors {
|
||||
strictErrors[i] = fmt.Errorf("json patch %v", e)
|
||||
}
|
||||
}
|
||||
|
||||
patchObj, err := jsonpatch.DecodePatch(p.patchBytes)
|
||||
if err != nil {
|
||||
return nil, nil, errors.NewBadRequest(err.Error())
|
||||
}
|
||||
if len(patchObj) > maxJSONPatchOperations {
|
||||
return nil, nil, errors.NewRequestEntityTooLargeError(
|
||||
fmt.Sprintf("The allowed maximum operations in a JSON patch is %d, got %d",
|
||||
maxJSONPatchOperations, len(patchObj)))
|
||||
}
|
||||
patchedJS, err := patchObj.Apply(versionedJS)
|
||||
if err != nil {
|
||||
return nil, nil, errors.NewGenericServerResponse(http.StatusUnprocessableEntity, "", schema.GroupResource{}, "", err.Error(), 0, false)
|
||||
}
|
||||
return patchedJS, strictErrors, nil
|
||||
case types.MergePatchType:
|
||||
if p.validationDirective == metav1.FieldValidationStrict || p.validationDirective == metav1.FieldValidationWarn {
|
||||
v := map[string]interface{}{}
|
||||
var err error
|
||||
strictErrors, err = kjson.UnmarshalStrict(p.patchBytes, &v)
|
||||
if err != nil {
|
||||
return nil, nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err))
|
||||
}
|
||||
}
|
||||
|
||||
patchedJS, retErr = jsonpatch.MergePatch(versionedJS, p.patchBytes)
|
||||
if retErr == jsonpatch.ErrBadJSONPatch {
|
||||
return nil, nil, errors.NewBadRequest(retErr.Error())
|
||||
}
|
||||
return patchedJS, strictErrors, retErr
|
||||
default:
|
||||
// only here as a safety net - go-restful filters content-type
|
||||
return nil, nil, fmt.Errorf("unknown Content-Type header for patch: %v", p.patchType)
|
||||
}
|
||||
}
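// Illustrative sketch (editorial addition, not part of the vendored upstream
// file): the two patch flavours handled above, applied directly with the
// same jsonpatch package (github.com/evanphx/json-patch) that applyJSPatch
// uses. The document contents are made up for the example.
//
//	doc := []byte(`{"metadata":{"labels":{"a":"1"}},"spec":{"replicas":1}}`)
//
//	// RFC 6902 JSON Patch: an explicit list of operations.
//	ops := []byte(`[{"op":"replace","path":"/spec/replicas","value":3}]`)
//	p, err := jsonpatch.DecodePatch(ops)
//	if err == nil {
//		doc, err = p.Apply(doc)
//	}
//
//	// RFC 7386 JSON Merge Patch: a partial document merged into the original.
//	doc, err = jsonpatch.MergePatch(doc, []byte(`{"metadata":{"labels":{"b":"2"}}}`))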
|
||||
|
||||
type smpPatcher struct {
|
||||
*patcher
|
||||
|
||||
// Schema
|
||||
schemaReferenceObj runtime.Object
|
||||
fieldManager *managedfields.FieldManager
|
||||
}
|
||||
|
||||
func (p *smpPatcher) applyPatchToCurrentObject(requestContext context.Context, currentObject runtime.Object) (runtime.Object, error) {
|
||||
// Since the patch is applied on versioned objects, we need to convert the
|
||||
// current object to versioned representation first.
|
||||
currentVersionedObject, err := p.unsafeConvertor.ConvertToVersion(currentObject, p.kind.GroupVersion())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
versionedObjToUpdate, err := p.creater.New(p.kind)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := strategicPatchObject(requestContext, p.defaulter, currentVersionedObject, p.patchBytes, versionedObjToUpdate, p.schemaReferenceObj, p.validationDirective); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Convert the object back to the hub version
|
||||
newObj, err := p.unsafeConvertor.ConvertToVersion(versionedObjToUpdate, p.hubGroupVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newObj = p.fieldManager.UpdateNoErrors(currentObject, newObj, managerOrUserAgent(p.options.FieldManager, p.userAgent))
|
||||
return newObj, nil
|
||||
}
|
||||
|
||||
func (p *smpPatcher) createNewObject(_ context.Context) (runtime.Object, error) {
|
||||
return nil, errors.NewNotFound(p.resource.GroupResource(), p.name)
|
||||
}
|
||||
|
||||
func newApplyPatcher(p *patcher, fieldManager *managedfields.FieldManager, unmarshalFn, unmarshalStrictFn func([]byte, interface{}) error) *applyPatcher {
|
||||
return &applyPatcher{
|
||||
fieldManager: fieldManager,
|
||||
patch: p.patchBytes,
|
||||
options: p.options,
|
||||
creater: p.creater,
|
||||
kind: p.kind,
|
||||
userAgent: p.userAgent,
|
||||
validationDirective: p.validationDirective,
|
||||
unmarshalFn: unmarshalFn,
|
||||
unmarshalStrictFn: unmarshalStrictFn,
|
||||
}
|
||||
}
|
||||
|
||||
type applyPatcher struct {
|
||||
patch []byte
|
||||
options *metav1.PatchOptions
|
||||
creater runtime.ObjectCreater
|
||||
kind schema.GroupVersionKind
|
||||
fieldManager *managedfields.FieldManager
|
||||
userAgent string
|
||||
validationDirective string
|
||||
unmarshalFn func(data []byte, v interface{}) error
|
||||
unmarshalStrictFn func(data []byte, v interface{}) error
|
||||
}
|
||||
|
||||
func (p *applyPatcher) applyPatchToCurrentObject(requestContext context.Context, obj runtime.Object) (runtime.Object, error) {
|
||||
force := false
|
||||
if p.options.Force != nil {
|
||||
force = *p.options.Force
|
||||
}
|
||||
if p.fieldManager == nil {
|
||||
panic("FieldManager must be installed to run apply")
|
||||
}
|
||||
|
||||
patchObj := &unstructured.Unstructured{Object: map[string]interface{}{}}
|
||||
if err := p.unmarshalFn(p.patch, &patchObj.Object); err != nil {
|
||||
return nil, errors.NewBadRequest(fmt.Sprintf("error decoding YAML: %v", err))
|
||||
}
|
||||
|
||||
obj, err := p.fieldManager.Apply(obj, patchObj, p.options.FieldManager, force)
|
||||
if err != nil {
|
||||
return obj, err
|
||||
}
|
||||
|
||||
// TODO: spawn something to track deciding whether a fieldValidation=Strict
|
||||
// fatal error should return before an error from the apply operation
|
||||
if p.validationDirective == metav1.FieldValidationStrict || p.validationDirective == metav1.FieldValidationWarn {
|
||||
if err := p.unmarshalStrictFn(p.patch, &map[string]interface{}{}); err != nil {
|
||||
if p.validationDirective == metav1.FieldValidationStrict {
|
||||
return nil, errors.NewBadRequest(fmt.Sprintf("error strict decoding YAML: %v", err))
|
||||
}
|
||||
addStrictDecodingWarnings(requestContext, []error{err})
|
||||
}
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func (p *applyPatcher) createNewObject(requestContext context.Context) (runtime.Object, error) {
|
||||
obj, err := p.creater.New(p.kind)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create new object: %v", err)
|
||||
}
|
||||
return p.applyPatchToCurrentObject(requestContext, obj)
|
||||
}
|
||||
|
||||
// strategicPatchObject applies a strategic merge patch of `patchBytes` to
|
||||
// `originalObject` and stores the result in `objToUpdate`.
|
||||
// Both `originalObject` and `patchBytes` are converted to map[string]interface{}
// representations internally before the merge is applied.
|
||||
// NOTE: Both `originalObject` and `objToUpdate` are supposed to be versioned.
|
||||
func strategicPatchObject(
|
||||
requestContext context.Context,
|
||||
defaulter runtime.ObjectDefaulter,
|
||||
originalObject runtime.Object,
|
||||
patchBytes []byte,
|
||||
objToUpdate runtime.Object,
|
||||
schemaReferenceObj runtime.Object,
|
||||
validationDirective string,
|
||||
) error {
|
||||
originalObjMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(originalObject)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
patchMap := make(map[string]interface{})
|
||||
var strictErrs []error
|
||||
if validationDirective == metav1.FieldValidationWarn || validationDirective == metav1.FieldValidationStrict {
|
||||
strictErrs, err = kjson.UnmarshalStrict(patchBytes, &patchMap)
|
||||
if err != nil {
|
||||
return errors.NewBadRequest(err.Error())
|
||||
}
|
||||
} else {
|
||||
if err = kjson.UnmarshalCaseSensitivePreserveInts(patchBytes, &patchMap); err != nil {
|
||||
return errors.NewBadRequest(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
if err := applyPatchToObject(requestContext, defaulter, originalObjMap, patchMap, objToUpdate, schemaReferenceObj, strictErrs, validationDirective); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// applyPatch is called every time GuaranteedUpdate asks for the updated object,
|
||||
// and is given the currently persisted object as input.
|
||||
// TODO: rename this function because the name implies it is related to applyPatcher
|
||||
func (p *patcher) applyPatch(ctx context.Context, _, currentObject runtime.Object) (objToUpdate runtime.Object, patchErr error) {
|
||||
// Make sure we actually have a persisted currentObject
|
||||
tracing.SpanFromContext(ctx).AddEvent("About to apply patch")
|
||||
currentObjectHasUID, err := hasUID(currentObject)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if !currentObjectHasUID {
|
||||
objToUpdate, patchErr = p.mechanism.createNewObject(ctx)
|
||||
} else {
|
||||
objToUpdate, patchErr = p.mechanism.applyPatchToCurrentObject(ctx, currentObject)
|
||||
}
|
||||
|
||||
if patchErr != nil {
|
||||
return nil, patchErr
|
||||
}
|
||||
|
||||
objToUpdateHasUID, err := hasUID(objToUpdate)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if objToUpdateHasUID && !currentObjectHasUID {
|
||||
accessor, err := meta.Accessor(objToUpdate)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, errors.NewConflict(p.resource.GroupResource(), p.name, fmt.Errorf("uid mismatch: the provided object specified uid %s, and no existing object was found", accessor.GetUID()))
|
||||
}
|
||||
|
||||
// if this object supports namespace info
|
||||
if objectMeta, err := meta.Accessor(objToUpdate); err == nil {
|
||||
// ensure namespace on the object is correct, or error if a conflicting namespace was set in the object
|
||||
if err := rest.EnsureObjectNamespaceMatchesRequestNamespace(rest.ExpectedNamespaceForResource(p.namespace, p.resource), objectMeta); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := checkName(objToUpdate, p.name, p.namespace, p.namer); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return objToUpdate, nil
|
||||
}
|
||||
|
||||
func (p *patcher) admissionAttributes(ctx context.Context, updatedObject runtime.Object, currentObject runtime.Object, operation admission.Operation, operationOptions runtime.Object) admission.Attributes {
|
||||
userInfo, _ := request.UserFrom(ctx)
|
||||
return admission.NewAttributesRecord(updatedObject, currentObject, p.kind, p.namespace, p.name, p.resource, p.subresource, operation, operationOptions, p.dryRun, userInfo)
|
||||
}
|
||||
|
||||
// applyAdmission is called every time GuaranteedUpdate asks for the updated object,
|
||||
// and is given the currently persisted object and the patched object as input.
|
||||
// TODO: rename this function because the name implies it is related to applyPatcher
|
||||
func (p *patcher) applyAdmission(ctx context.Context, patchedObject runtime.Object, currentObject runtime.Object) (runtime.Object, error) {
|
||||
tracing.SpanFromContext(ctx).AddEvent("About to check admission control")
|
||||
var operation admission.Operation
|
||||
var options runtime.Object
|
||||
if hasUID, err := hasUID(currentObject); err != nil {
|
||||
return nil, err
|
||||
} else if !hasUID {
|
||||
operation = admission.Create
|
||||
currentObject = nil
|
||||
options = patchToCreateOptions(p.options)
|
||||
} else {
|
||||
operation = admission.Update
|
||||
options = patchToUpdateOptions(p.options)
|
||||
}
|
||||
if p.admissionCheck != nil && p.admissionCheck.Handles(operation) {
|
||||
attributes := p.admissionAttributes(ctx, patchedObject, currentObject, operation, options)
|
||||
return patchedObject, p.admissionCheck.Admit(ctx, attributes, p.objectInterfaces)
|
||||
}
|
||||
return patchedObject, nil
|
||||
}
|
||||
|
||||
// patchResource divides PatchResource for easier unit testing
|
||||
func (p *patcher) patchResource(ctx context.Context, scope *RequestScope) (runtime.Object, bool, error) {
|
||||
p.namespace = request.NamespaceValue(ctx)
|
||||
switch p.patchType {
|
||||
case types.JSONPatchType, types.MergePatchType:
|
||||
p.mechanism = &jsonPatcher{
|
||||
patcher: p,
|
||||
fieldManager: scope.FieldManager,
|
||||
}
|
||||
case types.StrategicMergePatchType:
|
||||
schemaReferenceObj, err := p.unsafeConvertor.ConvertToVersion(p.restPatcher.New(), p.kind.GroupVersion())
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
p.mechanism = &smpPatcher{
|
||||
patcher: p,
|
||||
schemaReferenceObj: schemaReferenceObj,
|
||||
fieldManager: scope.FieldManager,
|
||||
}
|
||||
// this case is unreachable if ServerSideApply is not enabled because we will have already rejected the content type
|
||||
case types.ApplyYAMLPatchType:
|
||||
p.mechanism = newApplyPatcher(p, scope.FieldManager, yaml.Unmarshal, yaml.UnmarshalStrict)
|
||||
p.forceAllowCreate = true
|
||||
case types.ApplyCBORPatchType:
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) {
|
||||
utilruntime.HandleErrorWithContext(context.TODO(), nil, "CBOR apply requests should be rejected before reaching this point unless the feature gate is enabled.")
|
||||
return nil, false, fmt.Errorf("%v: unimplemented patch type", p.patchType)
|
||||
}
|
||||
|
||||
// The strict and non-strict funcs are the same here because any CBOR map with
|
||||
// duplicate keys is invalid and always rejected outright regardless of strictness
|
||||
// mode, and unknown field errors can't occur in practice because the type of the
|
||||
// destination value for unmarshaling an apply configuration is always
|
||||
// "unstructured".
|
||||
p.mechanism = newApplyPatcher(p, scope.FieldManager, cbor.Unmarshal, cbor.Unmarshal)
|
||||
p.forceAllowCreate = true
|
||||
default:
|
||||
return nil, false, fmt.Errorf("%v: unimplemented patch type", p.patchType)
|
||||
}
|
||||
dedupOwnerReferencesTransformer := func(_ context.Context, obj, _ runtime.Object) (runtime.Object, error) {
|
||||
// Dedup owner references after mutating admission happens
|
||||
dedupOwnerReferencesAndAddWarning(obj, ctx, true)
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
transformers := []rest.TransformFunc{p.applyPatch, p.applyAdmission, dedupOwnerReferencesTransformer}
|
||||
|
||||
wasCreated := false
|
||||
p.updatedObjectInfo = rest.DefaultUpdatedObjectInfo(nil, transformers...)
|
||||
requestFunc := func() (runtime.Object, error) {
|
||||
// Pass in UpdateOptions to override UpdateStrategy.AllowUpdateOnCreate
|
||||
options := patchToUpdateOptions(p.options)
|
||||
updateObject, created, updateErr := p.restPatcher.Update(ctx, p.name, p.updatedObjectInfo, p.createValidation, p.updateValidation, p.forceAllowCreate, options)
|
||||
wasCreated = created
|
||||
return updateObject, updateErr
|
||||
}
|
||||
result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) {
|
||||
|
||||
result, err := requestFunc()
|
||||
// If the object wasn't committed to storage because its serialized size was too large,
|
||||
// it is safe to remove managedFields (which can be large) and try again.
|
||||
if isTooLargeError(err) && p.patchType != types.ApplyYAMLPatchType && p.patchType != types.ApplyCBORPatchType {
|
||||
if _, accessorErr := meta.Accessor(p.restPatcher.New()); accessorErr == nil {
|
||||
p.updatedObjectInfo = rest.DefaultUpdatedObjectInfo(nil,
|
||||
p.applyPatch,
|
||||
p.applyAdmission,
|
||||
dedupOwnerReferencesTransformer,
|
||||
func(_ context.Context, obj, _ runtime.Object) (runtime.Object, error) {
|
||||
accessor, _ := meta.Accessor(obj)
|
||||
accessor.SetManagedFields(nil)
|
||||
return obj, nil
|
||||
})
|
||||
result, err = requestFunc()
|
||||
}
|
||||
}
|
||||
return result, err
|
||||
})
|
||||
return result, wasCreated, err
|
||||
}
|
||||
|
||||
// applyPatchToObject applies a strategic merge patch of <patchMap> to
|
||||
// <originalMap> and stores the result in <objToUpdate>.
|
||||
// NOTE: <objToUpdate> must be a versioned object.
|
||||
func applyPatchToObject(
|
||||
requestContext context.Context,
|
||||
defaulter runtime.ObjectDefaulter,
|
||||
originalMap map[string]interface{},
|
||||
patchMap map[string]interface{},
|
||||
objToUpdate runtime.Object,
|
||||
schemaReferenceObj runtime.Object,
|
||||
strictErrs []error,
|
||||
validationDirective string,
|
||||
) error {
|
||||
patchedObjMap, err := strategicpatch.StrategicMergeMapPatch(originalMap, patchMap, schemaReferenceObj)
|
||||
if err != nil {
|
||||
return interpretStrategicMergePatchError(err)
|
||||
}
|
||||
|
||||
// Rather than serialize the patched map to JSON, then decode it to an object, we go directly from a map to an object
|
||||
converter := runtime.DefaultUnstructuredConverter
|
||||
returnUnknownFields := validationDirective == metav1.FieldValidationWarn || validationDirective == metav1.FieldValidationStrict
|
||||
if err := converter.FromUnstructuredWithValidation(patchedObjMap, objToUpdate, returnUnknownFields); err != nil {
|
||||
strictError, isStrictError := runtime.AsStrictDecodingError(err)
|
||||
switch {
|
||||
case !isStrictError:
|
||||
// disregard any strictErrs, because it's an incomplete
|
||||
// list of strict errors given that we don't know what fields were
|
||||
// unknown because StrategicMergeMapPatch failed.
|
||||
// Non-strict errors trump in this case.
|
||||
return errors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{
|
||||
field.Invalid(field.NewPath("patch"), fmt.Sprintf("%+v", patchMap), err.Error()),
|
||||
})
|
||||
case validationDirective == metav1.FieldValidationWarn:
|
||||
addStrictDecodingWarnings(requestContext, append(strictErrs, strictError.Errors()...))
|
||||
default:
|
||||
strictDecodingError := runtime.NewStrictDecodingError(append(strictErrs, strictError.Errors()...))
|
||||
return errors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{
|
||||
field.Invalid(field.NewPath("patch"), fmt.Sprintf("%+v", patchMap), strictDecodingError.Error()),
|
||||
})
|
||||
}
|
||||
} else if len(strictErrs) > 0 {
|
||||
switch {
|
||||
case validationDirective == metav1.FieldValidationWarn:
|
||||
addStrictDecodingWarnings(requestContext, strictErrs)
|
||||
default:
|
||||
return errors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{
|
||||
field.Invalid(field.NewPath("patch"), fmt.Sprintf("%+v", patchMap), runtime.NewStrictDecodingError(strictErrs).Error()),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Decoding from JSON to a versioned object would apply defaults, so we do the same here
|
||||
defaulter.Default(objToUpdate)
|
||||
|
||||
return nil
|
||||
}
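// Illustrative sketch (editorial addition, not part of the vendored upstream
// file): the list-merge behaviour that strategic merge patch adds on top of
// a plain JSON merge patch, using the byte-level helper from the same
// strategicpatch package. corev1 is assumed to be k8s.io/api/core/v1;
// containers merge by name because of the patchMergeKey tag on the Pod type.
//
//	original := []byte(`{"spec":{"containers":[{"name":"app","image":"app:v1"}]}}`)
//	patch := []byte(`{"spec":{"containers":[{"name":"sidecar","image":"proxy:v1"}]}}`)
//	merged, err := strategicpatch.StrategicMergePatch(original, patch, corev1.Pod{})
//	// merged keeps both containers; a JSON merge patch would have replaced
//	// the whole containers list instead.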
|
||||
|
||||
// interpretStrategicMergePatchError interprets the error type and returns an error with appropriate HTTP code.
|
||||
func interpretStrategicMergePatchError(err error) error {
|
||||
switch err {
|
||||
case mergepatch.ErrBadJSONDoc, mergepatch.ErrBadPatchFormatForPrimitiveList, mergepatch.ErrBadPatchFormatForRetainKeys, mergepatch.ErrBadPatchFormatForSetElementOrderList, mergepatch.ErrUnsupportedStrategicMergePatchFormat:
|
||||
return errors.NewBadRequest(err.Error())
|
||||
case mergepatch.ErrNoListOfLists, mergepatch.ErrPatchContentNotMatchRetainKeys:
|
||||
return errors.NewGenericServerResponse(http.StatusUnprocessableEntity, "", schema.GroupResource{}, "", err.Error(), 0, false)
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// patchToUpdateOptions creates an UpdateOptions with the same field values as the provided PatchOptions.
|
||||
func patchToUpdateOptions(po *metav1.PatchOptions) *metav1.UpdateOptions {
|
||||
if po == nil {
|
||||
return nil
|
||||
}
|
||||
uo := &metav1.UpdateOptions{
|
||||
DryRun: po.DryRun,
|
||||
FieldManager: po.FieldManager,
|
||||
FieldValidation: po.FieldValidation,
|
||||
}
|
||||
uo.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("UpdateOptions"))
|
||||
return uo
|
||||
}
|
||||
|
||||
// patchToCreateOptions creates a CreateOptions with the same field values as the provided PatchOptions.
|
||||
func patchToCreateOptions(po *metav1.PatchOptions) *metav1.CreateOptions {
|
||||
if po == nil {
|
||||
return nil
|
||||
}
|
||||
co := &metav1.CreateOptions{
|
||||
DryRun: po.DryRun,
|
||||
FieldManager: po.FieldManager,
|
||||
FieldValidation: po.FieldValidation,
|
||||
}
|
||||
co.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("CreateOptions"))
|
||||
return co
|
||||
}
|
583
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go
generated
vendored
Normal file
@ -0,0 +1,583 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"reflect"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
|
||||
"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
|
||||
"k8s.io/apiserver/pkg/endpoints/metrics"
|
||||
endpointsrequest "k8s.io/apiserver/pkg/endpoints/request"
|
||||
"k8s.io/apiserver/pkg/storage"
|
||||
"k8s.io/apiserver/pkg/util/apihelpers"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// watchEmbeddedEncoder performs encoding of the embedded object.
|
||||
//
|
||||
// NOTE: watchEmbeddedEncoder is NOT thread-safe.
|
||||
type watchEmbeddedEncoder struct {
|
||||
encoder runtime.Encoder
|
||||
|
||||
ctx context.Context
|
||||
|
||||
// target, if non-nil, configures transformation type.
|
||||
// The other options are ignored if target is nil.
|
||||
target *schema.GroupVersionKind
|
||||
tableOptions *metav1.TableOptions
|
||||
scope *RequestScope
|
||||
|
||||
// identifier of the encoder, computed lazily
|
||||
identifier runtime.Identifier
|
||||
}
|
||||
|
||||
func newWatchEmbeddedEncoder(ctx context.Context, encoder runtime.Encoder, target *schema.GroupVersionKind, tableOptions *metav1.TableOptions, scope *RequestScope) *watchEmbeddedEncoder {
|
||||
return &watchEmbeddedEncoder{
|
||||
encoder: encoder,
|
||||
ctx: ctx,
|
||||
target: target,
|
||||
tableOptions: tableOptions,
|
||||
scope: scope,
|
||||
}
|
||||
}
|
||||
|
||||
// Encode implements runtime.Encoder interface.
|
||||
func (e *watchEmbeddedEncoder) Encode(obj runtime.Object, w io.Writer) error {
|
||||
if co, ok := obj.(runtime.CacheableObject); ok {
|
||||
return co.CacheEncode(e.Identifier(), e.doEncode, w)
|
||||
}
|
||||
return e.doEncode(obj, w)
|
||||
}
|
||||
|
||||
func (e *watchEmbeddedEncoder) doEncode(obj runtime.Object, w io.Writer) error {
|
||||
result, err := doTransformObject(e.ctx, obj, e.tableOptions, e.target, e.scope)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("failed to transform object %v: %v", reflect.TypeOf(obj), err))
|
||||
result = obj
|
||||
}
|
||||
|
||||
// When we are transforming to a table, use the original table options when
|
||||
// we should print headers only on the first object - headers should be
|
||||
// omitted on subsequent events.
|
||||
if e.tableOptions != nil && !e.tableOptions.NoHeaders {
|
||||
e.tableOptions.NoHeaders = true
|
||||
// With options change, we should recompute the identifier.
|
||||
// Clearing this will trigger lazy recompute when needed.
|
||||
e.identifier = ""
|
||||
}
|
||||
|
||||
return e.encoder.Encode(result, w)
|
||||
}
|
||||
|
||||
// Identifier implements runtime.Encoder interface.
|
||||
func (e *watchEmbeddedEncoder) Identifier() runtime.Identifier {
|
||||
if e.identifier == "" {
|
||||
e.identifier = e.embeddedIdentifier()
|
||||
}
|
||||
return e.identifier
|
||||
}
|
||||
|
||||
type watchEmbeddedEncoderIdentifier struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
Encoder string `json:"encoder,omitempty"`
|
||||
Target string `json:"target,omitempty"`
|
||||
Options metav1.TableOptions `json:"options,omitempty"`
|
||||
NoHeaders bool `json:"noHeaders,omitempty"`
|
||||
}
|
||||
|
||||
func (e *watchEmbeddedEncoder) embeddedIdentifier() runtime.Identifier {
|
||||
if e.target == nil {
|
||||
// If no conversion is performed, we effectively only use
|
||||
// the embedded identifier.
|
||||
return e.encoder.Identifier()
|
||||
}
|
||||
identifier := watchEmbeddedEncoderIdentifier{
|
||||
Name: "watch-embedded",
|
||||
Encoder: string(e.encoder.Identifier()),
|
||||
Target: e.target.String(),
|
||||
}
|
||||
if e.target.Kind == "Table" && e.tableOptions != nil {
|
||||
identifier.Options = *e.tableOptions
|
||||
identifier.NoHeaders = e.tableOptions.NoHeaders
|
||||
}
|
||||
|
||||
result, err := json.Marshal(identifier)
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed marshaling identifier for watchEmbeddedEncoder: %v", err)
|
||||
}
|
||||
return runtime.Identifier(result)
|
||||
}
|
||||
|
||||
// watchEncoder performs encoding of the watch events.
|
||||
//
|
||||
// NOTE: watchEncoder is NOT thread-safe.
|
||||
type watchEncoder struct {
|
||||
ctx context.Context
|
||||
kind schema.GroupVersionKind
|
||||
embeddedEncoder runtime.Encoder
|
||||
encoder runtime.Encoder
|
||||
framer io.Writer
|
||||
|
||||
watchListTransformerFn watchListTransformerFunction
|
||||
|
||||
buffer runtime.Splice
|
||||
eventBuffer runtime.Splice
|
||||
|
||||
currentEmbeddedIdentifier runtime.Identifier
|
||||
identifiers map[watch.EventType]runtime.Identifier
|
||||
}
|
||||
|
||||
func newWatchEncoder(ctx context.Context, kind schema.GroupVersionKind, embeddedEncoder runtime.Encoder, encoder runtime.Encoder, framer io.Writer, watchListTransformerFn watchListTransformerFunction) *watchEncoder {
|
||||
return &watchEncoder{
|
||||
ctx: ctx,
|
||||
kind: kind,
|
||||
embeddedEncoder: embeddedEncoder,
|
||||
encoder: encoder,
|
||||
framer: framer,
|
||||
watchListTransformerFn: watchListTransformerFn,
|
||||
buffer: runtime.NewSpliceBuffer(),
|
||||
eventBuffer: runtime.NewSpliceBuffer(),
|
||||
}
|
||||
}
|
||||
|
||||
// Encode encodes a given watch event.
|
||||
// NOTE: if the event's object implements the CacheableObject interface,
|
||||
//
|
||||
// the serialized version is cached in that object [not the event itself].
|
||||
func (e *watchEncoder) Encode(event watch.Event) error {
|
||||
encodeFunc := func(obj runtime.Object, w io.Writer) error {
|
||||
return e.doEncode(obj, event, w)
|
||||
}
|
||||
if event.Type == watch.Bookmark {
|
||||
// Bookmark objects are small, and we don't yet support serialization for them.
|
||||
// Additionally, we need to transform them to support the watch-list feature.
|
||||
event = e.watchListTransformerFn(event)
|
||||
return encodeFunc(event.Object, e.framer)
|
||||
}
|
||||
if co, ok := event.Object.(runtime.CacheableObject); ok {
|
||||
return co.CacheEncode(e.identifier(event.Type), encodeFunc, e.framer)
|
||||
}
|
||||
return encodeFunc(event.Object, e.framer)
|
||||
}
|
||||
|
||||
func (e *watchEncoder) doEncode(obj runtime.Object, event watch.Event, w io.Writer) error {
|
||||
defer e.buffer.Reset()
|
||||
|
||||
if err := e.embeddedEncoder.Encode(obj, e.buffer); err != nil {
|
||||
return fmt.Errorf("unable to encode watch object %T: %v", obj, err)
|
||||
}
|
||||
|
||||
// ContentType is not required here because we are defaulting to the serializer type.
|
||||
outEvent := &metav1.WatchEvent{
|
||||
Type: string(event.Type),
|
||||
Object: runtime.RawExtension{Raw: e.buffer.Bytes()},
|
||||
}
|
||||
metrics.WatchEventsSizes.WithContext(e.ctx).WithLabelValues(e.kind.Group, e.kind.Version, e.kind.Kind).Observe(float64(len(outEvent.Object.Raw)))
|
||||
|
||||
defer e.eventBuffer.Reset()
|
||||
if err := e.encoder.Encode(outEvent, e.eventBuffer); err != nil {
|
||||
return fmt.Errorf("unable to encode watch object %T: %v (%#v)", outEvent, err, e)
|
||||
}
|
||||
|
||||
_, err := w.Write(e.eventBuffer.Bytes())
|
||||
return err
|
||||
}
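// Illustrative sketch (editorial addition, not part of the vendored upstream
// file): the wire format produced above. Each frame is a metav1.WatchEvent
// whose Object field carries the already-encoded embedded object as raw
// bytes, so for the JSON serializer a single frame looks like
// {"type":"MODIFIED","object":{...}}. Building one by hand follows the same
// shape as doEncode; the object content is made up.
//
//	raw, _ := json.Marshal(map[string]interface{}{"kind": "Pod", "apiVersion": "v1"})
//	frame := metav1.WatchEvent{
//		Type:   string(watch.Modified),
//		Object: runtime.RawExtension{Raw: raw},
//	}
//	out, _ := json.Marshal(frame)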
|
||||
|
||||
type watchEncoderIdentifier struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
EmbeddedEncoder string `json:"embeddedEncoder,omitempty"`
|
||||
Encoder string `json:"encoder,omitempty"`
|
||||
EventType string `json:"eventType,omitempty"`
|
||||
}
|
||||
|
||||
func (e *watchEncoder) identifier(eventType watch.EventType) runtime.Identifier {
|
||||
// We need to take into account that if the embeddedEncoder includes a table
// transformer, its identifier is dynamic. As a result, whenever
|
||||
// the identifier of embeddedEncoder changes, we need to invalidate the
|
||||
// whole identifiers cache.
|
||||
// TODO(wojtek-t): Can we optimize it somehow?
|
||||
if e.currentEmbeddedIdentifier != e.embeddedEncoder.Identifier() {
|
||||
e.currentEmbeddedIdentifier = e.embeddedEncoder.Identifier()
|
||||
e.identifiers = map[watch.EventType]runtime.Identifier{}
|
||||
}
|
||||
if _, ok := e.identifiers[eventType]; !ok {
|
||||
e.identifiers[eventType] = e.typeIdentifier(eventType)
|
||||
}
|
||||
return e.identifiers[eventType]
|
||||
}
|
||||
|
||||
func (e *watchEncoder) typeIdentifier(eventType watch.EventType) runtime.Identifier {
|
||||
// The eventType is a non-standard pattern. This comes from the fact
|
||||
// that we're effectively serializing the whole watch event, but storing
|
||||
// it in serializations of the Object within the watch event.
|
||||
identifier := watchEncoderIdentifier{
|
||||
Name: "watch",
|
||||
EmbeddedEncoder: string(e.embeddedEncoder.Identifier()),
|
||||
Encoder: string(e.encoder.Identifier()),
|
||||
EventType: string(eventType),
|
||||
}
|
||||
|
||||
result, err := json.Marshal(identifier)
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed marshaling identifier for watchEncoder: %v", err)
|
||||
}
|
||||
return runtime.Identifier(result)
|
||||
}
|
||||
|
||||
// doTransformObject is used for handling all requests, including watch.
|
||||
func doTransformObject(ctx context.Context, obj runtime.Object, opts interface{}, target *schema.GroupVersionKind, scope *RequestScope) (runtime.Object, error) {
|
||||
if _, ok := obj.(*metav1.Status); ok {
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
switch {
|
||||
case target == nil:
|
||||
// If we ever change that from a no-op, the identifier of
|
||||
// the watchEmbeddedEncoder has to be adjusted accordingly.
|
||||
return obj, nil
|
||||
|
||||
case target.Kind == "PartialObjectMetadata":
|
||||
return asPartialObjectMetadata(obj, target.GroupVersion())
|
||||
|
||||
case target.Kind == "PartialObjectMetadataList":
|
||||
return asPartialObjectMetadataList(obj, target.GroupVersion())
|
||||
|
||||
case target.Kind == "Table":
|
||||
options, ok := opts.(*metav1.TableOptions)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected TableOptions, got %T", opts)
|
||||
}
|
||||
return asTable(ctx, obj, options, scope, target.GroupVersion())
|
||||
|
||||
default:
|
||||
accepted, _ := negotiation.MediaTypesForSerializer(apihelpers.GetMetaInternalVersionCodecs())
|
||||
err := negotiation.NewNotAcceptableError(accepted)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// optionsForTransform will load and validate any additional query parameter options for
|
||||
// a conversion or return an error.
|
||||
func optionsForTransform(mediaType negotiation.MediaTypeOptions, req *http.Request) (interface{}, error) {
|
||||
switch target := mediaType.Convert; {
|
||||
case target == nil:
|
||||
case target.Kind == "Table" && (target.GroupVersion() == metav1beta1.SchemeGroupVersion || target.GroupVersion() == metav1.SchemeGroupVersion):
|
||||
opts := &metav1.TableOptions{}
|
||||
if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, opts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch errs := validation.ValidateTableOptions(opts); len(errs) {
|
||||
case 0:
|
||||
return opts, nil
|
||||
case 1:
|
||||
return nil, errors.NewBadRequest(fmt.Sprintf("Unable to convert to Table as requested: %v", errs[0].Error()))
|
||||
default:
|
||||
return nil, errors.NewBadRequest(fmt.Sprintf("Unable to convert to Table as requested: %v", errs))
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// targetEncodingForTransform returns the appropriate serializer for the input media type
|
||||
func targetEncodingForTransform(scope *RequestScope, mediaType negotiation.MediaTypeOptions, req *http.Request) (schema.GroupVersionKind, runtime.NegotiatedSerializer, bool) {
|
||||
switch target := mediaType.Convert; {
|
||||
case target == nil:
|
||||
case (target.Kind == "PartialObjectMetadata" || target.Kind == "PartialObjectMetadataList" || target.Kind == "Table") &&
|
||||
(target.GroupVersion() == metav1beta1.SchemeGroupVersion || target.GroupVersion() == metav1.SchemeGroupVersion):
|
||||
return *target, apihelpers.GetMetaInternalVersionCodecs(), true
|
||||
}
|
||||
return scope.Kind, scope.Serializer, false
|
||||
}
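// Illustrative sketch (editorial addition, not part of the vendored upstream
// file): the mediaType.Convert target checked above is populated from the
// as/v/g parameters of the Accept header. A raw request for the
// server-side-printed Table representation of a list looks like the
// following (URL and resource are made up):
//
//	req, _ := http.NewRequest(http.MethodGet, "https://apiserver/api/v1/namespaces/default/pods", nil)
//	req.Header.Set("Accept", "application/json;as=Table;v=v1;g=meta.k8s.io, application/json")
//
// With that header, targetEncodingForTransform returns the meta.k8s.io
// codecs and the Table group version kind instead of scope.Kind.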
|
||||
|
||||
// transformResponseObject takes an object loaded from storage and performs any necessary transformations.
|
||||
// Will write the complete response object.
|
||||
// transformResponseObject is used only for handling non-streaming requests.
|
||||
func transformResponseObject(ctx context.Context, scope *RequestScope, req *http.Request, w http.ResponseWriter, statusCode int, mediaType negotiation.MediaTypeOptions, result runtime.Object) {
|
||||
options, err := optionsForTransform(mediaType, req)
|
||||
if err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
|
||||
// ensure that for empty lists we don't return <nil> items.
|
||||
// This is safe to modify without deep-copying the object, as
|
||||
// List objects themselves are never cached.
|
||||
if meta.IsListType(result) && meta.LenList(result) == 0 {
|
||||
if err := meta.SetList(result, []runtime.Object{}); err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var obj runtime.Object
|
||||
do := func() {
|
||||
obj, err = doTransformObject(ctx, result, options, mediaType.Convert, scope)
|
||||
}
|
||||
endpointsrequest.TrackTransformResponseObjectLatency(ctx, do)
|
||||
|
||||
if err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
kind, serializer, _ := targetEncodingForTransform(scope, mediaType, req)
|
||||
responsewriters.WriteObjectNegotiated(serializer, scope, kind.GroupVersion(), w, req, statusCode, obj, false)
|
||||
}
|
||||
|
||||
// errNotAcceptable indicates Accept negotiation has failed
|
||||
type errNotAcceptable struct {
|
||||
message string
|
||||
}
|
||||
|
||||
func newNotAcceptableError(message string) error {
|
||||
return errNotAcceptable{message}
|
||||
}
|
||||
|
||||
func (e errNotAcceptable) Error() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
func (e errNotAcceptable) Status() metav1.Status {
|
||||
return metav1.Status{
|
||||
Status: metav1.StatusFailure,
|
||||
Code: http.StatusNotAcceptable,
|
||||
Reason: metav1.StatusReason("NotAcceptable"),
|
||||
Message: e.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
func asTable(ctx context.Context, result runtime.Object, opts *metav1.TableOptions, scope *RequestScope, groupVersion schema.GroupVersion) (runtime.Object, error) {
|
||||
switch groupVersion {
|
||||
case metav1beta1.SchemeGroupVersion, metav1.SchemeGroupVersion:
|
||||
default:
|
||||
return nil, newNotAcceptableError(fmt.Sprintf("no Table exists in group version %s", groupVersion))
|
||||
}
|
||||
|
||||
obj, err := scope.TableConvertor.ConvertToTable(ctx, result, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
table := (*metav1.Table)(obj)
|
||||
|
||||
for i := range table.Rows {
|
||||
item := &table.Rows[i]
|
||||
switch opts.IncludeObject {
|
||||
case metav1.IncludeObject:
|
||||
item.Object.Object, err = scope.Convertor.ConvertToVersion(item.Object.Object, scope.Kind.GroupVersion())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// TODO: rely on defaulting for the value here?
|
||||
case metav1.IncludeMetadata, "":
|
||||
m, err := meta.Accessor(item.Object.Object)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// TODO: turn this into an internal type and do conversion in order to get object kind automatically set?
|
||||
partial := meta.AsPartialObjectMetadata(m)
|
||||
partial.GetObjectKind().SetGroupVersionKind(groupVersion.WithKind("PartialObjectMetadata"))
|
||||
item.Object.Object = partial
|
||||
case metav1.IncludeNone:
|
||||
item.Object.Object = nil
|
||||
default:
|
||||
err = errors.NewBadRequest(fmt.Sprintf("unrecognized includeObject value: %q", opts.IncludeObject))
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return table, nil
|
||||
}
|
||||
|
||||
func asPartialObjectMetadata(result runtime.Object, groupVersion schema.GroupVersion) (runtime.Object, error) {
|
||||
if meta.IsListType(result) {
|
||||
err := newNotAcceptableError(fmt.Sprintf("you requested PartialObjectMetadata, but the requested object is a list (%T)", result))
|
||||
return nil, err
|
||||
}
|
||||
switch groupVersion {
|
||||
case metav1beta1.SchemeGroupVersion, metav1.SchemeGroupVersion:
|
||||
default:
|
||||
return nil, newNotAcceptableError(fmt.Sprintf("no PartialObjectMetadataList exists in group version %s", groupVersion))
|
||||
}
|
||||
m, err := meta.Accessor(result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
partial := meta.AsPartialObjectMetadata(m)
|
||||
partial.GetObjectKind().SetGroupVersionKind(groupVersion.WithKind("PartialObjectMetadata"))
|
||||
return partial, nil
|
||||
}
|
||||
|
||||
func asPartialObjectMetadataList(result runtime.Object, groupVersion schema.GroupVersion) (runtime.Object, error) {
|
||||
li, ok := result.(metav1.ListInterface)
|
||||
if !ok {
|
||||
return nil, newNotAcceptableError(fmt.Sprintf("you requested PartialObjectMetadataList, but the requested object is not a list (%T)", result))
|
||||
}
|
||||
|
||||
gvk := groupVersion.WithKind("PartialObjectMetadata")
|
||||
switch {
|
||||
case groupVersion == metav1beta1.SchemeGroupVersion:
|
||||
list := &metav1beta1.PartialObjectMetadataList{}
|
||||
err := meta.EachListItem(result, func(obj runtime.Object) error {
|
||||
m, err := meta.Accessor(obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
partial := meta.AsPartialObjectMetadata(m)
|
||||
partial.GetObjectKind().SetGroupVersionKind(gvk)
|
||||
list.Items = append(list.Items, *partial)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
list.ResourceVersion = li.GetResourceVersion()
|
||||
list.Continue = li.GetContinue()
|
||||
list.RemainingItemCount = li.GetRemainingItemCount()
|
||||
return list, nil
|
||||
|
||||
case groupVersion == metav1.SchemeGroupVersion:
|
||||
list := &metav1.PartialObjectMetadataList{}
|
||||
err := meta.EachListItem(result, func(obj runtime.Object) error {
|
||||
m, err := meta.Accessor(obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
partial := meta.AsPartialObjectMetadata(m)
|
||||
partial.GetObjectKind().SetGroupVersionKind(gvk)
|
||||
list.Items = append(list.Items, *partial)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
list.ResourceVersion = li.GetResourceVersion()
|
||||
list.Continue = li.GetContinue()
|
||||
list.RemainingItemCount = li.GetRemainingItemCount()
|
||||
return list, nil
|
||||
|
||||
default:
|
||||
return nil, newNotAcceptableError(fmt.Sprintf("no PartialObjectMetadataList exists in group version %s", groupVersion))
|
||||
}
|
||||
}
|
||||
|
||||
// watchListTransformerFunction an optional function
|
||||
// applied to watchlist bookmark events that transforms
|
||||
// the embedded object before sending it to a client.
|
||||
type watchListTransformerFunction func(watch.Event) watch.Event
|
||||
|
||||
// watchListTransformer performs transformation of
|
||||
// a special watchList bookmark event.
|
||||
//
|
||||
// The bookmark is annotated with InitialEventsListBlueprintAnnotationKey
|
||||
// and contains an empty, versioned list that we must encode in the requested format
|
||||
// (e.g., protobuf, JSON, CBOR) and then store as a base64-encoded string.
|
||||
type watchListTransformer struct {
|
||||
initialEventsListBlueprint runtime.Object
|
||||
targetGVK *schema.GroupVersionKind
|
||||
negotiatedEncoder runtime.Encoder
|
||||
buffer runtime.Splice
|
||||
}
|
||||
|
||||
// newWatchListTransformer returns a transformer function for watchlist bookmark events.
|
||||
func newWatchListTransformer(initialEventsListBlueprint runtime.Object, targetGVK *schema.GroupVersionKind, negotiatedEncoder runtime.Encoder) *watchListTransformer {
|
||||
return &watchListTransformer{
|
||||
initialEventsListBlueprint: initialEventsListBlueprint,
|
||||
targetGVK: targetGVK,
|
||||
negotiatedEncoder: negotiatedEncoder,
|
||||
buffer: runtime.NewSpliceBuffer(),
|
||||
}
|
||||
}
|
||||
|
||||
func (e *watchListTransformer) transform(event watch.Event) watch.Event {
|
||||
if e.initialEventsListBlueprint == nil {
|
||||
return event
|
||||
}
|
||||
hasAnnotation, err := storage.HasInitialEventsEndBookmarkAnnotation(event.Object)
|
||||
if err != nil {
|
||||
return newWatchEventErrorFor(err)
|
||||
}
|
||||
if !hasAnnotation {
|
||||
return event
|
||||
}
|
||||
|
||||
if err = e.encodeInitialEventsListBlueprint(event.Object); err != nil {
|
||||
return newWatchEventErrorFor(err)
|
||||
}
|
||||
|
||||
return event
|
||||
}
|
||||
|
||||
func (e *watchListTransformer) encodeInitialEventsListBlueprint(object runtime.Object) error {
|
||||
initialEventsListBlueprint, err := e.transformInitialEventsListBlueprint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer e.buffer.Reset()
|
||||
if err = e.negotiatedEncoder.Encode(initialEventsListBlueprint, e.buffer); err != nil {
|
||||
return err
|
||||
}
|
||||
encodedInitialEventsListBlueprint := e.buffer.Bytes()
|
||||
|
||||
// the storage layer creates a deep copy of the obj before modifying it.
|
||||
// since the object has the annotation, we can modify it directly.
|
||||
objectMeta, err := meta.Accessor(object)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
annotations := objectMeta.GetAnnotations()
|
||||
annotations[metav1.InitialEventsListBlueprintAnnotationKey] = base64.StdEncoding.EncodeToString(encodedInitialEventsListBlueprint)
|
||||
objectMeta.SetAnnotations(annotations)
|
||||
|
||||
return nil
|
||||
}
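// Illustrative sketch (editorial addition, not part of the vendored upstream
// file): how a consumer of the watch-list bookmark can recover the list
// blueprint stored above, assuming the negotiated encoder was JSON. The
// objectMeta accessor is hypothetical; the annotation key and base64
// wrapping mirror the code above.
//
//	encoded := objectMeta.GetAnnotations()[metav1.InitialEventsListBlueprintAnnotationKey]
//	raw, err := base64.StdEncoding.DecodeString(encoded)
//	if err == nil {
//		list := &metav1.PartialObjectMetadataList{}
//		err = json.Unmarshal(raw, list)
//	}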
|
||||
|
||||
func (e *watchListTransformer) transformInitialEventsListBlueprint() (runtime.Object, error) {
|
||||
if e.targetGVK != nil && e.targetGVK.Kind == "PartialObjectMetadata" {
|
||||
return asPartialObjectMetadataList(e.initialEventsListBlueprint, e.targetGVK.GroupVersion())
|
||||
}
|
||||
return e.initialEventsListBlueprint, nil
|
||||
}
|
||||
|
||||
func newWatchEventErrorFor(err error) watch.Event {
|
||||
return watch.Event{
|
||||
Type: watch.Error,
|
||||
Object: &metav1.Status{
|
||||
Status: metav1.StatusFailure,
|
||||
Message: err.Error(),
|
||||
Reason: metav1.StatusReasonInternalError,
|
||||
Code: http.StatusInternalServerError,
|
||||
},
|
||||
}
|
||||
}
|
18
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/doc.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package responsewriters contains helpers to write responses in HTTP handlers.
|
||||
package responsewriters // import "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
|
84
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors.go
generated
vendored
Normal file
@ -0,0 +1,84 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package responsewriters
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
)
|
||||
|
||||
// Avoid emitting errors that look like valid HTML. Quotes are okay.
|
||||
var sanitizer = strings.NewReplacer(`&`, "&", `<`, "<", `>`, ">")
|
||||
|
||||
// Forbidden renders a simple forbidden error
|
||||
func Forbidden(ctx context.Context, attributes authorizer.Attributes, w http.ResponseWriter, req *http.Request, reason string, s runtime.NegotiatedSerializer) {
|
||||
w.Header().Set("X-Content-Type-Options", "nosniff")
|
||||
gv := schema.GroupVersion{Group: attributes.GetAPIGroup(), Version: attributes.GetAPIVersion()}
|
||||
ErrorNegotiated(ForbiddenStatusError(attributes, reason), s, gv, w, req)
|
||||
}
|
||||
|
||||
func ForbiddenStatusError(attributes authorizer.Attributes, reason string) *apierrors.StatusError {
|
||||
msg := sanitizer.Replace(forbiddenMessage(attributes))
|
||||
|
||||
var errMsg error
|
||||
if len(reason) == 0 {
|
||||
errMsg = fmt.Errorf("%s", msg)
|
||||
} else {
|
||||
errMsg = fmt.Errorf("%s: %s", msg, reason)
|
||||
}
|
||||
|
||||
gr := schema.GroupResource{Group: attributes.GetAPIGroup(), Resource: attributes.GetResource()}
|
||||
|
||||
return apierrors.NewForbidden(gr, attributes.GetName(), errMsg)
|
||||
}
|
||||
|
||||
func forbiddenMessage(attributes authorizer.Attributes) string {
|
||||
username := ""
|
||||
if user := attributes.GetUser(); user != nil {
|
||||
username = user.GetName()
|
||||
}
|
||||
|
||||
if !attributes.IsResourceRequest() {
|
||||
return fmt.Sprintf("User %q cannot %s path %q", username, attributes.GetVerb(), attributes.GetPath())
|
||||
}
|
||||
|
||||
resource := attributes.GetResource()
|
||||
if subresource := attributes.GetSubresource(); len(subresource) > 0 {
|
||||
resource = resource + "/" + subresource
|
||||
}
|
||||
|
||||
if ns := attributes.GetNamespace(); len(ns) > 0 {
|
||||
return fmt.Sprintf("User %q cannot %s resource %q in API group %q in the namespace %q", username, attributes.GetVerb(), resource, attributes.GetAPIGroup(), ns)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("User %q cannot %s resource %q in API group %q at the cluster scope", username, attributes.GetVerb(), resource, attributes.GetAPIGroup())
|
||||
}
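// Illustrative sketch (editorial addition, not part of the vendored upstream
// file): the kind of message the helpers above produce for a namespaced
// resource request. The attribute values are made up, and user.DefaultInfo
// is assumed to come from k8s.io/apiserver/pkg/authentication/user.
//
//	attrs := authorizer.AttributesRecord{
//		User:            &user.DefaultInfo{Name: "jane"},
//		Verb:            "delete",
//		Namespace:       "default",
//		APIGroup:        "apps",
//		Resource:        "deployments",
//		ResourceRequest: true,
//	}
//	statusErr := ForbiddenStatusError(attrs, "RBAC: access denied")
//	// statusErr.Error() reads roughly:
//	//   deployments.apps is forbidden: User "jane" cannot delete resource
//	//   "deployments" in API group "apps" in the namespace "default": RBAC: access denied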
|
||||
|
||||
// InternalError renders a simple internal error
|
||||
func InternalError(w http.ResponseWriter, req *http.Request, err error) {
|
||||
http.Error(w, sanitizer.Replace(fmt.Sprintf("Internal Server Error: %q: %v", req.RequestURI, err)),
|
||||
http.StatusInternalServerError)
|
||||
utilruntime.HandleError(err)
|
||||
}
|
83
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/status.go
generated
vendored
Normal file
@ -0,0 +1,83 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package responsewriters
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apiserver/pkg/storage"
|
||||
)
|
||||
|
||||
// statusError is an object that can be converted into a metav1.Status
|
||||
type statusError interface {
|
||||
Status() metav1.Status
|
||||
}
|
||||
|
||||
// ErrorToAPIStatus converts an error to a metav1.Status object.
|
||||
func ErrorToAPIStatus(err error) *metav1.Status {
|
||||
switch t := err.(type) {
|
||||
case statusError:
|
||||
status := t.Status()
|
||||
if len(status.Status) == 0 {
|
||||
status.Status = metav1.StatusFailure
|
||||
}
|
||||
switch status.Status {
|
||||
case metav1.StatusSuccess:
|
||||
if status.Code == 0 {
|
||||
status.Code = http.StatusOK
|
||||
}
|
||||
case metav1.StatusFailure:
|
||||
if status.Code == 0 {
|
||||
status.Code = http.StatusInternalServerError
|
||||
}
|
||||
default:
|
||||
runtime.HandleError(fmt.Errorf("apiserver received an error with wrong status field : %#+v", err))
|
||||
if status.Code == 0 {
|
||||
status.Code = http.StatusInternalServerError
|
||||
}
|
||||
}
|
||||
status.Kind = "Status"
|
||||
status.APIVersion = "v1"
|
||||
//TODO: check for invalid responses
|
||||
return &status
|
||||
default:
|
||||
status := http.StatusInternalServerError
|
||||
switch {
|
||||
//TODO: replace me with NewConflictErr
|
||||
case storage.IsConflict(err):
|
||||
status = http.StatusConflict
|
||||
}
|
||||
// Log errors that were not converted to an error status
|
||||
// by REST storage - these typically indicate programmer
|
||||
// error by not using pkg/api/errors, or unexpected failure
|
||||
// cases.
|
||||
runtime.HandleError(fmt.Errorf("apiserver received an error that is not an metav1.Status: %#+v: %v", err, err))
|
||||
return &metav1.Status{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Status",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
Status: metav1.StatusFailure,
|
||||
Code: int32(status),
|
||||
Reason: metav1.StatusReasonUnknown,
|
||||
Message: err.Error(),
|
||||
}
|
||||
}
|
||||
}
|
349
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go
generated
vendored
Normal file
349
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go
generated
vendored
Normal file
@ -0,0 +1,349 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package responsewriters
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"k8s.io/apiserver/pkg/features"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/httpstream/wsstream"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apiserver/pkg/audit"
|
||||
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
|
||||
"k8s.io/apiserver/pkg/endpoints/metrics"
|
||||
"k8s.io/apiserver/pkg/endpoints/request"
|
||||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/apiserver/pkg/util/flushwriter"
|
||||
"k8s.io/component-base/tracing"
|
||||
)
|
||||
|
||||
// StreamObject performs input stream negotiation from a ResourceStreamer and writes that to the response.
|
||||
// If the client requests a websocket upgrade, negotiate for a websocket reader protocol (because many
|
||||
// browser clients cannot easily handle binary streaming protocols).
|
||||
func StreamObject(statusCode int, gv schema.GroupVersion, s runtime.NegotiatedSerializer, stream rest.ResourceStreamer, w http.ResponseWriter, req *http.Request) {
|
||||
out, flush, contentType, err := stream.InputStream(req.Context(), gv.String(), req.Header.Get("Accept"))
|
||||
if err != nil {
|
||||
ErrorNegotiated(err, s, gv, w, req)
|
||||
return
|
||||
}
|
||||
if out == nil {
|
||||
// No output provided - return StatusNoContent
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
if wsstream.IsWebSocketRequest(req) {
|
||||
r := wsstream.NewReader(out, true, wsstream.NewDefaultReaderProtocols())
|
||||
if err := r.Copy(w, req); err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("error encountered while streaming results via websocket: %v", err))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if len(contentType) == 0 {
|
||||
contentType = "application/octet-stream"
|
||||
}
|
||||
w.Header().Set("Content-Type", contentType)
|
||||
w.WriteHeader(statusCode)
|
||||
// Flush headers, if possible
|
||||
if flusher, ok := w.(http.Flusher); ok {
|
||||
flusher.Flush()
|
||||
}
|
||||
writer := w.(io.Writer)
|
||||
if flush {
|
||||
writer = flushwriter.Wrap(w)
|
||||
}
|
||||
io.Copy(writer, out)
|
||||
}
|
||||
|
||||
// SerializeObject renders an object in the content type negotiated by the client using the provided encoder.
|
||||
// The context is optional and can be nil. This method will perform optional content compression if requested by
|
||||
// a client and the feature gate for APIResponseCompression is enabled.
|
||||
func SerializeObject(mediaType string, encoder runtime.Encoder, hw http.ResponseWriter, req *http.Request, statusCode int, object runtime.Object) {
|
||||
ctx := req.Context()
|
||||
ctx, span := tracing.Start(ctx, "SerializeObject",
|
||||
attribute.String("audit-id", audit.GetAuditIDTruncated(ctx)),
|
||||
attribute.String("method", req.Method),
|
||||
attribute.String("url", req.URL.Path),
|
||||
attribute.String("protocol", req.Proto),
|
||||
attribute.String("mediaType", mediaType),
|
||||
attribute.String("encoder", string(encoder.Identifier())))
|
||||
req = req.WithContext(ctx)
|
||||
defer span.End(5 * time.Second)
|
||||
|
||||
w := &deferredResponseWriter{
|
||||
mediaType: mediaType,
|
||||
statusCode: statusCode,
|
||||
contentEncoding: negotiateContentEncoding(req),
|
||||
hw: hw,
|
||||
ctx: ctx,
|
||||
}
|
||||
|
||||
err := encoder.Encode(object, w)
|
||||
if err == nil {
|
||||
err = w.Close()
|
||||
if err != nil {
|
||||
// we cannot write an error to the writer anymore as the Encode call was successful.
|
||||
utilruntime.HandleError(fmt.Errorf("apiserver was unable to close cleanly the response writer: %v", err))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// make a best effort to write the object if a failure is detected
|
||||
utilruntime.HandleError(fmt.Errorf("apiserver was unable to write a JSON response: %v", err))
|
||||
status := ErrorToAPIStatus(err)
|
||||
candidateStatusCode := int(status.Code)
|
||||
// if the current status code is successful, allow the error's status code to overwrite it
|
||||
if statusCode >= http.StatusOK && statusCode < http.StatusBadRequest {
|
||||
w.statusCode = candidateStatusCode
|
||||
}
|
||||
output, err := runtime.Encode(encoder, status)
|
||||
if err != nil {
|
||||
w.mediaType = "text/plain"
|
||||
output = []byte(fmt.Sprintf("%s: %s", status.Reason, status.Message))
|
||||
}
|
||||
if _, err := w.Write(output); err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("apiserver was unable to write a fallback JSON response: %v", err))
|
||||
}
|
||||
w.Close()
|
||||
}
|
||||
|
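||||
// gzipPool reuses gzip.Writer instances configured at defaultGzipContentEncodingLevel so that
|
||||
// responses do not allocate a new compressor on every write.
|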
||||
var gzipPool = &sync.Pool{
|
||||
New: func() interface{} {
|
||||
gw, err := gzip.NewWriterLevel(nil, defaultGzipContentEncodingLevel)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return gw
|
||||
},
|
||||
}
|
||||
|
||||
const (
|
||||
// defaultGzipContentEncodingLevel is set to 1, which uses the least CPU compared to higher levels, yet offers
|
||||
// similar compression ratios (off by at most 1.5x, but typically within 1.1x-1.3x). For further details see -
|
||||
// https://github.com/kubernetes/kubernetes/issues/112296
|
||||
defaultGzipContentEncodingLevel = 1
|
||||
// defaultGzipThresholdBytes is compared to the size of the first write from the stream
|
||||
// (usually the entire object); if the size is smaller, no gzipping is performed even
|
||||
// when the client requests it.
|
||||
defaultGzipThresholdBytes = 128 * 1024
|
||||
)
|
||||
|
||||
// negotiateContentEncoding returns a supported client-requested content encoding for the
|
||||
// provided request. It will return the empty string if no supported content encoding was
|
||||
// found or if response compression is disabled.
|
||||
func negotiateContentEncoding(req *http.Request) string {
|
||||
encoding := req.Header.Get("Accept-Encoding")
|
||||
if len(encoding) == 0 {
|
||||
return ""
|
||||
}
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.APIResponseCompression) {
|
||||
return ""
|
||||
}
|
||||
for len(encoding) > 0 {
|
||||
var token string
|
||||
if next := strings.Index(encoding, ","); next != -1 {
|
||||
token = encoding[:next]
|
||||
encoding = encoding[next+1:]
|
||||
} else {
|
||||
token = encoding
|
||||
encoding = ""
|
||||
}
|
||||
switch strings.TrimSpace(token) {
|
||||
case "gzip":
|
||||
return "gzip"
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
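||||
// deferredResponseWriter delays writing the Content-Type, Content-Encoding and status code headers
|
||||
// until the first Write call, wrapping the output in a pooled gzip.Writer when compression was
|
||||
// negotiated and the first write is large enough.
|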
||||
type deferredResponseWriter struct {
|
||||
mediaType string
|
||||
statusCode int
|
||||
contentEncoding string
|
||||
|
||||
hasWritten bool
|
||||
hw http.ResponseWriter
|
||||
w io.Writer
|
||||
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func (w *deferredResponseWriter) Write(p []byte) (n int, err error) {
|
||||
ctx := w.ctx
|
||||
span := tracing.SpanFromContext(ctx)
|
||||
// This step usually wraps in-memory object serialization.
|
||||
span.AddEvent("About to start writing response", attribute.Int("size", len(p)))
|
||||
|
||||
firstWrite := !w.hasWritten
|
||||
defer func() {
|
||||
if err != nil {
|
||||
span.AddEvent("Write call failed",
|
||||
attribute.String("writer", fmt.Sprintf("%T", w.w)),
|
||||
attribute.Int("size", len(p)),
|
||||
attribute.Bool("firstWrite", firstWrite),
|
||||
attribute.String("err", err.Error()))
|
||||
} else {
|
||||
span.AddEvent("Write call succeeded",
|
||||
attribute.String("writer", fmt.Sprintf("%T", w.w)),
|
||||
attribute.Int("size", len(p)),
|
||||
attribute.Bool("firstWrite", firstWrite))
|
||||
}
|
||||
}()
|
||||
if w.hasWritten {
|
||||
return w.w.Write(p)
|
||||
}
|
||||
w.hasWritten = true
|
||||
|
||||
hw := w.hw
|
||||
header := hw.Header()
|
||||
switch {
|
||||
case w.contentEncoding == "gzip" && len(p) > defaultGzipThresholdBytes:
|
||||
header.Set("Content-Encoding", "gzip")
|
||||
header.Add("Vary", "Accept-Encoding")
|
||||
|
||||
gw := gzipPool.Get().(*gzip.Writer)
|
||||
gw.Reset(hw)
|
||||
|
||||
w.w = gw
|
||||
default:
|
||||
w.w = hw
|
||||
}
|
||||
|
||||
header.Set("Content-Type", w.mediaType)
|
||||
hw.WriteHeader(w.statusCode)
|
||||
return w.w.Write(p)
|
||||
}
|
||||
|
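||||
// Close flushes and returns the pooled gzip.Writer when one is in use; it is a no-op if nothing was written.
|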
||||
func (w *deferredResponseWriter) Close() error {
|
||||
if !w.hasWritten {
|
||||
return nil
|
||||
}
|
||||
var err error
|
||||
switch t := w.w.(type) {
|
||||
case *gzip.Writer:
|
||||
err = t.Close()
|
||||
t.Reset(nil)
|
||||
gzipPool.Put(t)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// WriteObjectNegotiated renders an object in the content type negotiated by the client.
|
||||
func WriteObjectNegotiated(s runtime.NegotiatedSerializer, restrictions negotiation.EndpointRestrictions, gv schema.GroupVersion, w http.ResponseWriter, req *http.Request, statusCode int, object runtime.Object, listGVKInContentType bool) {
|
||||
stream, ok := object.(rest.ResourceStreamer)
|
||||
if ok {
|
||||
requestInfo, _ := request.RequestInfoFrom(req.Context())
|
||||
metrics.RecordLongRunning(req, requestInfo, metrics.APIServerComponent, func() {
|
||||
StreamObject(statusCode, gv, s, stream, w, req)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
mediaType, serializer, err := negotiation.NegotiateOutputMediaType(req, s, restrictions)
|
||||
if err != nil {
|
||||
// if the original statusCode was not successful, we need to return the original error;
|
||||
// we cannot hide it behind negotiation problems
|
||||
if statusCode < http.StatusOK || statusCode >= http.StatusBadRequest {
|
||||
WriteRawJSON(int(statusCode), object, w)
|
||||
return
|
||||
}
|
||||
status := ErrorToAPIStatus(err)
|
||||
WriteRawJSON(int(status.Code), status, w)
|
||||
return
|
||||
}
|
||||
|
||||
audit.LogResponseObject(req.Context(), object, gv, s)
|
||||
|
||||
var encoder runtime.Encoder
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) {
|
||||
encoder = s.EncoderForVersion(runtime.UseNondeterministicEncoding(serializer.Serializer), gv)
|
||||
} else {
|
||||
encoder = s.EncoderForVersion(serializer.Serializer, gv)
|
||||
}
|
||||
request.TrackSerializeResponseObjectLatency(req.Context(), func() {
|
||||
if listGVKInContentType {
|
||||
SerializeObject(generateMediaTypeWithGVK(serializer.MediaType, mediaType.Convert), encoder, w, req, statusCode, object)
|
||||
} else {
|
||||
SerializeObject(serializer.MediaType, encoder, w, req, statusCode, object)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
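||||
// generateMediaTypeWithGVK appends g=, v= and as= parameters for the provided GroupVersionKind to the media type.
|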
||||
func generateMediaTypeWithGVK(mediaType string, gvk *schema.GroupVersionKind) string {
|
||||
if gvk == nil {
|
||||
return mediaType
|
||||
}
|
||||
if gvk.Group != "" {
|
||||
mediaType += ";g=" + gvk.Group
|
||||
}
|
||||
if gvk.Version != "" {
|
||||
mediaType += ";v=" + gvk.Version
|
||||
}
|
||||
if gvk.Kind != "" {
|
||||
mediaType += ";as=" + gvk.Kind
|
||||
}
|
||||
return mediaType
|
||||
}
|
||||
|
||||
// ErrorNegotiated renders an error to the response. Returns the HTTP status code of the error.
|
||||
// The context is optional and may be nil.
|
||||
func ErrorNegotiated(err error, s runtime.NegotiatedSerializer, gv schema.GroupVersion, w http.ResponseWriter, req *http.Request) int {
|
||||
status := ErrorToAPIStatus(err)
|
||||
code := int(status.Code)
|
||||
// when writing an error, check to see if the status indicates a retry after period
|
||||
if status.Details != nil && status.Details.RetryAfterSeconds > 0 {
|
||||
delay := strconv.Itoa(int(status.Details.RetryAfterSeconds))
|
||||
w.Header().Set("Retry-After", delay)
|
||||
}
|
||||
|
||||
if code == http.StatusNoContent {
|
||||
w.WriteHeader(code)
|
||||
return code
|
||||
}
|
||||
|
||||
WriteObjectNegotiated(s, negotiation.DefaultEndpointRestrictions, gv, w, req, code, status, false)
|
||||
return code
|
||||
}
|
||||
|
||||
// WriteRawJSON writes a non-API object in JSON.
|
||||
func WriteRawJSON(statusCode int, object interface{}, w http.ResponseWriter) {
|
||||
output, err := json.MarshalIndent(object, "", " ")
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(statusCode)
|
||||
w.Write(output)
|
||||
}
|
472
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go
generated
vendored
Normal file
472
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go
generated
vendored
Normal file
@ -0,0 +1,472 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
grpccodes "google.golang.org/grpc/codes"
|
||||
grpcstatus "google.golang.org/grpc/status"
|
||||
|
||||
apiequality "k8s.io/apimachinery/pkg/api/equality"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/managedfields"
|
||||
"k8s.io/apiserver/pkg/admission"
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics"
|
||||
"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
|
||||
"k8s.io/apiserver/pkg/endpoints/metrics"
|
||||
"k8s.io/apiserver/pkg/endpoints/request"
|
||||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
"k8s.io/apiserver/pkg/warning"
|
||||
)
|
||||
|
||||
const (
|
||||
// 34 was chosen as a number close to 30 that is likely to be unique enough to jump out at me the next time I see a timeout.
|
||||
// Everyone chooses 30.
|
||||
requestTimeoutUpperBound = 34 * time.Second
|
||||
// DuplicateOwnerReferencesWarningFormat is the warning that a client receives when a create/update request contains
|
||||
// duplicate owner reference entries.
|
||||
DuplicateOwnerReferencesWarningFormat = ".metadata.ownerReferences contains duplicate entries; API server dedups owner references in 1.20+, and may reject such requests as early as 1.24; please fix your requests; duplicate UID(s) observed: %v"
|
||||
// DuplicateOwnerReferencesAfterMutatingAdmissionWarningFormat indicates the duplication was observed
|
||||
// after mutating admission.
|
||||
// NOTE: For CREATE and UPDATE requests the API server dedups both before and after mutating admission.
|
||||
// For PATCH request the API server only dedups after mutating admission.
|
||||
DuplicateOwnerReferencesAfterMutatingAdmissionWarningFormat = ".metadata.ownerReferences contains duplicate entries after mutating admission happens; API server dedups owner references in 1.20+, and may reject such requests as early as 1.24; please fix your requests; duplicate UID(s) observed: %v"
|
||||
// shortPrefix is one possible beginning of yaml unmarshal strict errors.
|
||||
shortPrefix = "yaml: unmarshal errors:\n"
|
||||
// longPrefix is the other possible beginning of yaml unmarshal strict errors.
|
||||
longPrefix = "error converting YAML to JSON: yaml: unmarshal errors:\n"
|
||||
)
|
||||
|
||||
// RequestScope encapsulates common fields across all RESTful handler methods.
|
||||
type RequestScope struct {
|
||||
Namer ScopeNamer
|
||||
|
||||
Serializer runtime.NegotiatedSerializer
|
||||
runtime.ParameterCodec
|
||||
|
||||
// StandardSerializers, if set, restricts which serializers can be used when
|
||||
// we aren't transforming the output (into Table or PartialObjectMetadata).
|
||||
// Used only by CRDs which do not yet support Protobuf.
|
||||
StandardSerializers []runtime.SerializerInfo
|
||||
|
||||
Creater runtime.ObjectCreater
|
||||
Convertor runtime.ObjectConvertor
|
||||
Defaulter runtime.ObjectDefaulter
|
||||
Typer runtime.ObjectTyper
|
||||
UnsafeConvertor runtime.ObjectConvertor
|
||||
Authorizer authorizer.Authorizer
|
||||
|
||||
EquivalentResourceMapper runtime.EquivalentResourceMapper
|
||||
|
||||
TableConvertor rest.TableConvertor
|
||||
FieldManager *managedfields.FieldManager
|
||||
|
||||
Resource schema.GroupVersionResource
|
||||
Kind schema.GroupVersionKind
|
||||
|
||||
// AcceptsGroupVersionDelegate is an optional delegate that can be queried about whether a given GVK
|
||||
// can be accepted in create or update requests. If nil, only scope.Kind is accepted.
|
||||
// Note that this does not enable multi-version support for reads from a single endpoint.
|
||||
AcceptsGroupVersionDelegate rest.GroupVersionAcceptor
|
||||
|
||||
Subresource string
|
||||
|
||||
MetaGroupVersion schema.GroupVersion
|
||||
|
||||
// HubGroupVersion indicates what version objects read from etcd or incoming requests should be converted to for in-memory handling.
|
||||
HubGroupVersion schema.GroupVersion
|
||||
|
||||
MaxRequestBodyBytes int64
|
||||
}
|
||||
|
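||||
// err writes the error to the response, negotiated against the scope's serializer and kind group-version.
|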
||||
func (scope *RequestScope) err(err error, w http.ResponseWriter, req *http.Request) {
|
||||
responsewriters.ErrorNegotiated(err, scope.Serializer, scope.Kind.GroupVersion(), w, req)
|
||||
}
|
||||
|
||||
// AcceptsGroupVersion returns true if the specified GroupVersion is allowed
|
||||
// in create and update requests.
|
||||
func (scope *RequestScope) AcceptsGroupVersion(gv schema.GroupVersion) bool {
|
||||
// If there's a custom acceptor, delegate to it. This is extremely rare.
|
||||
if scope.AcceptsGroupVersionDelegate != nil {
|
||||
return scope.AcceptsGroupVersionDelegate.AcceptsGroupVersion(gv)
|
||||
}
|
||||
// Fall back to only allowing the singular Kind. This is the typical behavior.
|
||||
return gv == scope.Kind.GroupVersion()
|
||||
}
|
||||
|
||||
func (scope *RequestScope) AllowsMediaTypeTransform(mimeType, mimeSubType string, gvk *schema.GroupVersionKind) bool {
|
||||
// some handlers like CRDs can't serve all the mime types that PartialObjectMetadata or Table can - if
|
||||
// gvk is nil (no conversion) allow StandardSerializers to further restrict the set of mime types.
|
||||
if gvk == nil {
|
||||
if len(scope.StandardSerializers) == 0 {
|
||||
return true
|
||||
}
|
||||
for _, info := range scope.StandardSerializers {
|
||||
if info.MediaTypeType == mimeType && info.MediaTypeSubType == mimeSubType {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// TODO: this is temporary, replace with an abstraction calculated at endpoint installation time
|
||||
if gvk.GroupVersion() == metav1beta1.SchemeGroupVersion || gvk.GroupVersion() == metav1.SchemeGroupVersion {
|
||||
switch gvk.Kind {
|
||||
case "Table":
|
||||
return scope.TableConvertor != nil &&
|
||||
mimeType == "application" &&
|
||||
(mimeSubType == "json" || mimeSubType == "yaml")
|
||||
case "PartialObjectMetadata", "PartialObjectMetadataList":
|
||||
// TODO: should delineate between lists and non-list endpoints
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (scope *RequestScope) AllowsServerVersion(version string) bool {
|
||||
return version == scope.MetaGroupVersion.Version
|
||||
}
|
||||
|
||||
func (scope *RequestScope) AllowsStreamSchema(s string) bool {
|
||||
return s == "watch"
|
||||
}
|
||||
|
||||
var _ admission.ObjectInterfaces = &RequestScope{}
|
||||
|
||||
func (r *RequestScope) GetObjectCreater() runtime.ObjectCreater { return r.Creater }
|
||||
func (r *RequestScope) GetObjectTyper() runtime.ObjectTyper { return r.Typer }
|
||||
func (r *RequestScope) GetObjectDefaulter() runtime.ObjectDefaulter { return r.Defaulter }
|
||||
func (r *RequestScope) GetObjectConvertor() runtime.ObjectConvertor { return r.Convertor }
|
||||
func (r *RequestScope) GetEquivalentResourceMapper() runtime.EquivalentResourceMapper {
|
||||
return r.EquivalentResourceMapper
|
||||
}
|
||||
|
||||
// ConnectResource returns a function that handles a connect request on a rest.Storage object.
|
||||
func ConnectResource(connecter rest.Connecter, scope *RequestScope, admit admission.Interface, restPath string, isSubresource bool) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, req *http.Request) {
|
||||
if isDryRun(req.URL) {
|
||||
scope.err(errors.NewBadRequest("dryRun is not supported"), w, req)
|
||||
return
|
||||
}
|
||||
|
||||
namespace, name, err := scope.Namer.Name(req)
|
||||
if err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
ctx := req.Context()
|
||||
ctx = request.WithNamespace(ctx, namespace)
|
||||
admit = admission.WithAudit(admit)
|
||||
|
||||
opts, subpath, subpathKey := connecter.NewConnectOptions()
|
||||
if err := getRequestOptions(req, scope, opts, subpath, subpathKey, isSubresource); err != nil {
|
||||
err = errors.NewBadRequest(err.Error())
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
if admit != nil && admit.Handles(admission.Connect) {
|
||||
userInfo, _ := request.UserFrom(ctx)
|
||||
// TODO: remove the mutating admission here as soon as we have ported all plugins that handle CONNECT
|
||||
if mutatingAdmission, ok := admit.(admission.MutationInterface); ok {
|
||||
err = mutatingAdmission.Admit(ctx, admission.NewAttributesRecord(opts, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, nil, false, userInfo), scope)
|
||||
if err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
}
|
||||
if validatingAdmission, ok := admit.(admission.ValidationInterface); ok {
|
||||
err = validatingAdmission.Validate(ctx, admission.NewAttributesRecord(opts, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Connect, nil, false, userInfo), scope)
|
||||
if err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
requestInfo, _ := request.RequestInfoFrom(ctx)
|
||||
metrics.RecordLongRunning(req, requestInfo, metrics.APIServerComponent, func() {
|
||||
handler, err := connecter.Connect(ctx, name, opts, &responder{scope: scope, req: req, w: w})
|
||||
if err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
handler.ServeHTTP(w, req)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// responder implements rest.Responder for assisting a connector in writing objects or errors.
|
||||
type responder struct {
|
||||
scope *RequestScope
|
||||
req *http.Request
|
||||
w http.ResponseWriter
|
||||
}
|
||||
|
||||
func (r *responder) Object(statusCode int, obj runtime.Object) {
|
||||
responsewriters.WriteObjectNegotiated(r.scope.Serializer, r.scope, r.scope.Kind.GroupVersion(), r.w, r.req, statusCode, obj, false)
|
||||
}
|
||||
|
||||
func (r *responder) Error(err error) {
|
||||
r.scope.err(err, r.w, r.req)
|
||||
}
|
||||
|
||||
// transformDecodeError adds additional information into a bad-request api error when a decode fails.
|
||||
func transformDecodeError(typer runtime.ObjectTyper, baseErr error, into runtime.Object, gvk *schema.GroupVersionKind, body []byte) error {
|
||||
objGVKs, _, err := typer.ObjectKinds(into)
|
||||
if err != nil {
|
||||
return errors.NewBadRequest(err.Error())
|
||||
}
|
||||
objGVK := objGVKs[0]
|
||||
if gvk != nil && len(gvk.Kind) > 0 {
|
||||
return errors.NewBadRequest(fmt.Sprintf("%s in version %q cannot be handled as a %s: %v", gvk.Kind, gvk.Version, objGVK.Kind, baseErr))
|
||||
}
|
||||
summary := summarizeData(body, 30)
|
||||
return errors.NewBadRequest(fmt.Sprintf("the object provided is unrecognized (must be of type %s): %v (%s)", objGVK.Kind, baseErr, summary))
|
||||
}
|
||||
|
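||||
// hasUID reports whether the object carries a non-empty metadata UID.
|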
||||
func hasUID(obj runtime.Object) (bool, error) {
|
||||
if obj == nil {
|
||||
return false, nil
|
||||
}
|
||||
accessor, err := meta.Accessor(obj)
|
||||
if err != nil {
|
||||
return false, errors.NewInternalError(err)
|
||||
}
|
||||
if len(accessor.GetUID()) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// checkName checks the provided name against the request
|
||||
func checkName(obj runtime.Object, name, namespace string, namer ScopeNamer) error {
|
||||
objNamespace, objName, err := namer.ObjectName(obj)
|
||||
if err != nil {
|
||||
return errors.NewBadRequest(fmt.Sprintf(
|
||||
"the name of the object (%s based on URL) was undeterminable: %v", name, err))
|
||||
}
|
||||
if objName != name {
|
||||
return errors.NewBadRequest(fmt.Sprintf(
|
||||
"the name of the object (%s) does not match the name on the URL (%s)", objName, name))
|
||||
}
|
||||
if len(namespace) > 0 {
|
||||
if len(objNamespace) > 0 && objNamespace != namespace {
|
||||
return errors.NewBadRequest(fmt.Sprintf(
|
||||
"the namespace of the object (%s) does not match the namespace on the request (%s)", objNamespace, namespace))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// dedupOwnerReferences dedups owner references over the entire entry.
|
||||
// NOTE: We don't know enough about the existing cases of owner references
|
||||
// sharing the same UID but different fields. Nor do we know what might break.
|
||||
// In the future we may just dedup/reject owner references with the same UID.
|
||||
func dedupOwnerReferences(refs []metav1.OwnerReference) ([]metav1.OwnerReference, []string) {
|
||||
var result []metav1.OwnerReference
|
||||
var duplicates []string
|
||||
seen := make(map[types.UID]struct{})
|
||||
for _, ref := range refs {
|
||||
_, ok := seen[ref.UID]
|
||||
// Short-circuit if we haven't seen the UID before. Otherwise
|
||||
// check the entire list we have so far.
|
||||
if !ok || !hasOwnerReference(result, ref) {
|
||||
seen[ref.UID] = struct{}{}
|
||||
result = append(result, ref)
|
||||
} else {
|
||||
duplicates = append(duplicates, string(ref.UID))
|
||||
}
|
||||
}
|
||||
return result, duplicates
|
||||
}
|
||||
|
||||
// hasOwnerReference returns true if refs has an item equal to ref. The function
|
||||
// focuses on semantic equality instead of memory equality, to catch duplicates
|
||||
// with different pointer addresses. The function uses apiequality.Semantic
|
||||
// instead of implementing its own comparison, to tolerate API changes to
|
||||
// metav1.OwnerReference.
|
||||
// NOTE: This is expensive, but we accept it because we've made sure it only
|
||||
// happens to owner references containing duplicate UIDs, plus typically the
|
||||
// number of items in the list should be small.
|
||||
func hasOwnerReference(refs []metav1.OwnerReference, ref metav1.OwnerReference) bool {
|
||||
for _, r := range refs {
|
||||
if apiequality.Semantic.DeepEqual(r, ref) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// dedupOwnerReferencesAndAddWarning dedups owner references in the object metadata.
|
||||
// If duplicates are found, the function records a warning to the provided context.
|
||||
func dedupOwnerReferencesAndAddWarning(obj runtime.Object, requestContext context.Context, afterMutatingAdmission bool) {
|
||||
accessor, err := meta.Accessor(obj)
|
||||
if err != nil {
|
||||
// The object doesn't have metadata. Nothing we need to do here.
|
||||
return
|
||||
}
|
||||
refs := accessor.GetOwnerReferences()
|
||||
deduped, duplicates := dedupOwnerReferences(refs)
|
||||
if len(duplicates) > 0 {
|
||||
// NOTE: For CREATE and UPDATE requests the API server dedups both before and after mutating admission.
|
||||
// For PATCH request the API server only dedups after mutating admission.
|
||||
format := DuplicateOwnerReferencesWarningFormat
|
||||
if afterMutatingAdmission {
|
||||
format = DuplicateOwnerReferencesAfterMutatingAdmissionWarningFormat
|
||||
}
|
||||
warning.AddWarning(requestContext, "", fmt.Sprintf(format,
|
||||
strings.Join(duplicates, ", ")))
|
||||
accessor.SetOwnerReferences(deduped)
|
||||
}
|
||||
}
|
||||
|
||||
func summarizeData(data []byte, maxLength int) string {
|
||||
switch {
|
||||
case len(data) == 0:
|
||||
return "<empty>"
|
||||
case data[0] == '{':
|
||||
if len(data) > maxLength {
|
||||
return string(data[:maxLength]) + " ..."
|
||||
}
|
||||
return string(data)
|
||||
default:
|
||||
if len(data) > maxLength {
|
||||
return hex.EncodeToString(data[:maxLength]) + " ..."
|
||||
}
|
||||
return hex.EncodeToString(data)
|
||||
}
|
||||
}
|
||||
|
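||||
// limitedReadBody reads the request body, returning a RequestEntityTooLarge error once more than
|
||||
// limit bytes have been read; a non-positive limit disables the check.
|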
||||
func limitedReadBody(req *http.Request, limit int64) ([]byte, error) {
|
||||
defer req.Body.Close()
|
||||
if limit <= 0 {
|
||||
return ioutil.ReadAll(req.Body)
|
||||
}
|
||||
lr := &io.LimitedReader{
|
||||
R: req.Body,
|
||||
N: limit + 1,
|
||||
}
|
||||
data, err := ioutil.ReadAll(lr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if lr.N <= 0 {
|
||||
return nil, errors.NewRequestEntityTooLargeError(fmt.Sprintf("limit is %d", limit))
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
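||||
// limitedReadBodyWithRecordMetric reads the body via limitedReadBody and, on success, records the
|
||||
// request body size metric for the resource group and verb.
|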
||||
func limitedReadBodyWithRecordMetric(ctx context.Context, req *http.Request, limit int64, resourceGroup string, verb requestmetrics.RequestBodyVerb) ([]byte, error) {
|
||||
readBody, err := limitedReadBody(req, limit)
|
||||
if err == nil {
|
||||
// only record if we've read successfully
|
||||
requestmetrics.RecordRequestBodySize(ctx, resourceGroup, verb, len(readBody))
|
||||
}
|
||||
return readBody, err
|
||||
}
|
||||
|
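||||
// isDryRun reports whether the request URL contains a dryRun query parameter.
|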
||||
func isDryRun(url *url.URL) bool {
|
||||
return len(url.Query()["dryRun"]) != 0
|
||||
}
|
||||
|
||||
// fieldValidation checks that the field validation feature is enabled
|
||||
// and returns a valid directive of either
|
||||
// - Ignore
|
||||
// - Warn (default)
|
||||
// - Strict
|
||||
func fieldValidation(directive string) string {
|
||||
if directive == "" {
|
||||
return metav1.FieldValidationWarn
|
||||
}
|
||||
return directive
|
||||
}
|
||||
|
||||
// parseYAMLWarnings takes the strict decoding errors from the yaml decoder's output
|
||||
// and parses each individual warning, or leaves the warning as-is if
|
||||
// it does not look like a yaml strict decoding error.
|
||||
func parseYAMLWarnings(errString string) []string {
|
||||
var trimmedString string
|
||||
if trimmedShortString := strings.TrimPrefix(errString, shortPrefix); len(trimmedShortString) < len(errString) {
|
||||
trimmedString = trimmedShortString
|
||||
} else if trimmedLongString := strings.TrimPrefix(errString, longPrefix); len(trimmedLongString) < len(errString) {
|
||||
trimmedString = trimmedLongString
|
||||
} else {
|
||||
// not a yaml error, return as-is
|
||||
return []string{errString}
|
||||
}
|
||||
|
||||
splitStrings := strings.Split(trimmedString, "\n")
|
||||
for i, s := range splitStrings {
|
||||
splitStrings[i] = strings.TrimSpace(s)
|
||||
}
|
||||
return splitStrings
|
||||
}
|
||||
|
||||
// addStrictDecodingWarnings confirms that the error is a strict decoding error
|
||||
// and if so adds a warning for each strict decoding violation.
|
||||
func addStrictDecodingWarnings(requestContext context.Context, errs []error) {
|
||||
for _, e := range errs {
|
||||
yamlWarnings := parseYAMLWarnings(e.Error())
|
||||
for _, w := range yamlWarnings {
|
||||
warning.AddWarning(requestContext, "", w)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
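||||
// etcdError describes errors surfaced directly by etcd.
|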
||||
type etcdError interface {
|
||||
Code() grpccodes.Code
|
||||
Error() string
|
||||
}
|
||||
|
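||||
// grpcError describes errors that carry a gRPC status.
|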
||||
type grpcError interface {
|
||||
GRPCStatus() *grpcstatus.Status
|
||||
}
|
||||
|
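||||
// isTooLargeError reports whether the error indicates the object exceeded the etcd request size
|
||||
// or gRPC message size limits.
|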
||||
func isTooLargeError(err error) bool {
|
||||
if err != nil {
|
||||
if etcdErr, ok := err.(etcdError); ok {
|
||||
if etcdErr.Code() == grpccodes.InvalidArgument && etcdErr.Error() == "etcdserver: request is too large" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if grpcErr, ok := err.(grpcError); ok {
|
||||
if grpcErr.GRPCStatus().Code() == grpccodes.ResourceExhausted && strings.Contains(grpcErr.GRPCStatus().Message(), "trying to send message larger than max") {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
42
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/trace_util.go
generated
vendored
Normal file
42
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/trace_util.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
)
|
||||
|
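||||
// traceFields returns the lazily evaluated request attributes (verb, resource, client, audit-id, ...)
|
||||
// attached to handler trace spans.
|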
||||
func traceFields(req *http.Request) []attribute.KeyValue {
|
||||
return []attribute.KeyValue{
|
||||
attribute.Stringer("accept", &lazyAccept{req: req}),
|
||||
attribute.Stringer("audit-id", &lazyAuditID{req: req}),
|
||||
attribute.Stringer("client", &lazyClientIP{req: req}),
|
||||
attribute.Stringer("api-group", &lazyAPIGroup{req: req}),
|
||||
attribute.Stringer("api-version", &lazyAPIVersion{req: req}),
|
||||
attribute.Stringer("name", &lazyName{req: req}),
|
||||
attribute.Stringer("subresource", &lazySubresource{req: req}),
|
||||
attribute.Stringer("namespace", &lazyNamespace{req: req}),
|
||||
attribute.String("protocol", req.Proto),
|
||||
attribute.Stringer("resource", &lazyResource{req: req}),
|
||||
attribute.Stringer("scope", &lazyScope{req: req}),
|
||||
attribute.String("url", req.URL.Path),
|
||||
attribute.Stringer("user-agent", &lazyTruncatedUserAgent{req: req}),
|
||||
attribute.Stringer("verb", &lazyVerb{req: req}),
|
||||
}
|
||||
}
|
296
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go
generated
vendored
Normal file
296
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go
generated
vendored
Normal file
@ -0,0 +1,296 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/validation"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apiserver/pkg/admission"
|
||||
"k8s.io/apiserver/pkg/audit"
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager"
|
||||
"k8s.io/apiserver/pkg/endpoints/handlers/finisher"
|
||||
requestmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics"
|
||||
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
|
||||
"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
|
||||
"k8s.io/apiserver/pkg/endpoints/request"
|
||||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
"k8s.io/apiserver/pkg/util/dryrun"
|
||||
"k8s.io/component-base/tracing"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// UpdateResource returns a function that will handle a resource update
|
||||
func UpdateResource(r rest.Updater, scope *RequestScope, admit admission.Interface) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, req *http.Request) {
|
||||
ctx := req.Context()
|
||||
// For performance tracking purposes.
|
||||
ctx, span := tracing.Start(ctx, "Update", traceFields(req)...)
|
||||
req = req.WithContext(ctx)
|
||||
defer span.End(500 * time.Millisecond)
|
||||
|
||||
namespace, name, err := scope.Namer.Name(req)
|
||||
if err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
|
||||
// enforce a timeout of at most requestTimeoutUpperBound (34s) or less if the user-provided
|
||||
// timeout inside the parent context is lower than requestTimeoutUpperBound.
|
||||
ctx, cancel := context.WithTimeout(ctx, requestTimeoutUpperBound)
|
||||
defer cancel()
|
||||
|
||||
ctx = request.WithNamespace(ctx, namespace)
|
||||
|
||||
outputMediaType, _, err := negotiation.NegotiateOutputMediaType(req, scope.Serializer, scope)
|
||||
if err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
|
||||
body, err := limitedReadBodyWithRecordMetric(ctx, req, scope.MaxRequestBodyBytes, scope.Resource.GroupResource().String(), requestmetrics.Update)
|
||||
if err != nil {
|
||||
span.AddEvent("limitedReadBody failed", attribute.Int("len", len(body)), attribute.String("err", err.Error()))
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
span.AddEvent("limitedReadBody succeeded", attribute.Int("len", len(body)))
|
||||
|
||||
options := &metav1.UpdateOptions{}
|
||||
if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), scope.MetaGroupVersion, options); err != nil {
|
||||
err = errors.NewBadRequest(err.Error())
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
if errs := validation.ValidateUpdateOptions(options); len(errs) > 0 {
|
||||
err := errors.NewInvalid(schema.GroupKind{Group: metav1.GroupName, Kind: "UpdateOptions"}, "", errs)
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
options.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("UpdateOptions"))
|
||||
|
||||
s, err := negotiation.NegotiateInputSerializer(req, false, scope.Serializer)
|
||||
if err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
defaultGVK := scope.Kind
|
||||
original := r.New()
|
||||
|
||||
validationDirective := fieldValidation(options.FieldValidation)
|
||||
decodeSerializer := s.Serializer
|
||||
if validationDirective == metav1.FieldValidationWarn || validationDirective == metav1.FieldValidationStrict {
|
||||
decodeSerializer = s.StrictSerializer
|
||||
}
|
||||
|
||||
decoder := scope.Serializer.DecoderToVersion(decodeSerializer, scope.HubGroupVersion)
|
||||
span.AddEvent("About to convert to expected version")
|
||||
obj, gvk, err := decoder.Decode(body, &defaultGVK, original)
|
||||
if err != nil {
|
||||
strictError, isStrictError := runtime.AsStrictDecodingError(err)
|
||||
switch {
|
||||
case isStrictError && obj != nil && validationDirective == metav1.FieldValidationWarn:
|
||||
addStrictDecodingWarnings(req.Context(), strictError.Errors())
|
||||
case isStrictError && validationDirective == metav1.FieldValidationIgnore:
|
||||
klog.Warningf("unexpected strict error when field validation is set to ignore")
|
||||
fallthrough
|
||||
default:
|
||||
err = transformDecodeError(scope.Typer, err, original, gvk, body)
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
objGV := gvk.GroupVersion()
|
||||
if !scope.AcceptsGroupVersion(objGV) {
|
||||
err = errors.NewBadRequest(fmt.Sprintf("the API version in the data (%s) does not match the expected API version (%s)", objGV, defaultGVK.GroupVersion()))
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
span.AddEvent("Conversion done")
|
||||
|
||||
audit.LogRequestObject(req.Context(), obj, objGV, scope.Resource, scope.Subresource, scope.Serializer)
|
||||
admit = admission.WithAudit(admit)
|
||||
|
||||
// if this object supports namespace info
|
||||
if objectMeta, err := meta.Accessor(obj); err == nil {
|
||||
// ensure namespace on the object is correct, or error if a conflicting namespace was set in the object
|
||||
if err := rest.EnsureObjectNamespaceMatchesRequestNamespace(rest.ExpectedNamespaceForResource(namespace, scope.Resource), objectMeta); err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err := checkName(obj, name, namespace, scope.Namer); err != nil {
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
|
||||
userInfo, _ := request.UserFrom(ctx)
|
||||
transformers := []rest.TransformFunc{}
|
||||
|
||||
// allows skipping managedFields update if the resulting object is too big
|
||||
shouldUpdateManagedFields := true
|
||||
admit = fieldmanager.NewManagedFieldsValidatingAdmissionController(admit)
|
||||
transformers = append(transformers, func(_ context.Context, newObj, liveObj runtime.Object) (runtime.Object, error) {
|
||||
if shouldUpdateManagedFields {
|
||||
return scope.FieldManager.UpdateNoErrors(liveObj, newObj, managerOrUserAgent(options.FieldManager, req.UserAgent())), nil
|
||||
}
|
||||
return newObj, nil
|
||||
})
|
||||
|
||||
if mutatingAdmission, ok := admit.(admission.MutationInterface); ok {
|
||||
transformers = append(transformers, func(ctx context.Context, newObj, oldObj runtime.Object) (runtime.Object, error) {
|
||||
isNotZeroObject, err := hasUID(oldObj)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unexpected error when extracting UID from oldObj: %v", err.Error())
|
||||
} else if !isNotZeroObject {
|
||||
if mutatingAdmission.Handles(admission.Create) {
|
||||
return newObj, mutatingAdmission.Admit(ctx, admission.NewAttributesRecord(newObj, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, updateToCreateOptions(options), dryrun.IsDryRun(options.DryRun), userInfo), scope)
|
||||
}
|
||||
} else {
|
||||
if mutatingAdmission.Handles(admission.Update) {
|
||||
return newObj, mutatingAdmission.Admit(ctx, admission.NewAttributesRecord(newObj, oldObj, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, options, dryrun.IsDryRun(options.DryRun), userInfo), scope)
|
||||
}
|
||||
}
|
||||
return newObj, nil
|
||||
})
|
||||
transformers = append(transformers, func(ctx context.Context, newObj, oldObj runtime.Object) (runtime.Object, error) {
|
||||
// Dedup owner references again after mutating admission happens
|
||||
dedupOwnerReferencesAndAddWarning(newObj, req.Context(), true)
|
||||
return newObj, nil
|
||||
})
|
||||
}
|
||||
|
||||
createAuthorizerAttributes := authorizer.AttributesRecord{
|
||||
User: userInfo,
|
||||
ResourceRequest: true,
|
||||
Path: req.URL.Path,
|
||||
Verb: "create",
|
||||
APIGroup: scope.Resource.Group,
|
||||
APIVersion: scope.Resource.Version,
|
||||
Resource: scope.Resource.Resource,
|
||||
Subresource: scope.Subresource,
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
}
|
||||
|
||||
span.AddEvent("About to store object in database")
|
||||
wasCreated := false
|
||||
requestFunc := func() (runtime.Object, error) {
|
||||
obj, created, err := r.Update(
|
||||
ctx,
|
||||
name,
|
||||
rest.DefaultUpdatedObjectInfo(obj, transformers...),
|
||||
withAuthorization(rest.AdmissionToValidateObjectFunc(
|
||||
admit,
|
||||
admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Create, updateToCreateOptions(options), dryrun.IsDryRun(options.DryRun), userInfo), scope),
|
||||
scope.Authorizer, createAuthorizerAttributes),
|
||||
rest.AdmissionToValidateObjectUpdateFunc(
|
||||
admit,
|
||||
admission.NewAttributesRecord(nil, nil, scope.Kind, namespace, name, scope.Resource, scope.Subresource, admission.Update, options, dryrun.IsDryRun(options.DryRun), userInfo), scope),
|
||||
false,
|
||||
options,
|
||||
)
|
||||
wasCreated = created
|
||||
return obj, err
|
||||
}
|
||||
// Dedup owner references before updating managed fields
|
||||
dedupOwnerReferencesAndAddWarning(obj, req.Context(), false)
|
||||
result, err := finisher.FinishRequest(ctx, func() (runtime.Object, error) {
|
||||
result, err := requestFunc()
|
||||
// If the object wasn't committed to storage because its serialized size was too large,
|
||||
// it is safe to remove managedFields (which can be large) and try again.
|
||||
if isTooLargeError(err) {
|
||||
if accessor, accessorErr := meta.Accessor(obj); accessorErr == nil {
|
||||
accessor.SetManagedFields(nil)
|
||||
shouldUpdateManagedFields = false
|
||||
result, err = requestFunc()
|
||||
}
|
||||
}
|
||||
return result, err
|
||||
})
|
||||
if err != nil {
|
||||
span.AddEvent("Write to database call failed", attribute.Int("len", len(body)), attribute.String("err", err.Error()))
|
||||
scope.err(err, w, req)
|
||||
return
|
||||
}
|
||||
span.AddEvent("Write to database call succeeded", attribute.Int("len", len(body)))
|
||||
|
||||
status := http.StatusOK
|
||||
if wasCreated {
|
||||
status = http.StatusCreated
|
||||
}
|
||||
|
||||
span.AddEvent("About to write a response")
|
||||
defer span.AddEvent("Writing http response done")
|
||||
transformResponseObject(ctx, scope, req, w, status, outputMediaType, result)
|
||||
}
|
||||
}
|
||||
|
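||||
// withAuthorization wraps a ValidateObjectFunc so it only runs after the authorizer (evaluated once
|
||||
// and cached) allows the create-on-update; otherwise a forbidden or internal error is returned.
|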
||||
func withAuthorization(validate rest.ValidateObjectFunc, a authorizer.Authorizer, attributes authorizer.Attributes) rest.ValidateObjectFunc {
|
||||
var once sync.Once
|
||||
var authorizerDecision authorizer.Decision
|
||||
var authorizerReason string
|
||||
var authorizerErr error
|
||||
return func(ctx context.Context, obj runtime.Object) error {
|
||||
if a == nil {
|
||||
return errors.NewInternalError(fmt.Errorf("no authorizer provided, unable to authorize a create on update"))
|
||||
}
|
||||
once.Do(func() {
|
||||
authorizerDecision, authorizerReason, authorizerErr = a.Authorize(ctx, attributes)
|
||||
})
|
||||
// an authorizer like RBAC could encounter evaluation errors and still allow the request, so the authorizer decision is checked before the error here.
|
||||
if authorizerDecision == authorizer.DecisionAllow {
|
||||
// Continue to validating admission
|
||||
return validate(ctx, obj)
|
||||
}
|
||||
if authorizerErr != nil {
|
||||
return errors.NewInternalError(authorizerErr)
|
||||
}
|
||||
|
||||
// The user is not authorized to perform this action, so we need to build the error response
|
||||
return responsewriters.ForbiddenStatusError(attributes, authorizerReason)
|
||||
}
|
||||
}
|
||||
|
||||
// updateToCreateOptions creates a CreateOptions with the same field values as the provided UpdateOptions.
|
||||
func updateToCreateOptions(uo *metav1.UpdateOptions) *metav1.CreateOptions {
|
||||
if uo == nil {
|
||||
return nil
|
||||
}
|
||||
co := &metav1.CreateOptions{
|
||||
DryRun: uo.DryRun,
|
||||
FieldManager: uo.FieldManager,
|
||||
FieldValidation: uo.FieldValidation,
|
||||
}
|
||||
co.TypeMeta.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("CreateOptions"))
|
||||
return co
|
||||
}
|
386
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go
generated
vendored
Normal file
386
e2e/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go
generated
vendored
Normal file
@ -0,0 +1,386 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/websocket"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/httpstream/wsstream"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
|
||||
"k8s.io/apiserver/pkg/endpoints/metrics"
|
||||
apirequest "k8s.io/apiserver/pkg/endpoints/request"
|
||||
"k8s.io/apiserver/pkg/features"
|
||||
"k8s.io/apiserver/pkg/storage"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
)
|
||||
|
||||
// nothing will ever be sent down this channel
|
||||
var neverExitWatch <-chan time.Time = make(chan time.Time)
|
||||
|
||||
// timeoutFactory abstracts watch timeout logic for testing
|
||||
type TimeoutFactory interface {
|
||||
TimeoutCh() (<-chan time.Time, func() bool)
|
||||
}
|
||||
|
||||
// realTimeoutFactory implements TimeoutFactory
|
||||
type realTimeoutFactory struct {
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// TimeoutCh returns a channel which will receive something when the watch times out,
|
||||
// and a cleanup function to call when this happens.
|
||||
func (w *realTimeoutFactory) TimeoutCh() (<-chan time.Time, func() bool) {
|
||||
if w.timeout == 0 {
|
||||
return neverExitWatch, func() bool { return false }
|
||||
}
|
||||
t := time.NewTimer(w.timeout)
|
||||
return t.C, t.Stop
|
||||
}
|
||||
|
||||
// serveWatchHandler returns a handler that serves a watch response.
|
||||
// TODO: the functionality in this method and in WatchServer.Serve is not cleanly decoupled.
|
||||
func serveWatchHandler(watcher watch.Interface, scope *RequestScope, mediaTypeOptions negotiation.MediaTypeOptions, req *http.Request, w http.ResponseWriter, timeout time.Duration, metricsScope string, initialEventsListBlueprint runtime.Object) (http.Handler, error) {
|
||||
options, err := optionsForTransform(mediaTypeOptions, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// negotiate for the stream serializer from the scope's serializer
|
||||
serializer, err := negotiation.NegotiateOutputMediaTypeStream(req, scope.Serializer, scope)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
framer := serializer.StreamSerializer.Framer
|
||||
var encoder runtime.Encoder
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) {
|
||||
encoder = scope.Serializer.EncoderForVersion(runtime.UseNondeterministicEncoding(serializer.StreamSerializer.Serializer), scope.Kind.GroupVersion())
|
||||
} else {
|
||||
encoder = scope.Serializer.EncoderForVersion(serializer.StreamSerializer.Serializer, scope.Kind.GroupVersion())
|
||||
}
|
||||
useTextFraming := serializer.EncodesAsText
|
||||
if framer == nil {
|
||||
return nil, fmt.Errorf("no framer defined for %q available for embedded encoding", serializer.MediaType)
|
||||
}
|
||||
// TODO: next step, get back mediaTypeOptions from negotiate and return the exact value here
|
||||
mediaType := serializer.MediaType
|
||||
switch mediaType {
|
||||
case runtime.ContentTypeJSON:
|
||||
// as-is
|
||||
case runtime.ContentTypeCBOR:
|
||||
// If a client indicated it accepts application/cbor (exactly one data item) on a
|
||||
// watch request, set the conformant application/cbor-seq media type for the watch
|
||||
// response. RFC 9110 allows an origin server to deviate from the indicated
|
||||
// preference rather than send a 406 (Not Acceptable) response (see
|
||||
// https://www.rfc-editor.org/rfc/rfc9110.html#section-12.1-5).
|
||||
mediaType = runtime.ContentTypeCBORSequence
|
||||
default:
|
||||
mediaType += ";stream=watch"
|
||||
}
|
||||
|
||||
ctx := req.Context()
|
||||
|
||||
// locate the appropriate embedded encoder based on the transform
|
||||
var negotiatedEncoder runtime.Encoder
|
||||
contentKind, contentSerializer, transform := targetEncodingForTransform(scope, mediaTypeOptions, req)
|
||||
if transform {
|
||||
info, ok := runtime.SerializerInfoForMediaType(contentSerializer.SupportedMediaTypes(), serializer.MediaType)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("no encoder for %q exists in the requested target %#v", serializer.MediaType, contentSerializer)
|
||||
}
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) {
|
||||
negotiatedEncoder = contentSerializer.EncoderForVersion(runtime.UseNondeterministicEncoding(info.Serializer), contentKind.GroupVersion())
|
||||
} else {
|
||||
negotiatedEncoder = contentSerializer.EncoderForVersion(info.Serializer, contentKind.GroupVersion())
|
||||
}
|
||||
} else {
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.CBORServingAndStorage) {
|
||||
negotiatedEncoder = scope.Serializer.EncoderForVersion(runtime.UseNondeterministicEncoding(serializer.Serializer), contentKind.GroupVersion())
|
||||
} else {
|
||||
negotiatedEncoder = scope.Serializer.EncoderForVersion(serializer.Serializer, contentKind.GroupVersion())
|
||||
}
|
||||
}
|
||||
|
||||
var memoryAllocator runtime.MemoryAllocator
|
||||
|
||||
if encoderWithAllocator, supportsAllocator := negotiatedEncoder.(runtime.EncoderWithAllocator); supportsAllocator {
|
||||
// don't put the allocator inside the embeddedEncodeFn as that would allocate memory on every call.
|
||||
// instead, we allocate the buffer for the entire watch session and release it when we close the connection.
|
||||
memoryAllocator = runtime.AllocatorPool.Get().(*runtime.Allocator)
|
||||
negotiatedEncoder = runtime.NewEncoderWithAllocator(encoderWithAllocator, memoryAllocator)
|
||||
}
|
||||
var tableOptions *metav1.TableOptions
|
||||
if options != nil {
|
||||
if passedOptions, ok := options.(*metav1.TableOptions); ok {
|
||||
tableOptions = passedOptions
|
||||
} else {
|
||||
return nil, fmt.Errorf("unexpected options type: %T", options)
|
||||
}
|
||||
}
|
||||
embeddedEncoder := newWatchEmbeddedEncoder(ctx, negotiatedEncoder, mediaTypeOptions.Convert, tableOptions, scope)
|
||||
|
||||
if encoderWithAllocator, supportsAllocator := encoder.(runtime.EncoderWithAllocator); supportsAllocator {
|
||||
if memoryAllocator == nil {
|
||||
// don't put the allocator inside the embeddedEncodeFn as that would allocate memory on every call.
|
||||
// instead, we allocate the buffer for the entire watch session and release it when we close the connection.
|
||||
memoryAllocator = runtime.AllocatorPool.Get().(*runtime.Allocator)
|
||||
}
|
||||
encoder = runtime.NewEncoderWithAllocator(encoderWithAllocator, memoryAllocator)
|
||||
}
|
||||
|
||||
var serverShuttingDownCh <-chan struct{}
|
||||
if signals := apirequest.ServerShutdownSignalFrom(req.Context()); signals != nil {
|
||||
serverShuttingDownCh = signals.ShuttingDown()
|
||||
}
|
||||
|
||||
server := &WatchServer{
|
||||
Watching: watcher,
|
||||
Scope: scope,
|
||||
|
||||
UseTextFraming: useTextFraming,
|
||||
MediaType: mediaType,
|
||||
Framer: framer,
|
||||
Encoder: encoder,
|
||||
EmbeddedEncoder: embeddedEncoder,
|
||||
|
||||
watchListTransformerFn: newWatchListTransformer(initialEventsListBlueprint, mediaTypeOptions.Convert, negotiatedEncoder).transform,
|
||||
|
||||
MemoryAllocator: memoryAllocator,
|
||||
TimeoutFactory: &realTimeoutFactory{timeout},
|
||||
ServerShuttingDownCh: serverShuttingDownCh,
|
||||
|
||||
metricsScope: metricsScope,
|
||||
}
|
||||
|
||||
if wsstream.IsWebSocketRequest(req) {
|
||||
w.Header().Set("Content-Type", server.MediaType)
|
||||
return websocket.Handler(server.HandleWS), nil
|
||||
}
|
||||
return http.HandlerFunc(server.HandleHTTP), nil
|
||||
}
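// Illustrative usage sketch (not part of the vendored file; variable names are
// assumptions): a caller that has already negotiated the output media type and
// obtained a watcher would typically wire the returned handler up like this:
//
//	handler, err := serveWatchHandler(watcher, scope, mediaTypeOptions, req, w, timeout, metricsScope, nil)
//	if err != nil {
//		scope.err(err, w, req)
//		return
//	}
//	handler.ServeHTTP(w, req)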
|
||||
|
||||
// WatchServer serves a watch.Interface over a websocket or vanilla HTTP.
|
||||
type WatchServer struct {
|
||||
Watching watch.Interface
|
||||
Scope *RequestScope
|
||||
|
||||
// true if websocket messages should use text framing (as opposed to binary framing)
|
||||
UseTextFraming bool
|
||||
// the media type this watch is being served with
|
||||
MediaType string
|
||||
// used to frame the watch stream
|
||||
Framer runtime.Framer
|
||||
// used to encode the watch stream event itself
|
||||
Encoder runtime.Encoder
|
||||
// used to encode the nested object in the watch stream
|
||||
EmbeddedEncoder runtime.Encoder
|
||||
// watchListTransformerFn a function applied
|
||||
// to watchlist bookmark events that transforms
|
||||
// the embedded object before sending it to a client.
|
||||
watchListTransformerFn watchListTransformerFunction
|
||||
|
||||
MemoryAllocator runtime.MemoryAllocator
|
||||
TimeoutFactory TimeoutFactory
|
||||
ServerShuttingDownCh <-chan struct{}
|
||||
|
||||
metricsScope string
|
||||
}
|
||||
|
||||
// HandleHTTP serves a series of encoded events via HTTP with Transfer-Encoding: chunked.
// Websocket connections are handled separately by HandleWS.
|
||||
func (s *WatchServer) HandleHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
defer func() {
|
||||
if s.MemoryAllocator != nil {
|
||||
runtime.AllocatorPool.Put(s.MemoryAllocator)
|
||||
}
|
||||
}()
|
||||
|
||||
flusher, ok := w.(http.Flusher)
|
||||
if !ok {
|
||||
err := fmt.Errorf("unable to start watch - can't get http.Flusher: %#v", w)
|
||||
utilruntime.HandleError(err)
|
||||
s.Scope.err(errors.NewInternalError(err), w, req)
|
||||
return
|
||||
}
|
||||
|
||||
framer := s.Framer.NewFrameWriter(w)
|
||||
if framer == nil {
|
||||
// programmer error
|
||||
err := fmt.Errorf("no stream framing support is available for media type %q", s.MediaType)
|
||||
utilruntime.HandleError(err)
|
||||
s.Scope.err(errors.NewBadRequest(err.Error()), w, req)
|
||||
return
|
||||
}
|
||||
|
||||
// ensure the connection times out
|
||||
timeoutCh, cleanup := s.TimeoutFactory.TimeoutCh()
|
||||
defer cleanup()
|
||||
|
||||
// begin the stream
|
||||
w.Header().Set("Content-Type", s.MediaType)
|
||||
w.Header().Set("Transfer-Encoding", "chunked")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
flusher.Flush()
|
||||
|
||||
kind := s.Scope.Kind
|
||||
watchEncoder := newWatchEncoder(req.Context(), kind, s.EmbeddedEncoder, s.Encoder, framer, s.watchListTransformerFn)
|
||||
ch := s.Watching.ResultChan()
|
||||
done := req.Context().Done()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-s.ServerShuttingDownCh:
|
||||
// the server has signaled that it is shutting down (not accepting
|
||||
// any new request), all active watch request(s) should return
|
||||
// immediately here. The WithWatchTerminationDuringShutdown server
|
||||
// filter will ensure that the response to the client is rate
|
||||
// limited in order to avoid any thundering herd issue when the
|
||||
// client(s) try to reestablish the WATCH on the other
|
||||
// available apiserver instance(s).
|
||||
return
|
||||
case <-done:
|
||||
return
|
||||
case <-timeoutCh:
|
||||
return
|
||||
case event, ok := <-ch:
|
||||
if !ok {
|
||||
// End of results.
|
||||
return
|
||||
}
|
||||
metrics.WatchEvents.WithContext(req.Context()).WithLabelValues(kind.Group, kind.Version, kind.Kind).Inc()
|
||||
isWatchListLatencyRecordingRequired := shouldRecordWatchListLatency(event)
|
||||
|
||||
if err := watchEncoder.Encode(event); err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
// client disconnect.
|
||||
return
|
||||
}
|
||||
|
||||
if len(ch) == 0 {
|
||||
flusher.Flush()
|
||||
}
|
||||
if isWatchListLatencyRecordingRequired {
|
||||
metrics.RecordWatchListLatency(req.Context(), s.Scope.Resource, s.metricsScope)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
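// For reference (illustrative, not emitted verbatim by this file): with the
// default JSON stream serializer every chunk written through watchEncoder is a
// newline-delimited watch event envelope such as
//
//	{"type":"ADDED","object":{...}}
//
// where the envelope is produced by Encoder and the nested "object" by
// EmbeddedEncoder.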
|
||||
|
||||
// HandleWS serves a series of encoded events over a websocket connection.
|
||||
func (s *WatchServer) HandleWS(ws *websocket.Conn) {
|
||||
defer func() {
|
||||
if s.MemoryAllocator != nil {
|
||||
runtime.AllocatorPool.Put(s.MemoryAllocator)
|
||||
}
|
||||
}()
|
||||
|
||||
defer ws.Close()
|
||||
done := make(chan struct{})
|
||||
// ensure the connection times out
|
||||
timeoutCh, cleanup := s.TimeoutFactory.TimeoutCh()
|
||||
defer cleanup()
|
||||
|
||||
go func() {
|
||||
defer utilruntime.HandleCrash()
|
||||
// This blocks until the connection is closed.
|
||||
// Client should not send anything.
|
||||
wsstream.IgnoreReceives(ws, 0)
|
||||
// Once the client closes, we should also close
|
||||
close(done)
|
||||
}()
|
||||
|
||||
framer := newWebsocketFramer(ws, s.UseTextFraming)
|
||||
|
||||
kind := s.Scope.Kind
|
||||
watchEncoder := newWatchEncoder(context.TODO(), kind, s.EmbeddedEncoder, s.Encoder, framer, s.watchListTransformerFn)
|
||||
ch := s.Watching.ResultChan()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-done:
|
||||
return
|
||||
case <-timeoutCh:
|
||||
return
|
||||
case event, ok := <-ch:
|
||||
if !ok {
|
||||
// End of results.
|
||||
return
|
||||
}
|
||||
|
||||
if err := watchEncoder.Encode(event); err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
// client disconnect.
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type websocketFramer struct {
|
||||
ws *websocket.Conn
|
||||
useTextFraming bool
|
||||
}
|
||||
|
||||
func newWebsocketFramer(ws *websocket.Conn, useTextFraming bool) io.Writer {
|
||||
return &websocketFramer{
|
||||
ws: ws,
|
||||
useTextFraming: useTextFraming,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *websocketFramer) Write(p []byte) (int, error) {
|
||||
if w.useTextFraming {
|
||||
// bytes.Buffer::String() has a special handling of nil value, but given
|
||||
// we're writing serialized watch events, this will never happen here.
|
||||
if err := websocket.Message.Send(w.ws, string(p)); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return len(p), nil
|
||||
}
|
||||
if err := websocket.Message.Send(w.ws, p); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
var _ io.Writer = &websocketFramer{}
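// Illustrative sketch (assumption: ws is an established *websocket.Conn): each
// Write call sends exactly one websocket message, so an encoder that writes one
// serialized watch event per Write produces one event per frame.
//
//	f := newWebsocketFramer(ws, true) // text framing
//	_, _ = f.Write([]byte(`{"type":"ADDED","object":{}}`))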
|
||||
|
||||
func shouldRecordWatchListLatency(event watch.Event) bool {
|
||||
if event.Type != watch.Bookmark || !utilfeature.DefaultFeatureGate.Enabled(features.WatchList) {
|
||||
return false
|
||||
}
|
||||
// as of today the initial-events-end annotation is added only to a single event
|
||||
// by the watch cache and only when certain conditions are met
|
||||
//
|
||||
// for more please read https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list
|
||||
hasAnnotation, err := storage.HasInitialEventsEndBookmarkAnnotation(event.Object)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("unable to determine if the obj has the required annotation for measuring watchlist latency, obj %T: %v", event.Object, err))
|
||||
return false
|
||||
}
|
||||
return hasAnnotation
|
||||
}
|
1349
e2e/vendor/k8s.io/apiserver/pkg/endpoints/installer.go
generated
vendored
Normal file
File diff suppressed because it is too large
6
e2e/vendor/k8s.io/apiserver/pkg/endpoints/metrics/OWNERS
generated
vendored
Normal file
@ -0,0 +1,6 @@
# See the OWNERS docs at https://go.k8s.io/owners

reviewers:
- wojtek-t
approvers:
- logicalhan
957
e2e/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go
generated
vendored
Normal file
@ -0,0 +1,957 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
restful "github.com/emicklei/go-restful/v3"
|
||||
|
||||
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/validation"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
utilsets "k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apiserver/pkg/audit"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
"k8s.io/apiserver/pkg/endpoints/request"
|
||||
"k8s.io/apiserver/pkg/endpoints/responsewriter"
|
||||
compbasemetrics "k8s.io/component-base/metrics"
|
||||
"k8s.io/component-base/metrics/legacyregistry"
|
||||
)
|
||||
|
||||
// resettableCollector is the interface implemented by prometheus.MetricVec
|
||||
// that can be used by Prometheus to collect metrics and reset their values.
|
||||
type resettableCollector interface {
|
||||
compbasemetrics.Registerable
|
||||
Reset()
|
||||
}
|
||||
|
||||
const (
|
||||
APIServerComponent string = "apiserver"
|
||||
OtherRequestMethod string = "other"
|
||||
)
|
||||
|
||||
/*
|
||||
* By default, all the following metrics are defined as falling under
|
||||
* ALPHA stability level (https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/1209-metrics-stability/kubernetes-control-plane-metrics-stability.md#stability-classes)
|
||||
*
|
||||
* Promoting the stability level of the metric is a responsibility of the component owner, since it
|
||||
* involves explicitly acknowledging support for the metric across multiple releases, in accordance with
|
||||
* the metric stability policy.
|
||||
*/
|
||||
var (
|
||||
deprecatedRequestGauge = compbasemetrics.NewGaugeVec(
|
||||
&compbasemetrics.GaugeOpts{
|
||||
Subsystem: APIServerComponent,
|
||||
Name: "requested_deprecated_apis",
|
||||
Help: "Gauge of deprecated APIs that have been requested, broken out by API group, version, resource, subresource, and removed_release.",
|
||||
StabilityLevel: compbasemetrics.STABLE,
|
||||
},
|
||||
[]string{"group", "version", "resource", "subresource", "removed_release"},
|
||||
)
|
||||
|
||||
// TODO(a-robinson): Add unit tests for the handling of these metrics once
|
||||
// the upstream library supports it.
|
||||
requestCounter = compbasemetrics.NewCounterVec(
|
||||
&compbasemetrics.CounterOpts{
|
||||
Subsystem: APIServerComponent,
|
||||
Name: "request_total",
|
||||
Help: "Counter of apiserver requests broken out for each verb, dry run value, group, version, resource, scope, component, and HTTP response code.",
|
||||
StabilityLevel: compbasemetrics.STABLE,
|
||||
},
|
||||
[]string{"verb", "dry_run", "group", "version", "resource", "subresource", "scope", "component", "code"},
|
||||
)
|
||||
longRunningRequestsGauge = compbasemetrics.NewGaugeVec(
|
||||
&compbasemetrics.GaugeOpts{
|
||||
Subsystem: APIServerComponent,
|
||||
Name: "longrunning_requests",
|
||||
Help: "Gauge of all active long-running apiserver requests broken out by verb, group, version, resource, scope and component. Not all requests are tracked this way.",
|
||||
StabilityLevel: compbasemetrics.STABLE,
|
||||
},
|
||||
[]string{"verb", "group", "version", "resource", "subresource", "scope", "component"},
|
||||
)
|
||||
requestLatencies = compbasemetrics.NewHistogramVec(
|
||||
&compbasemetrics.HistogramOpts{
|
||||
Subsystem: APIServerComponent,
|
||||
Name: "request_duration_seconds",
|
||||
Help: "Response latency distribution in seconds for each verb, dry run value, group, version, resource, subresource, scope and component.",
|
||||
// This metric is used for verifying api call latencies SLO,
|
||||
// as well as tracking regressions in this aspect.
// Thus we customize buckets significantly, to empower both use cases.
|
||||
Buckets: []float64{0.005, 0.025, 0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1.0, 1.25, 1.5, 2, 3,
|
||||
4, 5, 6, 8, 10, 15, 20, 30, 45, 60},
|
||||
StabilityLevel: compbasemetrics.STABLE,
|
||||
},
|
||||
[]string{"verb", "dry_run", "group", "version", "resource", "subresource", "scope", "component"},
|
||||
)
|
||||
requestSloLatencies = compbasemetrics.NewHistogramVec(
|
||||
&compbasemetrics.HistogramOpts{
|
||||
Subsystem: APIServerComponent,
|
||||
Name: "request_slo_duration_seconds",
|
||||
Help: "Response latency distribution (not counting webhook duration and priority & fairness queue wait times) in seconds for each verb, group, version, resource, subresource, scope and component.",
|
||||
// This metric is supplementary to the requestLatencies metric.
|
||||
// It measures request duration excluding webhooks as they are mostly
|
||||
// dependent on user configuration.
|
||||
Buckets: []float64{0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1.0, 1.25, 1.5, 2, 3,
|
||||
4, 5, 6, 8, 10, 15, 20, 30, 45, 60},
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
DeprecatedVersion: "1.27.0",
|
||||
},
|
||||
[]string{"verb", "group", "version", "resource", "subresource", "scope", "component"},
|
||||
)
|
||||
requestSliLatencies = compbasemetrics.NewHistogramVec(
|
||||
&compbasemetrics.HistogramOpts{
|
||||
Subsystem: APIServerComponent,
|
||||
Name: "request_sli_duration_seconds",
|
||||
Help: "Response latency distribution (not counting webhook duration and priority & fairness queue wait times) in seconds for each verb, group, version, resource, subresource, scope and component.",
|
||||
// This metric is supplementary to the requestLatencies metric.
|
||||
// It measures request duration excluding webhooks as they are mostly
|
||||
// dependent on user configuration.
|
||||
Buckets: []float64{0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1.0, 1.25, 1.5, 2, 3,
|
||||
4, 5, 6, 8, 10, 15, 20, 30, 45, 60},
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{"verb", "group", "version", "resource", "subresource", "scope", "component"},
|
||||
)
|
||||
fieldValidationRequestLatencies = compbasemetrics.NewHistogramVec(
|
||||
&compbasemetrics.HistogramOpts{
|
||||
Name: "field_validation_request_duration_seconds",
|
||||
Help: "Response latency distribution in seconds for each field validation value",
|
||||
// This metric is supplementary to the requestLatencies metric.
|
||||
// It measures request durations for the various field validation
|
||||
// values.
|
||||
Buckets: []float64{0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1.0, 1.25, 1.5, 2, 3,
|
||||
4, 5, 6, 8, 10, 15, 20, 30, 45, 60},
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{"field_validation"},
|
||||
)
|
||||
responseSizes = compbasemetrics.NewHistogramVec(
|
||||
&compbasemetrics.HistogramOpts{
|
||||
Subsystem: APIServerComponent,
|
||||
Name: "response_sizes",
|
||||
Help: "Response size distribution in bytes for each group, version, verb, resource, subresource, scope and component.",
|
||||
// Use buckets ranging from 1000 bytes (1KB) to 10^9 bytes (1GB).
|
||||
Buckets: compbasemetrics.ExponentialBuckets(1000, 10.0, 7),
|
||||
StabilityLevel: compbasemetrics.STABLE,
|
||||
},
|
||||
[]string{"verb", "group", "version", "resource", "subresource", "scope", "component"},
|
||||
)
|
||||
// TLSHandshakeErrors is a number of requests dropped with 'TLS handshake error from' error
|
||||
TLSHandshakeErrors = compbasemetrics.NewCounter(
|
||||
&compbasemetrics.CounterOpts{
|
||||
Subsystem: APIServerComponent,
|
||||
Name: "tls_handshake_errors_total",
|
||||
Help: "Number of requests dropped with 'TLS handshake error from' error",
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
)
|
||||
WatchEvents = compbasemetrics.NewCounterVec(
|
||||
&compbasemetrics.CounterOpts{
|
||||
Subsystem: APIServerComponent,
|
||||
Name: "watch_events_total",
|
||||
Help: "Number of events sent in watch clients",
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{"group", "version", "kind"},
|
||||
)
|
||||
WatchEventsSizes = compbasemetrics.NewHistogramVec(
|
||||
&compbasemetrics.HistogramOpts{
|
||||
Subsystem: APIServerComponent,
|
||||
Name: "watch_events_sizes",
|
||||
Help: "Watch event size distribution in bytes",
|
||||
Buckets: compbasemetrics.ExponentialBuckets(1024, 2.0, 8), // 1K, 2K, 4K, 8K, ..., 128K.
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{"group", "version", "kind"},
|
||||
)
|
||||
// Because of the volatility of the base metric, this is a pre-aggregated one. Instead of reporting current usage all the time
|
||||
// it reports maximal usage during the last second.
|
||||
currentInflightRequests = compbasemetrics.NewGaugeVec(
|
||||
&compbasemetrics.GaugeOpts{
|
||||
Subsystem: APIServerComponent,
|
||||
Name: "current_inflight_requests",
|
||||
Help: "Maximal number of currently used inflight request limit of this apiserver per request kind in last second.",
|
||||
StabilityLevel: compbasemetrics.STABLE,
|
||||
},
|
||||
[]string{"request_kind"},
|
||||
)
|
||||
currentInqueueRequests = compbasemetrics.NewGaugeVec(
|
||||
&compbasemetrics.GaugeOpts{
|
||||
Subsystem: APIServerComponent,
|
||||
Name: "current_inqueue_requests",
|
||||
Help: "Maximal number of queued requests in this apiserver per request kind in last second.",
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{"request_kind"},
|
||||
)
|
||||
|
||||
requestTerminationsTotal = compbasemetrics.NewCounterVec(
|
||||
&compbasemetrics.CounterOpts{
|
||||
Subsystem: APIServerComponent,
|
||||
Name: "request_terminations_total",
|
||||
Help: "Number of requests which apiserver terminated in self-defense.",
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{"verb", "group", "version", "resource", "subresource", "scope", "component", "code"},
|
||||
)
|
||||
|
||||
apiSelfRequestCounter = compbasemetrics.NewCounterVec(
|
||||
&compbasemetrics.CounterOpts{
|
||||
Subsystem: APIServerComponent,
|
||||
Name: "selfrequest_total",
|
||||
Help: "Counter of apiserver self-requests broken out for each verb, API resource and subresource.",
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{"verb", "resource", "subresource"},
|
||||
)
|
||||
|
||||
requestFilterDuration = compbasemetrics.NewHistogramVec(
|
||||
&compbasemetrics.HistogramOpts{
|
||||
Subsystem: APIServerComponent,
|
||||
Name: "request_filter_duration_seconds",
|
||||
Help: "Request filter latency distribution in seconds, for each filter type",
|
||||
Buckets: []float64{0.0001, 0.0003, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1.0, 5.0, 10.0, 15.0, 30.0},
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{"filter"},
|
||||
)
|
||||
|
||||
// requestAbortsTotal is a number of aborted requests with http.ErrAbortHandler
|
||||
requestAbortsTotal = compbasemetrics.NewCounterVec(
|
||||
&compbasemetrics.CounterOpts{
|
||||
Subsystem: APIServerComponent,
|
||||
Name: "request_aborts_total",
|
||||
Help: "Number of requests which apiserver aborted possibly due to a timeout, for each group, version, verb, resource, subresource and scope",
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{"verb", "group", "version", "resource", "subresource", "scope"},
|
||||
)
|
||||
|
||||
// requestPostTimeoutTotal tracks the activity of the executing request handler after the associated request
|
||||
// has been timed out by the apiserver.
|
||||
// source: the name of the handler that is recording this metric. Currently, we have two:
|
||||
// - timeout-handler: the "executing" handler returns after the timeout filter times out the request.
|
||||
// - rest-handler: the "executing" handler returns after the rest layer times out the request.
|
||||
// status: whether the handler panicked or threw an error, possible values:
|
||||
// - 'panic': the handler panicked
|
||||
// - 'error': the handler returned an error
|
||||
// - 'ok': the handler returned a result (no error and no panic)
|
||||
// - 'pending': the handler is still running in the background and it did not return
|
||||
// within the wait threshold.
|
||||
requestPostTimeoutTotal = compbasemetrics.NewCounterVec(
|
||||
&compbasemetrics.CounterOpts{
|
||||
Subsystem: APIServerComponent,
|
||||
Name: "request_post_timeout_total",
|
||||
Help: "Tracks the activity of the request handlers after the associated requests have been timed out by the apiserver",
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{"source", "status"},
|
||||
)
|
||||
|
||||
requestTimestampComparisonDuration = compbasemetrics.NewHistogramVec(
|
||||
&compbasemetrics.HistogramOpts{
|
||||
Subsystem: APIServerComponent,
|
||||
Name: "request_timestamp_comparison_time",
|
||||
Help: "Time taken for comparison of old vs new objects in UPDATE or PATCH requests",
|
||||
Buckets: []float64{0.0001, 0.0003, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1.0, 5.0},
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
// Path the code takes to reach a conclusion:
|
||||
// i.e. unequalObjectsFast, unequalObjectsSlow, equalObjectsSlow
|
||||
[]string{"code_path"},
|
||||
)
|
||||
|
||||
watchListLatencies = compbasemetrics.NewHistogramVec(
|
||||
&compbasemetrics.HistogramOpts{
|
||||
Subsystem: APIServerComponent,
|
||||
Name: "watch_list_duration_seconds",
|
||||
Help: "Response latency distribution in seconds for watch list requests broken by group, version, resource and scope.",
|
||||
Buckets: []float64{0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1.0, 2, 4, 6, 8, 10, 15, 20, 30, 45, 60},
|
||||
StabilityLevel: compbasemetrics.ALPHA,
|
||||
},
|
||||
[]string{"group", "version", "resource", "scope"},
|
||||
)
|
||||
|
||||
metrics = []resettableCollector{
|
||||
deprecatedRequestGauge,
|
||||
requestCounter,
|
||||
longRunningRequestsGauge,
|
||||
requestLatencies,
|
||||
requestSloLatencies,
|
||||
requestSliLatencies,
|
||||
fieldValidationRequestLatencies,
|
||||
responseSizes,
|
||||
TLSHandshakeErrors,
|
||||
WatchEvents,
|
||||
WatchEventsSizes,
|
||||
currentInflightRequests,
|
||||
currentInqueueRequests,
|
||||
requestTerminationsTotal,
|
||||
apiSelfRequestCounter,
|
||||
requestFilterDuration,
|
||||
requestAbortsTotal,
|
||||
requestPostTimeoutTotal,
|
||||
requestTimestampComparisonDuration,
|
||||
watchListLatencies,
|
||||
}
|
||||
|
||||
// these are the valid request methods which we report in our metrics. Any other request methods
|
||||
// will be aggregated under 'unknown'
|
||||
validRequestMethods = utilsets.NewString(
|
||||
"APPLY",
|
||||
"CONNECT",
|
||||
"CREATE",
|
||||
"DELETE",
|
||||
"DELETECOLLECTION",
|
||||
"GET",
|
||||
"LIST",
|
||||
"PATCH",
|
||||
"POST",
|
||||
"PROXY",
|
||||
"PUT",
|
||||
"UPDATE",
|
||||
"WATCH",
|
||||
"WATCHLIST")
|
||||
|
||||
// These are the valid connect requests which we report in our metrics.
|
||||
validConnectRequests = utilsets.NewString(
|
||||
"log",
|
||||
"exec",
|
||||
"portforward",
|
||||
"attach",
|
||||
"proxy")
|
||||
)
|
||||
|
||||
const (
|
||||
// ReadOnlyKind is a string identifying read only request kind
|
||||
ReadOnlyKind = "readOnly"
|
||||
// MutatingKind is a string identifying mutating request kind
|
||||
MutatingKind = "mutating"
|
||||
|
||||
// WaitingPhase is the phase value for a request waiting in a queue
|
||||
WaitingPhase = "waiting"
|
||||
// ExecutingPhase is the phase value for an executing request
|
||||
ExecutingPhase = "executing"
|
||||
)
|
||||
|
||||
const (
|
||||
// deprecatedAnnotationKey is a key for an audit annotation set to
|
||||
// "true" on requests made to deprecated API versions
|
||||
deprecatedAnnotationKey = "k8s.io/deprecated"
|
||||
// removedReleaseAnnotationKey is a key for an audit annotation set to
|
||||
// the target removal release, in "<major>.<minor>" format,
|
||||
// on requests made to deprecated API versions with a target removal release
|
||||
removedReleaseAnnotationKey = "k8s.io/removed-release"
|
||||
)
|
||||
|
||||
const (
|
||||
// The source that is recording the apiserver_request_post_timeout_total metric.
|
||||
// The "executing" request handler returns after the timeout filter times out the request.
|
||||
PostTimeoutSourceTimeoutHandler = "timeout-handler"
|
||||
|
||||
// The source that is recording the apiserver_request_post_timeout_total metric.
|
||||
// The "executing" request handler returns after the rest layer times out the request.
|
||||
PostTimeoutSourceRestHandler = "rest-handler"
|
||||
)
|
||||
|
||||
const (
|
||||
// The executing request handler panicked after the request had
|
||||
// been timed out by the apiserver.
|
||||
PostTimeoutHandlerPanic = "panic"
|
||||
|
||||
// The executing request handler has returned an error to the post-timeout
|
||||
// receiver after the request had been timed out by the apiserver.
|
||||
PostTimeoutHandlerError = "error"
|
||||
|
||||
// The executing request handler has returned a result to the post-timeout
|
||||
// receiver after the request had been timed out by the apiserver.
|
||||
PostTimeoutHandlerOK = "ok"
|
||||
|
||||
// The executing request handler has not panicked or returned any error/result to
|
||||
// the post-timeout receiver yet after the request had been timed out by the apiserver.
|
||||
// The post-timeout receiver gives up after waiting for certain threshold and if the
|
||||
// executing request handler has not returned yet we use the following label.
|
||||
PostTimeoutHandlerPending = "pending"
|
||||
)
|
||||
|
||||
var registerMetrics sync.Once
|
||||
|
||||
// Register all metrics.
|
||||
func Register() {
|
||||
registerMetrics.Do(func() {
|
||||
for _, metric := range metrics {
|
||||
legacyregistry.MustRegister(metric)
|
||||
}
|
||||
})
|
||||
}
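// Illustrative usage sketch (not part of the vendored file): callers register
// these collectors once, typically during server start-up; repeated calls are
// harmless because of the sync.Once guard.
//
//	import "k8s.io/apiserver/pkg/endpoints/metrics"
//
//	func init() {
//		metrics.Register()
//	}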
|
||||
|
||||
// Reset all metrics.
|
||||
func Reset() {
|
||||
for _, metric := range metrics {
|
||||
metric.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
// ResetLabelAllowLists resets the label allow lists for all metrics.
|
||||
// NOTE: This is only used for testing.
|
||||
func ResetLabelAllowLists() {
|
||||
for _, metric := range metrics {
|
||||
if counterVec, ok := metric.(*compbasemetrics.CounterVec); ok {
|
||||
counterVec.ResetLabelAllowLists()
|
||||
continue
|
||||
}
|
||||
if gaugeVec, ok := metric.(*compbasemetrics.GaugeVec); ok {
|
||||
gaugeVec.ResetLabelAllowLists()
|
||||
continue
|
||||
}
|
||||
if histogramVec, ok := metric.(*compbasemetrics.HistogramVec); ok {
|
||||
histogramVec.ResetLabelAllowLists()
|
||||
continue
|
||||
}
|
||||
if summaryVec, ok := metric.(*compbasemetrics.SummaryVec); ok {
|
||||
summaryVec.ResetLabelAllowLists()
|
||||
continue
|
||||
}
|
||||
if timingHistogramVec, ok := metric.(*compbasemetrics.TimingHistogramVec); ok {
|
||||
timingHistogramVec.ResetLabelAllowLists()
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateInflightRequestMetrics reports concurrency metrics classified by
|
||||
// mutating vs Readonly.
|
||||
func UpdateInflightRequestMetrics(phase string, nonmutating, mutating int) {
|
||||
for _, kc := range []struct {
|
||||
kind string
|
||||
count int
|
||||
}{{ReadOnlyKind, nonmutating}, {MutatingKind, mutating}} {
|
||||
if phase == ExecutingPhase {
|
||||
currentInflightRequests.WithLabelValues(kc.kind).Set(float64(kc.count))
|
||||
} else {
|
||||
currentInqueueRequests.WithLabelValues(kc.kind).Set(float64(kc.count))
|
||||
}
|
||||
}
|
||||
}
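// Illustrative example (a hypothetical snapshot, not taken from this file):
// report that 12 read-only and 3 mutating requests are currently executing.
//
//	UpdateInflightRequestMetrics(ExecutingPhase, 12, 3)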
|
||||
|
||||
func RecordFilterLatency(ctx context.Context, name string, elapsed time.Duration) {
|
||||
requestFilterDuration.WithContext(ctx).WithLabelValues(name).Observe(elapsed.Seconds())
|
||||
}
|
||||
|
||||
func RecordTimestampComparisonLatency(codePath string, elapsed time.Duration) {
|
||||
requestTimestampComparisonDuration.WithLabelValues(codePath).Observe(elapsed.Seconds())
|
||||
}
|
||||
|
||||
func RecordRequestPostTimeout(source string, status string) {
|
||||
requestPostTimeoutTotal.WithLabelValues(source, status).Inc()
|
||||
}
|
||||
|
||||
// RecordRequestAbort records that the request was aborted possibly due to a timeout.
|
||||
func RecordRequestAbort(req *http.Request, requestInfo *request.RequestInfo) {
|
||||
if requestInfo == nil {
|
||||
requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path}
|
||||
}
|
||||
|
||||
scope := CleanScope(requestInfo)
|
||||
reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), "", req, requestInfo)
|
||||
resource := requestInfo.Resource
|
||||
subresource := requestInfo.Subresource
|
||||
group := requestInfo.APIGroup
|
||||
version := requestInfo.APIVersion
|
||||
|
||||
requestAbortsTotal.WithContext(req.Context()).WithLabelValues(reportedVerb, group, version, resource, subresource, scope).Inc()
|
||||
}
|
||||
|
||||
// RecordDroppedRequest records that the request was rejected via http.TooManyRequests.
|
||||
func RecordDroppedRequest(req *http.Request, requestInfo *request.RequestInfo, component string, isMutatingRequest bool) {
|
||||
if requestInfo == nil {
|
||||
requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path}
|
||||
}
|
||||
scope := CleanScope(requestInfo)
|
||||
dryRun := cleanDryRun(req.URL)
|
||||
|
||||
// We don't use verb from <requestInfo>, as this may be propagated from
|
||||
// InstrumentRouteFunc which is registered in installer.go with predefined
|
||||
// list of verbs (different than those translated to RequestInfo).
|
||||
// However, we need to tweak it e.g. to differentiate GET from LIST.
|
||||
reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), "", req, requestInfo)
|
||||
|
||||
if requestInfo.IsResourceRequest {
|
||||
requestCounter.WithContext(req.Context()).WithLabelValues(reportedVerb, dryRun, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, codeToString(http.StatusTooManyRequests)).Inc()
|
||||
} else {
|
||||
requestCounter.WithContext(req.Context()).WithLabelValues(reportedVerb, dryRun, "", "", "", requestInfo.Subresource, scope, component, codeToString(http.StatusTooManyRequests)).Inc()
|
||||
}
|
||||
}
|
||||
|
||||
// RecordRequestTermination records that the request was terminated early as part of a resource
|
||||
// preservation or apiserver self-defense mechanism (e.g. timeouts, maxinflight throttling,
|
||||
// proxyHandler errors). RecordRequestTermination should only be called zero or one times
|
||||
// per request.
|
||||
func RecordRequestTermination(req *http.Request, requestInfo *request.RequestInfo, component string, code int) {
|
||||
if requestInfo == nil {
|
||||
requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path}
|
||||
}
|
||||
scope := CleanScope(requestInfo)
|
||||
|
||||
// We don't use verb from <requestInfo>, as this may be propagated from
|
||||
// InstrumentRouteFunc which is registered in installer.go with predefined
|
||||
// list of verbs (different than those translated to RequestInfo).
|
||||
// However, we need to tweak it e.g. to differentiate GET from LIST.
|
||||
reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), "", req, requestInfo)
|
||||
|
||||
if requestInfo.IsResourceRequest {
|
||||
requestTerminationsTotal.WithContext(req.Context()).WithLabelValues(reportedVerb, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, codeToString(code)).Inc()
|
||||
} else {
|
||||
requestTerminationsTotal.WithContext(req.Context()).WithLabelValues(reportedVerb, "", "", "", requestInfo.Path, scope, component, codeToString(code)).Inc()
|
||||
}
|
||||
}
|
||||
|
||||
// RecordLongRunning tracks the execution of a long running request against the API server. It provides an accurate count
|
||||
// of the total number of open long running requests. requestInfo may be nil if the caller is not in the normal request flow.
|
||||
func RecordLongRunning(req *http.Request, requestInfo *request.RequestInfo, component string, fn func()) {
|
||||
if requestInfo == nil {
|
||||
requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path}
|
||||
}
|
||||
var g compbasemetrics.GaugeMetric
|
||||
scope := CleanScope(requestInfo)
|
||||
|
||||
// We don't use verb from <requestInfo>, as this may be propagated from
|
||||
// InstrumentRouteFunc which is registered in installer.go with predefined
|
||||
// list of verbs (different than those translated to RequestInfo).
|
||||
// However, we need to tweak it e.g. to differentiate GET from LIST.
|
||||
reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), "", req, requestInfo)
|
||||
|
||||
if requestInfo.IsResourceRequest {
|
||||
g = longRunningRequestsGauge.WithContext(req.Context()).WithLabelValues(reportedVerb, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component)
|
||||
} else {
|
||||
g = longRunningRequestsGauge.WithContext(req.Context()).WithLabelValues(reportedVerb, "", "", "", requestInfo.Path, scope, component)
|
||||
}
|
||||
g.Inc()
|
||||
defer g.Dec()
|
||||
fn()
|
||||
}
|
||||
|
||||
// RecordWatchListLatency simply records response latency for watch list requests.
|
||||
func RecordWatchListLatency(ctx context.Context, gvr schema.GroupVersionResource, metricsScope string) {
|
||||
requestReceivedTimestamp, ok := request.ReceivedTimestampFrom(ctx)
|
||||
if !ok {
|
||||
utilruntime.HandleError(fmt.Errorf("unable to measure watchlist latency because no received ts found in the ctx, gvr: %s", gvr))
|
||||
return
|
||||
}
|
||||
elapsedSeconds := time.Since(requestReceivedTimestamp).Seconds()
|
||||
|
||||
watchListLatencies.WithContext(ctx).WithLabelValues(gvr.Group, gvr.Version, gvr.Resource, metricsScope).Observe(elapsedSeconds)
|
||||
}
|
||||
|
||||
// MonitorRequest handles standard transformations for client and the reported verb and then invokes Monitor to record
|
||||
// a request. verb must be uppercase to be backwards compatible with existing monitoring tooling.
|
||||
func MonitorRequest(req *http.Request, verb, group, version, resource, subresource, scope, component string, deprecated bool, removedRelease string, httpCode, respSize int, elapsed time.Duration) {
|
||||
requestInfo, ok := request.RequestInfoFrom(req.Context())
|
||||
if !ok || requestInfo == nil {
|
||||
requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path}
|
||||
}
|
||||
// We don't use verb from <requestInfo>, as this may be propagated from
|
||||
// InstrumentRouteFunc which is registered in installer.go with predefined
|
||||
// list of verbs (different than those translated to RequestInfo).
|
||||
// However, we need to tweak it e.g. to differentiate GET from LIST.
|
||||
reportedVerb := cleanVerb(CanonicalVerb(strings.ToUpper(req.Method), scope), verb, req, requestInfo)
|
||||
|
||||
dryRun := cleanDryRun(req.URL)
|
||||
elapsedSeconds := elapsed.Seconds()
|
||||
requestCounter.WithContext(req.Context()).WithLabelValues(reportedVerb, dryRun, group, version, resource, subresource, scope, component, codeToString(httpCode)).Inc()
|
||||
// MonitorRequest happens after authentication, so we can trust the username given by the request
|
||||
info, ok := request.UserFrom(req.Context())
|
||||
if ok && info.GetName() == user.APIServerUser {
|
||||
apiSelfRequestCounter.WithContext(req.Context()).WithLabelValues(reportedVerb, resource, subresource).Inc()
|
||||
}
|
||||
if deprecated {
|
||||
deprecatedRequestGauge.WithContext(req.Context()).WithLabelValues(group, version, resource, subresource, removedRelease).Set(1)
|
||||
audit.AddAuditAnnotation(req.Context(), deprecatedAnnotationKey, "true")
|
||||
if len(removedRelease) > 0 {
|
||||
audit.AddAuditAnnotation(req.Context(), removedReleaseAnnotationKey, removedRelease)
|
||||
}
|
||||
}
|
||||
requestLatencies.WithContext(req.Context()).WithLabelValues(reportedVerb, dryRun, group, version, resource, subresource, scope, component).Observe(elapsedSeconds)
|
||||
fieldValidation := cleanFieldValidation(req.URL)
|
||||
fieldValidationRequestLatencies.WithContext(req.Context()).WithLabelValues(fieldValidation)
|
||||
|
||||
if wd, ok := request.LatencyTrackersFrom(req.Context()); ok {
|
||||
sliLatency := elapsedSeconds - (wd.MutatingWebhookTracker.GetLatency() + wd.ValidatingWebhookTracker.GetLatency() + wd.APFQueueWaitTracker.GetLatency()).Seconds()
|
||||
requestSloLatencies.WithContext(req.Context()).WithLabelValues(reportedVerb, group, version, resource, subresource, scope, component).Observe(sliLatency)
|
||||
requestSliLatencies.WithContext(req.Context()).WithLabelValues(reportedVerb, group, version, resource, subresource, scope, component).Observe(sliLatency)
|
||||
}
|
||||
// We are only interested in response sizes of read requests.
|
||||
if verb == "GET" || verb == "LIST" {
|
||||
responseSizes.WithContext(req.Context()).WithLabelValues(reportedVerb, group, version, resource, subresource, scope, component).Observe(float64(respSize))
|
||||
}
|
||||
}
|
||||
|
||||
// InstrumentRouteFunc works like Prometheus' InstrumentHandlerFunc but wraps
|
||||
// the go-restful RouteFunction instead of a HandlerFunc plus some Kubernetes endpoint specific information.
|
||||
func InstrumentRouteFunc(verb, group, version, resource, subresource, scope, component string, deprecated bool, removedRelease string, routeFunc restful.RouteFunction) restful.RouteFunction {
|
||||
return restful.RouteFunction(func(req *restful.Request, response *restful.Response) {
|
||||
requestReceivedTimestamp, ok := request.ReceivedTimestampFrom(req.Request.Context())
|
||||
if !ok {
|
||||
requestReceivedTimestamp = time.Now()
|
||||
}
|
||||
|
||||
delegate := &ResponseWriterDelegator{ResponseWriter: response.ResponseWriter}
|
||||
|
||||
rw := responsewriter.WrapForHTTP1Or2(delegate)
|
||||
response.ResponseWriter = rw
|
||||
|
||||
routeFunc(req, response)
|
||||
|
||||
MonitorRequest(req.Request, verb, group, version, resource, subresource, scope, component, deprecated, removedRelease, delegate.Status(), delegate.ContentLength(), time.Since(requestReceivedTimestamp))
|
||||
})
|
||||
}
|
||||
|
||||
// InstrumentHandlerFunc works like Prometheus' InstrumentHandlerFunc but adds some Kubernetes endpoint specific information.
|
||||
func InstrumentHandlerFunc(verb, group, version, resource, subresource, scope, component string, deprecated bool, removedRelease string, handler http.HandlerFunc) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, req *http.Request) {
|
||||
requestReceivedTimestamp, ok := request.ReceivedTimestampFrom(req.Context())
|
||||
if !ok {
|
||||
requestReceivedTimestamp = time.Now()
|
||||
}
|
||||
|
||||
delegate := &ResponseWriterDelegator{ResponseWriter: w}
|
||||
w = responsewriter.WrapForHTTP1Or2(delegate)
|
||||
|
||||
handler(w, req)
|
||||
|
||||
MonitorRequest(req, verb, group, version, resource, subresource, scope, component, deprecated, removedRelease, delegate.Status(), delegate.ContentLength(), time.Since(requestReceivedTimestamp))
|
||||
}
|
||||
}
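// Illustrative usage sketch (mux and listDeployments are assumptions, not part
// of this file): wrapping a plain http.HandlerFunc so its requests are counted
// and timed under the standard apiserver labels.
//
//	handler := InstrumentHandlerFunc("LIST", "apps", "v1", "deployments", "", "cluster", "apiserver", false, "", listDeployments)
//	mux.HandleFunc("/apis/apps/v1/deployments", handler)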
|
||||
|
||||
// NormalizedVerb returns normalized verb
|
||||
func NormalizedVerb(req *http.Request) string {
|
||||
verb := req.Method
|
||||
requestInfo, ok := request.RequestInfoFrom(req.Context())
|
||||
if ok {
|
||||
// If we can find a requestInfo, we can get a scope, and then
|
||||
// we can convert GETs to LISTs when needed.
|
||||
scope := CleanScope(requestInfo)
|
||||
verb = CanonicalVerb(strings.ToUpper(verb), scope)
|
||||
}
|
||||
|
||||
// mark APPLY requests, WATCH requests and CONNECT requests correctly.
|
||||
return CleanVerb(verb, req, requestInfo)
|
||||
}
|
||||
|
||||
// CleanScope returns the scope of the request.
|
||||
func CleanScope(requestInfo *request.RequestInfo) string {
|
||||
if requestInfo.Name != "" || requestInfo.Verb == "create" {
|
||||
return "resource"
|
||||
}
|
||||
if requestInfo.Namespace != "" {
|
||||
return "namespace"
|
||||
}
|
||||
if requestInfo.IsResourceRequest {
|
||||
return "cluster"
|
||||
}
|
||||
// this is the empty scope
|
||||
return ""
|
||||
}
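// Illustrative examples of the mapping (derived from the checks above):
//
//	CleanScope(&request.RequestInfo{IsResourceRequest: true, Name: "foo"})          // "resource"
//	CleanScope(&request.RequestInfo{IsResourceRequest: true, Namespace: "default"}) // "namespace"
//	CleanScope(&request.RequestInfo{IsResourceRequest: true})                       // "cluster"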
|
||||
|
||||
// CleanListScope computes the request scope for metrics.
|
||||
//
|
||||
// Note that normally we would use CleanScope for computation.
|
||||
// But due to the same reasons mentioned in determineRequestNamespaceAndName we cannot.
|
||||
func CleanListScope(ctx context.Context, opts *metainternalversion.ListOptions) string {
|
||||
namespace, name := determineRequestNamespaceAndName(ctx, opts)
|
||||
if len(name) > 0 {
|
||||
return "resource"
|
||||
}
|
||||
if len(namespace) > 0 {
|
||||
return "namespace"
|
||||
}
|
||||
if requestInfo, ok := request.RequestInfoFrom(ctx); ok {
|
||||
if requestInfo.IsResourceRequest {
|
||||
return "cluster"
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// CanonicalVerb distinguishes LISTs from GETs (and HEADs). It assumes verb is
|
||||
// UPPERCASE.
|
||||
func CanonicalVerb(verb string, scope string) string {
|
||||
switch verb {
|
||||
case "GET", "HEAD":
|
||||
if scope != "resource" && scope != "" {
|
||||
return "LIST"
|
||||
}
|
||||
return "GET"
|
||||
default:
|
||||
return verb
|
||||
}
|
||||
}
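// Illustrative examples:
//
//	CanonicalVerb("GET", "namespace") // "LIST" (collection read)
//	CanonicalVerb("GET", "resource")  // "GET"  (single-object read)
//	CanonicalVerb("GET", "")          // "GET"  (non-resource request)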
|
||||
|
||||
// CleanVerb returns a normalized verb, so that it is easy to tell WATCH from
|
||||
// LIST, APPLY from PATCH and CONNECT from others.
|
||||
func CleanVerb(verb string, request *http.Request, requestInfo *request.RequestInfo) string {
|
||||
reportedVerb := verb
|
||||
if suggestedVerb := getVerbIfWatch(request); suggestedVerb == "WATCH" {
|
||||
reportedVerb = "WATCH"
|
||||
}
|
||||
// normalize the legacy WATCHLIST to WATCH to ensure users aren't surprised by metrics
|
||||
if verb == "WATCHLIST" {
|
||||
reportedVerb = "WATCH"
|
||||
}
|
||||
if verb == "PATCH" && request.Header.Get("Content-Type") == string(types.ApplyPatchType) {
|
||||
reportedVerb = "APPLY"
|
||||
}
|
||||
if requestInfo != nil && requestInfo.IsResourceRequest && len(requestInfo.Subresource) > 0 && validConnectRequests.Has(requestInfo.Subresource) {
|
||||
reportedVerb = "CONNECT"
|
||||
}
|
||||
return reportedVerb
|
||||
}
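// Illustrative example (req and requestInfo are assumptions): a PATCH carrying
// the server-side-apply content type is reported as APPLY.
//
//	req.Header.Set("Content-Type", string(types.ApplyPatchType))
//	CleanVerb("PATCH", req, requestInfo) // "APPLY"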
|
||||
|
||||
// determineRequestNamespaceAndName computes name and namespace for the given requests
|
||||
//
|
||||
// note that the logic of this function was copy&pasted from cacher.go
|
||||
// after an unsuccessful attempt of moving it to RequestInfo
|
||||
//
|
||||
// see: https://github.com/kubernetes/kubernetes/pull/120520
|
||||
func determineRequestNamespaceAndName(ctx context.Context, opts *metainternalversion.ListOptions) (namespace, name string) {
|
||||
if requestNamespace, ok := request.NamespaceFrom(ctx); ok && len(requestNamespace) > 0 {
|
||||
namespace = requestNamespace
|
||||
} else if opts != nil && opts.FieldSelector != nil {
|
||||
if selectorNamespace, ok := opts.FieldSelector.RequiresExactMatch("metadata.namespace"); ok {
|
||||
namespace = selectorNamespace
|
||||
}
|
||||
}
|
||||
if requestInfo, ok := request.RequestInfoFrom(ctx); ok && requestInfo != nil && len(requestInfo.Name) > 0 {
|
||||
name = requestInfo.Name
|
||||
} else if opts != nil && opts.FieldSelector != nil {
|
||||
if selectorName, ok := opts.FieldSelector.RequiresExactMatch("metadata.name"); ok {
|
||||
name = selectorName
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// cleanVerb additionally ensures that unknown verbs don't clog up the metrics.
|
||||
func cleanVerb(verb, suggestedVerb string, request *http.Request, requestInfo *request.RequestInfo) string {
|
||||
// CanonicalVerb (being an input for this function) doesn't handle correctly the
|
||||
// deprecated path pattern for watch of:
|
||||
// GET /api/{version}/watch/{resource}
|
||||
// We correct it manually based on the pass verb from the installer.
|
||||
if suggestedVerb == "WATCH" || suggestedVerb == "WATCHLIST" {
|
||||
return "WATCH"
|
||||
}
|
||||
reportedVerb := CleanVerb(verb, request, requestInfo)
|
||||
if validRequestMethods.Has(reportedVerb) {
|
||||
return reportedVerb
|
||||
}
|
||||
return OtherRequestMethod
|
||||
}
|
||||
|
||||
// getVerbIfWatch additionally ensures that a GET or LIST with the watch query parameter set is transformed to WATCH
|
||||
func getVerbIfWatch(req *http.Request) string {
|
||||
if strings.ToUpper(req.Method) == "GET" || strings.ToUpper(req.Method) == "LIST" {
|
||||
// see apimachinery/pkg/runtime/conversion.go Convert_Slice_string_To_bool
|
||||
if values := req.URL.Query()["watch"]; len(values) > 0 {
|
||||
if value := strings.ToLower(values[0]); value != "0" && value != "false" {
|
||||
return "WATCH"
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func cleanDryRun(u *url.URL) string {
|
||||
// avoid allocating when we don't see dryRun in the query
|
||||
if !strings.Contains(u.RawQuery, "dryRun") {
|
||||
return ""
|
||||
}
|
||||
dryRun := u.Query()["dryRun"]
|
||||
if errs := validation.ValidateDryRun(nil, dryRun); len(errs) > 0 {
|
||||
return "invalid"
|
||||
}
|
||||
// Since dryRun could be valid with any arbitrarily long length
|
||||
// we have to dedup and sort the elements before joining them together
|
||||
// TODO: this is a fairly large allocation for what it does, consider
|
||||
// a sort and dedup in a single pass
|
||||
return strings.Join(utilsets.NewString(dryRun...).List(), ",")
|
||||
}
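// Illustrative example:
//
//	u, _ := url.Parse("/api/v1/pods?dryRun=All&dryRun=All")
//	cleanDryRun(u) // "All" (duplicates are deduplicated and the result sorted)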
|
||||
|
||||
func cleanFieldValidation(u *url.URL) string {
|
||||
// avoid allocating when we don't see fieldValidation in the query
|
||||
if !strings.Contains(u.RawQuery, "fieldValidation") {
|
||||
return ""
|
||||
}
|
||||
fieldValidation := u.Query()["fieldValidation"]
|
||||
if len(fieldValidation) != 1 {
|
||||
return "invalid"
|
||||
}
|
||||
if errs := validation.ValidateFieldValidation(nil, fieldValidation[0]); len(errs) > 0 {
|
||||
return "invalid"
|
||||
}
|
||||
return fieldValidation[0]
|
||||
}
|
||||
|
||||
var _ http.ResponseWriter = (*ResponseWriterDelegator)(nil)
|
||||
var _ responsewriter.UserProvidedDecorator = (*ResponseWriterDelegator)(nil)
|
||||
|
||||
// ResponseWriterDelegator interface wraps http.ResponseWriter to additionally record content-length, status-code, etc.
|
||||
type ResponseWriterDelegator struct {
|
||||
http.ResponseWriter
|
||||
|
||||
status int
|
||||
written int64
|
||||
wroteHeader bool
|
||||
}
|
||||
|
||||
func (r *ResponseWriterDelegator) Unwrap() http.ResponseWriter {
|
||||
return r.ResponseWriter
|
||||
}
|
||||
|
||||
func (r *ResponseWriterDelegator) WriteHeader(code int) {
|
||||
r.status = code
|
||||
r.wroteHeader = true
|
||||
r.ResponseWriter.WriteHeader(code)
|
||||
}
|
||||
|
||||
func (r *ResponseWriterDelegator) Write(b []byte) (int, error) {
|
||||
if !r.wroteHeader {
|
||||
r.WriteHeader(http.StatusOK)
|
||||
}
|
||||
n, err := r.ResponseWriter.Write(b)
|
||||
r.written += int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (r *ResponseWriterDelegator) Status() int {
|
||||
return r.status
|
||||
}
|
||||
|
||||
func (r *ResponseWriterDelegator) ContentLength() int {
|
||||
return int(r.written)
|
||||
}
|
||||
|
||||
// Small optimization over Itoa
|
||||
func codeToString(s int) string {
|
||||
switch s {
|
||||
case 100:
|
||||
return "100"
|
||||
case 101:
|
||||
return "101"
|
||||
|
||||
case 200:
|
||||
return "200"
|
||||
case 201:
|
||||
return "201"
|
||||
case 202:
|
||||
return "202"
|
||||
case 203:
|
||||
return "203"
|
||||
case 204:
|
||||
return "204"
|
||||
case 205:
|
||||
return "205"
|
||||
case 206:
|
||||
return "206"
|
||||
|
||||
case 300:
|
||||
return "300"
|
||||
case 301:
|
||||
return "301"
|
||||
case 302:
|
||||
return "302"
|
||||
case 304:
|
||||
return "304"
|
||||
case 305:
|
||||
return "305"
|
||||
case 307:
|
||||
return "307"
|
||||
|
||||
case 400:
|
||||
return "400"
|
||||
case 401:
|
||||
return "401"
|
||||
case 402:
|
||||
return "402"
|
||||
case 403:
|
||||
return "403"
|
||||
case 404:
|
||||
return "404"
|
||||
case 405:
|
||||
return "405"
|
||||
case 406:
|
||||
return "406"
|
||||
case 407:
|
||||
return "407"
|
||||
case 408:
|
||||
return "408"
|
||||
case 409:
|
||||
return "409"
|
||||
case 410:
|
||||
return "410"
|
||||
case 411:
|
||||
return "411"
|
||||
case 412:
|
||||
return "412"
|
||||
case 413:
|
||||
return "413"
|
||||
case 414:
|
||||
return "414"
|
||||
case 415:
|
||||
return "415"
|
||||
case 416:
|
||||
return "416"
|
||||
case 417:
|
||||
return "417"
|
||||
case 418:
|
||||
return "418"
|
||||
|
||||
case 500:
|
||||
return "500"
|
||||
case 501:
|
||||
return "501"
|
||||
case 502:
|
||||
return "502"
|
||||
case 503:
|
||||
return "503"
|
||||
case 504:
|
||||
return "504"
|
||||
case 505:
|
||||
return "505"
|
||||
|
||||
case 428:
|
||||
return "428"
|
||||
case 429:
|
||||
return "429"
|
||||
case 431:
|
||||
return "431"
|
||||
case 511:
|
||||
return "511"
|
||||
|
||||
default:
|
||||
return strconv.Itoa(s)
|
||||
}
|
||||
}
|
191
e2e/vendor/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go
generated
vendored
Normal file
@ -0,0 +1,191 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package openapi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
restful "github.com/emicklei/go-restful/v3"
|
||||
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/kube-openapi/pkg/util"
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
)
|
||||
|
||||
var verbs = util.NewTrie([]string{"get", "log", "read", "replace", "patch", "delete", "deletecollection", "watch", "connect", "proxy", "list", "create", "patch"})
|
||||
|
||||
const (
|
||||
extensionGVK = "x-kubernetes-group-version-kind"
|
||||
)
|
||||
|
||||
// ToValidOperationID makes a string a valid op ID (e.g. removing punctuation and whitespace and making it camel case)
|
||||
func ToValidOperationID(s string, capitalizeFirstLetter bool) string {
|
||||
var buffer bytes.Buffer
|
||||
capitalize := capitalizeFirstLetter
|
||||
for i, r := range s {
|
||||
if unicode.IsLetter(r) || r == '_' || (i != 0 && unicode.IsDigit(r)) {
|
||||
if capitalize {
|
||||
buffer.WriteRune(unicode.ToUpper(r))
|
||||
capitalize = false
|
||||
} else {
|
||||
buffer.WriteRune(r)
|
||||
}
|
||||
} else {
|
||||
capitalize = true
|
||||
}
|
||||
}
|
||||
return buffer.String()
|
||||
}
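// Illustrative example:
//
//	ToValidOperationID("read-core-v1-namespaced pod", true) // "ReadCoreV1NamespacedPod"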
|
||||
|
||||
// GetOperationIDAndTags returns a customize operation ID and a list of tags for kubernetes API server's OpenAPI spec to prevent duplicate IDs.
|
||||
func GetOperationIDAndTags(r *restful.Route) (string, []string, error) {
|
||||
op := r.Operation
|
||||
path := r.Path
|
||||
var tags []string
|
||||
prefix, exists := verbs.GetPrefix(op)
|
||||
if !exists {
|
||||
return op, tags, fmt.Errorf("operation names should start with a verb. Cannot determine operation verb from %v", op)
|
||||
}
|
||||
op = op[len(prefix):]
|
||||
parts := strings.Split(strings.Trim(path, "/"), "/")
|
||||
// Assume /api is /apis/core, remove this when we actually serve /api/... on /apis/core/...
|
||||
if len(parts) >= 1 && parts[0] == "api" {
|
||||
parts = append([]string{"apis", "core"}, parts[1:]...)
|
||||
}
|
||||
if len(parts) >= 2 && parts[0] == "apis" {
|
||||
trimmed := strings.TrimSuffix(parts[1], ".k8s.io")
|
||||
prefix = prefix + ToValidOperationID(trimmed, prefix != "")
|
||||
tag := ToValidOperationID(trimmed, false)
|
||||
if len(parts) > 2 {
|
||||
prefix = prefix + ToValidOperationID(parts[2], prefix != "")
|
||||
tag = tag + "_" + ToValidOperationID(parts[2], false)
|
||||
}
|
||||
tags = append(tags, tag)
|
||||
} else if len(parts) >= 1 {
|
||||
tags = append(tags, ToValidOperationID(parts[0], false))
|
||||
}
|
||||
return prefix + ToValidOperationID(op, prefix != ""), tags, nil
|
||||
}
|
||||
|
||||
type groupVersionKinds []v1.GroupVersionKind
|
||||
|
||||
func (s groupVersionKinds) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s groupVersionKinds) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
func (s groupVersionKinds) Less(i, j int) bool {
|
||||
if s[i].Group == s[j].Group {
|
||||
if s[i].Version == s[j].Version {
|
||||
return s[i].Kind < s[j].Kind
|
||||
}
|
||||
return s[i].Version < s[j].Version
|
||||
}
|
||||
return s[i].Group < s[j].Group
|
||||
}
|
||||
|
||||
func (s groupVersionKinds) JSON() []interface{} {
|
||||
j := []interface{}{}
|
||||
for _, gvk := range s {
|
||||
j = append(j, map[string]interface{}{
|
||||
"group": gvk.Group,
|
||||
"version": gvk.Version,
|
||||
"kind": gvk.Kind,
|
||||
})
|
||||
}
|
||||
return j
|
||||
}
|
||||
|
||||
// DefinitionNamer is the type to customize OpenAPI definition name.
|
||||
type DefinitionNamer struct {
|
||||
typeGroupVersionKinds map[string]groupVersionKinds
|
||||
}
|
||||
|
||||
func gvkConvert(gvk schema.GroupVersionKind) v1.GroupVersionKind {
|
||||
return v1.GroupVersionKind{
|
||||
Group: gvk.Group,
|
||||
Version: gvk.Version,
|
||||
Kind: gvk.Kind,
|
||||
}
|
||||
}
|
||||
|
||||
func friendlyName(name string) string {
|
||||
nameParts := strings.Split(name, "/")
|
||||
// Reverse first part. e.g., io.k8s... instead of k8s.io...
|
||||
if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") {
|
||||
parts := strings.Split(nameParts[0], ".")
|
||||
for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 {
|
||||
parts[i], parts[j] = parts[j], parts[i]
|
||||
}
|
||||
nameParts[0] = strings.Join(parts, ".")
|
||||
}
|
||||
return strings.Join(nameParts, ".")
|
||||
}
|
||||
|
||||
func typeName(t reflect.Type) string {
|
||||
path := t.PkgPath()
|
||||
if strings.Contains(path, "/vendor/") {
|
||||
path = path[strings.Index(path, "/vendor/")+len("/vendor/"):]
|
||||
}
|
||||
return fmt.Sprintf("%s.%s", path, t.Name())
|
||||
}
|
||||
|
||||
// NewDefinitionNamer constructs a new DefinitionNamer to be used to customize OpenAPI spec.
|
||||
func NewDefinitionNamer(schemes ...*runtime.Scheme) *DefinitionNamer {
|
||||
ret := &DefinitionNamer{
|
||||
typeGroupVersionKinds: map[string]groupVersionKinds{},
|
||||
}
|
||||
for _, s := range schemes {
|
||||
for gvk, rtype := range s.AllKnownTypes() {
|
||||
newGVK := gvkConvert(gvk)
|
||||
exists := false
|
||||
for _, existingGVK := range ret.typeGroupVersionKinds[typeName(rtype)] {
|
||||
if newGVK == existingGVK {
|
||||
exists = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !exists {
|
||||
ret.typeGroupVersionKinds[typeName(rtype)] = append(ret.typeGroupVersionKinds[typeName(rtype)], newGVK)
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, gvk := range ret.typeGroupVersionKinds {
|
||||
sort.Sort(gvk)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// GetDefinitionName returns the name and tags for a given definition
|
||||
func (d *DefinitionNamer) GetDefinitionName(name string) (string, spec.Extensions) {
|
||||
if groupVersionKinds, ok := d.typeGroupVersionKinds[name]; ok {
|
||||
return friendlyName(name), spec.Extensions{
|
||||
extensionGVK: groupVersionKinds.JSON(),
|
||||
}
|
||||
}
|
||||
return friendlyName(name), nil
|
||||
}
|
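Editor's note: the helpers above determine how operation IDs and tags appear in the aggregated OpenAPI spec. A minimal sketch of what ToValidOperationID produces (illustrative only, not part of the vendored diff; the input strings are made-up examples):

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/endpoints/openapi"
)

func main() {
	// Punctuation and whitespace act as word boundaries and are dropped;
	// the letter after each boundary is capitalized.
	fmt.Println(openapi.ToValidOperationID("read core v1 namespaced pod", true))
	// Expected output: ReadCoreV1NamespacedPod

	// With capitalizeFirstLetter=false the leading letter stays lowercase,
	// which is how group names such as "storage.k8s.io" become tags.
	fmt.Println(openapi.ToValidOperationID("storage.k8s.io", false))
	// Expected output: storageK8sIo
}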
4
e2e/vendor/k8s.io/apiserver/pkg/endpoints/request/OWNERS
generated
vendored
Normal file
@ -0,0 +1,4 @@
# See the OWNERS docs at https://go.k8s.io/owners

reviewers:
- sttts
78
e2e/vendor/k8s.io/apiserver/pkg/endpoints/request/context.go
generated
vendored
Normal file
@ -0,0 +1,78 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package request

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apiserver/pkg/authentication/user"
)

// The key type is unexported to prevent collisions
type key int

const (
	// namespaceKey is the context key for the request namespace.
	namespaceKey key = iota

	// userKey is the context key for the request user.
	userKey
)

// NewContext instantiates a base context object for request flows.
func NewContext() context.Context {
	return context.TODO()
}

// NewDefaultContext instantiates a base context object for request flows in the default namespace
func NewDefaultContext() context.Context {
	return WithNamespace(NewContext(), metav1.NamespaceDefault)
}

// WithValue returns a copy of parent in which the value associated with key is val.
func WithValue(parent context.Context, key interface{}, val interface{}) context.Context {
	return context.WithValue(parent, key, val)
}

// WithNamespace returns a copy of parent in which the namespace value is set
func WithNamespace(parent context.Context, namespace string) context.Context {
	return WithValue(parent, namespaceKey, namespace)
}

// NamespaceFrom returns the value of the namespace key on the ctx
func NamespaceFrom(ctx context.Context) (string, bool) {
	namespace, ok := ctx.Value(namespaceKey).(string)
	return namespace, ok
}

// NamespaceValue returns the value of the namespace key on the ctx, or the empty string if none
func NamespaceValue(ctx context.Context) string {
	namespace, _ := NamespaceFrom(ctx)
	return namespace
}

// WithUser returns a copy of parent in which the user value is set
func WithUser(parent context.Context, user user.Info) context.Context {
	return WithValue(parent, userKey, user)
}

// UserFrom returns the value of the user key on the ctx
func UserFrom(ctx context.Context) (user.Info, bool) {
	user, ok := ctx.Value(userKey).(user.Info)
	return user, ok
}
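Editor's note: the context helpers above are small typed wrappers around context.WithValue. A minimal sketch of stamping and reading the namespace and user (illustrative only, not part of the vendored diff; the namespace and user values are made-up examples):

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/apiserver/pkg/endpoints/request"
)

func main() {
	// Attach a namespace and a user to a fresh request context.
	ctx := request.NewContext()
	ctx = request.WithNamespace(ctx, "kube-system")
	ctx = request.WithUser(ctx, &user.DefaultInfo{Name: "jane"})

	// Read them back; the second return value reports whether the key was set.
	if ns, ok := request.NamespaceFrom(ctx); ok {
		fmt.Println("namespace:", ns) // prints "namespace: kube-system"
	}
	if u, ok := request.UserFrom(ctx); ok {
		fmt.Println("user:", u.GetName()) // prints "user: jane"
	}
}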
20
e2e/vendor/k8s.io/apiserver/pkg/endpoints/request/doc.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package request contains everything around extracting info from
// a http request object.
// TODO: this package is temporary. Handlers must move into pkg/apiserver/handlers to avoid dependency cycle
package request // import "k8s.io/apiserver/pkg/endpoints/request"
45
e2e/vendor/k8s.io/apiserver/pkg/endpoints/request/received_time.go
generated
vendored
Normal file
@ -0,0 +1,45 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package request

import (
	"context"
	"time"
)

type requestReceivedTimestampKeyType int

// requestReceivedTimestampKey is the ReceivedTimestamp (the time the request reached the apiserver)
// key for the context.
const requestReceivedTimestampKey requestReceivedTimestampKeyType = iota

// WithReceivedTimestamp returns a copy of parent context in which the ReceivedTimestamp
// (the time the request reached the apiserver) is set.
//
// If the specified ReceivedTimestamp is zero, no value is set and the parent context is returned as is.
func WithReceivedTimestamp(parent context.Context, receivedTimestamp time.Time) context.Context {
	if receivedTimestamp.IsZero() {
		return parent
	}
	return WithValue(parent, requestReceivedTimestampKey, receivedTimestamp)
}

// ReceivedTimestampFrom returns the value of the ReceivedTimestamp key from the specified context.
func ReceivedTimestampFrom(ctx context.Context) (time.Time, bool) {
	info, ok := ctx.Value(requestReceivedTimestampKey).(time.Time)
	return info, ok
}
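Editor's note: a minimal sketch of how a filter chain might use the two functions above, stamping the receipt time early and reading it later (illustrative only, not part of the vendored diff; the filter and handler names are assumptions):

package main

import (
	"fmt"
	"net/http"
	"time"

	"k8s.io/apiserver/pkg/endpoints/request"
)

// withReceivedTimestamp records the time the request arrived at this server.
func withReceivedTimestamp(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		ctx := request.WithReceivedTimestamp(req.Context(), time.Now())
		next.ServeHTTP(w, req.WithContext(ctx))
	})
}

// handler reads the timestamp back and reports how long the request has been in flight.
func handler(w http.ResponseWriter, req *http.Request) {
	if received, ok := request.ReceivedTimestampFrom(req.Context()); ok {
		fmt.Fprintf(w, "request in flight for %v\n", time.Since(received))
	}
}

func main() {
	_ = http.ListenAndServe(":8080", withReceivedTimestamp(http.HandlerFunc(handler)))
}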
305
e2e/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go
generated
vendored
Normal file
@ -0,0 +1,305 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package request

import (
	"context"
	"fmt"
	"net/http"
	"strings"

	"k8s.io/apimachinery/pkg/api/validation/path"
	metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
	metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	genericfeatures "k8s.io/apiserver/pkg/features"
	utilfeature "k8s.io/apiserver/pkg/util/feature"

	"k8s.io/klog/v2"
)

// LongRunningRequestCheck is a predicate which is true for long-running http requests.
type LongRunningRequestCheck func(r *http.Request, requestInfo *RequestInfo) bool

type RequestInfoResolver interface {
	NewRequestInfo(req *http.Request) (*RequestInfo, error)
}

// RequestInfo holds information parsed from the http.Request
type RequestInfo struct {
	// IsResourceRequest indicates whether or not the request is for an API resource or subresource
	IsResourceRequest bool
	// Path is the URL path of the request
	Path string
	// Verb is the kube verb associated with the request for API requests, not the http verb. This includes things like list and watch.
	// for non-resource requests, this is the lowercase http verb
	Verb string

	APIPrefix  string
	APIGroup   string
	APIVersion string
	Namespace  string
	// Resource is the name of the resource being requested. This is not the kind. For example: pods
	Resource string
	// Subresource is the name of the subresource being requested. This is a different resource, scoped to the parent resource, but it may have a different kind.
	// For instance, /pods has the resource "pods" and the kind "Pod", while /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod"
	// (because status operates on pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource "binding", and kind "Binding".
	Subresource string
	// Name is empty for some verbs, but if the request directly indicates a name (not in body content) then this field is filled in.
	Name string
	// Parts are the path parts for the request, always starting with /{resource}/{name}
	Parts []string

	// FieldSelector contains the unparsed field selector from a request. It is only present if the apiserver
	// honors field selectors for the verb this request is associated with.
	FieldSelector string
	// LabelSelector contains the unparsed label selector from a request. It is only present if the apiserver
	// honors label selectors for the verb this request is associated with.
	LabelSelector string
}

// specialVerbs contains just strings which are used in REST paths for special actions that don't fall under the normal
// CRUDdy GET/POST/PUT/DELETE actions on REST objects.
// TODO: find a way to keep this up to date automatically. Maybe dynamically populate list as handlers added to
// master's Mux.
var specialVerbs = sets.NewString("proxy", "watch")

// specialVerbsNoSubresources contains root verbs which do not allow subresources
var specialVerbsNoSubresources = sets.NewString("proxy")

// namespaceSubresources contains subresources of namespace
// this list allows the parser to distinguish between a namespace subresource, and a namespaced resource
var namespaceSubresources = sets.NewString("status", "finalize")

// verbsWithSelectors is the list of verbs which support fieldSelector and labelSelector parameters
var verbsWithSelectors = sets.NewString("list", "watch", "deletecollection")

// NamespaceSubResourcesForTest exports namespaceSubresources for testing in pkg/controlplane/master_test.go, so we never drift
var NamespaceSubResourcesForTest = sets.NewString(namespaceSubresources.List()...)

type RequestInfoFactory struct {
	APIPrefixes          sets.String // without leading and trailing slashes
	GrouplessAPIPrefixes sets.String // without leading and trailing slashes
}

// TODO write an integration test against the swagger doc to test the RequestInfo and match up behavior to responses
// NewRequestInfo returns the information from the http request. If error is not nil, RequestInfo holds the information as best it is known before the failure.
// It handles both resource and non-resource requests and fills in all the pertinent information for each.
// Valid Inputs:
//
// Resource paths
//	/apis/{api-group}/{version}/namespaces
//	/api/{version}/namespaces
//	/api/{version}/namespaces/{namespace}
//	/api/{version}/namespaces/{namespace}/{resource}
//	/api/{version}/namespaces/{namespace}/{resource}/{resourceName}
//	/api/{version}/{resource}
//	/api/{version}/{resource}/{resourceName}
//
// Special verbs without subresources:
//	/api/{version}/proxy/{resource}/{resourceName}
//	/api/{version}/proxy/namespaces/{namespace}/{resource}/{resourceName}
//
// Special verbs with subresources:
//	/api/{version}/watch/{resource}
//	/api/{version}/watch/namespaces/{namespace}/{resource}
//
// NonResource paths
//	/apis/{api-group}/{version}
//	/apis/{api-group}
//	/apis
//	/api/{version}
//	/api
//	/healthz
//	/
func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, error) {
	// start with a non-resource request until proven otherwise
	requestInfo := RequestInfo{
		IsResourceRequest: false,
		Path:              req.URL.Path,
		Verb:              strings.ToLower(req.Method),
	}

	currentParts := splitPath(req.URL.Path)
	if len(currentParts) < 3 {
		// return a non-resource request
		return &requestInfo, nil
	}

	if !r.APIPrefixes.Has(currentParts[0]) {
		// return a non-resource request
		return &requestInfo, nil
	}
	requestInfo.APIPrefix = currentParts[0]
	currentParts = currentParts[1:]

	if !r.GrouplessAPIPrefixes.Has(requestInfo.APIPrefix) {
		// one part (APIPrefix) has already been consumed, so this is actually "do we have four parts?"
		if len(currentParts) < 3 {
			// return a non-resource request
			return &requestInfo, nil
		}

		requestInfo.APIGroup = currentParts[0]
		currentParts = currentParts[1:]
	}

	requestInfo.IsResourceRequest = true
	requestInfo.APIVersion = currentParts[0]
	currentParts = currentParts[1:]

	// handle input of form /{specialVerb}/*
	verbViaPathPrefix := false
	if specialVerbs.Has(currentParts[0]) {
		if len(currentParts) < 2 {
			return &requestInfo, fmt.Errorf("unable to determine kind and namespace from url, %v", req.URL)
		}

		requestInfo.Verb = currentParts[0]
		currentParts = currentParts[1:]
		verbViaPathPrefix = true
	} else {
		switch req.Method {
		case "POST":
			requestInfo.Verb = "create"
		case "GET", "HEAD":
			requestInfo.Verb = "get"
		case "PUT":
			requestInfo.Verb = "update"
		case "PATCH":
			requestInfo.Verb = "patch"
		case "DELETE":
			requestInfo.Verb = "delete"
		default:
			requestInfo.Verb = ""
		}
	}

	// URL forms: /namespaces/{namespace}/{kind}/*, where parts are adjusted to be relative to kind
	if currentParts[0] == "namespaces" {
		if len(currentParts) > 1 {
			requestInfo.Namespace = currentParts[1]

			// if there is another step after the namespace name and it is not a known namespace subresource
			// move currentParts to include it as a resource in its own right
			if len(currentParts) > 2 && !namespaceSubresources.Has(currentParts[2]) {
				currentParts = currentParts[2:]
			}
		}
	} else {
		requestInfo.Namespace = metav1.NamespaceNone
	}

	// parsing successful, so we now know the proper value for .Parts
	requestInfo.Parts = currentParts

	// parts look like: resource/resourceName/subresource/other/stuff/we/don't/interpret
	switch {
	case len(requestInfo.Parts) >= 3 && !specialVerbsNoSubresources.Has(requestInfo.Verb):
		requestInfo.Subresource = requestInfo.Parts[2]
		fallthrough
	case len(requestInfo.Parts) >= 2:
		requestInfo.Name = requestInfo.Parts[1]
		fallthrough
	case len(requestInfo.Parts) >= 1:
		requestInfo.Resource = requestInfo.Parts[0]
	}

	// if there's no name on the request and we thought it was a get before, then the actual verb is a list or a watch
	if len(requestInfo.Name) == 0 && requestInfo.Verb == "get" {
		opts := metainternalversion.ListOptions{}
		if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, &opts); err != nil {
			// An error in parsing request will result in default to "list" and not setting "name" field.
			klog.ErrorS(err, "Couldn't parse request", "request", req.URL.Query())
			// Reset opts to not rely on partial results from parsing.
			// However, if watch is set, let's report it.
			opts = metainternalversion.ListOptions{}
			if values := req.URL.Query()["watch"]; len(values) > 0 {
				switch strings.ToLower(values[0]) {
				case "false", "0":
				default:
					opts.Watch = true
				}
			}
		}

		if opts.Watch {
			requestInfo.Verb = "watch"
		} else {
			requestInfo.Verb = "list"
		}

		if opts.FieldSelector != nil {
			if name, ok := opts.FieldSelector.RequiresExactMatch("metadata.name"); ok {
				if len(path.IsValidPathSegmentName(name)) == 0 {
					requestInfo.Name = name
				}
			}
		}
	}

	// if there's no name on the request and we thought it was a delete before, then the actual verb is deletecollection
	if len(requestInfo.Name) == 0 && requestInfo.Verb == "delete" {
		requestInfo.Verb = "deletecollection"
	}

	if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors) {
		// Don't support selector authorization on requests that used the deprecated verb-via-path mechanism, since they don't support selectors consistently.
		// There are multi-object and single-object watch endpoints, and only the multi-object one supports selectors.
		if !verbViaPathPrefix && verbsWithSelectors.Has(requestInfo.Verb) {
			// interestingly these are parsed above, but the current structure there means that if one (or anything) in the
			// listOptions fails to decode, the field and label selectors are lost.
			// therefore, do the straight query param read here.
			if vals := req.URL.Query()["fieldSelector"]; len(vals) > 0 {
				requestInfo.FieldSelector = vals[0]
			}
			if vals := req.URL.Query()["labelSelector"]; len(vals) > 0 {
				requestInfo.LabelSelector = vals[0]
			}
		}
	}

	return &requestInfo, nil
}

type requestInfoKeyType int

// requestInfoKey is the RequestInfo key for the context. It's of private type here. Because
// keys are interfaces and interfaces are equal when the type and the value is equal, this
// does not conflict with the keys defined in pkg/api.
const requestInfoKey requestInfoKeyType = iota

// WithRequestInfo returns a copy of parent in which the request info value is set
func WithRequestInfo(parent context.Context, info *RequestInfo) context.Context {
	return WithValue(parent, requestInfoKey, info)
}

// RequestInfoFrom returns the value of the RequestInfo key on the ctx
func RequestInfoFrom(ctx context.Context) (*RequestInfo, bool) {
	info, ok := ctx.Value(requestInfoKey).(*RequestInfo)
	return info, ok
}

// splitPath returns the segments for a URL path.
func splitPath(path string) []string {
	path = strings.Trim(path, "/")
	if path == "" {
		return []string{}
	}
	return strings.Split(path, "/")
}
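Editor's note: a minimal sketch of the parsing rules above applied to one URL (illustrative only, not part of the vendored diff; the prefixes mirror a typical generic-apiserver setup and the request path is a made-up example):

package main

import (
	"fmt"
	"net/http"

	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apiserver/pkg/endpoints/request"
)

func main() {
	factory := &request.RequestInfoFactory{
		APIPrefixes:          sets.NewString("api", "apis"),
		GrouplessAPIPrefixes: sets.NewString("api"),
	}

	req, _ := http.NewRequest("GET", "/api/v1/namespaces/default/pods/foo/status", nil)
	info, err := factory.NewRequestInfo(req)
	if err != nil {
		panic(err)
	}

	// Per the rules above: verb=get, namespace=default, resource=pods,
	// name=foo, subresource=status.
	fmt.Println(info.Verb, info.Namespace, info.Resource, info.Name, info.Subresource)
}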
55
e2e/vendor/k8s.io/apiserver/pkg/endpoints/request/server_shutdown_signal.go
generated
vendored
Normal file
@ -0,0 +1,55 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package request

import (
	"context"
)

// The serverShutdownSignalKeyType type is unexported to prevent collisions
type serverShutdownSignalKeyType int

// serverShutdownSignalKey is the context key for storing the
// watch termination interface instance for a WATCH request.
const serverShutdownSignalKey serverShutdownSignalKeyType = iota

// ServerShutdownSignal is associated with the request context so
// the request handler logic has access to signals related to
// server shutdown events
type ServerShutdownSignal interface {
	// Signaled when the apiserver is not receiving any new requests
	ShuttingDown() <-chan struct{}
}

// ServerShutdownSignalFrom returns the ServerShutdownSignal instance
// associated with the request context.
// If there is no ServerShutdownSignal associated with the context,
// nil is returned.
func ServerShutdownSignalFrom(ctx context.Context) ServerShutdownSignal {
	ev, _ := ctx.Value(serverShutdownSignalKey).(ServerShutdownSignal)
	return ev
}

// WithServerShutdownSignal returns a new context that stores
// the ServerShutdownSignal interface instance.
func WithServerShutdownSignal(parent context.Context, window ServerShutdownSignal) context.Context {
	if ServerShutdownSignalFrom(parent) != nil {
		return parent // Avoid double registering.
	}

	return context.WithValue(parent, serverShutdownSignalKey, window)
}
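Editor's note: a minimal sketch of how a long-running (watch-style) handler might observe this signal (illustrative only, not part of the vendored diff; the event loop and helper names are assumptions):

package watchexample

import (
	"fmt"
	"net/http"

	"k8s.io/apiserver/pkg/endpoints/request"
)

// serveWatch streams events until the source closes or the server starts shutting down.
func serveWatch(w http.ResponseWriter, req *http.Request, events <-chan string) {
	signal := request.ServerShutdownSignalFrom(req.Context())

	for {
		select {
		case ev, ok := <-events:
			if !ok {
				return
			}
			fmt.Fprintln(w, ev)
		case <-shuttingDown(signal):
			// The apiserver stopped accepting new requests; end the watch gracefully.
			return
		}
	}
}

// shuttingDown tolerates a nil signal (no ServerShutdownSignal in the context):
// a nil channel blocks forever, so that select case is simply never taken.
func shuttingDown(s request.ServerShutdownSignal) <-chan struct{} {
	if s == nil {
		return nil
	}
	return s.ShuttingDown()
}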
311
e2e/vendor/k8s.io/apiserver/pkg/endpoints/request/webhook_duration.go
generated
vendored
Normal file
@ -0,0 +1,311 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package request

import (
	"context"
	"sync"
	"time"

	"k8s.io/utils/clock"
)

func sumDuration(d1 time.Duration, d2 time.Duration) time.Duration {
	return d1 + d2
}

func maxDuration(d1 time.Duration, d2 time.Duration) time.Duration {
	if d1 > d2 {
		return d1
	}
	return d2
}

// DurationTracker is a simple interface for tracking function durations;
// it is safe for concurrent use by multiple goroutines.
type DurationTracker interface {
	// Track measures time spent in the given function f and
	// aggregates measured duration using aggregateFunction.
	// if Track is invoked with f from multiple goroutines concurrently,
	// then f must be safe to be invoked concurrently by multiple goroutines.
	Track(f func())

	// TrackDuration tracks latency from the specified duration
	// and aggregates it using aggregateFunction
	TrackDuration(time.Duration)

	// GetLatency returns the total latency incurred so far
	GetLatency() time.Duration
}

// durationTracker implements DurationTracker by measuring function time
// using given clock and aggregates the duration using given aggregate function
type durationTracker struct {
	clock             clock.Clock
	latency           time.Duration
	mu                sync.Mutex
	aggregateFunction func(time.Duration, time.Duration) time.Duration
}

// Track measures time spent in given function and aggregates measured
// duration using aggregateFunction
func (t *durationTracker) Track(f func()) {
	startedAt := t.clock.Now()
	defer func() {
		duration := t.clock.Since(startedAt)
		t.mu.Lock()
		defer t.mu.Unlock()
		t.latency = t.aggregateFunction(t.latency, duration)
	}()

	f()
}

// TrackDuration tracks latency from the given duration
// using aggregateFunction
func (t *durationTracker) TrackDuration(d time.Duration) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.latency = t.aggregateFunction(t.latency, d)
}

// GetLatency returns aggregated latency tracked by a tracker
func (t *durationTracker) GetLatency() time.Duration {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.latency
}

func newSumLatencyTracker(c clock.Clock) DurationTracker {
	return &durationTracker{
		clock:             c,
		aggregateFunction: sumDuration,
	}
}

func newMaxLatencyTracker(c clock.Clock) DurationTracker {
	return &durationTracker{
		clock:             c,
		aggregateFunction: maxDuration,
	}
}

// LatencyTrackers stores trackers used to measure latency incurred in
// components within the apiserver.
type LatencyTrackers struct {
	// MutatingWebhookTracker tracks the latency incurred in mutating webhook(s).
	// Since mutating webhooks are done sequentially, latency
	// is aggregated using sum function.
	MutatingWebhookTracker DurationTracker

	// ValidatingWebhookTracker tracks the latency incurred in validating webhook(s).
	// Validating webhooks are done in parallel, so max function is used.
	ValidatingWebhookTracker DurationTracker

	// APFQueueWaitTracker tracks the latency incurred by queue wait times
	// from priority & fairness.
	APFQueueWaitTracker DurationTracker

	// StorageTracker tracks the latency incurred inside the storage layer,
	// it accounts for the time it takes to send data to the underlying
	// storage layer (etcd) and get the complete response back.
	// If a request involves N (N>=1) round trips to the underlying
	// storage layer, the latency will account for the total duration
	// from these N round trips.
	// It does not include the time incurred in admission, or validation.
	StorageTracker DurationTracker

	// TransformTracker tracks the latency incurred in transforming the
	// response object(s) returned from the underlying storage layer.
	// This includes transforming the object to user's desired form
	// (ie. as Table), and also setting appropriate API level fields.
	// This does not include the latency incurred in serialization
	// (json or protobuf) of the response object or writing
	// of it to the http ResponseWriter object.
	TransformTracker DurationTracker

	// SerializationTracker tracks the latency incurred in serialization
	// (json or protobuf) of the response object.
	// NOTE: serialization and writing of the serialized raw bytes to the
	// associated http ResponseWriter object are interleaved, and hence
	// the latency measured here will include the time spent writing the
	// serialized raw bytes to the http ResponseWriter object.
	SerializationTracker DurationTracker

	// ResponseWriteTracker tracks the latency incurred in writing the
	// serialized raw bytes to the http ResponseWriter object (via the
	// Write method) associated with the request.
	// The Write method can be invoked multiple times, so we use a
	// latency tracker that sums up the duration from each call.
	ResponseWriteTracker DurationTracker

	// DecodeTracker is used to track latency incurred inside the function
	// that takes an object returned from the underlying storage layer
	// (etcd) and performs decoding of the response object.
	// When called multiple times, the latency incurred inside the
	// decode func each time will be summed up.
	DecodeTracker DurationTracker
}

type latencyTrackersKeyType int

// latencyTrackersKey is the key that associates a LatencyTrackers
// instance with the request context.
const latencyTrackersKey latencyTrackersKeyType = iota

// WithLatencyTrackers returns a copy of parent context to which an
// instance of LatencyTrackers is added.
func WithLatencyTrackers(parent context.Context) context.Context {
	return WithLatencyTrackersAndCustomClock(parent, clock.RealClock{})
}

// WithLatencyTrackersAndCustomClock returns a copy of parent context to which
// an instance of LatencyTrackers is added. Trackers use the given clock.
func WithLatencyTrackersAndCustomClock(parent context.Context, c clock.Clock) context.Context {
	return WithValue(parent, latencyTrackersKey, &LatencyTrackers{
		MutatingWebhookTracker:   newSumLatencyTracker(c),
		ValidatingWebhookTracker: newMaxLatencyTracker(c),
		APFQueueWaitTracker:      newMaxLatencyTracker(c),
		StorageTracker:           newSumLatencyTracker(c),
		TransformTracker:         newSumLatencyTracker(c),
		SerializationTracker:     newSumLatencyTracker(c),
		ResponseWriteTracker:     newSumLatencyTracker(c),
		DecodeTracker:            newSumLatencyTracker(c),
	})
}

// LatencyTrackersFrom returns the associated LatencyTrackers instance
// from the specified context.
func LatencyTrackersFrom(ctx context.Context) (*LatencyTrackers, bool) {
	wd, ok := ctx.Value(latencyTrackersKey).(*LatencyTrackers)
	return wd, ok && wd != nil
}

// TrackTransformResponseObjectLatency is used to track latency incurred
// inside the function that takes an object returned from the underlying
// storage layer (etcd) and performs any necessary transformations
// of the response object. This does not include the latency incurred in
// serialization (json or protobuf) of the response object or writing of
// it to the http ResponseWriter object.
// When called multiple times, the latency incurred inside the
// transform func each time will be summed up.
func TrackTransformResponseObjectLatency(ctx context.Context, transform func()) {
	if tracker, ok := LatencyTrackersFrom(ctx); ok {
		tracker.TransformTracker.Track(transform)
		return
	}

	transform()
}

// TrackStorageLatency is used to track latency incurred
// inside the underlying storage layer.
// When called multiple times, the latency provided will be summed up.
func TrackStorageLatency(ctx context.Context, d time.Duration) {
	if tracker, ok := LatencyTrackersFrom(ctx); ok {
		tracker.StorageTracker.TrackDuration(d)
	}
}

// TrackSerializeResponseObjectLatency is used to track latency incurred in
// serialization (json or protobuf) of the response object.
// When called multiple times, the latency provided will be summed up.
func TrackSerializeResponseObjectLatency(ctx context.Context, f func()) {
	if tracker, ok := LatencyTrackersFrom(ctx); ok {
		tracker.SerializationTracker.Track(f)
		return
	}

	f()
}

// TrackResponseWriteLatency is used to track latency incurred in writing
// the serialized raw bytes to the http ResponseWriter object (via the
// Write method) associated with the request.
// When called multiple times, the latency provided will be summed up.
func TrackResponseWriteLatency(ctx context.Context, d time.Duration) {
	if tracker, ok := LatencyTrackersFrom(ctx); ok {
		tracker.ResponseWriteTracker.TrackDuration(d)
	}
}

// TrackAPFQueueWaitLatency is used to track latency incurred
// by priority and fairness queues.
func TrackAPFQueueWaitLatency(ctx context.Context, d time.Duration) {
	if tracker, ok := LatencyTrackersFrom(ctx); ok {
		tracker.APFQueueWaitTracker.TrackDuration(d)
	}
}

// TrackDecodeLatency is used to track latency incurred inside the function
// that takes an object returned from the underlying storage layer
// (etcd) and performs decoding of the response object.
// When called multiple times, the latency incurred inside the
// decode func each time will be summed up.
func TrackDecodeLatency(ctx context.Context, d time.Duration) {
	if tracker, ok := LatencyTrackersFrom(ctx); ok {
		tracker.DecodeTracker.TrackDuration(d)
	}
}

// AuditAnnotationsFromLatencyTrackers will inspect each latency tracker
// associated with the request context and return a set of audit
// annotations that can be added to the API audit entry.
func AuditAnnotationsFromLatencyTrackers(ctx context.Context) map[string]string {
	const (
		transformLatencyKey         = "apiserver.latency.k8s.io/transform-response-object"
		storageLatencyKey           = "apiserver.latency.k8s.io/etcd"
		serializationLatencyKey     = "apiserver.latency.k8s.io/serialize-response-object"
		responseWriteLatencyKey     = "apiserver.latency.k8s.io/response-write"
		mutatingWebhookLatencyKey   = "apiserver.latency.k8s.io/mutating-webhook"
		validatingWebhookLatencyKey = "apiserver.latency.k8s.io/validating-webhook"
		decodeLatencyKey            = "apiserver.latency.k8s.io/decode-response-object"
		apfQueueWaitLatencyKey      = "apiserver.latency.k8s.io/apf-queue-wait"
	)

	tracker, ok := LatencyTrackersFrom(ctx)
	if !ok {
		return nil
	}

	annotations := map[string]string{}
	if latency := tracker.TransformTracker.GetLatency(); latency != 0 {
		annotations[transformLatencyKey] = latency.String()
	}
	if latency := tracker.StorageTracker.GetLatency(); latency != 0 {
		annotations[storageLatencyKey] = latency.String()
	}
	if latency := tracker.SerializationTracker.GetLatency(); latency != 0 {
		annotations[serializationLatencyKey] = latency.String()
	}
	if latency := tracker.ResponseWriteTracker.GetLatency(); latency != 0 {
		annotations[responseWriteLatencyKey] = latency.String()
	}
	if latency := tracker.MutatingWebhookTracker.GetLatency(); latency != 0 {
		annotations[mutatingWebhookLatencyKey] = latency.String()
	}
	if latency := tracker.ValidatingWebhookTracker.GetLatency(); latency != 0 {
		annotations[validatingWebhookLatencyKey] = latency.String()
	}
	if latency := tracker.DecodeTracker.GetLatency(); latency != 0 {
		annotations[decodeLatencyKey] = latency.String()
	}
	if latency := tracker.APFQueueWaitTracker.GetLatency(); latency != 0 {
		annotations[apfQueueWaitLatencyKey] = latency.String()
	}
	return annotations
}
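Editor's note: a minimal sketch of the tracker flow above, attaching LatencyTrackers to a context, recording a few durations, and turning them into audit annotations (illustrative only, not part of the vendored diff; the durations are made-up values):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apiserver/pkg/endpoints/request"
)

func main() {
	ctx := request.WithLatencyTrackers(context.Background())

	// Simulate two etcd round trips; StorageTracker uses the sum aggregator, so 25ms total.
	request.TrackStorageLatency(ctx, 10*time.Millisecond)
	request.TrackStorageLatency(ctx, 15*time.Millisecond)

	// Measure a (fake) serialization step; the tracker times the closure itself.
	request.TrackSerializeResponseObjectLatency(ctx, func() {
		time.Sleep(2 * time.Millisecond)
	})

	// Emit the apiserver.latency.k8s.io/* annotations that would go into the audit entry.
	for k, v := range request.AuditAnnotationsFromLatencyTrackers(ctx) {
		fmt.Println(k, "=", v)
	}
}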
54
e2e/vendor/k8s.io/apiserver/pkg/endpoints/responsewriter/fake.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package responsewriter

import (
	"bufio"
	"net"
	"net/http"
)

var _ http.ResponseWriter = &FakeResponseWriter{}

// FakeResponseWriter implements http.ResponseWriter,
// it is used for testing purposes only
type FakeResponseWriter struct{}

func (fw *FakeResponseWriter) Header() http.Header          { return http.Header{} }
func (fw *FakeResponseWriter) WriteHeader(code int)         {}
func (fw *FakeResponseWriter) Write(bs []byte) (int, error) { return len(bs), nil }

// For HTTP2 an http.ResponseWriter object implements
// http.Flusher and http.CloseNotifier.
// It is used for testing purposes only
type FakeResponseWriterFlusherCloseNotifier struct {
	*FakeResponseWriter
}

func (fw *FakeResponseWriterFlusherCloseNotifier) Flush()                   {}
func (fw *FakeResponseWriterFlusherCloseNotifier) CloseNotify() <-chan bool { return nil }

// For HTTP/1.x an http.ResponseWriter object implements
// http.Flusher, http.CloseNotifier and http.Hijacker.
// It is used for testing purposes only
type FakeResponseWriterFlusherCloseNotifierHijacker struct {
	*FakeResponseWriterFlusherCloseNotifier
}

func (fw *FakeResponseWriterFlusherCloseNotifierHijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	return nil, nil, nil
}
180
e2e/vendor/k8s.io/apiserver/pkg/endpoints/responsewriter/wrapper.go
generated
vendored
Normal file
@ -0,0 +1,180 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package responsewriter

import (
	"bufio"
	"net"
	"net/http"
)

// UserProvidedDecorator represents a user (client that uses this package)
// provided decorator that wraps an inner http.ResponseWriter object.
// The user-provided decorator object must return the inner (decorated)
// http.ResponseWriter object via the Unwrap function.
type UserProvidedDecorator interface {
	http.ResponseWriter

	// Unwrap returns the inner http.ResponseWriter object associated
	// with the user-provided decorator.
	Unwrap() http.ResponseWriter
}

// WrapForHTTP1Or2 accepts a user-provided decorator of an "inner" http.ResponseWriter
// object and potentially wraps the user-provided decorator with a new http.ResponseWriter
// object that implements http.CloseNotifier, http.Flusher, and/or http.Hijacker by
// delegating to the user-provided decorator (if it implements the relevant method) or
// the inner http.ResponseWriter (otherwise), so that the returned http.ResponseWriter
// object implements the same subset of those interfaces as the inner http.ResponseWriter.
//
// This function handles the following three cases.
//   - The inner ResponseWriter implements `http.CloseNotifier`, `http.Flusher`,
//     and `http.Hijacker` (an HTTP/1.1 server provides such a ResponseWriter).
//   - The inner ResponseWriter implements `http.CloseNotifier` and `http.Flusher`
//     but not `http.Hijacker` (an HTTP/2 server provides such a ResponseWriter).
//   - All the other cases collapse to this one, in which the given ResponseWriter is returned.
//
// There are three applicable terms:
//   - "outer": this is the ResponseWriter object returned by the WrapForHTTP1Or2 function.
//   - "user-provided decorator" or "middle": this is the user-provided decorator
//     that decorates an inner ResponseWriter object. A user-provided decorator
//     implements the UserProvidedDecorator interface. A user-provided decorator
//     may or may not implement http.CloseNotifier, http.Flusher or http.Hijacker.
//   - "inner": the ResponseWriter that the user-provided decorator extends.
func WrapForHTTP1Or2(decorator UserProvidedDecorator) http.ResponseWriter {
	// from go net/http documentation:
	// The default HTTP/1.x and HTTP/2 ResponseWriter implementations support Flusher
	// Handlers should always test for this ability at runtime.
	//
	// The Hijacker interface is implemented by ResponseWriters that allow an HTTP handler
	// to take over the connection.
	// The default ResponseWriter for HTTP/1.x connections supports Hijacker, but HTTP/2 connections
	// intentionally do not. ResponseWriter wrappers may also not support Hijacker.
	// Handlers should always test for this ability at runtime
	//
	// The CloseNotifier interface is implemented by ResponseWriters which allow detecting
	// when the underlying connection has gone away.
	// Deprecated: the CloseNotifier interface predates Go's context package.
	// New code should use Request.Context instead.
	inner := decorator.Unwrap()
	if innerNotifierFlusher, ok := inner.(CloseNotifierFlusher); ok {
		// for an HTTP/2 request, the default ResponseWriter object (http2responseWriter)
		// implements Flusher and CloseNotifier.
		outerHTTP2 := outerWithCloseNotifyAndFlush{
			UserProvidedDecorator:     decorator,
			InnerCloseNotifierFlusher: innerNotifierFlusher,
		}

		if innerHijacker, hijackable := inner.(http.Hijacker); hijackable {
			// for an HTTP/1.x request the default implementation of ResponseWriter
			// also implements CloseNotifier, Flusher and Hijacker
			return &outerWithCloseNotifyFlushAndHijack{
				outerWithCloseNotifyAndFlush: outerHTTP2,
				InnerHijacker:                innerHijacker,
			}
		}

		return outerHTTP2
	}

	// we should never be here for either an http/1.x or http2 request
	return decorator
}

// CloseNotifierFlusher is a combination of http.CloseNotifier and http.Flusher
// This applies to both http/1.x and http2 requests.
type CloseNotifierFlusher interface {
	http.CloseNotifier
	http.Flusher
}

// GetOriginal goes through the chain of wrapped http.ResponseWriter objects
// and returns the original http.ResponseWriter object provided to the first
// request handler in the filter chain.
func GetOriginal(w http.ResponseWriter) http.ResponseWriter {
	decorator, ok := w.(UserProvidedDecorator)
	if !ok {
		return w
	}

	inner := decorator.Unwrap()
	if inner == w {
		// infinite cycle here, we should never be here though.
		panic("http.ResponseWriter decorator chain has a cycle")
	}

	return GetOriginal(inner)
}

//nolint:staticcheck // SA1019
var _ http.CloseNotifier = outerWithCloseNotifyAndFlush{}
var _ http.Flusher = outerWithCloseNotifyAndFlush{}
var _ http.ResponseWriter = outerWithCloseNotifyAndFlush{}
var _ UserProvidedDecorator = outerWithCloseNotifyAndFlush{}

// outerWithCloseNotifyAndFlush is the outer object that extends the
// user-provided decorator with http.CloseNotifier and http.Flusher only.
type outerWithCloseNotifyAndFlush struct {
	// UserProvidedDecorator is the user-provided object, it decorates
	// an inner ResponseWriter object.
	UserProvidedDecorator

	// http.CloseNotifier and http.Flusher for the inner object
	InnerCloseNotifierFlusher CloseNotifierFlusher
}

func (wr outerWithCloseNotifyAndFlush) CloseNotify() <-chan bool {
	if notifier, ok := wr.UserProvidedDecorator.(http.CloseNotifier); ok {
		return notifier.CloseNotify()
	}

	return wr.InnerCloseNotifierFlusher.CloseNotify()
}

func (wr outerWithCloseNotifyAndFlush) Flush() {
	if flusher, ok := wr.UserProvidedDecorator.(http.Flusher); ok {
		flusher.Flush()
		return
	}

	wr.InnerCloseNotifierFlusher.Flush()
}

//lint:file-ignore SA1019 Keep supporting deprecated http.CloseNotifier
var _ http.CloseNotifier = outerWithCloseNotifyFlushAndHijack{}
var _ http.Flusher = outerWithCloseNotifyFlushAndHijack{}
var _ http.Hijacker = outerWithCloseNotifyFlushAndHijack{}
var _ http.ResponseWriter = outerWithCloseNotifyFlushAndHijack{}
var _ UserProvidedDecorator = outerWithCloseNotifyFlushAndHijack{}

// outerWithCloseNotifyFlushAndHijack is the outer object that extends the
// user-provided decorator with http.CloseNotifier, http.Flusher and http.Hijacker.
// This applies to http/1.x requests only.
type outerWithCloseNotifyFlushAndHijack struct {
	outerWithCloseNotifyAndFlush

	// http.Hijacker for the inner object
	InnerHijacker http.Hijacker
}

func (wr outerWithCloseNotifyFlushAndHijack) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	if hijacker, ok := wr.UserProvidedDecorator.(http.Hijacker); ok {
		return hijacker.Hijack()
	}

	return wr.InnerHijacker.Hijack()
}
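Editor's note: a minimal sketch of the decorator contract described above, a hypothetical byte-counting wrapper that implements UserProvidedDecorator and is passed through WrapForHTTP1Or2 so the result keeps the inner writer's Flusher/CloseNotifier/Hijacker capabilities (illustrative only, not part of the vendored diff; the type and function names are assumptions):

package middleware

import (
	"net/http"

	"k8s.io/apiserver/pkg/endpoints/responsewriter"
)

// countingWriter is a hypothetical user-provided decorator.
type countingWriter struct {
	http.ResponseWriter
	written int
}

func (c *countingWriter) Write(p []byte) (int, error) {
	n, err := c.ResponseWriter.Write(p)
	c.written += n
	return n, err
}

// Unwrap returns the inner (decorated) ResponseWriter, as UserProvidedDecorator requires.
func (c *countingWriter) Unwrap() http.ResponseWriter { return c.ResponseWriter }

// withByteCounting wraps downstream handlers so they still see the same optional
// interfaces (Flusher, Hijacker, CloseNotifier) as the original writer.
func withByteCounting(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		decorated := &countingWriter{ResponseWriter: w}
		next.ServeHTTP(responsewriter.WrapForHTTP1Or2(decorated), req)
	})
}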
39
e2e/vendor/k8s.io/apiserver/pkg/endpoints/warning/warning.go
generated
vendored
Normal file
@ -0,0 +1,39 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package warning

import (
	restful "github.com/emicklei/go-restful/v3"

	"k8s.io/apiserver/pkg/warning"
)

// AddWarningsHandler returns a handler that adds the provided warnings to all requests,
// then delegates to the provided handler.
func AddWarningsHandler(handler restful.RouteFunction, warnings []string) restful.RouteFunction {
	if len(warnings) == 0 {
		return handler
	}

	return func(req *restful.Request, res *restful.Response) {
		ctx := req.Request.Context()
		for _, msg := range warnings {
			warning.AddWarning(ctx, "", msg)
		}
		handler(req, res)
	}
}
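Editor's note: a minimal sketch of wrapping a go-restful route function with AddWarningsHandler so every response to that route carries a deprecation warning (illustrative only, not part of the vendored diff; the route, payload, and message are made-up examples):

package main

import (
	restful "github.com/emicklei/go-restful/v3"

	endpointswarning "k8s.io/apiserver/pkg/endpoints/warning"
)

// listWidgets is a made-up route function standing in for a real handler.
func listWidgets(req *restful.Request, res *restful.Response) {
	_ = res.WriteAsJson([]string{"widget-a", "widget-b"})
}

func main() {
	wrapped := endpointswarning.AddWarningsHandler(listWidgets,
		[]string{"v1beta1 widgets is deprecated; use v1 instead"})

	ws := new(restful.WebService)
	ws.Route(ws.GET("/widgets").To(wrapped))
	restful.Add(ws)
	// Serving the default container (e.g. via http.ListenAndServe) is omitted here.
}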