rebase: update kubernetes dep to 1.24.0

As Kubernetes 1.24.0 has been released, update the
kubernetes dependencies to 1.24.0.

updates: #3086

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Author: Madhu Rajanna
Date: 2022-05-05 08:17:06 +05:30
Committed by: mergify[bot]
Parent: fc1529f268
Commit: c4f79d455f
959 changed files with 80055 additions and 27456 deletions

vendor/k8s.io/apimachinery/pkg/runtime/allocator.go (generated, vendored, new file; 74 lines added)

@ -0,0 +1,74 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package runtime
import (
"sync"
)
// AllocatorPool simply stores Allocator objects to avoid additional memory allocations
// by caching created but unused items for later reuse, relieving pressure on the garbage collector.
//
// Usage:
// memoryAllocator := runtime.AllocatorPool.Get().(*runtime.Allocator)
// defer runtime.AllocatorPool.Put(memoryAllocator)
//
// A note for future:
// consider introducing multiple pools for storing buffers of different sizes
// perhaps this could allow us to be more efficient.
var AllocatorPool = sync.Pool{
New: func() interface{} {
return &Allocator{}
},
}
// Allocator knows how to allocate memory
// It exists to make the cost of object serialization cheaper.
// In some cases, it allows for allocating memory only once and then reusing it.
// This approach puts less load on GC and leads to less fragmented memory in general.
type Allocator struct {
buf []byte
}
var _ MemoryAllocator = &Allocator{}
// Allocate reserves memory for n bytes only if the underlying array doesn't have enough capacity
// otherwise it returns previously allocated block of memory.
//
// Note that the returned array is not zeroed, it is the caller's
// responsibility to clean the memory if needed.
func (a *Allocator) Allocate(n uint64) []byte {
if uint64(cap(a.buf)) >= n {
a.buf = a.buf[:n]
return a.buf
}
// grow the buffer
size := uint64(2*cap(a.buf)) + n
a.buf = make([]byte, size, size)
a.buf = a.buf[:n]
return a.buf
}
// SimpleAllocator is a wrapper around make([]byte) that
// conforms to the MemoryAllocator interface.
type SimpleAllocator struct{}
var _ MemoryAllocator = &SimpleAllocator{}
func (sa *SimpleAllocator) Allocate(n uint64) []byte {
return make([]byte, n, n)
}
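The new file above is self-contained, so a short usage sketch may help. This is assumed caller code, not part of this commit, and it follows the pattern documented on AllocatorPool:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime"
)

func main() {
    // Borrow an Allocator from the pool and return it when done.
    memoryAllocator := runtime.AllocatorPool.Get().(*runtime.Allocator)
    defer runtime.AllocatorPool.Put(memoryAllocator)

    // Only the first (largest) request grows the backing array; later requests
    // that fit in the existing capacity reuse it and skip the heap entirely.
    for _, n := range []uint64{256, 64, 128} {
        buf := memoryAllocator.Allocate(n)
        fmt.Printf("len=%d cap=%d\n", len(buf), cap(buf))
    }
}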


@ -705,24 +705,13 @@ func mapToUnstructured(sv, dv reflect.Value) error {
}
if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
if st.Key().Kind() == reflect.String {
switch st.Elem().Kind() {
// TODO It should be possible to reuse the slice for primitive types.
// However, it is panicing in the following form.
// case reflect.String, reflect.Bool,
// reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
// reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
// sv.Set(sv)
// return nil
default:
// We need to do a proper conversion.
}
dv.Set(reflect.MakeMap(mapStringInterfaceType))
dv = dv.Elem()
dt = dv.Type()
}
dv.Set(reflect.MakeMap(mapStringInterfaceType))
dv = dv.Elem()
dt = dv.Type()
}
if dt.Kind() != reflect.Map {
return fmt.Errorf("cannot convert struct to: %v", dt.Kind())
return fmt.Errorf("cannot convert map to: %v", dt.Kind())
}
if !st.Key().AssignableTo(dt.Key()) && !st.Key().ConvertibleTo(dt.Key()) {
@ -763,20 +752,9 @@ func sliceToUnstructured(sv, dv reflect.Value) error {
return nil
}
if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
switch st.Elem().Kind() {
// TODO It should be possible to reuse the slice for primitive types.
// However, it is panicing in the following form.
// case reflect.String, reflect.Bool,
// reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
// reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
// sv.Set(sv)
// return nil
default:
// We need to do a proper conversion.
dv.Set(reflect.MakeSlice(reflect.SliceOf(dt), sv.Len(), sv.Cap()))
dv = dv.Elem()
dt = dv.Type()
}
dv.Set(reflect.MakeSlice(reflect.SliceOf(dt), sv.Len(), sv.Cap()))
dv = dv.Elem()
dt = dv.Type()
}
if dt.Kind() != reflect.Slice {
return fmt.Errorf("cannot convert slice to: %v", dt.Kind())


@ -137,31 +137,31 @@ func init() {
}
var fileDescriptor_9d3c45d7f546725c = []byte{
// 378 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x4f, 0xab, 0x13, 0x31,
0x14, 0xc5, 0x27, 0xaf, 0x85, 0x3e, 0xd3, 0xc2, 0x93, 0xb8, 0x70, 0x74, 0x91, 0x79, 0x74, 0xe5,
0x5b, 0xbc, 0x04, 0x1e, 0x08, 0x6e, 0x3b, 0xa5, 0xa0, 0x88, 0x20, 0xc1, 0x3f, 0xe0, 0xca, 0x74,
0x26, 0x4e, 0xc3, 0xd0, 0x9b, 0x21, 0xcd, 0x38, 0x76, 0xe7, 0x47, 0xf0, 0x63, 0x75, 0xd9, 0x65,
0x57, 0xc5, 0x8e, 0x1f, 0xc2, 0xad, 0x34, 0x4d, 0x6b, 0xd5, 0x85, 0xbb, 0xe4, 0x9e, 0xf3, 0x3b,
0xf7, 0x1e, 0xfc, 0xbc, 0x7c, 0xb6, 0x60, 0xda, 0xf0, 0xb2, 0x9e, 0x2a, 0x0b, 0xca, 0xa9, 0x05,
0xff, 0xac, 0x20, 0x37, 0x96, 0x07, 0x41, 0x56, 0x7a, 0x2e, 0xb3, 0x99, 0x06, 0x65, 0x97, 0xbc,
0x2a, 0x0b, 0x6e, 0x6b, 0x70, 0x7a, 0xae, 0x78, 0xa1, 0x40, 0x59, 0xe9, 0x54, 0xce, 0x2a, 0x6b,
0x9c, 0x21, 0xc9, 0x01, 0x60, 0xe7, 0x00, 0xab, 0xca, 0x82, 0x05, 0xe0, 0xf1, 0x6d, 0xa1, 0xdd,
0xac, 0x9e, 0xb2, 0xcc, 0xcc, 0x79, 0x61, 0x0a, 0xc3, 0x3d, 0x37, 0xad, 0x3f, 0xf9, 0x9f, 0xff,
0xf8, 0xd7, 0x21, 0x6f, 0x78, 0x83, 0x07, 0x42, 0x36, 0x93, 0x2f, 0x4e, 0xc1, 0x42, 0x1b, 0x20,
0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0xd7, 0xe8, 0xc9, 0x20, 0xed, 0xb5, 0xdb, 0xa4, 0x23, 0x64,
0x23, 0xf6, 0xb3, 0xe1, 0x47, 0x7c, 0xf9, 0x66, 0x59, 0xa9, 0x57, 0xca, 0x49, 0x72, 0x87, 0xb1,
0xac, 0xf4, 0x3b, 0x65, 0xf7, 0x90, 0x77, 0xdf, 0x4b, 0xc9, 0x6a, 0x9b, 0x44, 0xed, 0x36, 0xc1,
0xa3, 0xd7, 0x2f, 0x82, 0x22, 0xce, 0x5c, 0xe4, 0x1a, 0x77, 0x4b, 0x0d, 0x79, 0x7c, 0xe1, 0xdd,
0x83, 0xe0, 0xee, 0xbe, 0xd4, 0x90, 0x0b, 0xaf, 0x0c, 0x7f, 0x22, 0xdc, 0x7b, 0x0b, 0x25, 0x98,
0x06, 0xc8, 0x7b, 0x7c, 0xe9, 0xc2, 0x36, 0x9f, 0xdf, 0xbf, 0xbb, 0x61, 0xff, 0xe9, 0xce, 0x8e,
0xe7, 0xa5, 0xf7, 0x43, 0xf8, 0xe9, 0x60, 0x71, 0x0a, 0x3b, 0x36, 0xbc, 0xf8, 0xb7, 0x21, 0x19,
0xe1, 0xab, 0xcc, 0x80, 0x53, 0xe0, 0x26, 0x90, 0x99, 0x5c, 0x43, 0x11, 0x77, 0xfc, 0xb1, 0x0f,
0x43, 0xde, 0xd5, 0xf8, 0x4f, 0x59, 0xfc, 0xed, 0x27, 0x4f, 0x71, 0x3f, 0x8c, 0xf6, 0xab, 0xe3,
0xae, 0xc7, 0x1f, 0x04, 0xbc, 0x3f, 0xfe, 0x2d, 0x89, 0x73, 0x5f, 0x7a, 0xbb, 0xda, 0xd1, 0x68,
0xbd, 0xa3, 0xd1, 0x66, 0x47, 0xa3, 0xaf, 0x2d, 0x45, 0xab, 0x96, 0xa2, 0x75, 0x4b, 0xd1, 0xa6,
0xa5, 0xe8, 0x7b, 0x4b, 0xd1, 0xb7, 0x1f, 0x34, 0xfa, 0xd0, 0x0b, 0x45, 0x7f, 0x05, 0x00, 0x00,
0xff, 0xff, 0xe3, 0x33, 0x18, 0x0b, 0x50, 0x02, 0x00, 0x00,
// 380 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xcf, 0xaa, 0x13, 0x31,
0x14, 0xc6, 0x27, 0xb7, 0x85, 0x7b, 0x4d, 0x0b, 0x57, 0xe2, 0xc2, 0xd1, 0x45, 0xe6, 0xd2, 0x95,
0x77, 0x61, 0x02, 0x17, 0x04, 0xb7, 0x9d, 0x52, 0x50, 0x44, 0x90, 0xe0, 0x1f, 0x70, 0x65, 0x3a,
0x13, 0xa7, 0x61, 0xe8, 0xc9, 0x90, 0x66, 0x1c, 0xbb, 0xf3, 0x11, 0x7c, 0xac, 0x2e, 0xbb, 0xec,
0xaa, 0xd8, 0xf1, 0x21, 0xdc, 0x4a, 0xd3, 0xb4, 0x56, 0x5d, 0x74, 0x97, 0x73, 0xbe, 0xef, 0xf7,
0x9d, 0x73, 0x20, 0xf8, 0x45, 0xf9, 0x7c, 0xce, 0xb4, 0xe1, 0x65, 0x3d, 0x51, 0x16, 0x94, 0x53,
0x73, 0xfe, 0x45, 0x41, 0x6e, 0x2c, 0x0f, 0x82, 0xac, 0xf4, 0x4c, 0x66, 0x53, 0x0d, 0xca, 0x2e,
0x78, 0x55, 0x16, 0xdc, 0xd6, 0xe0, 0xf4, 0x4c, 0xf1, 0x42, 0x81, 0xb2, 0xd2, 0xa9, 0x9c, 0x55,
0xd6, 0x38, 0x43, 0x92, 0x3d, 0xc0, 0x4e, 0x01, 0x56, 0x95, 0x05, 0x0b, 0xc0, 0xe3, 0xa7, 0x85,
0x76, 0xd3, 0x7a, 0xc2, 0x32, 0x33, 0xe3, 0x85, 0x29, 0x0c, 0xf7, 0xdc, 0xa4, 0xfe, 0xec, 0x2b,
0x5f, 0xf8, 0xd7, 0x3e, 0x6f, 0x70, 0x8b, 0xfb, 0x42, 0x36, 0xe3, 0xaf, 0x4e, 0xc1, 0x5c, 0x1b,
0x20, 0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0x37, 0xe8, 0x49, 0x3f, 0xbd, 0x6c, 0x37, 0x49, 0x47,
0xc8, 0x46, 0xec, 0x7a, 0x83, 0x4f, 0xf8, 0xea, 0xed, 0xa2, 0x52, 0xaf, 0x95, 0x93, 0xe4, 0x0e,
0x63, 0x59, 0xe9, 0xf7, 0xca, 0xee, 0x20, 0xef, 0xbe, 0x97, 0x92, 0xe5, 0x26, 0x89, 0xda, 0x4d,
0x82, 0x87, 0x6f, 0x5e, 0x06, 0x45, 0x9c, 0xb8, 0xc8, 0x0d, 0xee, 0x96, 0x1a, 0xf2, 0xf8, 0xc2,
0xbb, 0xfb, 0xc1, 0xdd, 0x7d, 0xa5, 0x21, 0x17, 0x5e, 0x19, 0xfc, 0x42, 0xf8, 0xf2, 0x1d, 0x94,
0x60, 0x1a, 0x20, 0x1f, 0xf0, 0x95, 0x0b, 0xd3, 0x7c, 0x7e, 0xef, 0xee, 0x96, 0x9d, 0xb9, 0x9d,
0x1d, 0xd6, 0x4b, 0xef, 0x87, 0xf0, 0xe3, 0xc2, 0xe2, 0x18, 0x76, 0xb8, 0xf0, 0xe2, 0xff, 0x0b,
0xc9, 0x10, 0x5f, 0x67, 0x06, 0x9c, 0x02, 0x37, 0x86, 0xcc, 0xe4, 0x1a, 0x8a, 0xb8, 0xe3, 0x97,
0x7d, 0x18, 0xf2, 0xae, 0x47, 0x7f, 0xcb, 0xe2, 0x5f, 0x3f, 0x79, 0x86, 0x7b, 0xa1, 0xb5, 0x1b,
0x1d, 0x77, 0x3d, 0xfe, 0x20, 0xe0, 0xbd, 0xd1, 0x1f, 0x49, 0x9c, 0xfa, 0xd2, 0xf1, 0x72, 0x4b,
0xa3, 0xd5, 0x96, 0x46, 0xeb, 0x2d, 0x8d, 0xbe, 0xb5, 0x14, 0x2d, 0x5b, 0x8a, 0x56, 0x2d, 0x45,
0xeb, 0x96, 0xa2, 0x1f, 0x2d, 0x45, 0xdf, 0x7f, 0xd2, 0xe8, 0x63, 0x72, 0xe6, 0xb7, 0xfc, 0x0e,
0x00, 0x00, 0xff, 0xff, 0x1f, 0x32, 0xd5, 0x68, 0x68, 0x02, 0x00, 0x00,
}
func (m *RawExtension) Marshal() (dAtA []byte, err error) {


@ -22,7 +22,7 @@ syntax = "proto2";
package k8s.io.apimachinery.pkg.runtime;
// Package-wide variables from generator "generated".
option go_package = "runtime";
option go_package = "k8s.io/apimachinery/pkg/runtime";
// RawExtension is used to hold extensions in external versions.
//


@ -69,6 +69,24 @@ type Encoder interface {
Identifier() Identifier
}
// MemoryAllocator is responsible for allocating memory.
// By encapsulating memory allocation into its own interface, we can reuse the memory
// across many operations in places we know it can significantly improve the performance.
type MemoryAllocator interface {
// Allocate reserves memory for n bytes.
// Note that implementations of this method are not required to zero the returned array.
// It is the caller's responsibility to clean the memory if needed.
Allocate(n uint64) []byte
}
// EncoderWithAllocator serializes objects in a way that allows callers to manage any additional memory allocations.
type EncoderWithAllocator interface {
Encoder
// EncodeWithAllocator writes an object to a stream as Encode does.
// In addition, it allows for providing a memory allocator for efficient memory usage during object serialization
EncodeWithAllocator(obj Object, w io.Writer, memAlloc MemoryAllocator) error
}
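Because MemoryAllocator is a one-method interface, callers can wrap or instrument it and pass the wrapper straight to EncodeWithAllocator. A hedged sketch; countingAllocator is hypothetical and not part of this commit:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime"
)

type countingAllocator struct {
    inner runtime.Allocator // reuses the buffer-growing logic from allocator.go
    calls int
}

func (c *countingAllocator) Allocate(n uint64) []byte {
    c.calls++
    return c.inner.Allocate(n)
}

var _ runtime.MemoryAllocator = &countingAllocator{}

func main() {
    a := &countingAllocator{}
    _ = a.Allocate(128)
    _ = a.Allocate(64) // served from the same backing array, but still counted
    fmt.Println("allocations requested:", a.calls)
}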
// Decoder attempts to load an object from data.
type Decoder interface {
// Decode attempts to deserialize the provided data using either the innate typing of the scheme or the
@ -153,7 +171,7 @@ type NegotiatedSerializer interface {
// EncoderForVersion returns an encoder that ensures objects being written to the provided
// serializer are in the provided group version.
EncoderForVersion(serializer Encoder, gv GroupVersioner) Encoder
// DecoderForVersion returns a decoder that ensures objects being read by the provided
// DecoderToVersion returns a decoder that ensures objects being read by the provided
// serializer are in the provided group version by default.
DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder
}
@ -207,6 +225,12 @@ type NestedObjectEncoder interface {
// NestedObjectDecoder is an optional interface that objects may implement to be given
// an opportunity to decode any nested Objects / RawExtensions during serialization.
// It is possible for DecodeNestedObjects to return a non-nil error but for the decoding
// to have succeeded in the case of strict decoding errors (e.g. unknown/duplicate fields).
// As such it is important for callers of DecodeNestedObjects to check to confirm whether
// an error is a runtime.StrictDecodingError before short circuiting.
// Similarly, implementations of DecodeNestedObjects should ensure that a runtime.StrictDecodingError
// is only returned when the rest of decoding has succeeded.
type NestedObjectDecoder interface {
DecodeNestedObjects(d Decoder) error
}
@ -284,14 +308,11 @@ type ResourceVersioner interface {
ResourceVersion(obj Object) (string, error)
}
// SelfLinker provides methods for setting and retrieving the SelfLink field of an API object.
type SelfLinker interface {
SetSelfLink(obj Object, selfLink string) error
SelfLink(obj Object) (string, error)
// Knowing Name is sometimes necessary to use a SelfLinker.
// Namer provides methods for retrieving name and namespace of an API object.
type Namer interface {
// Name returns the name of a given object.
Name(obj Object) (string, error)
// Knowing Namespace is sometimes necessary to use a SelfLinker
// Namespace returns the namespace of a given object.
Namespace(obj Object) (string, error)
}


@ -43,17 +43,17 @@ func init() {
}
var fileDescriptor_0462724132518e0d = []byte{
// 185 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0xcc, 0xaf, 0x6e, 0xc3, 0x30,
0x10, 0xc7, 0x71, 0x9b, 0x0c, 0x0c, 0x0e, 0x0e, 0x1c, 0x1c, 0xda, 0x7c, 0x74, 0xb8, 0x2f, 0x50,
0x5e, 0xe6, 0x24, 0x57, 0xc7, 0xb2, 0xfc, 0x47, 0x8e, 0x5d, 0xa9, 0xac, 0x8f, 0xd0, 0xc7, 0x0a,
0x0c, 0x0c, 0x6c, 0xdc, 0x17, 0xa9, 0x64, 0x07, 0x94, 0xdd, 0x4f, 0xa7, 0xcf, 0xf7, 0xf3, 0x68,
0xfe, 0x27, 0xa1, 0x3d, 0x9a, 0xdc, 0x51, 0x74, 0x94, 0x68, 0xc2, 0x0b, 0xb9, 0xc1, 0x47, 0xdc,
0x1f, 0x32, 0x68, 0x2b, 0xfb, 0x51, 0x3b, 0x8a, 0x57, 0x0c, 0x46, 0x61, 0xcc, 0x2e, 0x69, 0x4b,
0x38, 0xf5, 0x23, 0x59, 0x89, 0x8a, 0x1c, 0x45, 0x99, 0x68, 0x10, 0x21, 0xfa, 0xe4, 0xbf, 0x7e,
0x9a, 0x13, 0xef, 0x4e, 0x04, 0xa3, 0xc4, 0xee, 0x44, 0x73, 0xdf, 0x7f, 0x4a, 0xa7, 0x31, 0x77,
0xa2, 0xf7, 0x16, 0x95, 0x57, 0x1e, 0x2b, 0xef, 0xf2, 0xb9, 0xae, 0x3a, 0xea, 0xd5, 0xb2, 0x87,
0xdf, 0x79, 0x03, 0xb6, 0x6c, 0xc0, 0xd6, 0x0d, 0xd8, 0xad, 0x00, 0x9f, 0x0b, 0xf0, 0xa5, 0x00,
0x5f, 0x0b, 0xf0, 0x47, 0x01, 0x7e, 0x7f, 0x02, 0x3b, 0x7d, 0xb4, 0xf8, 0x2b, 0x00, 0x00, 0xff,
0xff, 0xba, 0x7e, 0x65, 0xf4, 0xd6, 0x00, 0x00, 0x00,
// 186 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0xce, 0xad, 0x8e, 0xc3, 0x30,
0x0c, 0xc0, 0xf1, 0x84, 0x1e, 0x3c, 0x78, 0xc0, 0xb0, 0xec, 0x62, 0x7a, 0xf8, 0xf0, 0xa4, 0xf1,
0xb1, 0xb4, 0xf5, 0xd2, 0x28, 0xca, 0x87, 0xd2, 0x64, 0xd2, 0xd8, 0x1e, 0x61, 0x8f, 0x55, 0x58,
0x58, 0xb8, 0x66, 0x2f, 0x32, 0x29, 0x2d, 0x18, 0x1c, 0xf3, 0x5f, 0xd6, 0xcf, 0xf2, 0xd7, 0xd1,
0xfc, 0x8d, 0x42, 0x7b, 0x34, 0xb9, 0xa5, 0xe8, 0x28, 0xd1, 0x88, 0x17, 0x72, 0xbd, 0x8f, 0xb8,
0x2f, 0x64, 0xd0, 0x56, 0x76, 0x83, 0x76, 0x14, 0xaf, 0x18, 0x8c, 0xc2, 0x98, 0x5d, 0xd2, 0x96,
0x70, 0xec, 0x06, 0xb2, 0x12, 0x15, 0x39, 0x8a, 0x32, 0x51, 0x2f, 0x42, 0xf4, 0xc9, 0x7f, 0x37,
0x9b, 0x13, 0xef, 0x4e, 0x04, 0xa3, 0xc4, 0xee, 0xc4, 0xe6, 0x7e, 0x7e, 0x95, 0x4e, 0x43, 0x6e,
0x45, 0xe7, 0x2d, 0x2a, 0xaf, 0x3c, 0x56, 0xde, 0xe6, 0x73, 0xad, 0x1a, 0x75, 0xda, 0xce, 0xfe,
0x1f, 0xa6, 0x15, 0xd8, 0xbc, 0x02, 0x5b, 0x56, 0x60, 0xb7, 0x02, 0x7c, 0x2a, 0xc0, 0xe7, 0x02,
0x7c, 0x29, 0xc0, 0x1f, 0x05, 0xf8, 0xfd, 0x09, 0xec, 0xd4, 0x7c, 0xf6, 0xf4, 0x2b, 0x00, 0x00,
0xff, 0xff, 0x12, 0xb4, 0xae, 0x48, 0xf6, 0x00, 0x00, 0x00,
}


@ -22,5 +22,5 @@ syntax = "proto2";
package k8s.io.apimachinery.pkg.runtime.schema;
// Package-wide variables from generator "generated".
option go_package = "schema";
option go_package = "k8s.io/apimachinery/pkg/runtime/schema";


@ -44,11 +44,11 @@ import (
// Schemes are not expected to change at runtime and are only threadsafe after
// registration is complete.
type Scheme struct {
// versionMap allows one to figure out the go type of an object with
// gvkToType allows one to figure out the go type of an object with
// the given version and name.
gvkToType map[schema.GroupVersionKind]reflect.Type
// typeToGroupVersion allows one to find metadata for a given go object.
// typeToGVK allows one to find metadata for a given go object.
// The reflect.Type we index by should *not* be a pointer.
typeToGVK map[reflect.Type][]schema.GroupVersionKind
@ -64,7 +64,7 @@ type Scheme struct {
// resource field labels in that version to internal version.
fieldLabelConversionFuncs map[schema.GroupVersionKind]FieldLabelConversionFunc
// defaulterFuncs is an array of interfaces to be called with an object to provide defaulting
// defaulterFuncs is a map of funcs to be called with an object to provide defaulting
// the provided object must be a pointer.
defaulterFuncs map[reflect.Type]func(interface{})


@ -166,7 +166,20 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i
strictErrs, err := s.unmarshal(into, data, originalData)
if err != nil {
return nil, actual, err
} else if len(strictErrs) > 0 {
}
// when decoding directly into a provided unstructured object,
// extract the actual gvk decoded from the provided data,
// and ensure it is non-empty.
if isUnstructured {
*actual = into.GetObjectKind().GroupVersionKind()
if len(actual.Kind) == 0 {
return nil, actual, runtime.NewMissingKindErr(string(originalData))
}
// TODO(109023): require apiVersion here as well once unstructuredJSONScheme#Decode does
}
if len(strictErrs) > 0 {
return into, actual, runtime.NewStrictDecodingError(strictErrs)
}
return into, actual, nil
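The hunk above makes decoding directly into an unstructured object fail when the payload carries no kind. A hedged sketch of that behaviour; the scheme and payload are made up:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/serializer/json"
)

func main() {
    scheme := runtime.NewScheme()
    s := json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme, json.SerializerOptions{})

    into := &unstructured.Unstructured{}
    _, gvk, err := s.Decode([]byte(`{"apiVersion":"v1","metadata":{"name":"x"}}`), nil, into)
    // Expected after this change: a missing-kind error, with gvk.Kind == "".
    fmt.Println(gvk, err)
}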
@ -261,9 +274,9 @@ func (s *Serializer) unmarshal(into runtime.Object, data, originalData []byte) (
var strictJSONErrs []error
if u, isUnstructured := into.(runtime.Unstructured); isUnstructured {
// Unstructured is a custom unmarshaler that gets delegated
// to, so inorder to detect strict JSON errors we need
// to, so in order to detect strict JSON errors we need
// to unmarshal directly into the object.
m := u.UnstructuredContent()
m := map[string]interface{}{}
strictJSONErrs, err = kjson.UnmarshalStrict(data, &m)
u.SetUnstructuredContent(m)
} else {


@ -30,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
"k8s.io/apimachinery/pkg/util/framer"
"k8s.io/klog/v2"
)
var (
@ -86,6 +87,7 @@ type Serializer struct {
}
var _ runtime.Serializer = &Serializer{}
var _ runtime.EncoderWithAllocator = &Serializer{}
var _ recognizer.RecognizingDecoder = &Serializer{}
const serializerIdentifier runtime.Identifier = "protobuf"
@ -161,22 +163,36 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i
return unmarshalToObject(s.typer, s.creater, &actual, into, unk.Raw)
}
// Encode serializes the provided object to the given writer.
func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
if co, ok := obj.(runtime.CacheableObject); ok {
return co.CacheEncode(s.Identifier(), s.doEncode, w)
}
return s.doEncode(obj, w)
// EncodeWithAllocator writes an object to the provided writer.
// In addition, it allows for providing a memory allocator for efficient memory usage during object serialization.
func (s *Serializer) EncodeWithAllocator(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error {
return s.encode(obj, w, memAlloc)
}
func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
// Encode serializes the provided object to the given writer.
func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
return s.encode(obj, w, &runtime.SimpleAllocator{})
}
func (s *Serializer) encode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error {
if co, ok := obj.(runtime.CacheableObject); ok {
return co.CacheEncode(s.Identifier(), func(obj runtime.Object, w io.Writer) error { return s.doEncode(obj, w, memAlloc) }, w)
}
return s.doEncode(obj, w, memAlloc)
}
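A hedged usage sketch of the new EncodeWithAllocator entry point. The nil creater/typer and the object list are placeholders (a real caller would pass a *runtime.Scheme), and the allocator is pooled as allocator.go suggests:

package main

import (
    "bytes"

    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
)

func encodeAll(objs []runtime.Object) error {
    s := protobuf.NewSerializer(nil, nil) // placeholders; normally backed by a scheme

    memAlloc := runtime.AllocatorPool.Get().(*runtime.Allocator)
    defer runtime.AllocatorPool.Put(memAlloc)

    var buf bytes.Buffer
    for _, obj := range objs {
        buf.Reset()
        // Each iteration reuses memAlloc's buffer instead of a fresh make([]byte, ...).
        if err := s.EncodeWithAllocator(obj, &buf, memAlloc); err != nil {
            return err
        }
    }
    return nil
}

func main() { _ = encodeAll(nil) }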
func (s *Serializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error {
if memAlloc == nil {
klog.Error("a mandatory memory allocator wasn't provided, this might have a negative impact on performance, check invocations of EncodeWithAllocator method, falling back on runtime.SimpleAllocator")
memAlloc = &runtime.SimpleAllocator{}
}
prefixSize := uint64(len(s.prefix))
var unk runtime.Unknown
switch t := obj.(type) {
case *runtime.Unknown:
estimatedSize := prefixSize + uint64(t.Size())
data := make([]byte, estimatedSize)
data := memAlloc.Allocate(estimatedSize)
i, err := t.MarshalTo(data[prefixSize:])
if err != nil {
return err
@ -196,11 +212,11 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
switch t := obj.(type) {
case bufferedMarshaller:
// this path performs a single allocation during write but requires the caller to implement
// the more efficient Size and MarshalToSizedBuffer methods
// this path performs a single allocation during write only when the Allocator wasn't provided
// it also requires the caller to implement the more efficient Size and MarshalToSizedBuffer methods
encodedSize := uint64(t.Size())
estimatedSize := prefixSize + estimateUnknownSize(&unk, encodedSize)
data := make([]byte, estimatedSize)
data := memAlloc.Allocate(estimatedSize)
i, err := unk.NestedMarshalTo(data[prefixSize:], t, encodedSize)
if err != nil {
@ -221,7 +237,7 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
unk.Raw = data
estimatedSize := prefixSize + uint64(unk.Size())
data = make([]byte, estimatedSize)
data = memAlloc.Allocate(estimatedSize)
i, err := unk.MarshalTo(data[prefixSize:])
if err != nil {
@ -395,19 +411,33 @@ func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater,
// Encode serializes the provided object to the given writer. Overrides is ignored.
func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error {
if co, ok := obj.(runtime.CacheableObject); ok {
return co.CacheEncode(s.Identifier(), s.doEncode, w)
}
return s.doEncode(obj, w)
return s.encode(obj, w, &runtime.SimpleAllocator{})
}
func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer) error {
// EncodeWithAllocator writes an object to the provided writer.
// In addition, it allows for providing a memory allocator for efficient memory usage during object serialization.
func (s *RawSerializer) EncodeWithAllocator(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error {
return s.encode(obj, w, memAlloc)
}
func (s *RawSerializer) encode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error {
if co, ok := obj.(runtime.CacheableObject); ok {
return co.CacheEncode(s.Identifier(), func(obj runtime.Object, w io.Writer) error { return s.doEncode(obj, w, memAlloc) }, w)
}
return s.doEncode(obj, w, memAlloc)
}
func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error {
if memAlloc == nil {
klog.Error("a mandatory memory allocator wasn't provided, this might have a negative impact on performance, check invocations of EncodeWithAllocator method, falling back on runtime.SimpleAllocator")
memAlloc = &runtime.SimpleAllocator{}
}
switch t := obj.(type) {
case bufferedReverseMarshaller:
// this path performs a single allocation during write but requires the caller to implement
// the more efficient Size and MarshalToSizedBuffer methods
// this path performs a single allocation during write only when the Allocator wasn't provided
// it also requires the caller to implement the more efficient Size and MarshalToSizedBuffer methods
encodedSize := uint64(t.Size())
data := make([]byte, encodedSize)
data := memAlloc.Allocate(encodedSize)
n, err := t.MarshalToSizedBuffer(data)
if err != nil {
@ -417,10 +447,10 @@ func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer) error {
return err
case bufferedMarshaller:
// this path performs a single allocation during write but requires the caller to implement
// the more efficient Size and MarshalTo methods
// this path performs a single allocation during write only when the Allocator wasn't provided
// it also requires the caller to implement the more efficient Size and MarshalTo methods
encodedSize := uint64(t.Size())
data := make([]byte, encodedSize)
data := memAlloc.Allocate(encodedSize)
n, err := t.MarshalTo(data)
if err != nil {


@ -134,3 +134,23 @@ func (e *encoder) Encode(obj runtime.Object) error {
e.buf.Reset()
return err
}
type encoderWithAllocator struct {
writer io.Writer
encoder runtime.EncoderWithAllocator
memAllocator runtime.MemoryAllocator
}
// NewEncoderWithAllocator returns a new streaming encoder
func NewEncoderWithAllocator(w io.Writer, e runtime.EncoderWithAllocator, a runtime.MemoryAllocator) Encoder {
return &encoderWithAllocator{
writer: w,
encoder: e,
memAllocator: a,
}
}
// Encode writes the provided object to the nested writer
func (e *encoderWithAllocator) Encode(obj runtime.Object) error {
return e.encoder.EncodeWithAllocator(obj, e.writer, e.memAllocator)
}
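A hedged sketch of wiring the new streaming constructor; the writer and serializer choices are placeholders:

package main

import (
    "os"

    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
    "k8s.io/apimachinery/pkg/runtime/serializer/streaming"
)

func main() {
    // Any runtime.EncoderWithAllocator works here; the protobuf serializer is
    // used only as an example (nil creater/typer are placeholders).
    base := protobuf.NewSerializer(nil, nil)
    memAlloc := &runtime.Allocator{}

    enc := streaming.NewEncoderWithAllocator(os.Stdout, base, memAlloc)
    _ = enc // every enc.Encode(obj) call now funnels through EncodeWithAllocator
}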


@ -89,6 +89,8 @@ type codec struct {
originalSchemeName string
}
var _ runtime.EncoderWithAllocator = &codec{}
var identifiersMap sync.Map
type codecIdentifier struct {
@ -133,24 +135,34 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru
}
}
var strictDecodingErr error
var strictDecodingErrs []error
obj, gvk, err := c.decoder.Decode(data, defaultGVK, decodeInto)
if err != nil {
if obj != nil && runtime.IsStrictDecodingError(err) {
// save the strictDecodingError and the caller decide what to do with it
strictDecodingErr = err
if strictErr, ok := runtime.AsStrictDecodingError(err); obj != nil && ok {
// save the strictDecodingError and let the caller decide what to do with it
strictDecodingErrs = append(strictDecodingErrs, strictErr.Errors()...)
} else {
return nil, gvk, err
}
}
// TODO: look into strict handling of nested object decoding
if d, ok := obj.(runtime.NestedObjectDecoder); ok {
if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{c.decoder}); err != nil {
return nil, gvk, err
if strictErr, ok := runtime.AsStrictDecodingError(err); ok {
// save the strictDecodingError and let the caller decide what to do with it
strictDecodingErrs = append(strictDecodingErrs, strictErr.Errors()...)
} else {
return nil, gvk, err
}
}
}
// aggregate the strict decoding errors into one
var strictDecodingErr error
if len(strictDecodingErrs) > 0 {
strictDecodingErr = runtime.NewStrictDecodingError(strictDecodingErrs)
}
// if we specify a target, use generic conversion.
if into != nil {
// perform defaulting if requested
@ -182,19 +194,40 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru
return out, gvk, strictDecodingErr
}
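Strict (unknown/duplicate-field) problems are now aggregated into a single runtime.StrictDecodingError returned alongside a usable object. A hedged sketch of how a caller is expected to treat that; decodeLenient is a hypothetical helper:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime"
)

// decodeLenient treats strict decoding problems as warnings as long as an
// object was actually produced, which is what the aggregation above enables.
func decodeLenient(d runtime.Decoder, data []byte) (runtime.Object, error) {
    obj, gvk, err := d.Decode(data, nil, nil)
    if err != nil {
        if obj != nil && runtime.IsStrictDecodingError(err) {
            fmt.Printf("decoded %v with strict warnings: %v\n", gvk, err)
            return obj, nil
        }
        return nil, err
    }
    return obj, nil
}

func main() {}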
// EncodeWithAllocator ensures the provided object is output in the appropriate group and version, invoking
// conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is.
// In addition, it allows for providing a memory allocator for efficient memory usage during object serialization.
func (c *codec) EncodeWithAllocator(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error {
return c.encode(obj, w, memAlloc)
}
// Encode ensures the provided object is output in the appropriate group and version, invoking
// conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is.
func (c *codec) Encode(obj runtime.Object, w io.Writer) error {
if co, ok := obj.(runtime.CacheableObject); ok {
return co.CacheEncode(c.Identifier(), c.doEncode, w)
}
return c.doEncode(obj, w)
return c.encode(obj, w, nil)
}
func (c *codec) doEncode(obj runtime.Object, w io.Writer) error {
func (c *codec) encode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error {
if co, ok := obj.(runtime.CacheableObject); ok {
return co.CacheEncode(c.Identifier(), func(obj runtime.Object, w io.Writer) error { return c.doEncode(obj, w, memAlloc) }, w)
}
return c.doEncode(obj, w, memAlloc)
}
func (c *codec) doEncode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error {
encodeFn := c.encoder.Encode
if memAlloc != nil {
if encoder, supportsAllocator := c.encoder.(runtime.EncoderWithAllocator); supportsAllocator {
encodeFn = func(obj runtime.Object, w io.Writer) error {
return encoder.EncodeWithAllocator(obj, w, memAlloc)
}
} else {
klog.V(6).Infof("a memory allocator was provided but the encoder %s doesn't implement the runtime.EncoderWithAllocator, using regular encoder.Encode method", c.encoder.Identifier())
}
}
switch obj := obj.(type) {
case *runtime.Unknown:
return c.encoder.Encode(obj, w)
return encodeFn(obj, w)
case runtime.Unstructured:
// An unstructured list can contain objects of multiple group version kinds. don't short-circuit just
// because the top-level type matches our desired destination type. actually send the object to the converter
@ -203,14 +236,14 @@ func (c *codec) doEncode(obj runtime.Object, w io.Writer) error {
// avoid conversion roundtrip if GVK is the right one already or is empty (yes, this is a hack, but the old behaviour we rely on in kubectl)
objGVK := obj.GetObjectKind().GroupVersionKind()
if len(objGVK.Version) == 0 {
return c.encoder.Encode(obj, w)
return encodeFn(obj, w)
}
targetGVK, ok := c.encodeVersion.KindForGroupVersionKinds([]schema.GroupVersionKind{objGVK})
if !ok {
return runtime.NewNotRegisteredGVKErrForTarget(c.originalSchemeName, objGVK, c.encodeVersion)
}
if targetGVK == objGVK {
return c.encoder.Encode(obj, w)
return encodeFn(obj, w)
}
}
}
@ -232,7 +265,7 @@ func (c *codec) doEncode(obj runtime.Object, w io.Writer) error {
}
}
objectKind.SetGroupVersionKind(gvks[0])
return c.encoder.Encode(obj, w)
return encodeFn(obj, w)
}
// Perform a conversion if necessary
@ -248,7 +281,7 @@ func (c *codec) doEncode(obj runtime.Object, w io.Writer) error {
}
// Conversion is responsible for setting the proper group, version, and kind onto the outgoing object
return c.encoder.Encode(out, w)
return encodeFn(out, w)
}
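The codec only uses the allocator when its underlying encoder also implements runtime.EncoderWithAllocator, otherwise it quietly falls back to plain Encode. A hedged sketch of that feature-detection pattern in isolation; encodePreferAllocator is hypothetical:

package main

import (
    "io"

    "k8s.io/apimachinery/pkg/runtime"
)

// encodePreferAllocator mirrors the dispatch above: use EncodeWithAllocator when
// available and an allocator was supplied, otherwise fall back to Encode.
func encodePreferAllocator(e runtime.Encoder, obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error {
    if ea, ok := e.(runtime.EncoderWithAllocator); ok && memAlloc != nil {
        return ea.EncodeWithAllocator(obj, w, memAlloc)
    }
    return e.Encode(obj, w)
}

func main() {}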
// Identifier implements runtime.Encoder interface.