rebase: bump k8s.io/kubernetes in the k8s-dependencies group

Bumps the k8s-dependencies group with 1 update: [k8s.io/kubernetes](https://github.com/kubernetes/kubernetes).

Updates `k8s.io/kubernetes` from 1.32.3 to 1.33.0
- [Release notes](https://github.com/kubernetes/kubernetes/releases)
- [Commits](https://github.com/kubernetes/kubernetes/compare/v1.32.3...v1.33.0)

---
updated-dependencies:
- dependency-name: k8s.io/kubernetes
  dependency-version: 1.33.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: k8s-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: Niels de Vos <ndevos@ibm.com>
Authored by dependabot[bot] on 2025-04-28 22:16:28 +00:00; committed by mergify[bot]
parent 4147d5d15a
commit 51895f8619
699 changed files with 51590 additions and 17096 deletions

View File

@@ -48,4 +48,4 @@ limitations under the License.
//
// As a bonus, a few common types useful from all api objects and versions
// are provided in types.go.
package runtime // import "k8s.io/apimachinery/pkg/runtime"
package runtime

View File

@@ -259,6 +259,7 @@ type ObjectDefaulter interface {
type ObjectVersioner interface {
ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error)
PrioritizedVersionsForGroup(group string) []schema.GroupVersion
}
// ObjectConvertor converts an object to a different version.

View File

@@ -17,15 +17,18 @@ limitations under the License.
package runtime
import (
"context"
"fmt"
"reflect"
"strings"
"k8s.io/apimachinery/pkg/api/operation"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/naming"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
)
// Scheme defines methods for serializing and deserializing API objects, a type
@@ -68,6 +71,12 @@ type Scheme struct {
// the provided object must be a pointer.
defaulterFuncs map[reflect.Type]func(interface{})
// validationFuncs is a map to funcs to be called with an object to perform validation.
// The provided object must be a pointer.
// If oldObject is non-nil, update validation is performed and may perform additional
// validation such as transition rules and immutability checks.
validationFuncs map[reflect.Type]func(ctx context.Context, op operation.Operation, object, oldObject interface{}, subresources ...string) field.ErrorList
// converter stores all registered conversion functions. It also has
// default converting behavior.
converter *conversion.Converter
@@ -96,6 +105,7 @@ func NewScheme() *Scheme {
unversionedKinds: map[string]reflect.Type{},
fieldLabelConversionFuncs: map[schema.GroupVersionKind]FieldLabelConversionFunc{},
defaulterFuncs: map[reflect.Type]func(interface{}){},
validationFuncs: map[reflect.Type]func(ctx context.Context, op operation.Operation, object, oldObject interface{}, subresource ...string) field.ErrorList{},
versionPriority: map[string][]string{},
schemeName: naming.GetNameFromCallsite(internalPackages...),
}
@@ -347,6 +357,35 @@ func (s *Scheme) Default(src Object) {
}
}
// AddValidationFunc registers a function that can validate the object and
// oldObject. These functions will be invoked when Validate() or ValidateUpdate()
// is called. The function will never be called unless the validated object
// matches srcType. If this function is invoked twice with the same srcType, the
// fn passed to the later call will be used instead.
func (s *Scheme) AddValidationFunc(srcType Object, fn func(ctx context.Context, op operation.Operation, object, oldObject interface{}, subresources ...string) field.ErrorList) {
s.validationFuncs[reflect.TypeOf(srcType)] = fn
}
// Validate validates the provided Object according to the generated declarative validation code.
// WARNING: This does not validate all objects! The handwritten validation code in validation.go
// is not run when this is called. Only the generated zz_generated.validations.go validation code is run.
func (s *Scheme) Validate(ctx context.Context, options sets.Set[string], object Object, subresources ...string) field.ErrorList {
if fn, ok := s.validationFuncs[reflect.TypeOf(object)]; ok {
return fn(ctx, operation.Operation{Type: operation.Create, Options: options}, object, nil, subresources...)
}
return nil
}
// ValidateUpdate validates the provided object and oldObject according to the generated declarative validation code.
// WARNING: This does not validate all objects! The handwritten validation code in validation.go
// is not run when this is called. Only the generated zz_generated.validations.go validation code is run.
func (s *Scheme) ValidateUpdate(ctx context.Context, options sets.Set[string], object, oldObject Object, subresources ...string) field.ErrorList {
if fn, ok := s.validationFuncs[reflect.TypeOf(object)]; ok {
return fn(ctx, operation.Operation{Type: operation.Update, Options: options}, object, oldObject, subresources...)
}
return nil
}
// Convert will attempt to convert in into out. Both must be pointers. For easy
// testing of conversion functions. Returns an error if the conversion isn't
// possible. You can call this with types that haven't been registered (for example,
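The three methods above are the new declarative-validation entry points on Scheme. A minimal usage sketch, assuming a hypothetical `examplev1.Widget` type plus `ctx`, `widget`, and `oldWidget` values (none of these names come from this diff):

```go
scheme := runtime.NewScheme()

// Register a hook; real callers wire in the generated
// zz_generated.validations.go code here.
scheme.AddValidationFunc(&examplev1.Widget{}, func(
	ctx context.Context, op operation.Operation,
	object, oldObject interface{}, subresources ...string,
) field.ErrorList {
	return nil // no errors in this sketch
})

// Create path: oldObject is nil and op.Type is operation.Create.
errs := scheme.Validate(ctx, sets.New[string](), widget)

// Update path: both objects are passed and op.Type is operation.Update.
errs = scheme.ValidateUpdate(ctx, sets.New[string](), widget, oldWidget)
_ = errs
```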

View File

@@ -140,7 +140,7 @@ func (cache *checkers) getCheckerInternal(rt reflect.Type, parent *path) (c chec
var wg sync.WaitGroup
wg.Add(1)
defer wg.Done()
c = checker{
placeholder := checker{
safe: func() bool {
wg.Wait()
return c.safe()
@@ -150,7 +150,7 @@ func (cache *checkers) getCheckerInternal(rt reflect.Type, parent *path) (c chec
return c.check(rv, depth)
},
}
if actual, loaded := cache.m.LoadOrStore(rt, &c); loaded {
if actual, loaded := cache.m.LoadOrStore(rt, &placeholder); loaded {
// Someone else stored an entry for this type, use it.
return *actual.(*checker)
}
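The hunk above appears to fix a self-reference: the placeholder used to be assigned to the named return `c`, so the closures that wait on `wg` and then call `c.safe()`/`c.check()` would call back into the placeholder itself. Storing a separate `placeholder` value leaves `c` free to receive the real checker. A standalone sketch of the same `sync.Map` idiom, with a hypothetical `getOrCompute` (not this package's API):

```go
// getOrCompute returns the cached value for key, computing it at most once.
// Losers of the LoadOrStore race get a placeholder that blocks until the
// winner's computation has finished.
func getOrCompute(m *sync.Map, key string, compute func() int) int {
	var (
		wg     sync.WaitGroup
		result int
	)
	wg.Add(1)
	defer wg.Done() // unblocks waiters once result is set
	placeholder := func() int { wg.Wait(); return result }
	if actual, loaded := m.LoadOrStore(key, placeholder); loaded {
		// Someone else stored an entry for this key, use it.
		return actual.(func() int)()
	}
	result = compute()
	return result
}
```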

View File

@@ -28,7 +28,7 @@ import (
func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []runtime.SerializerInfo {
jsonSerializer := json.NewSerializerWithOptions(
mf, scheme, scheme,
json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict},
json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict, StreamingCollectionsEncoding: options.StreamingCollectionsEncodingToJSON},
)
jsonSerializerType := runtime.SerializerInfo{
MediaType: runtime.ContentTypeJSON,
@@ -38,7 +38,7 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option
Serializer: jsonSerializer,
StrictSerializer: json.NewSerializerWithOptions(
mf, scheme, scheme,
json.SerializerOptions{Yaml: false, Pretty: false, Strict: true},
json.SerializerOptions{Yaml: false, Pretty: false, Strict: true, StreamingCollectionsEncoding: options.StreamingCollectionsEncodingToJSON},
),
StreamSerializer: &runtime.StreamSerializerInfo{
EncodesAsText: true,
@@ -61,7 +61,9 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option
mf, scheme, scheme,
json.SerializerOptions{Yaml: true, Pretty: false, Strict: true},
)
protoSerializer := protobuf.NewSerializer(scheme, scheme)
protoSerializer := protobuf.NewSerializerWithOptions(scheme, scheme, protobuf.SerializerOptions{
StreamingCollectionsEncoding: options.StreamingCollectionsEncodingToProtobuf,
})
protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme)
serializers := []runtime.SerializerInfo{
@@ -113,6 +115,9 @@ type CodecFactoryOptions struct {
// Pretty includes a pretty serializer along with the non-pretty one
Pretty bool
StreamingCollectionsEncodingToJSON bool
StreamingCollectionsEncodingToProtobuf bool
serializers []func(runtime.ObjectCreater, runtime.ObjectTyper) runtime.SerializerInfo
}
@@ -147,6 +152,18 @@ func WithSerializer(f func(runtime.ObjectCreater, runtime.ObjectTyper) runtime.S
}
}
func WithStreamingCollectionEncodingToJSON() CodecFactoryOptionsMutator {
return func(options *CodecFactoryOptions) {
options.StreamingCollectionsEncodingToJSON = true
}
}
func WithStreamingCollectionEncodingToProtobuf() CodecFactoryOptionsMutator {
return func(options *CodecFactoryOptions) {
options.StreamingCollectionsEncodingToProtobuf = true
}
}
// NewCodecFactory provides methods for retrieving serializers for the supported wire formats
// and conversion wrappers to define preferred internal and external versions. In the future,
// as the internal version is used less, callers may instead use a defaulting serializer and
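A short usage sketch for the new mutators, assuming a populated `*runtime.Scheme` in `scheme`:

```go
codecs := serializer.NewCodecFactory(scheme,
	serializer.WithStreamingCollectionEncodingToJSON(),
	serializer.WithStreamingCollectionEncodingToProtobuf(),
)
// Serializers negotiated from this factory stream list items instead of
// buffering the entire collection before writing.
if info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), runtime.ContentTypeJSON); ok {
	_ = info.Serializer
}
```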

View File

@@ -0,0 +1,230 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package json
import (
"encoding/json"
"fmt"
"io"
"maps"
"slices"
"sort"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/conversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
)
func streamEncodeCollections(obj runtime.Object, w io.Writer) (bool, error) {
list, ok := obj.(*unstructured.UnstructuredList)
if ok {
return true, streamingEncodeUnstructuredList(w, list)
}
if _, ok := obj.(json.Marshaler); ok {
return false, nil
}
typeMeta, listMeta, items, err := getListMeta(obj)
if err == nil {
return true, streamingEncodeList(w, typeMeta, listMeta, items)
}
return false, nil
}
// getListMeta implements list extraction logic for json stream serialization.
//
// Reasons for custom logic instead of reusing accessors from the meta package:
// * Validate json tags to prevent incompatibility with the standard json package.
// * ListMetaAccessor doesn't distinguish an empty value from a nil one.
// * TypeAccessor reparses "apiVersion" and serializes it as "{group}/{version}".
func getListMeta(list runtime.Object) (metav1.TypeMeta, metav1.ListMeta, []runtime.Object, error) {
listValue, err := conversion.EnforcePtr(list)
if err != nil {
return metav1.TypeMeta{}, metav1.ListMeta{}, nil, err
}
listType := listValue.Type()
if listType.NumField() != 3 {
return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf("expected ListType to have 3 fields")
}
// TypeMeta
typeMeta, ok := listValue.Field(0).Interface().(metav1.TypeMeta)
if !ok {
return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf("expected TypeMeta field to have TypeMeta type")
}
if listType.Field(0).Tag.Get("json") != ",inline" {
return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf(`expected TypeMeta json field tag to be ",inline"`)
}
// ListMeta
listMeta, ok := listValue.Field(1).Interface().(metav1.ListMeta)
if !ok {
return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf("expected ListMeta field to have ListMeta type")
}
if listType.Field(1).Tag.Get("json") != "metadata,omitempty" {
return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf(`expected ListMeta json field tag to be "metadata,omitempty"`)
}
// Items
items, err := meta.ExtractList(list)
if err != nil {
return metav1.TypeMeta{}, metav1.ListMeta{}, nil, err
}
if listType.Field(2).Tag.Get("json") != "items" {
return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf(`expected Items json field tag to be "items"`)
}
return typeMeta, listMeta, items, nil
}
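For illustration, a hypothetical list type that passes all three checks; this is the shape generated Kubernetes list types already follow:

```go
type WidgetList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items []Widget  `json:"items"`
}
```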
func streamingEncodeList(w io.Writer, typeMeta metav1.TypeMeta, listMeta metav1.ListMeta, items []runtime.Object) error {
// Start
if _, err := w.Write([]byte(`{`)); err != nil {
return err
}
// TypeMeta
if typeMeta.Kind != "" {
if err := encodeKeyValuePair(w, "kind", typeMeta.Kind, []byte(",")); err != nil {
return err
}
}
if typeMeta.APIVersion != "" {
if err := encodeKeyValuePair(w, "apiVersion", typeMeta.APIVersion, []byte(",")); err != nil {
return err
}
}
// ListMeta
if err := encodeKeyValuePair(w, "metadata", listMeta, []byte(",")); err != nil {
return err
}
// Items
if err := encodeItemsObjectSlice(w, items); err != nil {
return err
}
// End
_, err := w.Write([]byte("}\n"))
return err
}
func encodeItemsObjectSlice(w io.Writer, items []runtime.Object) (err error) {
if items == nil {
err := encodeKeyValuePair(w, "items", nil, nil)
return err
}
_, err = w.Write([]byte(`"items":[`))
if err != nil {
return err
}
suffix := []byte(",")
for i, item := range items {
if i == len(items)-1 {
suffix = nil
}
err := encodeValue(w, item, suffix)
if err != nil {
return err
}
}
_, err = w.Write([]byte("]"))
if err != nil {
return err
}
return err
}
func streamingEncodeUnstructuredList(w io.Writer, list *unstructured.UnstructuredList) error {
_, err := w.Write([]byte(`{`))
if err != nil {
return err
}
keys := slices.Collect(maps.Keys(list.Object))
if _, exists := list.Object["items"]; !exists {
keys = append(keys, "items")
}
sort.Strings(keys)
suffix := []byte(",")
for i, key := range keys {
if i == len(keys)-1 {
suffix = nil
}
if key == "items" {
err = encodeItemsUnstructuredSlice(w, list.Items, suffix)
} else {
err = encodeKeyValuePair(w, key, list.Object[key], suffix)
}
if err != nil {
return err
}
}
_, err = w.Write([]byte("}\n"))
return err
}
func encodeItemsUnstructuredSlice(w io.Writer, items []unstructured.Unstructured, suffix []byte) (err error) {
_, err = w.Write([]byte(`"items":[`))
if err != nil {
return err
}
comma := []byte(",")
for i, item := range items {
if i == len(items)-1 {
comma = nil
}
err := encodeValue(w, item.Object, comma)
if err != nil {
return err
}
}
_, err = w.Write([]byte("]"))
if err != nil {
return err
}
if len(suffix) > 0 {
_, err = w.Write(suffix)
}
return err
}
func encodeKeyValuePair(w io.Writer, key string, value any, suffix []byte) (err error) {
err = encodeValue(w, key, []byte(":"))
if err != nil {
return err
}
err = encodeValue(w, value, suffix)
if err != nil {
return err
}
return err
}
func encodeValue(w io.Writer, value any, suffix []byte) error {
data, err := json.Marshal(value)
if err != nil {
return err
}
_, err = w.Write(data)
if err != nil {
return err
}
if len(suffix) > 0 {
_, err = w.Write(suffix)
}
return err
}

View File

@@ -36,7 +36,7 @@ import (
// is not nil, the object has the group, version, and kind fields set.
// Deprecated: use NewSerializerWithOptions instead.
func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer {
return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false})
return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false, false})
}
// NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer
@@ -44,7 +44,7 @@ func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtim
// matches JSON, and will error if constructs are used that do not serialize to JSON.
// Deprecated: use NewSerializerWithOptions instead.
func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer {
return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false})
return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false, false})
}
// NewSerializerWithOptions creates a JSON/YAML serializer that handles encoding versioned objects into the proper JSON/YAML
@@ -93,6 +93,9 @@ type SerializerOptions struct {
// Strict: configures the Serializer to return strictDecodingError's when duplicate fields are present decoding JSON or YAML.
// Note that enabling this option is not as performant as the non-strict variant, and should not be used in fast paths.
Strict bool
// StreamingCollectionsEncoding enables encoding collections one item at a time, drastically reducing the memory needed.
StreamingCollectionsEncoding bool
}
// Serializer handles encoding versioned objects into the proper JSON form
@@ -242,6 +245,15 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
_, err = w.Write(data)
return err
}
if s.options.StreamingCollectionsEncoding {
ok, err := streamEncodeCollections(obj, w)
if err != nil {
return err
}
if ok {
return nil
}
}
encoder := json.NewEncoder(w)
return encoder.Encode(obj)
}
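A minimal sketch of enabling the streaming branch when constructing the serializer directly (`scheme` and `list` are assumed to be a populated scheme and a registered list object):

```go
s := json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme,
	json.SerializerOptions{Yaml: false, Pretty: false, Strict: false, StreamingCollectionsEncoding: true})
// List objects are written {"kind":...,"apiVersion":...,"metadata":...,"items":[...]}
// one item at a time; non-list objects fall through to the buffered encoder.
if err := s.Encode(list, os.Stdout); err != nil {
	panic(err)
}
```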

View File

@@ -0,0 +1,174 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protobuf
import (
"errors"
"io"
"math/bits"
"github.com/gogo/protobuf/proto"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
)
var (
errFieldCount = errors.New("expected ListType to have 3 fields")
errTypeMetaField = errors.New("expected TypeMeta field to have TypeMeta type")
errTypeMetaProtobufTag = errors.New(`expected TypeMeta protobuf field tag to be ""`)
errListMetaField = errors.New("expected ListMeta field to have ListMeta type")
errListMetaProtobufTag = errors.New(`expected ListMeta protobuf field tag to be "bytes,1,opt,name=metadata"`)
errItemsProtobufTag = errors.New(`expected Items protobuf field tag to be "bytes,2,rep,name=items"`)
errItemsSizer = errors.New(`expected Items elements to implement proto.Sizer`)
)
// getStreamingListData implements list extraction logic for protobuf stream serialization.
//
// Reasons for custom logic instead of reusing accessors from the meta package:
// * Validate proto tags to prevent incompatibility with the standard proto package.
// * ListMetaAccessor doesn't distinguish an empty value from a nil one.
// * TypeAccessor reparses "apiVersion" and serializes it as "{group}/{version}".
func getStreamingListData(list runtime.Object) (data streamingListData, err error) {
listValue, err := conversion.EnforcePtr(list)
if err != nil {
return data, err
}
listType := listValue.Type()
if listType.NumField() != 3 {
return data, errFieldCount
}
// TypeMeta: validated, but not returned, as it is not serialized.
_, ok := listValue.Field(0).Interface().(metav1.TypeMeta)
if !ok {
return data, errTypeMetaField
}
if listType.Field(0).Tag.Get("protobuf") != "" {
return data, errTypeMetaProtobufTag
}
// ListMeta
listMeta, ok := listValue.Field(1).Interface().(metav1.ListMeta)
if !ok {
return data, errListMetaField
}
// if we were ever to relax the protobuf tag check we should update the hardcoded `0xa` below when writing ListMeta.
if listType.Field(1).Tag.Get("protobuf") != "bytes,1,opt,name=metadata" {
return data, errListMetaProtobufTag
}
data.listMeta = listMeta
// Items; if we were ever to relax the protobuf tag check we should update the hardcoded `0x12` below when writing Items.
if listType.Field(2).Tag.Get("protobuf") != "bytes,2,rep,name=items" {
return data, errItemsProtobufTag
}
items, err := meta.ExtractList(list)
if err != nil {
return data, err
}
data.items = items
data.totalSize, data.listMetaSize, data.itemsSizes, err = listSize(listMeta, items)
return data, err
}
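As with the json variant, a hypothetical list type carrying exactly the protobuf tags this function requires:

```go
type WidgetList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	Items []Widget  `json:"items" protobuf:"bytes,2,rep,name=items"`
}
```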
type streamingListData struct {
// totalSize is the total size of the serialized List object, including proto headers/size bytes
totalSize int
// listMetaSize caches the result of calling .Size() on listMeta; it doesn't include header bytes (field identifier, size)
listMetaSize int
listMeta metav1.ListMeta
// itemsSizes caches the results of calling .Size() on each item; they don't include header bytes (field identifier, size)
itemsSizes []int
items []runtime.Object
}
// listSize returns the sizes of ListMeta and the items, to be used later for preallocations.
// listMetaSize and itemSizes do not include header bytes (field identifier, size).
func listSize(listMeta metav1.ListMeta, items []runtime.Object) (totalSize, listMetaSize int, itemSizes []int, err error) {
// ListMeta
listMetaSize = listMeta.Size()
totalSize += 1 + sovGenerated(uint64(listMetaSize)) + listMetaSize
// Items
itemSizes = make([]int, len(items))
for i, item := range items {
sizer, ok := item.(proto.Sizer)
if !ok {
return totalSize, listMetaSize, nil, errItemsSizer
}
n := sizer.Size()
itemSizes[i] = n
totalSize += 1 + sovGenerated(uint64(n)) + n
}
return totalSize, listMetaSize, itemSizes, nil
}
func streamingEncodeUnknownList(w io.Writer, unk runtime.Unknown, listData streamingListData, memAlloc runtime.MemoryAllocator) error {
_, err := w.Write(protoEncodingPrefix)
if err != nil {
return err
}
// encodeList is responsible for encoding the List into the unknown Raw.
encodeList := func(writer io.Writer) (int, error) {
return streamingEncodeList(writer, listData, memAlloc)
}
_, err = unk.MarshalToWriter(w, listData.totalSize, encodeList)
return err
}
func streamingEncodeList(w io.Writer, listData streamingListData, memAlloc runtime.MemoryAllocator) (size int, err error) {
// ListMeta; 0xa = (1 << 3) | 2; field number: 1, type: 2 (LEN). https://protobuf.dev/programming-guides/encoding/#structure
n, err := doEncodeWithHeader(&listData.listMeta, w, 0xa, listData.listMetaSize, memAlloc)
size += n
if err != nil {
return size, err
}
// Items; 0x12 = (2 << 3) | 2; field number: 2, type: 2 (LEN). https://protobuf.dev/programming-guides/encoding/#structure
for i, item := range listData.items {
n, err := doEncodeWithHeader(item, w, 0x12, listData.itemsSizes[i], memAlloc)
size += n
if err != nil {
return size, err
}
}
return size, nil
}
func writeVarintGenerated(w io.Writer, v int) (int, error) {
buf := make([]byte, sovGenerated(uint64(v)))
encodeVarintGenerated(buf, len(buf), uint64(v))
return w.Write(buf)
}
// sovGenerated is copied from `generated.pb.go`; it returns the encoded size of a varint.
func sovGenerated(v uint64) int {
return (bits.Len64(v|1) + 6) / 7
}
// encodeVarintGenerated is copied from `generated.pb.go`; it encodes a varint.
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
offset -= sovGenerated(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}

View File

@@ -15,4 +15,4 @@ limitations under the License.
*/
// Package protobuf provides a Kubernetes serializer for the protobuf format.
package protobuf // import "k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
package protobuf

View File

@@ -72,10 +72,18 @@ func IsNotMarshalable(err error) bool {
// is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written
// as-is (any type info passed with the object will be used).
func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer {
return NewSerializerWithOptions(creater, typer, SerializerOptions{})
}
// NewSerializerWithOptions creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If a typer
// is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written
// as-is (any type info passed with the object will be used).
func NewSerializerWithOptions(creater runtime.ObjectCreater, typer runtime.ObjectTyper, opts SerializerOptions) *Serializer {
return &Serializer{
prefix: protoEncodingPrefix,
creater: creater,
typer: typer,
options: opts,
}
}
@@ -84,6 +92,14 @@ type Serializer struct {
prefix []byte
creater runtime.ObjectCreater
typer runtime.ObjectTyper
options SerializerOptions
}
// SerializerOptions holds the options which are used to configure a Proto serializer.
type SerializerOptions struct {
// StreamingCollectionsEncoding enables encoding collections one item at a time, drastically reducing the memory needed.
StreamingCollectionsEncoding bool
}
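Besides the codec-factory mutator shown earlier, the serializer can also be constructed directly; a minimal sketch, with `scheme` assumed:

```go
ps := protobuf.NewSerializerWithOptions(scheme, scheme, protobuf.SerializerOptions{
	StreamingCollectionsEncoding: true,
})
_ = ps // list types that pass getStreamingListData are encoded item by item
```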
var _ runtime.Serializer = &Serializer{}
@@ -209,6 +225,13 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runtime.
},
}
}
if s.options.StreamingCollectionsEncoding {
listData, err := getStreamingListData(obj)
if err == nil {
// Doesn't honor custom proto marshaling methods (unlike the json streaming path), because all proto objects implement the proto methods.
return streamingEncodeUnknownList(w, unk, listData, memAlloc)
}
}
switch t := obj.(type) {
case bufferedMarshaller:
@@ -428,6 +451,39 @@ func (s *RawSerializer) encode(obj runtime.Object, w io.Writer, memAlloc runtime
}
func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error {
_, err := doEncode(obj, w, nil, memAlloc)
return err
}
func doEncodeWithHeader(obj any, w io.Writer, field byte, precomputedSize int, memAlloc runtime.MemoryAllocator) (size int, err error) {
// Field identifier
n, err := w.Write([]byte{field})
size += n
if err != nil {
return size, err
}
// Size
n, err = writeVarintGenerated(w, precomputedSize)
size += n
if err != nil {
return size, err
}
// Obj
n, err = doEncode(obj, w, &precomputedSize, memAlloc)
size += n
if err != nil {
return size, err
}
if n != precomputedSize {
return size, fmt.Errorf("the size value was %d, but doEncode wrote %d bytes to data", precomputedSize, n)
}
return size, nil
}
// doEncode encodes the provided object into the writer, using an allocator if possible.
// It avoids calling the object's Size method when precomputedObjSize is provided.
// precomputedObjSize should not include header bytes (field identifier, size).
func doEncode(obj any, w io.Writer, precomputedObjSize *int, memAlloc runtime.MemoryAllocator) (int, error) {
if memAlloc == nil {
klog.Error("a mandatory memory allocator wasn't provided, this might have a negative impact on performance, check invocations of EncodeWithAllocator method, falling back on runtime.SimpleAllocator")
memAlloc = &runtime.SimpleAllocator{}
@@ -436,40 +492,43 @@ func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runti
case bufferedReverseMarshaller:
// this path performs a single allocation during write only when the Allocator wasn't provided
// it also requires the caller to implement the more efficient Size and MarshalToSizedBuffer methods
encodedSize := uint64(t.Size())
data := memAlloc.Allocate(encodedSize)
if precomputedObjSize == nil {
s := t.Size()
precomputedObjSize = &s
}
data := memAlloc.Allocate(uint64(*precomputedObjSize))
n, err := t.MarshalToSizedBuffer(data)
if err != nil {
return err
return 0, err
}
_, err = w.Write(data[:n])
return err
return w.Write(data[:n])
case bufferedMarshaller:
// this path performs a single allocation during write only when the Allocator wasn't provided
// it also requires the caller to implement the more efficient Size and MarshalTo methods
encodedSize := uint64(t.Size())
data := memAlloc.Allocate(encodedSize)
if precomputedObjSize == nil {
s := t.Size()
precomputedObjSize = &s
}
data := memAlloc.Allocate(uint64(*precomputedObjSize))
n, err := t.MarshalTo(data)
if err != nil {
return err
return 0, err
}
_, err = w.Write(data[:n])
return err
return w.Write(data[:n])
case proto.Marshaler:
// this path performs extra allocations
data, err := t.Marshal()
if err != nil {
return err
return 0, err
}
_, err = w.Write(data)
return err
return w.Write(data)
default:
return errNotMarshalable{reflect.TypeOf(obj)}
return 0, errNotMarshalable{reflect.TypeOf(obj)}
}
}

View File

@@ -18,6 +18,7 @@ package runtime
import (
"fmt"
"io"
)
type ProtobufMarshaller interface {
@@ -28,6 +29,124 @@ type ProtobufReverseMarshaller interface {
MarshalToSizedBuffer(data []byte) (int, error)
}
const (
typeMetaTag = 0xa
rawTag = 0x12
contentEncodingTag = 0x1a
contentTypeTag = 0x22
// max length of a varint for a uint64
maxUint64VarIntLength = 10
)
// MarshalToWriter allows a caller to provide a streaming writer for raw bytes,
// instead of populating them inside the Unknown struct.
// rawSize is the number of bytes writeRaw will write in a success case.
// writeRaw is called when it is time to write the raw bytes. It must return `rawSize, nil` or an error.
func (m *Unknown) MarshalToWriter(w io.Writer, rawSize int, writeRaw func(io.Writer) (int, error)) (int, error) {
size := 0
// reuse the buffer for varint marshaling
varintBuffer := make([]byte, maxUint64VarIntLength)
writeVarint := func(i int) (int, error) {
offset := encodeVarintGenerated(varintBuffer, len(varintBuffer), uint64(i))
return w.Write(varintBuffer[offset:])
}
// TypeMeta
{
n, err := w.Write([]byte{typeMetaTag})
size += n
if err != nil {
return size, err
}
typeMetaBytes, err := m.TypeMeta.Marshal()
if err != nil {
return size, err
}
n, err = writeVarint(len(typeMetaBytes))
size += n
if err != nil {
return size, err
}
n, err = w.Write(typeMetaBytes)
size += n
if err != nil {
return size, err
}
}
// Raw, delegating write to writeRaw()
{
n, err := w.Write([]byte{rawTag})
size += n
if err != nil {
return size, err
}
n, err = writeVarint(rawSize)
size += n
if err != nil {
return size, err
}
n, err = writeRaw(w)
size += n
if err != nil {
return size, err
}
if n != int(rawSize) {
return size, fmt.Errorf("the size value was %d, but encoding wrote %d bytes to data", rawSize, n)
}
}
// ContentEncoding
{
n, err := w.Write([]byte{contentEncodingTag})
size += n
if err != nil {
return size, err
}
n, err = writeVarint(len(m.ContentEncoding))
size += n
if err != nil {
return size, err
}
n, err = w.Write([]byte(m.ContentEncoding))
size += n
if err != nil {
return size, err
}
}
// ContentType
{
n, err := w.Write([]byte{contentTypeTag})
size += n
if err != nil {
return size, err
}
n, err = writeVarint(len(m.ContentType))
size += n
if err != nil {
return size, err
}
n, err = w.Write([]byte(m.ContentType))
size += n
if err != nil {
return size, err
}
}
return size, nil
}
// NestedMarshalTo allows a caller to avoid extra allocations during serialization of an Unknown
// that will contain an object that implements ProtobufMarshaller or ProtobufReverseMarshaller.
func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64) (int, error) {
@@ -43,12 +162,12 @@ func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64
copy(data[i:], m.ContentType)
i = encodeVarintGenerated(data, i, uint64(len(m.ContentType)))
i--
data[i] = 0x22
data[i] = contentTypeTag
i -= len(m.ContentEncoding)
copy(data[i:], m.ContentEncoding)
i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding)))
i--
data[i] = 0x1a
data[i] = contentEncodingTag
if b != nil {
if r, ok := b.(ProtobufReverseMarshaller); ok {
n1, err := r.MarshalToSizedBuffer(data[:i])
@@ -75,7 +194,7 @@ func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64
}
i = encodeVarintGenerated(data, i, size)
i--
data[i] = 0x12
data[i] = rawTag
}
n2, err := m.TypeMeta.MarshalToSizedBuffer(data[:i])
if err != nil {
@@ -84,6 +203,6 @@ func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64
i -= n2
i = encodeVarintGenerated(data, i, uint64(n2))
i--
data[i] = 0xa
data[i] = typeMetaTag
return msgSize - i, nil
}