Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 10:33:35 +00:00)
rebase: bump k8s.io/kubernetes in the k8s-dependencies group
Bumps the k8s-dependencies group with 1 update: [k8s.io/kubernetes](https://github.com/kubernetes/kubernetes).

Updates `k8s.io/kubernetes` from 1.32.3 to 1.33.0
- [Release notes](https://github.com/kubernetes/kubernetes/releases)
- [Commits](https://github.com/kubernetes/kubernetes/compare/v1.32.3...v1.33.0)

---
updated-dependencies:
- dependency-name: k8s.io/kubernetes
  dependency-version: 1.33.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: k8s-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: Niels de Vos <ndevos@ibm.com>
Committed by: mergify[bot]
Parent: 4147d5d15a
Commit: 51895f8619
vendor/k8s.io/apimachinery/pkg/util/diff/diff.go (generated, vendored): 2 changed lines
@@ -23,7 +23,7 @@ import (
 	"strings"
 	"text/tabwriter"

-	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp" //nolint:depguard
 	"k8s.io/apimachinery/pkg/util/dump"
 )
vendor/k8s.io/apimachinery/pkg/util/errors/doc.go (generated, vendored): 2 changed lines
@@ -15,4 +15,4 @@ limitations under the License.
 */

 // Package errors implements various utility functions and types around errors.
-package errors // import "k8s.io/apimachinery/pkg/util/errors"
+package errors
vendor/k8s.io/apimachinery/pkg/util/framer/framer.go (generated, vendored): 6 changed lines
@@ -91,12 +91,12 @@ func (r *lengthDelimitedFrameReader) Read(data []byte) (int, error) {
 	}
 	n, err := io.ReadAtLeast(r.r, data[:max], int(max))
 	r.remaining -= n
-	if err == io.ErrShortBuffer || r.remaining > 0 {
-		return n, io.ErrShortBuffer
-	}
 	if err != nil {
 		return n, err
 	}
+	if r.remaining > 0 {
+		return n, io.ErrShortBuffer
+	}
 	if n != expect {
 		return n, io.ErrUnexpectedEOF
 	}
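Note (not part of this commit): the reordered checks above change when a real read error is reported versus io.ErrShortBuffer. The sketch below shows how a caller of this vendored framer package typically drains a frame that is larger than its buffer; the payload text and the 8-byte buffer size are illustrative assumptions, but NewLengthDelimitedFrameWriter/NewLengthDelimitedFrameReader are the package's actual constructors.

package main

import (
    "bytes"
    "fmt"
    "io"

    "k8s.io/apimachinery/pkg/util/framer"
)

func main() {
    var buf bytes.Buffer
    w := framer.NewLengthDelimitedFrameWriter(&buf)
    _, _ = w.Write([]byte("a fairly long frame payload"))

    r := framer.NewLengthDelimitedFrameReader(io.NopCloser(&buf))
    chunk := make([]byte, 8) // deliberately small to trigger io.ErrShortBuffer
    var msg []byte
    for {
        n, err := r.Read(chunk)
        msg = append(msg, chunk[:n]...)
        if err == io.ErrShortBuffer {
            continue // part of the frame remains; keep reading
        }
        if err != nil && err != io.EOF {
            panic(err)
        }
        break
    }
    fmt.Printf("%s\n", msg)
}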
vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go (generated, vendored): 14 changed lines
@@ -20,24 +20,24 @@ limitations under the License.
 package intstr

 import (
-	fuzz "github.com/google/gofuzz"
+	"sigs.k8s.io/randfill"
 )

-// Fuzz satisfies fuzz.Interface
-func (intstr *IntOrString) Fuzz(c fuzz.Continue) {
+// RandFill satisfies randfill.NativeSelfFiller
+func (intstr *IntOrString) RandFill(c randfill.Continue) {
 	if intstr == nil {
 		return
 	}
-	if c.RandBool() {
+	if c.Bool() {
 		intstr.Type = Int
-		c.Fuzz(&intstr.IntVal)
+		c.Fill(&intstr.IntVal)
 		intstr.StrVal = ""
 	} else {
 		intstr.Type = String
 		intstr.IntVal = 0
-		c.Fuzz(&intstr.StrVal)
+		c.Fill(&intstr.StrVal)
 	}
 }

 // ensure IntOrString implements fuzz.Interface
-var _ fuzz.Interface = &IntOrString{}
+var _ randfill.NativeSelfFiller = &IntOrString{}
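Note (not part of this commit): the hunk above is the github.com/google/gofuzz to sigs.k8s.io/randfill migration. A minimal sketch of applying the same migration to one's own type follows; the Quantity type is hypothetical, while randfill.Continue.Bool, Continue.Fill and the NativeSelfFiller interface are the calls shown in the diff above.

package example

import (
    "sigs.k8s.io/randfill"
)

// Quantity is a hypothetical int-or-string union, mirroring IntOrString above.
type Quantity struct {
    IsInt  bool
    IntVal int32
    StrVal string
}

// RandFill replaces the old gofuzz-style Fuzz(c fuzz.Continue) method.
func (q *Quantity) RandFill(c randfill.Continue) {
    if q == nil {
        return
    }
    if c.Bool() { // was c.RandBool() with gofuzz
        q.IsInt = true
        c.Fill(&q.IntVal) // was c.Fuzz(&q.IntVal)
        q.StrVal = ""
    } else {
        q.IsInt = false
        q.IntVal = 0
        c.Fill(&q.StrVal)
    }
}

// Compile-time check, matching the pattern used in instr_fuzz.go.
var _ randfill.NativeSelfFiller = &Quantity{}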
vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go (generated, vendored): 46 changed lines
@@ -36,6 +36,11 @@ var (
 )

 // PanicHandlers is a list of functions which will be invoked when a panic happens.
+//
+// The code invoking these handlers prepares a contextual logger so that
+// klog.FromContext(ctx) already skips over the panic handler itself and
+// several other intermediate functions, ideally such that the log output
+// is attributed to the code which triggered the panic.
 var PanicHandlers = []func(context.Context, interface{}){logPanic}

 // HandleCrash simply catches a crash and logs an error. Meant to be called via
@@ -45,7 +50,7 @@ var PanicHandlers = []func(context.Context, interface{}){logPanic}
 //
 // E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully.
 //
-// Contextual logging: HandleCrashWithContext should be used instead of HandleCrash in code which supports contextual logging.
+// Contextual logging: HandleCrashWithContext or HandleCrashWithLogger should be used instead of HandleCrash in code which supports contextual logging.
 func HandleCrash(additionalHandlers ...func(interface{})) {
 	if r := recover(); r != nil {
 		additionalHandlersWithContext := make([]func(context.Context, interface{}), len(additionalHandlers))
@@ -74,10 +79,30 @@ func HandleCrashWithContext(ctx context.Context, additionalHandlers ...func(cont
 	}
 }

-// handleCrash is the common implementation of HandleCrash and HandleCrash.
+// HandleCrashWithLogger simply catches a crash and logs an error. Meant to be called via
+// defer. Additional context-specific handlers can be provided, and will be
+// called in case of panic. HandleCrash actually crashes, after calling the
+// handlers and logging the panic message.
+//
+// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully.
+func HandleCrashWithLogger(logger klog.Logger, additionalHandlers ...func(context.Context, interface{})) {
+	if r := recover(); r != nil {
+		ctx := klog.NewContext(context.Background(), logger)
+		handleCrash(ctx, r, additionalHandlers...)
+	}
+}
+
+// handleCrash is the common implementation of the HandleCrash* variants.
 // Having those call a common implementation ensures that the stack depth
 // is the same regardless through which path the handlers get invoked.
 func handleCrash(ctx context.Context, r any, additionalHandlers ...func(context.Context, interface{})) {
+	// We don't really know how many call frames to skip because the Go
+	// panic handler is between us and the code where the panic occurred.
+	// If it's one function (as in Go 1.21), then skipping four levels
+	// gets us to the function which called the `defer HandleCrashWithontext(...)`.
+	logger := klog.FromContext(ctx).WithCallDepth(4)
+	ctx = klog.NewContext(ctx, logger)
+
 	for _, fn := range PanicHandlers {
 		fn(ctx, r)
 	}
@@ -106,11 +131,7 @@ func logPanic(ctx context.Context, r interface{}) {
 	stacktrace := make([]byte, size)
 	stacktrace = stacktrace[:runtime.Stack(stacktrace, false)]

-	// We don't really know how many call frames to skip because the Go
-	// panic handler is between us and the code where the panic occurred.
-	// If it's one function (as in Go 1.21), then skipping four levels
-	// gets us to the function which called the `defer HandleCrashWithontext(...)`.
-	logger := klog.FromContext(ctx).WithCallDepth(4)
+	logger := klog.FromContext(ctx)

 	// For backwards compatibility, conversion to string
 	// is handled here instead of defering to the logging
@@ -176,12 +197,19 @@ func HandleError(err error) {
 // and key/value pairs.
 //
 // This variant should be used instead of HandleError because it supports
-// structured, contextual logging.
+// structured, contextual logging. Alternatively, [HandleErrorWithLogger] can
+// be used if a logger is available instead of a context.
 func HandleErrorWithContext(ctx context.Context, err error, msg string, keysAndValues ...interface{}) {
 	handleError(ctx, err, msg, keysAndValues...)
 }

-// handleError is the common implementation of HandleError and HandleErrorWithContext.
+// HandleErrorWithLogger is an alternative to [HandlerErrorWithContext] which accepts
+// a logger for contextual logging.
+func HandleErrorWithLogger(logger klog.Logger, err error, msg string, keysAndValues ...interface{}) {
+	handleError(klog.NewContext(context.Background(), logger), err, msg, keysAndValues...)
+}
+
+// handleError is the common implementation of the HandleError* variants.
 // Using this common implementation ensures that the stack depth
 // is the same regardless through which path the handlers get invoked.
 func handleError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) {
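Note (not part of this commit): the new HandleCrashWithLogger and HandleErrorWithLogger entry points accept a klog.Logger when no context is at hand. A minimal usage sketch under those assumptions; the worker/doWork functions are hypothetical, the utilruntime and klog calls are the ones added in the hunk above.

package example

import (
    "context"
    "errors"

    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/klog/v2"
)

func worker(logger klog.Logger) {
    // Recover from panics and log them via the supplied logger instead of a context.
    defer utilruntime.HandleCrashWithLogger(logger)

    if err := doWork(); err != nil {
        // Structured, contextual error reporting without needing a context.Context.
        utilruntime.HandleErrorWithLogger(logger, err, "work item failed", "retries", 3)
    }
}

func workerWithContext(ctx context.Context) {
    // Context-based variant; the logger is taken from the context.
    defer utilruntime.HandleCrashWithContext(ctx)
    _ = ctx
}

func doWork() error { return errors.New("not implemented") }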
vendor/k8s.io/apimachinery/pkg/util/sets/doc.go (generated, vendored): 2 changed lines
@@ -16,4 +16,4 @@ limitations under the License.

 // Package sets has generic set and specified sets. Generic set will
 // replace specified ones over time. And specific ones are deprecated.
-package sets // import "k8s.io/apimachinery/pkg/util/sets"
+package sets
vendor/k8s.io/apimachinery/pkg/util/validation/field/error_matcher.go (generated, vendored, new file): 212 added lines
@@ -0,0 +1,212 @@
/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package field

import (
    "fmt"
    "reflect"
    "regexp"
    "strings"
)

// ErrorMatcher is a helper for comparing Error objects.
type ErrorMatcher struct {
    // TODO(thockin): consider whether type is ever NOT required, maybe just
    // assume it.
    matchType bool
    // TODO(thockin): consider whether field could be assumed - if the
    // "want" error has a nil field, don't match on field.
    matchField bool
    // TODO(thockin): consider whether value could be assumed - if the
    // "want" error has a nil value, don't match on field.
    matchValue               bool
    matchOrigin              bool
    matchDetail              func(want, got string) bool
    requireOriginWhenInvalid bool
}

// Matches returns true if the two Error objects match according to the
// configured criteria.
func (m ErrorMatcher) Matches(want, got *Error) bool {
    if m.matchType && want.Type != got.Type {
        return false
    }
    if m.matchField && want.Field != got.Field {
        return false
    }
    if m.matchValue && !reflect.DeepEqual(want.BadValue, got.BadValue) {
        return false
    }
    if m.matchOrigin {
        if want.Origin != got.Origin {
            return false
        }
        if m.requireOriginWhenInvalid && want.Type == ErrorTypeInvalid {
            if want.Origin == "" || got.Origin == "" {
                return false
            }
        }
    }
    if m.matchDetail != nil && !m.matchDetail(want.Detail, got.Detail) {
        return false
    }
    return true
}

// Render returns a string representation of the specified Error object,
// according to the criteria configured in the ErrorMatcher.
func (m ErrorMatcher) Render(e *Error) string {
    buf := strings.Builder{}

    comma := func() {
        if buf.Len() > 0 {
            buf.WriteString(", ")
        }
    }

    if m.matchType {
        comma()
        buf.WriteString(fmt.Sprintf("Type=%q", e.Type))
    }
    if m.matchField {
        comma()
        buf.WriteString(fmt.Sprintf("Field=%q", e.Field))
    }
    if m.matchValue {
        comma()
        buf.WriteString(fmt.Sprintf("Value=%v", e.BadValue))
    }
    if m.matchOrigin || m.requireOriginWhenInvalid && e.Type == ErrorTypeInvalid {
        comma()
        buf.WriteString(fmt.Sprintf("Origin=%q", e.Origin))
    }
    if m.matchDetail != nil {
        comma()
        buf.WriteString(fmt.Sprintf("Detail=%q", e.Detail))
    }
    return "{" + buf.String() + "}"
}

// Exactly returns a derived ErrorMatcher which matches all fields exactly.
func (m ErrorMatcher) Exactly() ErrorMatcher {
    return m.ByType().ByField().ByValue().ByOrigin().ByDetailExact()
}

// ByType returns a derived ErrorMatcher which also matches by type.
func (m ErrorMatcher) ByType() ErrorMatcher {
    m.matchType = true
    return m
}

// ByField returns a derived ErrorMatcher which also matches by field path.
func (m ErrorMatcher) ByField() ErrorMatcher {
    m.matchField = true
    return m
}

// ByValue returns a derived ErrorMatcher which also matches by the errant
// value.
func (m ErrorMatcher) ByValue() ErrorMatcher {
    m.matchValue = true
    return m
}

// ByOrigin returns a derived ErrorMatcher which also matches by the origin.
func (m ErrorMatcher) ByOrigin() ErrorMatcher {
    m.matchOrigin = true
    return m
}

// RequireOriginWhenInvalid returns a derived ErrorMatcher which also requires
// the Origin field to be set when the Type is Invalid and the matcher is
// matching by Origin.
func (m ErrorMatcher) RequireOriginWhenInvalid() ErrorMatcher {
    m.requireOriginWhenInvalid = true
    return m
}

// ByDetailExact returns a derived ErrorMatcher which also matches errors by
// the exact detail string.
func (m ErrorMatcher) ByDetailExact() ErrorMatcher {
    m.matchDetail = func(want, got string) bool {
        return got == want
    }
    return m
}

// ByDetailSubstring returns a derived ErrorMatcher which also matches errors
// by a substring of the detail string.
func (m ErrorMatcher) ByDetailSubstring() ErrorMatcher {
    m.matchDetail = func(want, got string) bool {
        return strings.Contains(got, want)
    }
    return m
}

// ByDetailRegexp returns a derived ErrorMatcher which also matches errors by a
// regular expression of the detail string, where the "want" string is assumed
// to be a valid regular expression.
func (m ErrorMatcher) ByDetailRegexp() ErrorMatcher {
    m.matchDetail = func(want, got string) bool {
        return regexp.MustCompile(want).MatchString(got)
    }
    return m
}

// TestIntf lets users pass a testing.T while not coupling this package to Go's
// testing package.
type TestIntf interface {
    Helper()
    Errorf(format string, args ...any)
    Logf(format string, args ...any)
}

// Test compares two ErrorLists by the criteria configured in this matcher, and
// fails the test if they don't match. If a given "want" error matches multiple
// "got" errors, they will all be consumed. This might be OK (e.g. if there are
// multiple errors on the same field from the same origin) or it might be an
// insufficiently specific matcher, so these will be logged.
func (m ErrorMatcher) Test(tb TestIntf, want, got ErrorList) {
    tb.Helper()

    remaining := got
    for _, w := range want {
        tmp := make(ErrorList, 0, len(remaining))
        n := 0
        for _, g := range remaining {
            if m.Matches(w, g) {
                n++
            } else {
                tmp = append(tmp, g)
            }
        }
        if n == 0 {
            tb.Errorf("expected an error matching:\n%s", m.Render(w))
        } else if n > 1 {
            // This is not necessarily and error, but it's worth logging in
            // case it's not what the test author intended.
            tb.Logf("multiple errors matched:\n%s", m.Render(w))
        }
        remaining = tmp
    }
    if len(remaining) > 0 {
        for _, e := range remaining {
            exactly := m.Exactly() // makes a copy
            tb.Errorf("unmatched error:\n%s", exactly.Render(e))
        }
    }
}
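Note (not part of this commit): ErrorMatcher is a test helper; a minimal sketch of using it from a table-less unit test follows. The validateReplicas function and the "minimum" origin are illustrative assumptions; ErrorMatcher, its builder methods, Test, field.Invalid, field.NewPath and WithOrigin are the APIs added or already present in this vendored package.

package example

import (
    "testing"

    "k8s.io/apimachinery/pkg/util/validation/field"
)

func TestValidateReplicas(t *testing.T) {
    want := field.ErrorList{
        field.Invalid(field.NewPath("spec", "replicas"), -1, "").WithOrigin("minimum"),
    }
    got := validateReplicas(-1) // hypothetical function under test

    // Match on type, field path and origin, but ignore the human-readable detail.
    matcher := field.ErrorMatcher{}.ByType().ByField().ByOrigin().RequireOriginWhenInvalid()
    matcher.Test(t, want, got)
}

func validateReplicas(replicas int32) field.ErrorList {
    var allErrs field.ErrorList
    if replicas < 0 {
        allErrs = append(allErrs,
            field.Invalid(field.NewPath("spec", "replicas"), replicas, "must be greater than or equal to 0").WithOrigin("minimum"))
    }
    return allErrs
}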
vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go (generated, vendored): 132 changed lines
@@ -33,13 +33,35 @@ type Error struct {
 	Field    string
 	BadValue interface{}
 	Detail   string
+
+	// Origin uniquely identifies where this error was generated from. It is used in testing to
+	// compare expected errors against actual errors without relying on exact detail string matching.
+	// This allows tests to verify the correct validation logic triggered the error
+	// regardless of how the error message might be formatted or localized.
+	//
+	// The value should be either:
+	//   - A simple camelCase identifier (e.g., "maximum", "maxItems")
+	//   - A structured format using "format=<dash-style-identifier>" for validation errors related to specific formats
+	//     (e.g., "format=dns-label", "format=qualified-name")
+	//
+	// If the Origin corresponds to an existing declarative validation tag or JSON Schema keyword,
+	// use that same name for consistency.
+	//
+	// Origin should be set in the most deeply nested validation function that
+	// can still identify the unique source of the error.
+	Origin string
+
+	// CoveredByDeclarative is true when this error is covered by declarative
+	// validation. This field is to identify errors from imperative validation
+	// that should also be caught by declarative validation.
+	CoveredByDeclarative bool
 }

 var _ error = &Error{}

 // Error implements the error interface.
-func (v *Error) Error() string {
-	return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody())
+func (e *Error) Error() string {
+	return fmt.Sprintf("%s: %s", e.Field, e.ErrorBody())
 }

 type OmitValueType struct{}
@@ -48,21 +70,21 @@ var omitValue = OmitValueType{}

 // ErrorBody returns the error message without the field name. This is useful
 // for building nice-looking higher-level error reporting.
-func (v *Error) ErrorBody() string {
+func (e *Error) ErrorBody() string {
 	var s string
 	switch {
-	case v.Type == ErrorTypeRequired:
-		s = v.Type.String()
-	case v.Type == ErrorTypeForbidden:
-		s = v.Type.String()
-	case v.Type == ErrorTypeTooLong:
-		s = v.Type.String()
-	case v.Type == ErrorTypeInternal:
-		s = v.Type.String()
-	case v.BadValue == omitValue:
-		s = v.Type.String()
+	case e.Type == ErrorTypeRequired:
+		s = e.Type.String()
+	case e.Type == ErrorTypeForbidden:
+		s = e.Type.String()
+	case e.Type == ErrorTypeTooLong:
+		s = e.Type.String()
+	case e.Type == ErrorTypeInternal:
+		s = e.Type.String()
+	case e.BadValue == omitValue:
+		s = e.Type.String()
 	default:
-		value := v.BadValue
+		value := e.BadValue
 		valueType := reflect.TypeOf(value)
 		if value == nil || valueType == nil {
 			value = "null"
@@ -76,26 +98,38 @@ func (v *Error) ErrorBody() string {
 		switch t := value.(type) {
 		case int64, int32, float64, float32, bool:
 			// use simple printer for simple types
-			s = fmt.Sprintf("%s: %v", v.Type, value)
+			s = fmt.Sprintf("%s: %v", e.Type, value)
 		case string:
-			s = fmt.Sprintf("%s: %q", v.Type, t)
+			s = fmt.Sprintf("%s: %q", e.Type, t)
 		case fmt.Stringer:
 			// anything that defines String() is better than raw struct
-			s = fmt.Sprintf("%s: %s", v.Type, t.String())
+			s = fmt.Sprintf("%s: %s", e.Type, t.String())
 		default:
 			// fallback to raw struct
 			// TODO: internal types have panic guards against json.Marshalling to prevent
 			// accidental use of internal types in external serialized form. For now, use
 			// %#v, although it would be better to show a more expressive output in the future
-			s = fmt.Sprintf("%s: %#v", v.Type, value)
+			s = fmt.Sprintf("%s: %#v", e.Type, value)
 		}
 	}
-	if len(v.Detail) != 0 {
-		s += fmt.Sprintf(": %s", v.Detail)
+	if len(e.Detail) != 0 {
+		s += fmt.Sprintf(": %s", e.Detail)
 	}
 	return s
 }

+// WithOrigin adds origin information to the FieldError
+func (e *Error) WithOrigin(o string) *Error {
+	e.Origin = o
+	return e
+}
+
+// MarkCoveredByDeclarative marks the error as covered by declarative validation.
+func (e *Error) MarkCoveredByDeclarative() *Error {
+	e.CoveredByDeclarative = true
+	return e
+}
+
 // ErrorType is a machine readable value providing more detail about why
 // a field is invalid. These values are expected to match 1-1 with
 // CauseType in api/types.go.
@@ -169,32 +203,32 @@ func (t ErrorType) String() string {

 // TypeInvalid returns a *Error indicating "type is invalid"
 func TypeInvalid(field *Path, value interface{}, detail string) *Error {
-	return &Error{ErrorTypeTypeInvalid, field.String(), value, detail}
+	return &Error{ErrorTypeTypeInvalid, field.String(), value, detail, "", false}
 }

 // NotFound returns a *Error indicating "value not found". This is
 // used to report failure to find a requested value (e.g. looking up an ID).
 func NotFound(field *Path, value interface{}) *Error {
-	return &Error{ErrorTypeNotFound, field.String(), value, ""}
+	return &Error{ErrorTypeNotFound, field.String(), value, "", "", false}
 }

 // Required returns a *Error indicating "value required". This is used
 // to report required values that are not provided (e.g. empty strings, null
 // values, or empty arrays).
 func Required(field *Path, detail string) *Error {
-	return &Error{ErrorTypeRequired, field.String(), "", detail}
+	return &Error{ErrorTypeRequired, field.String(), "", detail, "", false}
 }

 // Duplicate returns a *Error indicating "duplicate value". This is
 // used to report collisions of values that must be unique (e.g. names or IDs).
 func Duplicate(field *Path, value interface{}) *Error {
-	return &Error{ErrorTypeDuplicate, field.String(), value, ""}
+	return &Error{ErrorTypeDuplicate, field.String(), value, "", "", false}
 }

 // Invalid returns a *Error indicating "invalid value". This is used
 // to report malformed values (e.g. failed regex match, too long, out of bounds).
 func Invalid(field *Path, value interface{}, detail string) *Error {
-	return &Error{ErrorTypeInvalid, field.String(), value, detail}
+	return &Error{ErrorTypeInvalid, field.String(), value, detail, "", false}
 }

 // NotSupported returns a *Error indicating "unsupported value".
@@ -209,7 +243,7 @@ func NotSupported[T ~string](field *Path, value interface{}, validValues []T) *E
 		}
 		detail = "supported values: " + strings.Join(quotedValues, ", ")
 	}
-	return &Error{ErrorTypeNotSupported, field.String(), value, detail}
+	return &Error{ErrorTypeNotSupported, field.String(), value, detail, "", false}
 }

 // Forbidden returns a *Error indicating "forbidden". This is used to
@@ -217,7 +251,7 @@ func NotSupported[T ~string](field *Path, value interface{}, validValues []T) *E
 // some conditions, but which are not permitted by current conditions (e.g.
 // security policy).
 func Forbidden(field *Path, detail string) *Error {
-	return &Error{ErrorTypeForbidden, field.String(), "", detail}
+	return &Error{ErrorTypeForbidden, field.String(), "", detail, "", false}
 }

 // TooLong returns a *Error indicating "too long". This is used to report that
@@ -231,7 +265,7 @@ func TooLong(field *Path, value interface{}, maxLength int) *Error {
 	} else {
 		msg = "value is too long"
 	}
-	return &Error{ErrorTypeTooLong, field.String(), "<value omitted>", msg}
+	return &Error{ErrorTypeTooLong, field.String(), "<value omitted>", msg, "", false}
 }

 // TooLongMaxLength returns a *Error indicating "too long".
@@ -259,14 +293,14 @@ func TooMany(field *Path, actualQuantity, maxQuantity int) *Error {
 		actual = omitValue
 	}

-	return &Error{ErrorTypeTooMany, field.String(), actual, msg}
+	return &Error{ErrorTypeTooMany, field.String(), actual, msg, "", false}
 }

 // InternalError returns a *Error indicating "internal error". This is used
 // to signal that an error was found that was not directly related to user
 // input. The err argument must be non-nil.
 func InternalError(field *Path, err error) *Error {
-	return &Error{ErrorTypeInternal, field.String(), nil, err.Error()}
+	return &Error{ErrorTypeInternal, field.String(), nil, err.Error(), "", false}
 }

 // ErrorList holds a set of Errors. It is plausible that we might one day have
@@ -285,6 +319,22 @@ func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher {
 	}
 }

+// WithOrigin sets the origin for all errors in the list and returns the updated list.
+func (list ErrorList) WithOrigin(origin string) ErrorList {
+	for _, err := range list {
+		err.Origin = origin
+	}
+	return list
+}
+
+// MarkCoveredByDeclarative marks all errors in the list as covered by declarative validation.
+func (list ErrorList) MarkCoveredByDeclarative() ErrorList {
+	for _, err := range list {
+		err.CoveredByDeclarative = true
+	}
+	return list
+}
+
 // ToAggregate converts the ErrorList into an errors.Aggregate.
 func (list ErrorList) ToAggregate() utilerrors.Aggregate {
 	if len(list) == 0 {
@@ -321,3 +371,25 @@ func (list ErrorList) Filter(fns ...utilerrors.Matcher) ErrorList {
 	// FilterOut takes an Aggregate and returns an Aggregate
 	return fromAggregate(err.(utilerrors.Aggregate))
 }
+
+// ExtractCoveredByDeclarative returns a new ErrorList containing only the errors that should be covered by declarative validation.
+func (list ErrorList) ExtractCoveredByDeclarative() ErrorList {
+	newList := ErrorList{}
+	for _, err := range list {
+		if err.CoveredByDeclarative {
+			newList = append(newList, err)
+		}
+	}
+	return newList
+}
+
+// RemoveCoveredByDeclarative returns a new ErrorList containing only the errors that should not be covered by declarative validation.
+func (list ErrorList) RemoveCoveredByDeclarative() ErrorList {
+	newList := ErrorList{}
+	for _, err := range list {
+		if !err.CoveredByDeclarative {
+			newList = append(newList, err)
+		}
+	}
+	return newList
+}
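Note (not part of this commit): the new Origin and CoveredByDeclarative fields and the ErrorList helpers above support declarative-validation testing. A minimal sketch of a validator using them; validateName, splitErrors and the 63-character limit are illustrative assumptions, while WithOrigin, MarkCoveredByDeclarative, ExtractCoveredByDeclarative and RemoveCoveredByDeclarative are the methods added in this diff.

package example

import (
    "k8s.io/apimachinery/pkg/util/validation/field"
)

func validateName(name string) field.ErrorList {
    var allErrs field.ErrorList
    if len(name) > 63 {
        allErrs = append(allErrs,
            field.Invalid(field.NewPath("metadata", "name"), name, "must be no more than 63 characters"))
    }
    // Stamp every error in the list with the same origin and mark it as also
    // covered by declarative validation.
    return allErrs.WithOrigin("format=dns-label").MarkCoveredByDeclarative()
}

func splitErrors(all field.ErrorList) (declarative, imperativeOnly field.ErrorList) {
    return all.ExtractCoveredByDeclarative(), all.RemoveCoveredByDeclarative()
}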
vendor/k8s.io/apimachinery/pkg/util/validation/ip.go (generated, vendored, new file): 278 added lines
@@ -0,0 +1,278 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package validation

import (
    "fmt"
    "net"
    "net/netip"
    "slices"

    "k8s.io/apimachinery/pkg/util/validation/field"
    "k8s.io/klog/v2"
    netutils "k8s.io/utils/net"
)

func parseIP(fldPath *field.Path, value string, strictValidation bool) (net.IP, field.ErrorList) {
    var allErrors field.ErrorList

    ip := netutils.ParseIPSloppy(value)
    if ip == nil {
        allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)"))
        return nil, allErrors
    }

    if strictValidation {
        addr, err := netip.ParseAddr(value)
        if err != nil {
            // If netutils.ParseIPSloppy parsed it, but netip.ParseAddr
            // doesn't, then it must have illegal leading 0s.
            allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have leading 0s"))
        }
        if addr.Is4In6() {
            allErrors = append(allErrors, field.Invalid(fldPath, value, "must not be an IPv4-mapped IPv6 address"))
        }
    }

    return ip, allErrors
}

// IsValidIPForLegacyField tests that the argument is a valid IP address for a "legacy"
// API field that predates strict IP validation. In particular, this allows IPs that are
// not in canonical form (e.g., "FE80:0:0:0:0:0:0:0abc" instead of "fe80::abc").
//
// If strictValidation is false, this also allows IPs in certain invalid or ambiguous
// formats:
//
//  1. IPv4 IPs are allowed to have leading "0"s in octets (e.g. "010.002.003.004").
//     Historically, net.ParseIP (and later netutils.ParseIPSloppy) simply ignored leading
//     "0"s in IPv4 addresses, but most libc-based software treats 0-prefixed IPv4 octets
//     as octal, meaning different software might interpret the same string as two
//     different IPs, potentially leading to security issues. (Current net.ParseIP and
//     netip.ParseAddr simply reject inputs with leading "0"s.)
//
//  2. IPv4-mapped IPv6 IPs (e.g. "::ffff:1.2.3.4") are allowed. These can also lead to
//     different software interpreting the value in different ways, because they may be
//     treated as IPv4 by some software and IPv6 by other software. (net.ParseIP and
//     netip.ParseAddr both allow these, but there are no use cases for representing IPv4
//     addresses as IPv4-mapped IPv6 addresses in Kubernetes.)
//
// Alternatively, when validating an update to an existing field, you can pass a list of
// IP values from the old object that should be accepted if they appear in the new object
// even if they are not valid.
//
// This function should only be used to validate the existing fields that were
// historically validated in this way, and strictValidation should be true unless the
// StrictIPCIDRValidation feature gate is disabled. Use IsValidIP for parsing new fields.
func IsValidIPForLegacyField(fldPath *field.Path, value string, strictValidation bool, validOldIPs []string) field.ErrorList {
    if slices.Contains(validOldIPs, value) {
        return nil
    }
    _, allErrors := parseIP(fldPath, value, strictValidation)
    return allErrors.WithOrigin("format=ip-sloppy")
}

// IsValidIP tests that the argument is a valid IP address, according to current
// Kubernetes standards for IP address validation.
func IsValidIP(fldPath *field.Path, value string) field.ErrorList {
    ip, allErrors := parseIP(fldPath, value, true)
    if len(allErrors) != 0 {
        return allErrors.WithOrigin("format=ip-strict")
    }

    if value != ip.String() {
        allErrors = append(allErrors, field.Invalid(fldPath, value, fmt.Sprintf("must be in canonical form (%q)", ip.String())))
    }
    return allErrors.WithOrigin("format=ip-strict")
}

// GetWarningsForIP returns warnings for IP address values in non-standard forms. This
// should only be used with fields that are validated with IsValidIPForLegacyField().
func GetWarningsForIP(fldPath *field.Path, value string) []string {
    ip := netutils.ParseIPSloppy(value)
    if ip == nil {
        klog.ErrorS(nil, "GetWarningsForIP called on value that was not validated with IsValidIPForLegacyField", "field", fldPath, "value", value)
        return nil
    }

    addr, _ := netip.ParseAddr(value)
    if !addr.IsValid() || addr.Is4In6() {
        // This catches 2 cases: leading 0s (if ParseIPSloppy() accepted it but
        // ParseAddr() doesn't) or IPv4-mapped IPv6 (.Is4In6()). Either way,
        // re-stringifying the net.IP value will give the preferred form.
        return []string{
            fmt.Sprintf("%s: non-standard IP address %q will be considered invalid in a future Kubernetes release: use %q", fldPath, value, ip.String()),
        }
    }

    // If ParseIPSloppy() and ParseAddr() both accept it then it's fully valid, though
    // it may be non-canonical.
    if addr.Is6() && addr.String() != value {
        return []string{
            fmt.Sprintf("%s: IPv6 address %q should be in RFC 5952 canonical format (%q)", fldPath, value, addr.String()),
        }
    }

    return nil
}

func parseCIDR(fldPath *field.Path, value string, strictValidation bool) (*net.IPNet, field.ErrorList) {
    var allErrors field.ErrorList

    _, ipnet, err := netutils.ParseCIDRSloppy(value)
    if err != nil {
        allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid CIDR value, (e.g. 10.9.8.0/24 or 2001:db8::/64)"))
        return nil, allErrors
    }

    if strictValidation {
        prefix, err := netip.ParsePrefix(value)
        if err != nil {
            // If netutils.ParseCIDRSloppy parsed it, but netip.ParsePrefix
            // doesn't, then it must have illegal leading 0s (either in the
            // IP part or the prefix).
            allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have leading 0s in IP or prefix length"))
        } else if prefix.Addr().Is4In6() {
            allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have an IPv4-mapped IPv6 address"))
        } else if prefix.Addr() != prefix.Masked().Addr() {
            allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have bits set beyond the prefix length"))
        }
    }

    return ipnet, allErrors
}

// IsValidCIDRForLegacyField tests that the argument is a valid CIDR value for a "legacy"
// API field that predates strict IP validation. In particular, this allows IPs that are
// not in canonical form (e.g., "FE80:0abc:0:0:0:0:0:0/64" instead of "fe80:abc::/64").
//
// If strictValidation is false, this also allows CIDR values in certain invalid or
// ambiguous formats:
//
//  1. The IP part of the CIDR value is parsed as with IsValidIPForLegacyField with
//     strictValidation=false.
//
//  2. The CIDR value is allowed to be either a "subnet"/"mask" (with the lower bits after
//     the prefix length all being 0), or an "interface address" as with `ip addr` (with a
//     complete IP address and associated subnet length). With strict validation, the
//     value is required to be in "subnet"/"mask" form.
//
//  3. The prefix length is allowed to have leading 0s.
//
// Alternatively, when validating an update to an existing field, you can pass a list of
// CIDR values from the old object that should be accepted if they appear in the new
// object even if they are not valid.
//
// This function should only be used to validate the existing fields that were
// historically validated in this way, and strictValidation should be true unless the
// StrictIPCIDRValidation feature gate is disabled. Use IsValidCIDR or
// IsValidInterfaceAddress for parsing new fields.
func IsValidCIDRForLegacyField(fldPath *field.Path, value string, strictValidation bool, validOldCIDRs []string) field.ErrorList {
    if slices.Contains(validOldCIDRs, value) {
        return nil
    }

    _, allErrors := parseCIDR(fldPath, value, strictValidation)
    return allErrors
}

// IsValidCIDR tests that the argument is a valid CIDR value, according to current
// Kubernetes standards for CIDR validation. This function is only for
// "subnet"/"mask"-style CIDR values (e.g., "192.168.1.0/24", with no bits set beyond the
// prefix length). Use IsValidInterfaceAddress for "ifaddr"-style CIDR values.
func IsValidCIDR(fldPath *field.Path, value string) field.ErrorList {
    ipnet, allErrors := parseCIDR(fldPath, value, true)
    if len(allErrors) != 0 {
        return allErrors
    }

    if value != ipnet.String() {
        allErrors = append(allErrors, field.Invalid(fldPath, value, fmt.Sprintf("must be in canonical form (%q)", ipnet.String())))
    }
    return allErrors
}

// GetWarningsForCIDR returns warnings for CIDR values in non-standard forms. This should
// only be used with fields that are validated with IsValidCIDRForLegacyField().
func GetWarningsForCIDR(fldPath *field.Path, value string) []string {
    ip, ipnet, err := netutils.ParseCIDRSloppy(value)
    if err != nil {
        klog.ErrorS(err, "GetWarningsForCIDR called on value that was not validated with IsValidCIDRForLegacyField", "field", fldPath, "value", value)
        return nil
    }

    var warnings []string

    // Check for bits set after prefix length
    if !ip.Equal(ipnet.IP) {
        _, addrlen := ipnet.Mask.Size()
        singleIPCIDR := fmt.Sprintf("%s/%d", ip.String(), addrlen)
        warnings = append(warnings,
            fmt.Sprintf("%s: CIDR value %q is ambiguous in this context (should be %q or %q?)", fldPath, value, ipnet.String(), singleIPCIDR),
        )
    }

    prefix, _ := netip.ParsePrefix(value)
    addr := prefix.Addr()
    if !prefix.IsValid() || addr.Is4In6() {
        // This catches 2 cases: leading 0s (if ParseCIDRSloppy() accepted it but
        // ParsePrefix() doesn't) or IPv4-mapped IPv6 (.Is4In6()). Either way,
        // re-stringifying the net.IPNet value will give the preferred form.
        warnings = append(warnings,
            fmt.Sprintf("%s: non-standard CIDR value %q will be considered invalid in a future Kubernetes release: use %q", fldPath, value, ipnet.String()),
        )
    }

    // If ParseCIDRSloppy() and ParsePrefix() both accept it then it's fully valid,
    // though it may be non-canonical. But only check this if there are no other
    // warnings, since either of the other warnings would also cause a round-trip
    // failure.
    if len(warnings) == 0 && addr.Is6() && prefix.String() != value {
        warnings = append(warnings,
            fmt.Sprintf("%s: IPv6 CIDR value %q should be in RFC 5952 canonical format (%q)", fldPath, value, prefix.String()),
        )
    }

    return warnings
}

// IsValidInterfaceAddress tests that the argument is a valid "ifaddr"-style CIDR value in
// canonical form (e.g., "192.168.1.5/24", with a complete IP address and associated
// subnet length). Use IsValidCIDR for "subnet"/"mask"-style CIDR values (e.g.,
// "192.168.1.0/24").
func IsValidInterfaceAddress(fldPath *field.Path, value string) field.ErrorList {
    var allErrors field.ErrorList
    ip, ipnet, err := netutils.ParseCIDRSloppy(value)
    if err != nil {
        allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid address in CIDR form, (e.g. 10.9.8.7/24 or 2001:db8::1/64)"))
        return allErrors
    }

    // The canonical form of `value` is not `ipnet.String()`, because `ipnet` doesn't
    // include the bits after the prefix. We need to construct the canonical form
    // ourselves from `ip` and `ipnet.Mask`.
    maskSize, _ := ipnet.Mask.Size()
    if netutils.IsIPv4(ip) && maskSize > net.IPv4len*8 {
        // "::ffff:192.168.0.1/120" -> "192.168.0.1/24"
        maskSize -= (net.IPv6len - net.IPv4len) * 8
    }
    canonical := fmt.Sprintf("%s/%d", ip.String(), maskSize)
    if value != canonical {
        allErrors = append(allErrors, field.Invalid(fldPath, value, fmt.Sprintf("must be in canonical form (%q)", canonical)))
    }
    return allErrors
}
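Note (not part of this commit): the new ip.go splits IP/CIDR validation into strict functions for new fields and "legacy" functions plus warnings for existing fields. A minimal sketch of wiring both paths into a validator; validateAddresses and the field names (podIP, podCIDR, hostIP) are illustrative assumptions, while IsValidIP, IsValidCIDR, IsValidIPForLegacyField and GetWarningsForIP are the functions defined in the file above.

package example

import (
    "k8s.io/apimachinery/pkg/util/validation"
    "k8s.io/apimachinery/pkg/util/validation/field"
)

func validateAddresses(fldPath *field.Path, podIP, podCIDR, oldHostIP, hostIP string, strict bool) (field.ErrorList, []string) {
    var allErrs field.ErrorList

    // New fields: require canonical, strictly parsed values.
    allErrs = append(allErrs, validation.IsValidIP(fldPath.Child("podIP"), podIP)...)
    allErrs = append(allErrs, validation.IsValidCIDR(fldPath.Child("podCIDR"), podCIDR)...)

    // Legacy field: keep accepting the value already stored in the old object,
    // and surface non-standard forms as warnings rather than errors.
    allErrs = append(allErrs, validation.IsValidIPForLegacyField(fldPath.Child("hostIP"), hostIP, strict, []string{oldHostIP})...)
    warnings := validation.GetWarningsForIP(fldPath.Child("hostIP"), hostIP)

    return allErrs, warnings
}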
vendor/k8s.io/apimachinery/pkg/util/validation/validation.go (generated, vendored): 40 changed lines
@@ -24,7 +24,6 @@ import (
 	"unicode"

 	"k8s.io/apimachinery/pkg/util/validation/field"
-	netutils "k8s.io/utils/net"
 )

 const qnameCharFmt string = "[A-Za-z0-9]"
@@ -369,45 +368,6 @@ func IsValidPortName(port string) []string {
 	return errs
 }

-// IsValidIP tests that the argument is a valid IP address.
-func IsValidIP(fldPath *field.Path, value string) field.ErrorList {
-	var allErrors field.ErrorList
-	if netutils.ParseIPSloppy(value) == nil {
-		allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)"))
-	}
-	return allErrors
-}
-
-// IsValidIPv4Address tests that the argument is a valid IPv4 address.
-func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList {
-	var allErrors field.ErrorList
-	ip := netutils.ParseIPSloppy(value)
-	if ip == nil || ip.To4() == nil {
-		allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv4 address"))
-	}
-	return allErrors
-}
-
-// IsValidIPv6Address tests that the argument is a valid IPv6 address.
-func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList {
-	var allErrors field.ErrorList
-	ip := netutils.ParseIPSloppy(value)
-	if ip == nil || ip.To4() != nil {
-		allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv6 address"))
-	}
-	return allErrors
-}
-
-// IsValidCIDR tests that the argument is a valid CIDR value.
-func IsValidCIDR(fldPath *field.Path, value string) field.ErrorList {
-	var allErrors field.ErrorList
-	_, _, err := netutils.ParseCIDRSloppy(value)
-	if err != nil {
-		allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid CIDR value, (e.g. 10.9.8.0/24 or 2001:db8::/64)"))
-	}
-	return allErrors
-}
-
 const percentFmt string = "[0-9]+%"
 const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'"

vendor/k8s.io/apimachinery/pkg/util/version/doc.go (generated, vendored): 2 changed lines
@@ -15,4 +15,4 @@ limitations under the License.
 */

 // Package version provides utilities for version number comparisons
-package version // import "k8s.io/apimachinery/pkg/util/version"
+package version
vendor/k8s.io/apimachinery/pkg/util/version/version.go (generated, vendored): 18 changed lines
@@ -33,7 +33,6 @@ type Version struct {
 	semver        bool
 	preRelease    string
 	buildMetadata string
-	info          apimachineryversion.Info
 }

 var (
@@ -456,27 +455,28 @@ func (v *Version) Compare(other string) (int, error) {
 	return v.compareInternal(ov), nil
 }

-// WithInfo returns copy of the version object with requested info
+// WithInfo returns copy of the version object.
+// Deprecated: The Info field has been removed from the Version struct. This method no longer modifies the Version object.
 func (v *Version) WithInfo(info apimachineryversion.Info) *Version {
 	result := *v
-	result.info = info
 	return &result
 }

 // Info returns the version information of a component.
 // Deprecated: Use Info() from effective version instead.
 func (v *Version) Info() *apimachineryversion.Info {
 	if v == nil {
 		return nil
 	}
-	// in case info is empty, or the major and minor in info is different from the actual major and minor
-	v.info.Major = itoa(v.Major())
-	v.info.Minor = itoa(v.Minor())
-	if v.info.GitVersion == "" {
-		v.info.GitVersion = v.String()
+	return &apimachineryversion.Info{
+		Major:      Itoa(v.Major()),
+		Minor:      Itoa(v.Minor()),
+		GitVersion: v.String(),
 	}
-	return &v.info
 }

-func itoa(i uint) string {
+func Itoa(i uint) string {
 	if i == 0 {
 		return ""
 	}
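Note (not part of this commit): after this change Version no longer stores an Info struct; Info() synthesizes one from the parsed version, and the helper is exported as Itoa. A minimal sketch of the resulting behavior; printVersion and the "1.33.0" literal are illustrative, MustParseGeneric, Info and Itoa are existing/updated APIs of this vendored package.

package example

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/version"
)

func printVersion() {
    v := version.MustParseGeneric("1.33.0")

    // Info() now builds the struct on the fly from the parsed version.
    info := v.Info()
    fmt.Println(info.Major, info.Minor, info.GitVersion) // 1 33 1.33.0

    // Itoa is the exported replacement for the former package-private itoa helper.
    fmt.Println(version.Itoa(v.Minor())) // 33
}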
vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go (generated, vendored): 50 changed lines
@@ -157,6 +157,8 @@ func (b Backoff) DelayWithReset(c clock.Clock, resetInterval time.Duration) Dela
 // Until is syntactic sugar on top of JitterUntil with zero jitter factor and
 // with sliding = true (which means the timer for period starts after the f
 // completes).
+//
+// Contextual logging: UntilWithContext should be used instead of Until in code which supports contextual logging.
 func Until(f func(), period time.Duration, stopCh <-chan struct{}) {
 	JitterUntil(f, period, 0.0, true, stopCh)
 }
@@ -176,6 +178,8 @@ func UntilWithContext(ctx context.Context, f func(context.Context), period time.
 // NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter
 // factor, with sliding = false (meaning the timer for period starts at the same
 // time as the function starts).
+//
+// Contextual logging: NonSlidingUntilWithContext should be used instead of NonSlidingUntil in code which supports contextual logging.
 func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) {
 	JitterUntil(f, period, 0.0, false, stopCh)
 }
@@ -200,19 +204,44 @@ func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), pe
 //
 // Close stopCh to stop. f may not be invoked if stop channel is already
 // closed. Pass NeverStop to if you don't want it stop.
+//
+// Contextual logging: JitterUntilWithContext should be used instead of JitterUntil in code which supports contextual logging.
 func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) {
 	BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh)
 }

+// JitterUntilWithContext loops until context is done, running f every period.
+//
+// If jitterFactor is positive, the period is jittered before every run of f.
+// If jitterFactor is not positive, the period is unchanged and not jittered.
+//
+// If sliding is true, the period is computed after f runs. If it is false then
+// period includes the runtime for f.
+//
+// Cancel context to stop. f may not be invoked if context is already done.
+func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) {
+	BackoffUntilWithContext(ctx, f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding)
+}
+
 // BackoffUntil loops until stop channel is closed, run f every duration given by BackoffManager.
 //
 // If sliding is true, the period is computed after f runs. If it is false then
 // period includes the runtime for f.
+//
+// Contextual logging: BackoffUntilWithContext should be used instead of BackoffUntil in code which supports contextual logging.
 func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) {
+	BackoffUntilWithContext(ContextForChannel(stopCh), func(context.Context) { f() }, backoff, sliding)
+}
+
+// BackoffUntilWithContext loops until context is done, run f every duration given by BackoffManager.
+//
+// If sliding is true, the period is computed after f runs. If it is false then
+// period includes the runtime for f.
+func BackoffUntilWithContext(ctx context.Context, f func(ctx context.Context), backoff BackoffManager, sliding bool) {
 	var t clock.Timer
 	for {
 		select {
-		case <-stopCh:
+		case <-ctx.Done():
 			return
 		default:
 		}
@@ -222,8 +251,8 @@ func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan
 		}

 		func() {
-			defer runtime.HandleCrash()
-			f()
+			defer runtime.HandleCrashWithContext(ctx)
+			f(ctx)
 		}()

 		if sliding {
@@ -236,7 +265,7 @@ func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan
 		// In order to mitigate we re-check stopCh at the beginning
 		// of every loop to prevent extra executions of f().
 		select {
-		case <-stopCh:
+		case <-ctx.Done():
 			if !t.Stop() {
 				<-t.C()
 			}
@@ -246,19 +275,6 @@ func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan
 	}
 }

-// JitterUntilWithContext loops until context is done, running f every period.
-//
-// If jitterFactor is positive, the period is jittered before every run of f.
-// If jitterFactor is not positive, the period is unchanged and not jittered.
-//
-// If sliding is true, the period is computed after f runs. If it is false then
-// period includes the runtime for f.
-//
-// Cancel context to stop. f may not be invoked if context is already expired.
-func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) {
-	JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done())
-}
-
 // backoffManager provides simple backoff behavior in a threadsafe manner to a caller.
 type backoffManager struct {
 	backoff Backoff
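Note (not part of this commit): the loop machinery is now context-first; the channel-based entry points wrap BackoffUntilWithContext. A minimal sketch of preferring the context-aware variant in caller code; runSyncLoop, the sync callback and the 10-second period are illustrative assumptions, while JitterUntilWithContext is the function shown above.

package example

import (
    "context"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

func runSyncLoop(ctx context.Context, sync func(context.Context)) {
    // Context-aware replacement for JitterUntil(f, period, jitter, sliding, stopCh):
    // the loop exits when ctx is cancelled, and the callback receives the context.
    wait.JitterUntilWithContext(ctx, sync, 10*time.Second, 0.2, true)
}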
vendor/k8s.io/apimachinery/pkg/util/wait/doc.go (generated, vendored): 2 changed lines
@@ -16,4 +16,4 @@ limitations under the License.

 // Package wait provides tools for polling or listening for changes
 // to a condition.
-package wait // import "k8s.io/apimachinery/pkg/util/wait"
+package wait
vendor/k8s.io/apimachinery/pkg/util/wait/loop.go (generated, vendored): 4 changed lines
@@ -49,7 +49,7 @@ func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding
 	// if we haven't requested immediate execution, delay once
 	if immediate {
 		if ok, err := func() (bool, error) {
-			defer runtime.HandleCrash()
+			defer runtime.HandleCrashWithContext(ctx)
 			return condition(ctx)
 		}(); err != nil || ok {
 			return err
@@ -83,7 +83,7 @@ func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding
 			t.Next()
 		}
 		if ok, err := func() (bool, error) {
-			defer runtime.HandleCrash()
+			defer runtime.HandleCrashWithContext(ctx)
 			return condition(ctx)
 		}(); err != nil || ok {
 			return err
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go (generated, vendored): 9 changed lines
@@ -80,6 +80,10 @@ func Forever(f func(), period time.Duration) {
 	Until(f, period, NeverStop)
 }

+// jitterRand is a dedicated random source for jitter calculations.
+// It defaults to rand.Float64, but is a package variable so it can be overridden to make unit tests deterministic.
+var jitterRand = rand.Float64
+
 // Jitter returns a time.Duration between duration and duration + maxFactor *
 // duration.
 //
@@ -89,7 +93,7 @@ func Jitter(duration time.Duration, maxFactor float64) time.Duration {
 	if maxFactor <= 0.0 {
 		maxFactor = 1.0
 	}
-	wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration))
+	wait := duration + time.Duration(jitterRand()*maxFactor*float64(duration))
 	return wait
 }

@@ -141,6 +145,7 @@ func (c channelContext) Value(key any) any { return nil }
 //
 // Deprecated: Will be removed when the legacy polling methods are removed.
 func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) {
+	//nolint:logcheck // Already deprecated.
 	defer runtime.HandleCrash()
 	return condition()
 }
@@ -150,7 +155,7 @@ func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) {
 //
 // Deprecated: Will be removed when the legacy polling methods are removed.
 func runConditionWithCrashProtectionWithContext(ctx context.Context, condition ConditionWithContextFunc) (bool, error) {
-	defer runtime.HandleCrash()
+	defer runtime.HandleCrashWithContext(ctx)
 	return condition(ctx)
 }

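Note (not part of this commit): jitterRand above is package-private, so only apimachinery's own tests can swap the random source; external callers keep using Jitter as before. A minimal sketch under that assumption; resyncPeriod and the 0.5 factor are illustrative.

package example

import (
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

func resyncPeriod(base time.Duration) time.Duration {
    // Returns a duration in [base, base+0.5*base); the randomness now flows
    // through the package-level jitterRand hook shown in the hunk above.
    return wait.Jitter(base, 0.5)
}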
vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go (generated, vendored): 163 changed lines
@ -20,10 +20,12 @@ import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
jsonutil "k8s.io/apimachinery/pkg/util/json"
|
||||
|
||||
@ -92,7 +94,7 @@ func UnmarshalStrict(data []byte, v interface{}) error {
|
||||
// YAML decoding path is not used (so that error messages are
|
||||
// JSON specific).
|
||||
func ToJSON(data []byte) ([]byte, error) {
|
||||
if hasJSONPrefix(data) {
|
||||
if IsJSONBuffer(data) {
|
||||
return data, nil
|
||||
}
|
||||
return yaml.YAMLToJSON(data)
|
||||
@ -102,7 +104,8 @@ func ToJSON(data []byte) ([]byte, error) {
|
||||
// separating individual documents. It first converts the YAML
|
||||
// body to JSON, then unmarshals the JSON.
|
||||
type YAMLToJSONDecoder struct {
|
||||
reader Reader
|
||||
reader Reader
|
||||
inputOffset int
|
||||
}
|
||||
|
||||
// NewYAMLToJSONDecoder decodes YAML documents from the provided
|
||||
@ -121,7 +124,7 @@ func NewYAMLToJSONDecoder(r io.Reader) *YAMLToJSONDecoder {
|
||||
// yaml.Unmarshal.
|
||||
func (d *YAMLToJSONDecoder) Decode(into interface{}) error {
|
||||
bytes, err := d.reader.Read()
|
||||
if err != nil && err != io.EOF {
|
||||
if err != nil && err != io.EOF { //nolint:errorlint
|
||||
return err
|
||||
}
|
||||
|
||||
@ -131,9 +134,14 @@ func (d *YAMLToJSONDecoder) Decode(into interface{}) error {
|
||||
return YAMLSyntaxError{err}
|
||||
}
|
||||
}
|
||||
d.inputOffset += len(bytes)
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *YAMLToJSONDecoder) InputOffset() int {
|
||||
return d.inputOffset
|
||||
}
|
||||
|
||||
// YAMLDecoder reads chunks of objects and returns ErrShortBuffer if
|
||||
// the data is not sufficient.
|
||||
type YAMLDecoder struct {
|
||||
@ -229,18 +237,20 @@ func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err
return 0, nil, nil
}

// decoder is a convenience interface for Decode.
type decoder interface {
Decode(into interface{}) error
}

// YAMLOrJSONDecoder attempts to decode a stream of JSON documents or
// YAML documents by sniffing for a leading { character.
// YAMLOrJSONDecoder attempts to decode a stream of JSON or YAML documents.
// While JSON is YAML, the way Go's JSON decode defines a multi-document stream
// is a series of JSON objects (e.g. {}{}), but YAML defines a multi-document
// stream as a series of documents separated by "---".
//
// This decoder will attempt to decode the stream as JSON first, and if that
// fails, it will switch to YAML. Once it determines the stream is JSON (by
// finding a non-YAML-delimited series of objects), it will not switch to YAML.
// Once it switches to YAML it will not switch back to JSON.
type YAMLOrJSONDecoder struct {
r io.Reader
bufferSize int

decoder decoder
json *json.Decoder
yaml *YAMLToJSONDecoder
stream *StreamReader
count int // how many objects have been decoded
}

type JSONSyntaxError struct {
@ -265,31 +275,108 @@ func (e YAMLSyntaxError) Error() string {
// how far into the stream the decoder will look to figure out whether this
// is a JSON stream (has whitespace followed by an open brace).
func NewYAMLOrJSONDecoder(r io.Reader, bufferSize int) *YAMLOrJSONDecoder {
return &YAMLOrJSONDecoder{
r: r,
bufferSize: bufferSize,
d := &YAMLOrJSONDecoder{}

reader, _, mightBeJSON := GuessJSONStream(r, bufferSize)
d.stream = reader
if mightBeJSON {
d.json = json.NewDecoder(reader)
} else {
d.yaml = NewYAMLToJSONDecoder(reader)
}
return d
}
// Decode unmarshals the next object from the underlying stream into the
// provided object, or returns an error.
func (d *YAMLOrJSONDecoder) Decode(into interface{}) error {
if d.decoder == nil {
buffer, _, isJSON := GuessJSONStream(d.r, d.bufferSize)
if isJSON {
d.decoder = json.NewDecoder(buffer)
// Because we don't know if this is a JSON or YAML stream, a failure from
// both decoders is ambiguous. When in doubt, it will return the error from
// the JSON decoder. Unfortunately, this means that if the first document
// is invalid YAML, the error won't be awesome.
// TODO: the errors from YAML are not great, we could improve them a lot.
var firstErr error
if d.json != nil {
err := d.json.Decode(into)
if err == nil {
d.stream.Consume(int(d.json.InputOffset()) - d.stream.Consumed())
d.count++
return nil
}
if err == io.EOF { //nolint:errorlint
return err
}
var syntax *json.SyntaxError
if ok := errors.As(err, &syntax); ok {
firstErr = JSONSyntaxError{
Offset: syntax.Offset,
Err: syntax,
}
} else {
d.decoder = NewYAMLToJSONDecoder(buffer)
firstErr = err
}
if d.count > 1 {
// If we found 0 or 1 JSON object(s), this stream is still
// ambiguous. But if we found more than 1 JSON object, then this
// is an unambiguous JSON stream, and we should not switch to YAML.
return err
}
// If JSON decoding hits the end of one object and then fails on the
// next, it leaves any leading whitespace in the buffer, which can
// confuse the YAML decoder. We just eat any whitespace we find, up to
// and including the first newline.
d.stream.Rewind()
if err := d.consumeWhitespace(); err == nil {
d.yaml = NewYAMLToJSONDecoder(d.stream)
}
d.json = nil
}
if d.yaml != nil {
err := d.yaml.Decode(into)
if err == nil {
d.stream.Consume(d.yaml.InputOffset() - d.stream.Consumed())
d.count++
return nil
}
if err == io.EOF { //nolint:errorlint
return err
}
if firstErr == nil {
firstErr = err
}
}
err := d.decoder.Decode(into)
if syntax, ok := err.(*json.SyntaxError); ok {
return JSONSyntaxError{
Offset: syntax.Offset,
Err: syntax,
if firstErr != nil {
return firstErr
}
return fmt.Errorf("decoding failed as both JSON and YAML")
}
func (d *YAMLOrJSONDecoder) consumeWhitespace() error {
consumed := 0
for {
buf, err := d.stream.ReadN(4)
if err != nil && err == io.EOF { //nolint:errorlint
return err
}
r, sz := utf8.DecodeRune(buf)
if r == utf8.RuneError || sz == 0 {
return fmt.Errorf("invalid utf8 rune")
}
d.stream.RewindN(len(buf) - sz)
if !unicode.IsSpace(r) {
d.stream.RewindN(sz)
d.stream.Consume(consumed)
return nil
}
if r == '\n' {
d.stream.Consume(consumed)
return nil
}
if err == io.EOF { //nolint:errorlint
break
}
}
return err
return io.EOF
}

type Reader interface {
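A hedged usage sketch of the reworked YAMLOrJSONDecoder (not part of the diff): the same call site handles a JSON object stream and a "---"-separated YAML stream.

package main

import (
	"fmt"
	"io"
	"strings"

	"k8s.io/apimachinery/pkg/util/yaml"
)

func decodeAll(input string) {
	// 4096 is an arbitrary sniff-buffer size for this sketch.
	dec := yaml.NewYAMLOrJSONDecoder(strings.NewReader(input), 4096)
	for {
		var obj map[string]interface{}
		if err := dec.Decode(&obj); err != nil {
			if err == io.EOF {
				return
			}
			panic(err)
		}
		fmt.Println(obj)
	}
}

func main() {
	decodeAll(`{"kind":"A"}{"kind":"B"}`) // JSON object stream
	decodeAll("kind: A\n---\nkind: B\n")  // YAML multi-document stream
}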
@ -311,7 +398,7 @@ func (r *YAMLReader) Read() ([]byte, error) {
var buffer bytes.Buffer
for {
line, err := r.reader.Read()
if err != nil && err != io.EOF {
if err != nil && err != io.EOF { //nolint:errorlint
return nil, err
}
@ -329,11 +416,11 @@ func (r *YAMLReader) Read() ([]byte, error) {
if buffer.Len() != 0 {
return buffer.Bytes(), nil
}
if err == io.EOF {
if err == io.EOF { //nolint:errorlint
return nil, err
}
}
if err == io.EOF {
if err == io.EOF { //nolint:errorlint
if buffer.Len() != 0 {
// If we're at EOF, we have a final, non-terminated line. Return it.
return buffer.Bytes(), nil
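The two hunks above only add //nolint annotations; for context, a hedged sketch of how YAMLReader is typically driven (not part of the diff), splitting a stream on "---" separators:

package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"

	"k8s.io/apimachinery/pkg/util/yaml"
)

func main() {
	r := yaml.NewYAMLReader(bufio.NewReader(strings.NewReader("a: 1\n---\nb: 2\n")))
	for {
		doc, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("%q\n", doc) // each raw document, separator excluded
	}
}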
@ -369,26 +456,20 @@ func (r *LineReader) Read() ([]byte, error) {
// GuessJSONStream scans the provided reader up to size, looking
// for an open brace indicating this is JSON. It will return the
// bufio.Reader it creates for the consumer.
func GuessJSONStream(r io.Reader, size int) (io.Reader, []byte, bool) {
buffer := bufio.NewReaderSize(r, size)
func GuessJSONStream(r io.Reader, size int) (*StreamReader, []byte, bool) {
buffer := NewStreamReader(r, size)
b, _ := buffer.Peek(size)
return buffer, b, hasJSONPrefix(b)
return buffer, b, IsJSONBuffer(b)
}

// IsJSONBuffer scans the provided buffer, looking
// for an open brace indicating this is JSON.
func IsJSONBuffer(buf []byte) bool {
return hasJSONPrefix(buf)
return hasPrefix(buf, jsonPrefix)
}

var jsonPrefix = []byte("{")

// hasJSONPrefix returns true if the provided buffer appears to start with
// a JSON open brace.
func hasJSONPrefix(buf []byte) bool {
return hasPrefix(buf, jsonPrefix)
}

// Return true if the first non-whitespace bytes in buf is
// prefix.
func hasPrefix(buf []byte, prefix []byte) bool {
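IsJSONBuffer is the newly exported replacement for the private hasJSONPrefix helper. A small sketch (not part of the diff) of what it reports:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/yaml"
)

func main() {
	// Leading whitespace is skipped before looking for the '{' prefix.
	fmt.Println(yaml.IsJSONBuffer([]byte("  {\"a\": 1}"))) // true
	fmt.Println(yaml.IsJSONBuffer([]byte("a: 1")))         // false
}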
130
vendor/k8s.io/apimachinery/pkg/util/yaml/stream_reader.go
generated
vendored
Normal file
@ -0,0 +1,130 @@
/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package yaml

import "io"
// StreamReader is a reader designed for consuming streams of variable-length
// messages. It buffers data until it is explicitly consumed, and can be
// rewound to re-read previous data.
type StreamReader struct {
r io.Reader
buf []byte
head int // current read offset into buf
ttlConsumed int // number of bytes which have been consumed
}

// NewStreamReader creates a new StreamReader wrapping the provided
// io.Reader.
func NewStreamReader(r io.Reader, size int) *StreamReader {
if size == 0 {
size = 4096
}
return &StreamReader{
r: r,
buf: make([]byte, 0, size), // Start with a reasonable capacity
}
}
// Read implements io.Reader. It first returns any buffered data after the
// current offset, and if that's exhausted, reads from the underlying reader
// and buffers the data. The returned data is not considered consumed until the
// Consume method is called.
func (r *StreamReader) Read(p []byte) (n int, err error) {
// If we have buffered data, return it
if r.head < len(r.buf) {
n = copy(p, r.buf[r.head:])
r.head += n
return n, nil
}

// If we've already hit EOF, return it
if r.r == nil {
return 0, io.EOF
}

// Read from the underlying reader
n, err = r.r.Read(p)
if n > 0 {
r.buf = append(r.buf, p[:n]...)
r.head += n
}
if err == nil {
return n, nil
}
if err == io.EOF {
// Store that we've hit EOF by setting r to nil
r.r = nil
}
return n, err
}
// ReadN reads exactly n bytes from the reader, blocking until all bytes are
// read or an error occurs. If an error occurs, the number of bytes read is
// returned along with the error. If EOF is hit before n bytes are read, this
// will return the bytes read so far, along with io.EOF. The returned data is
// not considered consumed until the Consume method is called.
func (r *StreamReader) ReadN(want int) ([]byte, error) {
ret := make([]byte, want)
off := 0
for off < want {
n, err := r.Read(ret[off:])
if err != nil {
return ret[:off+n], err
}
off += n
}
return ret, nil
}
// Peek returns the next n bytes without advancing the reader. The returned
// bytes are valid until the next call to Consume.
func (r *StreamReader) Peek(n int) ([]byte, error) {
buf, err := r.ReadN(n)
r.RewindN(len(buf))
if err != nil {
return buf, err
}
return buf, nil
}

// Rewind resets the reader to the beginning of the buffered data.
func (r *StreamReader) Rewind() {
r.head = 0
}

// RewindN rewinds the reader by n bytes. If n is greater than the current
// buffer, the reader is rewound to the beginning of the buffer.
func (r *StreamReader) RewindN(n int) {
r.head -= min(n, r.head)
}
// Consume discards up to n bytes of previously read data from the beginning of
// the buffer. Once consumed, that data is no longer available for rewinding.
// If n is greater than the current buffer, the buffer is cleared. Consume
// never consumes data from the underlying reader.
func (r *StreamReader) Consume(n int) {
n = min(n, len(r.buf))
r.buf = r.buf[n:]
r.head -= n
r.ttlConsumed += n
}

// Consumed returns the number of bytes consumed from the input reader.
func (r *StreamReader) Consumed() int {
return r.ttlConsumed
}
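A hedged end-to-end sketch of the new StreamReader (not part of the diff): data can be peeked at, read, rewound and re-read, and only disappears once it is explicitly consumed.

package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/util/yaml"
)

func main() {
	sr := yaml.NewStreamReader(strings.NewReader("hello world"), 0)

	// Peek looks ahead without moving the read offset.
	head, _ := sr.Peek(5)
	fmt.Printf("%s\n", head) // hello

	// ReadN advances the offset, but the bytes stay buffered...
	first, _ := sr.ReadN(5)
	fmt.Printf("%s\n", first) // hello

	// ...so Rewind lets us read them again.
	sr.Rewind()
	again, _ := sr.ReadN(5)
	fmt.Printf("%s\n", again) // hello

	// Consume discards the prefix for good and updates the running total.
	sr.Consume(5)
	fmt.Println(sr.Consumed()) // 5
}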