Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 18:53:35 +00:00)
rebase: bump k8s.io/kubernetes in the k8s-dependencies group
Bumps the k8s-dependencies group with 1 update: [k8s.io/kubernetes](https://github.com/kubernetes/kubernetes).

Updates `k8s.io/kubernetes` from 1.32.3 to 1.33.0
- [Release notes](https://github.com/kubernetes/kubernetes/releases)
- [Commits](https://github.com/kubernetes/kubernetes/compare/v1.32.3...v1.33.0)

---
updated-dependencies:
- dependency-name: k8s.io/kubernetes
  dependency-version: 1.33.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: k8s-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: Niels de Vos <ndevos@ibm.com>
Committed by: mergify[bot]
Parent: 4147d5d15a
Commit: 51895f8619
vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go (generated, vendored): 163 changed lines
@@ -20,10 +20,12 @@ import (
 	"bufio"
 	"bytes"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"strings"
 	"unicode"
+	"unicode/utf8"
 
 	jsonutil "k8s.io/apimachinery/pkg/util/json"
 
@@ -92,7 +94,7 @@ func UnmarshalStrict(data []byte, v interface{}) error {
 // YAML decoding path is not used (so that error messages are
 // JSON specific).
 func ToJSON(data []byte) ([]byte, error) {
-	if hasJSONPrefix(data) {
+	if IsJSONBuffer(data) {
 		return data, nil
 	}
 	return yaml.YAMLToJSON(data)
@@ -102,7 +104,8 @@ func ToJSON(data []byte) ([]byte, error) {
 // separating individual documents. It first converts the YAML
 // body to JSON, then unmarshals the JSON.
 type YAMLToJSONDecoder struct {
-	reader Reader
+	reader      Reader
+	inputOffset int
 }
 
 // NewYAMLToJSONDecoder decodes YAML documents from the provided
@@ -121,7 +124,7 @@ func NewYAMLToJSONDecoder(r io.Reader) *YAMLToJSONDecoder {
 // yaml.Unmarshal.
 func (d *YAMLToJSONDecoder) Decode(into interface{}) error {
 	bytes, err := d.reader.Read()
-	if err != nil && err != io.EOF {
+	if err != nil && err != io.EOF { //nolint:errorlint
 		return err
 	}
 
@@ -131,9 +134,14 @@ func (d *YAMLToJSONDecoder) Decode(into interface{}) error {
 			return YAMLSyntaxError{err}
 		}
 	}
+	d.inputOffset += len(bytes)
 	return err
 }
 
+func (d *YAMLToJSONDecoder) InputOffset() int {
+	return d.inputOffset
+}
+
 // YAMLDecoder reads chunks of objects and returns ErrShortBuffer if
 // the data is not sufficient.
 type YAMLDecoder struct {
@@ -229,18 +237,20 @@ func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err
 	return 0, nil, nil
 }
 
-// decoder is a convenience interface for Decode.
-type decoder interface {
-	Decode(into interface{}) error
-}
-
-// YAMLOrJSONDecoder attempts to decode a stream of JSON documents or
-// YAML documents by sniffing for a leading { character.
+// YAMLOrJSONDecoder attempts to decode a stream of JSON or YAML documents.
+// While JSON is YAML, the way Go's JSON decode defines a multi-document stream
+// is a series of JSON objects (e.g. {}{}), but YAML defines a multi-document
+// stream as a series of documents separated by "---".
+//
+// This decoder will attempt to decode the stream as JSON first, and if that
+// fails, it will switch to YAML. Once it determines the stream is JSON (by
+// finding a non-YAML-delimited series of objects), it will not switch to YAML.
+// Once it switches to YAML it will not switch back to JSON.
 type YAMLOrJSONDecoder struct {
-	r          io.Reader
-	bufferSize int
-
-	decoder decoder
+	json   *json.Decoder
+	yaml   *YAMLToJSONDecoder
+	stream *StreamReader
+	count  int // how many objects have been decoded
 }
 
 type JSONSyntaxError struct {
@@ -265,31 +275,108 @@ func (e YAMLSyntaxError) Error() string {
 // how far into the stream the decoder will look to figure out whether this
 // is a JSON stream (has whitespace followed by an open brace).
 func NewYAMLOrJSONDecoder(r io.Reader, bufferSize int) *YAMLOrJSONDecoder {
-	return &YAMLOrJSONDecoder{
-		r:          r,
-		bufferSize: bufferSize,
+	d := &YAMLOrJSONDecoder{}
+
+	reader, _, mightBeJSON := GuessJSONStream(r, bufferSize)
+	d.stream = reader
+	if mightBeJSON {
+		d.json = json.NewDecoder(reader)
+	} else {
+		d.yaml = NewYAMLToJSONDecoder(reader)
 	}
+	return d
 }
 
 // Decode unmarshals the next object from the underlying stream into the
 // provide object, or returns an error.
 func (d *YAMLOrJSONDecoder) Decode(into interface{}) error {
-	if d.decoder == nil {
-		buffer, _, isJSON := GuessJSONStream(d.r, d.bufferSize)
-		if isJSON {
-			d.decoder = json.NewDecoder(buffer)
+	// Because we don't know if this is a JSON or YAML stream, a failure from
+	// both decoders is ambiguous. When in doubt, it will return the error from
+	// the JSON decoder. Unfortunately, this means that if the first document
+	// is invalid YAML, the error won't be awesome.
+	// TODO: the errors from YAML are not great, we could improve them a lot.
+	var firstErr error
+	if d.json != nil {
+		err := d.json.Decode(into)
+		if err == nil {
+			d.stream.Consume(int(d.json.InputOffset()) - d.stream.Consumed())
+			d.count++
+			return nil
+		}
+		if err == io.EOF { //nolint:errorlint
+			return err
+		}
+		var syntax *json.SyntaxError
+		if ok := errors.As(err, &syntax); ok {
+			firstErr = JSONSyntaxError{
+				Offset: syntax.Offset,
+				Err:    syntax,
+			}
 		} else {
-			d.decoder = NewYAMLToJSONDecoder(buffer)
+			firstErr = err
 		}
+		if d.count > 1 {
+			// If we found 0 or 1 JSON object(s), this stream is still
+			// ambiguous. But if we found more than 1 JSON object, then this
+			// is an unambiguous JSON stream, and we should not switch to YAML.
+			return err
+		}
+		// If JSON decoding hits the end of one object and then fails on the
+		// next, it leaves any leading whitespace in the buffer, which can
+		// confuse the YAML decoder. We just eat any whitespace we find, up to
+		// and including the first newline.
+		d.stream.Rewind()
+		if err := d.consumeWhitespace(); err == nil {
+			d.yaml = NewYAMLToJSONDecoder(d.stream)
+		}
+		d.json = nil
 	}
-	err := d.decoder.Decode(into)
-	if syntax, ok := err.(*json.SyntaxError); ok {
-		return JSONSyntaxError{
-			Offset: syntax.Offset,
-			Err:    syntax,
+	if d.yaml != nil {
+		err := d.yaml.Decode(into)
+		if err == nil {
+			d.stream.Consume(d.yaml.InputOffset() - d.stream.Consumed())
+			d.count++
+			return nil
+		}
+		if err == io.EOF { //nolint:errorlint
+			return err
+		}
+		if firstErr == nil {
+			firstErr = err
 		}
 	}
-	return err
+	if firstErr != nil {
+		return firstErr
+	}
+	return fmt.Errorf("decoding failed as both JSON and YAML")
+}
+
+func (d *YAMLOrJSONDecoder) consumeWhitespace() error {
+	consumed := 0
+	for {
+		buf, err := d.stream.ReadN(4)
+		if err != nil && err == io.EOF { //nolint:errorlint
+			return err
+		}
+		r, sz := utf8.DecodeRune(buf)
+		if r == utf8.RuneError || sz == 0 {
+			return fmt.Errorf("invalid utf8 rune")
+		}
+		d.stream.RewindN(len(buf) - sz)
+		if !unicode.IsSpace(r) {
+			d.stream.RewindN(sz)
+			d.stream.Consume(consumed)
+			return nil
+		}
+		if r == '\n' {
+			d.stream.Consume(consumed)
+			return nil
+		}
+		if err == io.EOF { //nolint:errorlint
+			break
+		}
+	}
+	return io.EOF
 }
 
 type Reader interface {
@@ -311,7 +398,7 @@ func (r *YAMLReader) Read() ([]byte, error) {
 	var buffer bytes.Buffer
 	for {
 		line, err := r.reader.Read()
-		if err != nil && err != io.EOF {
+		if err != nil && err != io.EOF { //nolint:errorlint
 			return nil, err
 		}
 
@@ -329,11 +416,11 @@ func (r *YAMLReader) Read() ([]byte, error) {
 			if buffer.Len() != 0 {
 				return buffer.Bytes(), nil
 			}
-			if err == io.EOF {
+			if err == io.EOF { //nolint:errorlint
 				return nil, err
 			}
 		}
-		if err == io.EOF {
+		if err == io.EOF { //nolint:errorlint
 			if buffer.Len() != 0 {
 				// If we're at EOF, we have a final, non-terminated line. Return it.
 				return buffer.Bytes(), nil
@@ -369,26 +456,20 @@ func (r *LineReader) Read() ([]byte, error) {
 // GuessJSONStream scans the provided reader up to size, looking
 // for an open brace indicating this is JSON. It will return the
 // bufio.Reader it creates for the consumer.
-func GuessJSONStream(r io.Reader, size int) (io.Reader, []byte, bool) {
-	buffer := bufio.NewReaderSize(r, size)
+func GuessJSONStream(r io.Reader, size int) (*StreamReader, []byte, bool) {
+	buffer := NewStreamReader(r, size)
 	b, _ := buffer.Peek(size)
-	return buffer, b, hasJSONPrefix(b)
+	return buffer, b, IsJSONBuffer(b)
 }
 
 // IsJSONBuffer scans the provided buffer, looking
 // for an open brace indicating this is JSON.
 func IsJSONBuffer(buf []byte) bool {
-	return hasJSONPrefix(buf)
+	return hasPrefix(buf, jsonPrefix)
 }
 
 var jsonPrefix = []byte("{")
 
-// hasJSONPrefix returns true if the provided buffer appears to start with
-// a JSON open brace.
-func hasJSONPrefix(buf []byte) bool {
-	return hasPrefix(buf, jsonPrefix)
-}
-
 // Return true if the first non-whitespace bytes in buf is
 // prefix.
 func hasPrefix(buf []byte, prefix []byte) bool {
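For context, the rewritten YAMLOrJSONDecoder keeps its public surface (NewYAMLOrJSONDecoder plus Decode in a loop until io.EOF), so callers such as ceph-csi are unaffected. A minimal usage sketch; the multi-document input and the target struct are illustrative and not part of this change:

	package main

	import (
		"errors"
		"fmt"
		"io"
		"strings"

		yamlutil "k8s.io/apimachinery/pkg/util/yaml"
	)

	func main() {
		// Two YAML documents separated by "---"; the decoder sniffs the stream,
		// trying JSON first and falling back to YAML as the added comments describe.
		stream := strings.NewReader("name: a\n---\nname: b\n")

		dec := yamlutil.NewYAMLOrJSONDecoder(stream, 4096)
		for {
			var doc struct {
				Name string `json:"name"`
			}
			if err := dec.Decode(&doc); err != nil {
				if errors.Is(err, io.EOF) {
					break // end of stream
				}
				fmt.Println("decode error:", err)
				return
			}
			fmt.Println("decoded:", doc.Name)
		}
	}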
vendor/k8s.io/apimachinery/pkg/util/yaml/stream_reader.go (generated, vendored, new file): 130 lines
@@ -0,0 +1,130 @@
/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package yaml

import "io"

// StreamReader is a reader designed for consuming streams of variable-length
// messages. It buffers data until it is explicitly consumed, and can be
// rewound to re-read previous data.
type StreamReader struct {
	r           io.Reader
	buf         []byte
	head        int // current read offset into buf
	ttlConsumed int // number of bytes which have been consumed
}

// NewStreamReader creates a new StreamReader wrapping the provided
// io.Reader.
func NewStreamReader(r io.Reader, size int) *StreamReader {
	if size == 0 {
		size = 4096
	}
	return &StreamReader{
		r:   r,
		buf: make([]byte, 0, size), // Start with a reasonable capacity
	}
}

// Read implements io.Reader. It first returns any buffered data after the
// current offset, and if that's exhausted, reads from the underlying reader
// and buffers the data. The returned data is not considered consumed until the
// Consume method is called.
func (r *StreamReader) Read(p []byte) (n int, err error) {
	// If we have buffered data, return it
	if r.head < len(r.buf) {
		n = copy(p, r.buf[r.head:])
		r.head += n
		return n, nil
	}

	// If we've already hit EOF, return it
	if r.r == nil {
		return 0, io.EOF
	}

	// Read from the underlying reader
	n, err = r.r.Read(p)
	if n > 0 {
		r.buf = append(r.buf, p[:n]...)
		r.head += n
	}
	if err == nil {
		return n, nil
	}
	if err == io.EOF {
		// Store that we've hit EOF by setting r to nil
		r.r = nil
	}
	return n, err
}

// ReadN reads exactly n bytes from the reader, blocking until all bytes are
// read or an error occurs. If an error occurs, the number of bytes read is
// returned along with the error. If EOF is hit before n bytes are read, this
// will return the bytes read so far, along with io.EOF. The returned data is
// not considered consumed until the Consume method is called.
func (r *StreamReader) ReadN(want int) ([]byte, error) {
	ret := make([]byte, want)
	off := 0
	for off < want {
		n, err := r.Read(ret[off:])
		if err != nil {
			return ret[:off+n], err
		}
		off += n
	}
	return ret, nil
}

// Peek returns the next n bytes without advancing the reader. The returned
// bytes are valid until the next call to Consume.
func (r *StreamReader) Peek(n int) ([]byte, error) {
	buf, err := r.ReadN(n)
	r.RewindN(len(buf))
	if err != nil {
		return buf, err
	}
	return buf, nil
}

// Rewind resets the reader to the beginning of the buffered data.
func (r *StreamReader) Rewind() {
	r.head = 0
}

// RewindN rewinds the reader by n bytes. If n is greater than the current
// buffer, the reader is rewound to the beginning of the buffer.
func (r *StreamReader) RewindN(n int) {
	r.head -= min(n, r.head)
}

// Consume discards up to n bytes of previously read data from the beginning of
// the buffer. Once consumed, that data is no longer available for rewinding.
// If n is greater than the current buffer, the buffer is cleared. Consume
// never consume data from the underlying reader.
func (r *StreamReader) Consume(n int) {
	n = min(n, len(r.buf))
	r.buf = r.buf[n:]
	r.head -= n
	r.ttlConsumed += n
}

// Consumed returns the number of bytes consumed from the input reader.
func (r *StreamReader) Consumed() int {
	return r.ttlConsumed
}
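StreamReader is what lets the new Decode try JSON first and still hand untouched bytes to the YAML path: reads stay buffered and rewindable until Consume discards them. A small sketch of those semantics, assuming the exported NewStreamReader shown above; the input string, sizes, and printed values are illustrative:

	package main

	import (
		"fmt"
		"strings"

		yamlutil "k8s.io/apimachinery/pkg/util/yaml"
	)

	func main() {
		sr := yamlutil.NewStreamReader(strings.NewReader(`{"kind":"Pod"}`), 16)

		head, _ := sr.Peek(1)      // inspect the first byte without advancing
		fmt.Printf("%q\n", head)   // "{"

		buf, _ := sr.ReadN(7)      // read 7 bytes; they remain buffered
		fmt.Printf("%q\n", buf)    // "{\"kind\""

		sr.Rewind()                // the same 7 bytes can be re-read
		buf, _ = sr.ReadN(7)
		sr.Consume(len(buf))       // now discard them for good
		fmt.Println(sr.Consumed()) // 7
	}

This mirrors how Decode above rewinds the stream after a failed JSON attempt and consumes only the bytes a successful decode actually used.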