rebase: bump k8s.io/api in /api in the k8s-dependencies group

Bumps the k8s-dependencies group in /api with 1 update: [k8s.io/api](https://github.com/kubernetes/api).


Updates `k8s.io/api` from 0.32.3 to 0.33.0
- [Commits](https://github.com/kubernetes/api/compare/v0.32.3...v0.33.0)

---
updated-dependencies:
- dependency-name: k8s.io/api
  dependency-version: 0.33.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: k8s-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Authored by dependabot[bot] on 2025-05-06 12:29:48 +00:00; committed by mergify[bot]
parent 05e3827a4f
commit af12c6bf1b
57 changed files with 3813 additions and 2176 deletions


@ -1,12 +1,13 @@
module github.com/ceph/ceph-csi/api
go 1.23.0
toolchain go1.24.1
require (
github.com/ghodss/yaml v1.0.0
github.com/openshift/api v0.0.0-20240115183315-0793e918179d
github.com/stretchr/testify v1.10.0
k8s.io/api v0.32.3
k8s.io/api v0.33.0
)
require (
@ -14,7 +15,6 @@ require (
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@ -26,11 +26,12 @@ require (
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apimachinery v0.32.3 // indirect
k8s.io/apimachinery v0.33.0 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)
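
As a quick sanity check that the bump actually takes effect for consumers of this module, a throwaway Go file can reference a symbol that only exists in k8s.io/api v0.33.0, such as the Signal constants introduced later in this diff. A minimal, illustrative sketch (not part of this change):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// corev1.Signal and its constants are new in k8s.io/api v0.33.0, so this
	// file only compiles once go.mod resolves the bumped version.
	var sig corev1.Signal = corev1.SIGTERM
	fmt.Println("stop signal:", sig)
}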


@ -12,11 +12,9 @@ github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ4
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@ -35,8 +33,8 @@ github.com/openshift/api v0.0.0-20240115183315-0793e918179d/go.mod h1:CxgbWAlvu2
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@ -85,17 +83,20 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U=
k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU=
k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM=
k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ=
k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=


@ -1,10 +0,0 @@
language: go
go:
- 1.11.x
- 1.12.x
- 1.13.x
- master
script:
- go test -cover


@ -1,67 +0,0 @@
# How to contribute #
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement ##
Contributions to any Google project must be accompanied by a Contributor
License Agreement. This is not a copyright **assignment**, it simply gives
Google permission to use and redistribute your contributions as part of the
project.
* If you are an individual writing original source code and you're sure you
own the intellectual property, then you'll need to sign an [individual
CLA][].
* If you work for a company that wants to allow you to contribute your work,
then you'll need to sign a [corporate CLA][].
You generally only need to submit a CLA once, so if you've already submitted
one (even if it was for a different project), you probably don't need to do it
again.
[individual CLA]: https://developers.google.com/open-source/cla/individual
[corporate CLA]: https://developers.google.com/open-source/cla/corporate
## Submitting a patch ##
1. It's generally best to start by opening a new issue describing the bug or
feature you're intending to fix. Even if you think it's relatively minor,
it's helpful to know what people are working on. Mention in the initial
issue that you are planning to work on that bug or feature so that it can
be assigned to you.
1. Follow the normal process of [forking][] the project, and setup a new
branch to work in. It's important that each group of changes be done in
separate branches in order to ensure that a pull request only includes the
commits related to that bug or feature.
1. Go makes it very simple to ensure properly formatted code, so always run
`go fmt` on your code before committing it. You should also run
[golint][] over your code. As noted in the [golint readme][], it's not
strictly necessary that your code be completely "lint-free", but this will
help you find common style issues.
1. Any significant changes should almost always be accompanied by tests. The
project already has good test coverage, so look at some of the existing
tests if you're unsure how to go about it. [gocov][] and [gocov-html][]
are invaluable tools for seeing which parts of your code aren't being
exercised by your tests.
1. Do your best to have [well-formed commit messages][] for each change.
This provides consistency throughout the project, and ensures that commit
messages are able to be formatted properly by various git tools.
1. Finally, push the commits to your fork and submit a [pull request][].
[forking]: https://help.github.com/articles/fork-a-repo
[golint]: https://github.com/golang/lint
[golint readme]: https://github.com/golang/lint/blob/master/README
[gocov]: https://github.com/axw/gocov
[gocov-html]: https://github.com/matm/gocov-html
[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits
[pull request]: https://help.github.com/articles/creating-a-pull-request


@ -1,18 +0,0 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package fuzz is a library for populating go objects with random values.
package fuzz


@ -1,605 +0,0 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fuzz
import (
"fmt"
"math/rand"
"reflect"
"regexp"
"time"
"github.com/google/gofuzz/bytesource"
"strings"
)
// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type.
type fuzzFuncMap map[reflect.Type]reflect.Value
// Fuzzer knows how to fill any object with random fields.
type Fuzzer struct {
fuzzFuncs fuzzFuncMap
defaultFuzzFuncs fuzzFuncMap
r *rand.Rand
nilChance float64
minElements int
maxElements int
maxDepth int
skipFieldPatterns []*regexp.Regexp
}
// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs,
// RandSource, NilChance, or NumElements in any order.
func New() *Fuzzer {
return NewWithSeed(time.Now().UnixNano())
}
func NewWithSeed(seed int64) *Fuzzer {
f := &Fuzzer{
defaultFuzzFuncs: fuzzFuncMap{
reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime),
},
fuzzFuncs: fuzzFuncMap{},
r: rand.New(rand.NewSource(seed)),
nilChance: .2,
minElements: 1,
maxElements: 10,
maxDepth: 100,
}
return f
}
// NewFromGoFuzz is a helper function that enables using gofuzz (this
// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous
// fuzzing. Essentially, it enables translating the fuzzing bytes from
// go-fuzz to any Go object using this library.
//
// This implementation promises a constant translation from a given slice of
// bytes to the fuzzed objects. This promise will remain over future
// versions of Go and of this library.
//
// Note: the returned Fuzzer should not be shared between multiple goroutines,
// as its deterministic output will no longer be available.
//
// Example: use go-fuzz to test the function `MyFunc(int)` in the package
// `mypackage`. Add the file: "mypacakge_fuzz.go" with the content:
//
// // +build gofuzz
// package mypacakge
// import fuzz "github.com/google/gofuzz"
// func Fuzz(data []byte) int {
// var i int
// fuzz.NewFromGoFuzz(data).Fuzz(&i)
// MyFunc(i)
// return 0
// }
func NewFromGoFuzz(data []byte) *Fuzzer {
return New().RandSource(bytesource.New(data))
}
// Funcs adds each entry in fuzzFuncs as a custom fuzzing function.
//
// Each entry in fuzzFuncs must be a function taking two parameters.
// The first parameter must be a pointer or map. It is the variable that
// function will fill with random data. The second parameter must be a
// fuzz.Continue, which will provide a source of randomness and a way
// to automatically continue fuzzing smaller pieces of the first parameter.
//
// These functions are called sensibly, e.g., if you wanted custom string
// fuzzing, the function `func(s *string, c fuzz.Continue)` would get
// called and passed the address of strings. Maps and pointers will always
// be made/new'd for you, ignoring the NilChange option. For slices, it
// doesn't make much sense to pre-create them--Fuzzer doesn't know how
// long you want your slice--so take a pointer to a slice, and make it
// yourself. (If you don't want your map/pointer type pre-made, take a
// pointer to it, and make it yourself.) See the examples for a range of
// custom functions.
func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer {
for i := range fuzzFuncs {
v := reflect.ValueOf(fuzzFuncs[i])
if v.Kind() != reflect.Func {
panic("Need only funcs!")
}
t := v.Type()
if t.NumIn() != 2 || t.NumOut() != 0 {
panic("Need 2 in and 0 out params!")
}
argT := t.In(0)
switch argT.Kind() {
case reflect.Ptr, reflect.Map:
default:
panic("fuzzFunc must take pointer or map type")
}
if t.In(1) != reflect.TypeOf(Continue{}) {
panic("fuzzFunc's second parameter must be type fuzz.Continue")
}
f.fuzzFuncs[argT] = v
}
return f
}
// RandSource causes f to get values from the given source of randomness.
// Use if you want deterministic fuzzing.
func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer {
f.r = rand.New(s)
return f
}
// NilChance sets the probability of creating a nil pointer, map, or slice to
// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive.
func (f *Fuzzer) NilChance(p float64) *Fuzzer {
if p < 0 || p > 1 {
panic("p should be between 0 and 1, inclusive.")
}
f.nilChance = p
return f
}
// NumElements sets the minimum and maximum number of elements that will be
// added to a non-nil map or slice.
func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer {
if atLeast > atMost {
panic("atLeast must be <= atMost")
}
if atLeast < 0 {
panic("atLeast must be >= 0")
}
f.minElements = atLeast
f.maxElements = atMost
return f
}
func (f *Fuzzer) genElementCount() int {
if f.minElements == f.maxElements {
return f.minElements
}
return f.minElements + f.r.Intn(f.maxElements-f.minElements+1)
}
func (f *Fuzzer) genShouldFill() bool {
return f.r.Float64() >= f.nilChance
}
// MaxDepth sets the maximum number of recursive fuzz calls that will be made
// before stopping. This includes struct members, pointers, and map and slice
// elements.
func (f *Fuzzer) MaxDepth(d int) *Fuzzer {
f.maxDepth = d
return f
}
// Skip fields which match the supplied pattern. Call this multiple times if needed
// This is useful to skip XXX_ fields generated by protobuf
func (f *Fuzzer) SkipFieldsWithPattern(pattern *regexp.Regexp) *Fuzzer {
f.skipFieldPatterns = append(f.skipFieldPatterns, pattern)
return f
}
// Fuzz recursively fills all of obj's fields with something random. First
// this tries to find a custom fuzz function (see Funcs). If there is no
// custom function this tests whether the object implements fuzz.Interface and,
// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if
// there is a default fuzz function provided by this package. If all of that
// fails, this will generate random values for all primitive fields and then
// recurse for all non-primitives.
//
// This is safe for cyclic or tree-like structs, up to a limit. Use the
// MaxDepth method to adjust how deep you need it to recurse.
//
// obj must be a pointer. Only exported (public) fields can be set (thanks,
// golang :/ ) Intended for tests, so will panic on bad input or unimplemented
// fields.
func (f *Fuzzer) Fuzz(obj interface{}) {
v := reflect.ValueOf(obj)
if v.Kind() != reflect.Ptr {
panic("needed ptr!")
}
v = v.Elem()
f.fuzzWithContext(v, 0)
}
// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for
// obj's type will not be called and obj will not be tested for fuzz.Interface
// conformance. This applies only to obj and not other instances of obj's
// type.
// Not safe for cyclic or tree-like structs!
// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ )
// Intended for tests, so will panic on bad input or unimplemented fields.
func (f *Fuzzer) FuzzNoCustom(obj interface{}) {
v := reflect.ValueOf(obj)
if v.Kind() != reflect.Ptr {
panic("needed ptr!")
}
v = v.Elem()
f.fuzzWithContext(v, flagNoCustomFuzz)
}
const (
// Do not try to find a custom fuzz function. Does not apply recursively.
flagNoCustomFuzz uint64 = 1 << iota
)
func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) {
fc := &fuzzerContext{fuzzer: f}
fc.doFuzz(v, flags)
}
// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer
// be thread-safe.
type fuzzerContext struct {
fuzzer *Fuzzer
curDepth int
}
func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) {
if fc.curDepth >= fc.fuzzer.maxDepth {
return
}
fc.curDepth++
defer func() { fc.curDepth-- }()
if !v.CanSet() {
return
}
if flags&flagNoCustomFuzz == 0 {
// Check for both pointer and non-pointer custom functions.
if v.CanAddr() && fc.tryCustom(v.Addr()) {
return
}
if fc.tryCustom(v) {
return
}
}
if fn, ok := fillFuncMap[v.Kind()]; ok {
fn(v, fc.fuzzer.r)
return
}
switch v.Kind() {
case reflect.Map:
if fc.fuzzer.genShouldFill() {
v.Set(reflect.MakeMap(v.Type()))
n := fc.fuzzer.genElementCount()
for i := 0; i < n; i++ {
key := reflect.New(v.Type().Key()).Elem()
fc.doFuzz(key, 0)
val := reflect.New(v.Type().Elem()).Elem()
fc.doFuzz(val, 0)
v.SetMapIndex(key, val)
}
return
}
v.Set(reflect.Zero(v.Type()))
case reflect.Ptr:
if fc.fuzzer.genShouldFill() {
v.Set(reflect.New(v.Type().Elem()))
fc.doFuzz(v.Elem(), 0)
return
}
v.Set(reflect.Zero(v.Type()))
case reflect.Slice:
if fc.fuzzer.genShouldFill() {
n := fc.fuzzer.genElementCount()
v.Set(reflect.MakeSlice(v.Type(), n, n))
for i := 0; i < n; i++ {
fc.doFuzz(v.Index(i), 0)
}
return
}
v.Set(reflect.Zero(v.Type()))
case reflect.Array:
if fc.fuzzer.genShouldFill() {
n := v.Len()
for i := 0; i < n; i++ {
fc.doFuzz(v.Index(i), 0)
}
return
}
v.Set(reflect.Zero(v.Type()))
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
skipField := false
fieldName := v.Type().Field(i).Name
for _, pattern := range fc.fuzzer.skipFieldPatterns {
if pattern.MatchString(fieldName) {
skipField = true
break
}
}
if !skipField {
fc.doFuzz(v.Field(i), 0)
}
}
case reflect.Chan:
fallthrough
case reflect.Func:
fallthrough
case reflect.Interface:
fallthrough
default:
panic(fmt.Sprintf("Can't handle %#v", v.Interface()))
}
}
// tryCustom searches for custom handlers, and returns true iff it finds a match
// and successfully randomizes v.
func (fc *fuzzerContext) tryCustom(v reflect.Value) bool {
// First: see if we have a fuzz function for it.
doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()]
if !ok {
// Second: see if it can fuzz itself.
if v.CanInterface() {
intf := v.Interface()
if fuzzable, ok := intf.(Interface); ok {
fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r})
return true
}
}
// Finally: see if there is a default fuzz function.
doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()]
if !ok {
return false
}
}
switch v.Kind() {
case reflect.Ptr:
if v.IsNil() {
if !v.CanSet() {
return false
}
v.Set(reflect.New(v.Type().Elem()))
}
case reflect.Map:
if v.IsNil() {
if !v.CanSet() {
return false
}
v.Set(reflect.MakeMap(v.Type()))
}
default:
return false
}
doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{
fc: fc,
Rand: fc.fuzzer.r,
})})
return true
}
// Interface represents an object that knows how to fuzz itself. Any time we
// find a type that implements this interface we will delegate the act of
// fuzzing itself.
type Interface interface {
Fuzz(c Continue)
}
// Continue can be passed to custom fuzzing functions to allow them to use
// the correct source of randomness and to continue fuzzing their members.
type Continue struct {
fc *fuzzerContext
// For convenience, Continue implements rand.Rand via embedding.
// Use this for generating any randomness if you want your fuzzing
// to be repeatable for a given seed.
*rand.Rand
}
// Fuzz continues fuzzing obj. obj must be a pointer.
func (c Continue) Fuzz(obj interface{}) {
v := reflect.ValueOf(obj)
if v.Kind() != reflect.Ptr {
panic("needed ptr!")
}
v = v.Elem()
c.fc.doFuzz(v, 0)
}
// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for
// obj's type will not be called and obj will not be tested for fuzz.Interface
// conformance. This applies only to obj and not other instances of obj's
// type.
func (c Continue) FuzzNoCustom(obj interface{}) {
v := reflect.ValueOf(obj)
if v.Kind() != reflect.Ptr {
panic("needed ptr!")
}
v = v.Elem()
c.fc.doFuzz(v, flagNoCustomFuzz)
}
// RandString makes a random string up to 20 characters long. The returned string
// may include a variety of (valid) UTF-8 encodings.
func (c Continue) RandString() string {
return randString(c.Rand)
}
// RandUint64 makes random 64 bit numbers.
// Weirdly, rand doesn't have a function that gives you 64 random bits.
func (c Continue) RandUint64() uint64 {
return randUint64(c.Rand)
}
// RandBool returns true or false randomly.
func (c Continue) RandBool() bool {
return randBool(c.Rand)
}
func fuzzInt(v reflect.Value, r *rand.Rand) {
v.SetInt(int64(randUint64(r)))
}
func fuzzUint(v reflect.Value, r *rand.Rand) {
v.SetUint(randUint64(r))
}
func fuzzTime(t *time.Time, c Continue) {
var sec, nsec int64
// Allow for about 1000 years of random time values, which keeps things
// like JSON parsing reasonably happy.
sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60)
c.Fuzz(&nsec)
*t = time.Unix(sec, nsec)
}
var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
reflect.Bool: func(v reflect.Value, r *rand.Rand) {
v.SetBool(randBool(r))
},
reflect.Int: fuzzInt,
reflect.Int8: fuzzInt,
reflect.Int16: fuzzInt,
reflect.Int32: fuzzInt,
reflect.Int64: fuzzInt,
reflect.Uint: fuzzUint,
reflect.Uint8: fuzzUint,
reflect.Uint16: fuzzUint,
reflect.Uint32: fuzzUint,
reflect.Uint64: fuzzUint,
reflect.Uintptr: fuzzUint,
reflect.Float32: func(v reflect.Value, r *rand.Rand) {
v.SetFloat(float64(r.Float32()))
},
reflect.Float64: func(v reflect.Value, r *rand.Rand) {
v.SetFloat(r.Float64())
},
reflect.Complex64: func(v reflect.Value, r *rand.Rand) {
v.SetComplex(complex128(complex(r.Float32(), r.Float32())))
},
reflect.Complex128: func(v reflect.Value, r *rand.Rand) {
v.SetComplex(complex(r.Float64(), r.Float64()))
},
reflect.String: func(v reflect.Value, r *rand.Rand) {
v.SetString(randString(r))
},
reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) {
panic("unimplemented")
},
}
// randBool returns true or false randomly.
func randBool(r *rand.Rand) bool {
return r.Int31()&(1<<30) == 0
}
type int63nPicker interface {
Int63n(int64) int64
}
// UnicodeRange describes a sequential range of unicode characters.
// Last must be numerically greater than First.
type UnicodeRange struct {
First, Last rune
}
// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters.
// To be useful, each range must have at least one character (First <= Last) and
// there must be at least one range.
type UnicodeRanges []UnicodeRange
// choose returns a random unicode character from the given range, using the
// given randomness source.
func (ur UnicodeRange) choose(r int63nPicker) rune {
count := int64(ur.Last - ur.First + 1)
return ur.First + rune(r.Int63n(count))
}
// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings.
// Each character is selected from the range ur. If there are no characters
// in the range (cr.Last < cr.First), this will panic.
func (ur UnicodeRange) CustomStringFuzzFunc() func(s *string, c Continue) {
ur.check()
return func(s *string, c Continue) {
*s = ur.randString(c.Rand)
}
}
// check is a function that used to check whether the first of ur(UnicodeRange)
// is greater than the last one.
func (ur UnicodeRange) check() {
if ur.Last < ur.First {
panic("The last encoding must be greater than the first one.")
}
}
// randString of UnicodeRange makes a random string up to 20 characters long.
// Each character is selected form ur(UnicodeRange).
func (ur UnicodeRange) randString(r *rand.Rand) string {
n := r.Intn(20)
sb := strings.Builder{}
sb.Grow(n)
for i := 0; i < n; i++ {
sb.WriteRune(ur.choose(r))
}
return sb.String()
}
// defaultUnicodeRanges sets a default unicode range when user do not set
// CustomStringFuzzFunc() but wants fuzz string.
var defaultUnicodeRanges = UnicodeRanges{
{' ', '~'}, // ASCII characters
{'\u00a0', '\u02af'}, // Multi-byte encoded characters
{'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
}
// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings.
// Each character is selected from one of the ranges of ur(UnicodeRanges).
// Each range has an equal probability of being chosen. If there are no ranges,
// or a selected range has no characters (.Last < .First), this will panic.
// Do not modify any of the ranges in ur after calling this function.
func (ur UnicodeRanges) CustomStringFuzzFunc() func(s *string, c Continue) {
// Check unicode ranges slice is empty.
if len(ur) == 0 {
panic("UnicodeRanges is empty.")
}
// if not empty, each range should be checked.
for i := range ur {
ur[i].check()
}
return func(s *string, c Continue) {
*s = ur.randString(c.Rand)
}
}
// randString of UnicodeRanges makes a random string up to 20 characters long.
// Each character is selected form one of the ranges of ur(UnicodeRanges),
// and each range has an equal probability of being chosen.
func (ur UnicodeRanges) randString(r *rand.Rand) string {
n := r.Intn(20)
sb := strings.Builder{}
sb.Grow(n)
for i := 0; i < n; i++ {
sb.WriteRune(ur[r.Intn(len(ur))].choose(r))
}
return sb.String()
}
// randString makes a random string up to 20 characters long. The returned string
// may include a variety of (valid) UTF-8 encodings.
func randString(r *rand.Rand) string {
return defaultUnicodeRanges.randString(r)
}
// randUint64 makes random 64 bit numbers.
// Weirdly, rand doesn't have a function that gives you 64 random bits.
func randUint64(r *rand.Rand) uint64 {
return uint64(r.Uint32())<<32 | uint64(r.Uint32())
}
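
The file above is the github.com/google/gofuzz implementation being dropped from the vendor tree; in the 0.33 dependency set its role is taken by the sigs.k8s.io/randfill fork that go.sum now pins. For reference only, a hedged sketch of how the removed API was typically used, based solely on the functions visible in the deleted file (the widget type is made up for illustration):

package main

import (
	"fmt"

	fuzz "github.com/google/gofuzz"
)

type widget struct {
	Name  string
	Count int
	Tags  []string
}

func main() {
	// Deterministic fuzzing: fixed seed, no nil pointers/maps/slices,
	// and between 1 and 3 elements per slice or map.
	f := fuzz.NewWithSeed(42).NilChance(0).NumElements(1, 3)

	var w widget
	f.Fuzz(&w) // fills exported fields with random values
	fmt.Printf("%+v\n", w)
}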


@ -21,4 +21,4 @@ limitations under the License.
// +groupName=
// Package v1 is the v1 version of the core API.
package v1 // import "k8s.io/api/core/v1"
package v1

File diff suppressed because it is too large.


@ -1103,6 +1103,11 @@ message ContainerStatus {
// +listType=map
// +listMapKey=name
repeated ResourceStatus allocatedResourcesStatus = 14;
// StopSignal reports the effective stop signal for this container
// +featureGate=ContainerStopSignals
// +optional
optional string stopSignal = 15;
}
// ContainerUser represents user identity information
@ -1194,6 +1199,7 @@ message EmptyDirVolumeSource {
}
// EndpointAddress is a tuple that describes single IP address.
// Deprecated: This API is deprecated in v1.33+.
// +structType=atomic
message EndpointAddress {
// The IP of this endpoint.
@ -1215,6 +1221,7 @@ message EndpointAddress {
}
// EndpointPort is a tuple that describes a single port.
// Deprecated: This API is deprecated in v1.33+.
// +structType=atomic
message EndpointPort {
// The name of this port. This must match the 'name' field in the
@ -1265,6 +1272,8 @@ message EndpointPort {
//
// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
// b: [ 10.10.1.1:309, 10.10.2.2:309 ]
//
// Deprecated: This API is deprecated in v1.33+.
message EndpointSubset {
// IP addresses which offer the related ports that are marked as ready. These endpoints
// should be considered safe for load balancers and clients to utilize.
@ -1298,6 +1307,11 @@ message EndpointSubset {
// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
// },
// ]
//
// Endpoints is a legacy API and does not contain information about all Service features.
// Use discoveryv1.EndpointSlice for complete information about Service endpoints.
//
// Deprecated: This API is deprecated in v1.33+. Use discoveryv1.EndpointSlice.
message Endpoints {
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
@ -1317,6 +1331,7 @@ message Endpoints {
}
// EndpointsList is a list of endpoints.
// Deprecated: This API is deprecated in v1.33+.
message EndpointsList {
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
@ -1327,9 +1342,9 @@ message EndpointsList {
repeated Endpoints items = 2;
}
// EnvFromSource represents the source of a set of ConfigMaps
// EnvFromSource represents the source of a set of ConfigMaps or Secrets
message EnvFromSource {
// An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
// Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER.
// +optional
optional string prefix = 1;
@ -2198,6 +2213,12 @@ message Lifecycle {
// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
// +optional
optional LifecycleHandler preStop = 2;
// StopSignal defines which signal will be sent to a container when it is being stopped.
// If not specified, the default is defined by the container runtime in use.
// StopSignal can only be set for Pods with a non-empty .spec.os.name
// +optional
optional string stopSignal = 3;
}
// LifecycleHandler defines a specific action that should be taken in a lifecycle
@ -2862,6 +2883,13 @@ message NodeStatus {
optional NodeFeatures features = 13;
}
// NodeSwapStatus represents swap memory information.
message NodeSwapStatus {
// Total amount of swap memory in bytes.
// +optional
optional int64 capacity = 1;
}
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
message NodeSystemInfo {
// MachineID reported by the node. For unique machine identification
@ -2897,6 +2925,9 @@ message NodeSystemInfo {
// The Architecture reported by the node
optional string architecture = 10;
// Swap Info reported by the node.
optional NodeSwapStatus swap = 11;
}
// ObjectFieldSelector selects an APIVersioned field of an object.
@ -3615,7 +3646,6 @@ message PodAffinityTerm {
// pod labels will be ignored. The default value is empty.
// The same key is forbidden to exist in both matchLabelKeys and labelSelector.
// Also, matchLabelKeys cannot be set when labelSelector isn't set.
// This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
//
// +listType=atomic
// +optional
@ -3629,7 +3659,6 @@ message PodAffinityTerm {
// pod labels will be ignored. The default value is empty.
// The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
// Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
// This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
//
// +listType=atomic
// +optional
@ -3702,6 +3731,12 @@ message PodCondition {
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
optional string type = 1;
// If set, this represents the .metadata.generation that the pod condition was set based upon.
// This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.
// +featureGate=PodObservedGenerationTracking
// +optional
optional int64 observedGeneration = 7;
// Status is the status of the condition.
// Can be True, False, Unknown.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
@ -4138,7 +4173,7 @@ message PodSpec {
// Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
// The resourceRequirements of an init container are taken into account during scheduling
// by finding the highest request/limit for each resource type, and then using the max of
// of that value or the sum of the normal containers. Limits are applied to init containers
// that value or the sum of the normal containers. Limits are applied to init containers
// in a similar fashion.
// Init containers cannot currently be added or removed.
// Cannot be updated.
@ -4487,6 +4522,12 @@ message PodSpec {
// state of a system, especially if the node that hosts the pod cannot contact the control
// plane.
message PodStatus {
// If set, this represents the .metadata.generation that the pod status was set based upon.
// This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.
// +featureGate=PodObservedGenerationTracking
// +optional
optional int64 observedGeneration = 17;
// The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle.
// The conditions array, the reason and message fields, and the individual container status
// arrays contain more detail about the pod's status.
@ -4618,6 +4659,9 @@ message PodStatus {
// Status of resources resize desired for pod's containers.
// It is empty if no resources resize is pending.
// Any changes to container resources will automatically set this to "Proposed"
// Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress.
// PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources.
// PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.
// +featureGate=InPlacePodVerticalScaling
// +optional
optional string resize = 14;
@ -5063,12 +5107,18 @@ message ReplicationControllerSpec {
// Defaults to 1.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
// +optional
// +k8s:optional
// +default=1
// +k8s:minimum=0
optional int32 replicas = 1;
// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.
// Defaults to 0 (pod will be considered available as soon as it is ready)
// +optional
// +k8s:optional
// +default=0
// +k8s:minimum=0
optional int32 minReadySeconds = 4;
// Selector is a label query over pods that should match the Replicas count.
@ -6110,13 +6160,12 @@ message ServiceSpec {
// +optional
optional string internalTrafficPolicy = 22;
// TrafficDistribution offers a way to express preferences for how traffic is
// distributed to Service endpoints. Implementations can use this field as a
// hint, but are not required to guarantee strict adherence. If the field is
// not set, the implementation will apply its default routing strategy. If set
// to "PreferClose", implementations should prioritize endpoints that are
// topologically close (e.g., same zone).
// This is a beta field and requires enabling ServiceTrafficDistribution feature.
// TrafficDistribution offers a way to express preferences for how traffic
// is distributed to Service endpoints. Implementations can use this field
// as a hint, but are not required to guarantee strict adherence. If the
// field is not set, the implementation will apply its default routing
// strategy. If set to "PreferClose", implementations should prioritize
// endpoints that are in the same zone.
// +featureGate=ServiceTrafficDistribution
// +optional
optional string trafficDistribution = 23;
@ -6411,7 +6460,6 @@ message TopologySpreadConstraint {
// - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
//
// If this value is nil, the behavior is equivalent to the Honor policy.
// This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
// +optional
optional string nodeAffinityPolicy = 6;
@ -6422,7 +6470,6 @@ message TopologySpreadConstraint {
// - Ignore: node taints are ignored. All nodes are included.
//
// If this value is nil, the behavior is equivalent to the Ignore policy.
// This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
// +optional
optional string nodeTaintsPolicy = 7;
@ -6854,7 +6901,7 @@ message VolumeSource {
// The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
// The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
// The volume will be mounted read-only (ro) and non-executable files (noexec).
// Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
// Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
// The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
// +featureGate=ImageVolume
// +optional


@ -16,6 +16,10 @@ limitations under the License.
package v1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
)
// APILifecycleIntroduced returns the release in which the API struct was introduced as int versions of major and minor for comparison.
func (in *ComponentStatus) APILifecycleIntroduced() (major, minor int) {
return 1, 0
@ -35,3 +39,23 @@ func (in *ComponentStatusList) APILifecycleIntroduced() (major, minor int) {
func (in *ComponentStatusList) APILifecycleDeprecated() (major, minor int) {
return 1, 19
}
// APILifecycleDeprecated returns the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
func (in *Endpoints) APILifecycleDeprecated() (major, minor int) {
return 1, 33
}
// APILifecycleReplacement returns the GVK of the replacement for the given API
func (in *Endpoints) APILifecycleReplacement() schema.GroupVersionKind {
return schema.GroupVersionKind{Group: "discovery.k8s.io", Version: "v1", Kind: "EndpointSlice"}
}
// APILifecycleDeprecated returns the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
func (in *EndpointsList) APILifecycleDeprecated() (major, minor int) {
return 1, 33
}
// APILifecycleReplacement returns the GVK of the replacement for the given API
func (in *EndpointsList) APILifecycleReplacement() schema.GroupVersionKind {
return schema.GroupVersionKind{Group: "discovery.k8s.io", Version: "v1", Kind: "EndpointSliceList"}
}
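
The generated lifecycle methods added above can be queried from client code to surface the new deprecation programmatically. An illustrative sketch using only the methods shown in this file:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	var ep corev1.Endpoints

	// Per the generated methods above: core/v1 Endpoints is deprecated in
	// 1.33 and replaced by discovery.k8s.io/v1 EndpointSlice.
	major, minor := ep.APILifecycleDeprecated()
	gvk := ep.APILifecycleReplacement()

	fmt.Printf("core/v1 Endpoints deprecated in %d.%d, use %s\n", major, minor, gvk)
}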


@ -217,7 +217,7 @@ type VolumeSource struct {
// The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
// The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
// The volume will be mounted read-only (ro) and non-executable files (noexec).
// Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
// Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
// The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
// +featureGate=ImageVolume
// +optional
@ -2437,9 +2437,9 @@ type SecretKeySelector struct {
Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
}
// EnvFromSource represents the source of a set of ConfigMaps
// EnvFromSource represents the source of a set of ConfigMaps or Secrets
type EnvFromSource struct {
// An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
// Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER.
// +optional
Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"`
// The ConfigMap to select from
@ -2980,6 +2980,78 @@ type LifecycleHandler struct {
Sleep *SleepAction `json:"sleep,omitempty" protobuf:"bytes,4,opt,name=sleep"`
}
// Signal defines the stop signal of containers
// +enum
type Signal string
const (
SIGABRT Signal = "SIGABRT"
SIGALRM Signal = "SIGALRM"
SIGBUS Signal = "SIGBUS"
SIGCHLD Signal = "SIGCHLD"
SIGCLD Signal = "SIGCLD"
SIGCONT Signal = "SIGCONT"
SIGFPE Signal = "SIGFPE"
SIGHUP Signal = "SIGHUP"
SIGILL Signal = "SIGILL"
SIGINT Signal = "SIGINT"
SIGIO Signal = "SIGIO"
SIGIOT Signal = "SIGIOT"
SIGKILL Signal = "SIGKILL"
SIGPIPE Signal = "SIGPIPE"
SIGPOLL Signal = "SIGPOLL"
SIGPROF Signal = "SIGPROF"
SIGPWR Signal = "SIGPWR"
SIGQUIT Signal = "SIGQUIT"
SIGSEGV Signal = "SIGSEGV"
SIGSTKFLT Signal = "SIGSTKFLT"
SIGSTOP Signal = "SIGSTOP"
SIGSYS Signal = "SIGSYS"
SIGTERM Signal = "SIGTERM"
SIGTRAP Signal = "SIGTRAP"
SIGTSTP Signal = "SIGTSTP"
SIGTTIN Signal = "SIGTTIN"
SIGTTOU Signal = "SIGTTOU"
SIGURG Signal = "SIGURG"
SIGUSR1 Signal = "SIGUSR1"
SIGUSR2 Signal = "SIGUSR2"
SIGVTALRM Signal = "SIGVTALRM"
SIGWINCH Signal = "SIGWINCH"
SIGXCPU Signal = "SIGXCPU"
SIGXFSZ Signal = "SIGXFSZ"
SIGRTMIN Signal = "SIGRTMIN"
SIGRTMINPLUS1 Signal = "SIGRTMIN+1"
SIGRTMINPLUS2 Signal = "SIGRTMIN+2"
SIGRTMINPLUS3 Signal = "SIGRTMIN+3"
SIGRTMINPLUS4 Signal = "SIGRTMIN+4"
SIGRTMINPLUS5 Signal = "SIGRTMIN+5"
SIGRTMINPLUS6 Signal = "SIGRTMIN+6"
SIGRTMINPLUS7 Signal = "SIGRTMIN+7"
SIGRTMINPLUS8 Signal = "SIGRTMIN+8"
SIGRTMINPLUS9 Signal = "SIGRTMIN+9"
SIGRTMINPLUS10 Signal = "SIGRTMIN+10"
SIGRTMINPLUS11 Signal = "SIGRTMIN+11"
SIGRTMINPLUS12 Signal = "SIGRTMIN+12"
SIGRTMINPLUS13 Signal = "SIGRTMIN+13"
SIGRTMINPLUS14 Signal = "SIGRTMIN+14"
SIGRTMINPLUS15 Signal = "SIGRTMIN+15"
SIGRTMAXMINUS14 Signal = "SIGRTMAX-14"
SIGRTMAXMINUS13 Signal = "SIGRTMAX-13"
SIGRTMAXMINUS12 Signal = "SIGRTMAX-12"
SIGRTMAXMINUS11 Signal = "SIGRTMAX-11"
SIGRTMAXMINUS10 Signal = "SIGRTMAX-10"
SIGRTMAXMINUS9 Signal = "SIGRTMAX-9"
SIGRTMAXMINUS8 Signal = "SIGRTMAX-8"
SIGRTMAXMINUS7 Signal = "SIGRTMAX-7"
SIGRTMAXMINUS6 Signal = "SIGRTMAX-6"
SIGRTMAXMINUS5 Signal = "SIGRTMAX-5"
SIGRTMAXMINUS4 Signal = "SIGRTMAX-4"
SIGRTMAXMINUS3 Signal = "SIGRTMAX-3"
SIGRTMAXMINUS2 Signal = "SIGRTMAX-2"
SIGRTMAXMINUS1 Signal = "SIGRTMAX-1"
SIGRTMAX Signal = "SIGRTMAX"
)
// Lifecycle describes actions that the management system should take in response to container lifecycle
// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
// until the action is complete, unless the container process fails, in which case the handler is aborted.
@ -3001,6 +3073,11 @@ type Lifecycle struct {
// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
// +optional
PreStop *LifecycleHandler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"`
// StopSignal defines which signal will be sent to a container when it is being stopped.
// If not specified, the default is defined by the container runtime in use.
// StopSignal can only be set for Pods with a non-empty .spec.os.name
// +optional
StopSignal *Signal `json:"stopSignal,omitempty" protobuf:"bytes,3,opt,name=stopSignal"`
}
type ConditionStatus string
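
A hedged sketch of what setting the new Lifecycle.StopSignal looks like from client code. The pod below is illustrative only; per the comments above, the field is only honoured when .spec.os.name is set, and the feature is gated (ContainerStopSignals, per the status-side annotation):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	sig := corev1.SIGUSR1

	pod := corev1.Pod{
		Spec: corev1.PodSpec{
			// StopSignal requires a non-empty .spec.os.name.
			OS: &corev1.PodOS{Name: corev1.Linux},
			Containers: []corev1.Container{{
				Name:  "app",
				Image: "example.registry/app:latest", // illustrative image
				Lifecycle: &corev1.Lifecycle{
					StopSignal: &sig,
				},
			}},
		},
	}
	fmt.Println(*pod.Spec.Containers[0].Lifecycle.StopSignal)
}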
@ -3154,6 +3231,10 @@ type ContainerStatus struct {
// +listType=map
// +listMapKey=name
AllocatedResourcesStatus []ResourceStatus `json:"allocatedResourcesStatus,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,14,rep,name=allocatedResourcesStatus"`
// StopSignal reports the effective stop signal for this container
// +featureGate=ContainerStopSignals
// +optional
StopSignal *Signal `json:"stopSignal,omitempty" protobuf:"bytes,15,opt,name=stopSignal"`
}
// ResourceStatus represents the status of a single resource allocated to a Pod.
@ -3278,6 +3359,17 @@ const (
// PodReadyToStartContainers pod sandbox is successfully configured and
// the pod is ready to launch containers.
PodReadyToStartContainers PodConditionType = "PodReadyToStartContainers"
// PodResizePending indicates that the pod has been resized, but kubelet has not
// yet allocated the resources. If both PodResizePending and PodResizeInProgress
// are set, it means that a new resize was requested in the middle of a previous
// pod resize that is still in progress.
PodResizePending PodConditionType = "PodResizePending"
// PodResizeInProgress indicates that a resize is in progress, and is present whenever
// the Kubelet has allocated resources for the resize, but has not yet actuated all of
// the required changes.
// If both PodResizePending and PodResizeInProgress are set, it means that a new resize was
// requested in the middle of a previous pod resize that is still in progress.
PodResizeInProgress PodConditionType = "PodResizeInProgress"
)
// These are reasons for a pod's transition to a condition.
@ -3301,6 +3393,18 @@ const (
// PodReasonPreemptionByScheduler reason in DisruptionTarget pod condition indicates that the
// disruption was initiated by scheduler's preemption.
PodReasonPreemptionByScheduler = "PreemptionByScheduler"
// PodReasonDeferred reason in PodResizePending pod condition indicates the proposed resize is feasible in
// theory (it fits on this node) but is not possible right now.
PodReasonDeferred = "Deferred"
// PodReasonInfeasible reason in PodResizePending pod condition indicates the proposed resize is not
// feasible and is rejected; it may not be re-evaluated
PodReasonInfeasible = "Infeasible"
// PodReasonError reason in PodResizeInProgress pod condition indicates that an error occurred while
// actuating the resize.
PodReasonError = "Error"
)
// PodCondition contains details for the current condition of this pod.
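
Since status.resize is now deprecated in favour of the two conditions added above, callers are expected to inspect pod conditions instead. A small, illustrative helper (the sample pod is made up):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// resizeState reports the in-place resize state of a pod using the
// PodResizePending / PodResizeInProgress conditions introduced above.
func resizeState(pod *corev1.Pod) string {
	for _, c := range pod.Status.Conditions {
		switch c.Type {
		case corev1.PodResizePending:
			return "pending: " + c.Reason // e.g. Deferred or Infeasible
		case corev1.PodResizeInProgress:
			return "in progress"
		}
	}
	return "none"
}

func main() {
	pod := &corev1.Pod{Status: corev1.PodStatus{
		Conditions: []corev1.PodCondition{{
			Type:   corev1.PodResizePending,
			Reason: corev1.PodReasonDeferred,
		}},
	}}
	fmt.Println(resizeState(pod))
}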
@ -3308,6 +3412,11 @@ type PodCondition struct {
// Type is the type of the condition.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"`
// If set, this represents the .metadata.generation that the pod condition was set based upon.
// This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.
// +featureGate=PodObservedGenerationTracking
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,7,opt,name=observedGeneration"`
// Status is the status of the condition.
// Can be True, False, Unknown.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
@ -3326,12 +3435,10 @@ type PodCondition struct {
Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// PodResizeStatus shows status of desired resize of a pod's containers.
// Deprecated: PodResizeStatus shows status of desired resize of a pod's containers.
type PodResizeStatus string
const (
// Pod resources resize has been requested and will be evaluated by node.
PodResizeStatusProposed PodResizeStatus = "Proposed"
// Pod resources resize has been accepted by node and is being actuated.
PodResizeStatusInProgress PodResizeStatus = "InProgress"
// Node cannot resize the pod at this time and will keep retrying.
@ -3627,7 +3734,6 @@ type PodAffinityTerm struct {
// pod labels will be ignored. The default value is empty.
// The same key is forbidden to exist in both matchLabelKeys and labelSelector.
// Also, matchLabelKeys cannot be set when labelSelector isn't set.
// This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
//
// +listType=atomic
// +optional
@ -3640,7 +3746,6 @@ type PodAffinityTerm struct {
// pod labels will be ignored. The default value is empty.
// The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
// Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
// This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
//
// +listType=atomic
// +optional
@ -3792,7 +3897,7 @@ type PodSpec struct {
// Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
// The resourceRequirements of an init container are taken into account during scheduling
// by finding the highest request/limit for each resource type, and then using the max of
// of that value or the sum of the normal containers. Limits are applied to init containers
// that value or the sum of the normal containers. Limits are applied to init containers
// in a similar fashion.
// Init containers cannot currently be added or removed.
// Cannot be updated.
@ -4301,7 +4406,6 @@ type TopologySpreadConstraint struct {
// - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
//
// If this value is nil, the behavior is equivalent to the Honor policy.
// This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
// +optional
NodeAffinityPolicy *NodeInclusionPolicy `json:"nodeAffinityPolicy,omitempty" protobuf:"bytes,6,opt,name=nodeAffinityPolicy"`
// NodeTaintsPolicy indicates how we will treat node taints when calculating
@ -4311,7 +4415,6 @@ type TopologySpreadConstraint struct {
// - Ignore: node taints are ignored. All nodes are included.
//
// If this value is nil, the behavior is equivalent to the Ignore policy.
// This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
// +optional
NodeTaintsPolicy *NodeInclusionPolicy `json:"nodeTaintsPolicy,omitempty" protobuf:"bytes,7,opt,name=nodeTaintsPolicy"`
// MatchLabelKeys is a set of pod label keys to select the pods over which
@ -4841,6 +4944,11 @@ type EphemeralContainer struct {
// state of a system, especially if the node that hosts the pod cannot contact the control
// plane.
type PodStatus struct {
// If set, this represents the .metadata.generation that the pod status was set based upon.
// This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.
// +featureGate=PodObservedGenerationTracking
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,17,opt,name=observedGeneration"`
// The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle.
// The conditions array, the reason and message fields, and the individual container status
// arrays contain more detail about the pod's status.
@ -4968,6 +5076,9 @@ type PodStatus struct {
// Status of resources resize desired for pod's containers.
// It is empty if no resources resize is pending.
// Any changes to container resources will automatically set this to "Proposed"
// Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress.
// PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources.
// PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.
// +featureGate=InPlacePodVerticalScaling
// +optional
Resize PodResizeStatus `json:"resize,omitempty" protobuf:"bytes,14,opt,name=resize,casttype=PodResizeStatus"`
@ -5099,12 +5210,18 @@ type ReplicationControllerSpec struct {
// Defaults to 1.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
// +optional
// +k8s:optional
// +default=1
// +k8s:minimum=0
Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.
// Defaults to 0 (pod will be considered available as soon as it is ready)
// +optional
// +k8s:optional
// +default=0
// +k8s:minimum=0
MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
// Selector is a label query over pods that should match the Replicas count.
@ -5334,14 +5451,27 @@ const (
// These are valid values for the TrafficDistribution field of a Service.
const (
// Indicates a preference for routing traffic to endpoints that are
// topologically proximate to the client. The interpretation of "topologically
// proximate" may vary across implementations and could encompass endpoints
// within the same node, rack, zone, or even region. Setting this value gives
// implementations permission to make different tradeoffs, e.g. optimizing for
// proximity rather than equal distribution of load. Users should not set this
// value if such tradeoffs are not acceptable.
// Indicates a preference for routing traffic to endpoints that are in the same
// zone as the client. Users should not set this value unless they have ensured
// that clients and endpoints are distributed in such a way that the "same zone"
// preference will not result in endpoints getting overloaded.
ServiceTrafficDistributionPreferClose = "PreferClose"
// Indicates a preference for routing traffic to endpoints that are in the same
// zone as the client. Users should not set this value unless they have ensured
// that clients and endpoints are distributed in such a way that the "same zone"
// preference will not result in endpoints getting overloaded.
// This is an alias for "PreferClose", but it is an Alpha feature and is only
// recognized if the PreferSameTrafficDistribution feature gate is enabled.
ServiceTrafficDistributionPreferSameZone = "PreferSameZone"
// Indicates a preference for routing traffic to endpoints that are on the same
// node as the client. Users should not set this value unless they have ensured
// that clients and endpoints are distributed in such a way that the "same node"
// preference will not result in endpoints getting overloaded.
// This is an Alpha feature and is only recognized if the
// PreferSameTrafficDistribution feature gate is enabled.
ServiceTrafficDistributionPreferSameNode = "PreferSameNode"
)
// These are the valid conditions of a service.
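
An illustrative sketch of setting the trafficDistribution field from client code. PreferClose is the long-standing value; per the comments above, PreferSameZone and PreferSameNode are alpha additions that require the PreferSameTrafficDistribution feature gate on the cluster:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	td := corev1.ServiceTrafficDistributionPreferClose

	svc := corev1.Service{
		Spec: corev1.ServiceSpec{
			// Hint to implementations to prefer endpoints in the same zone.
			TrafficDistribution: &td,
		},
	}
	fmt.Println(*svc.Spec.TrafficDistribution)
}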
@ -5689,13 +5819,12 @@ type ServiceSpec struct {
// +optional
InternalTrafficPolicy *ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty" protobuf:"bytes,22,opt,name=internalTrafficPolicy"`
// TrafficDistribution offers a way to express preferences for how traffic is
// distributed to Service endpoints. Implementations can use this field as a
// hint, but are not required to guarantee strict adherence. If the field is
// not set, the implementation will apply its default routing strategy. If set
// to "PreferClose", implementations should prioritize endpoints that are
// topologically close (e.g., same zone).
// This is a beta field and requires enabling ServiceTrafficDistribution feature.
// TrafficDistribution offers a way to express preferences for how traffic
// is distributed to Service endpoints. Implementations can use this field
// as a hint, but are not required to guarantee strict adherence. If the
// field is not set, the implementation will apply its default routing
// strategy. If set to "PreferClose", implementations should prioritize
// endpoints that are in the same zone.
// +featureGate=ServiceTrafficDistribution
// +optional
TrafficDistribution *string `json:"trafficDistribution,omitempty" protobuf:"bytes,23,opt,name=trafficDistribution"`
@ -5888,6 +6017,11 @@ type ServiceAccountList struct {
// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
// },
// ]
//
// Endpoints is a legacy API and does not contain information about all Service features.
// Use discoveryv1.EndpointSlice for complete information about Service endpoints.
//
// Deprecated: This API is deprecated in v1.33+. Use discoveryv1.EndpointSlice.
type Endpoints struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
@ -5920,6 +6054,8 @@ type Endpoints struct {
//
// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
// b: [ 10.10.1.1:309, 10.10.2.2:309 ]
//
// Deprecated: This API is deprecated in v1.33+.
type EndpointSubset struct {
// IP addresses which offer the related ports that are marked as ready. These endpoints
// should be considered safe for load balancers and clients to utilize.
@ -5939,6 +6075,7 @@ type EndpointSubset struct {
}
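As a hedged illustration of the replacement API named in the deprecation notes above, a minimal EndpointSlice roughly equivalent to the "mysvc" example; the object name is arbitrary and the addresses/ports are the sample values from the comment.
package main

import (
	corev1 "k8s.io/api/core/v1"
	discoveryv1 "k8s.io/api/discovery/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// endpointSliceForMysvc sketches the EndpointSlice shape that replaces the
// legacy Endpoints object for the "mysvc" example above.
func endpointSliceForMysvc() discoveryv1.EndpointSlice {
	portA := int32(8675)
	nameA := "a"
	tcp := corev1.ProtocolTCP
	return discoveryv1.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "mysvc-abc12",
			Labels: map[string]string{discoveryv1.LabelServiceName: "mysvc"},
		},
		AddressType: discoveryv1.AddressTypeIPv4,
		Endpoints:   []discoveryv1.Endpoint{{Addresses: []string{"10.10.1.1", "10.10.2.2"}}},
		Ports:       []discoveryv1.EndpointPort{{Name: &nameA, Port: &portA, Protocol: &tcp}},
	}
}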
// EndpointAddress is a tuple that describes single IP address.
// Deprecated: This API is deprecated in v1.33+.
// +structType=atomic
type EndpointAddress struct {
// The IP of this endpoint.
@ -5957,6 +6094,7 @@ type EndpointAddress struct {
}
// EndpointPort is a tuple that describes a single port.
// Deprecated: This API is deprecated in v1.33+.
// +structType=atomic
type EndpointPort struct {
// The name of this port. This must match the 'name' field in the
@ -5998,6 +6136,7 @@ type EndpointPort struct {
// +k8s:prerelease-lifecycle-gen:introduced=1.0
// EndpointsList is a list of endpoints.
// Deprecated: This API is deprecated in v1.33+.
type EndpointsList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
@ -6166,6 +6305,15 @@ type NodeSystemInfo struct {
OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"`
// The Architecture reported by the node
Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"`
// Swap Info reported by the node.
Swap *NodeSwapStatus `json:"swap,omitempty" protobuf:"bytes,11,opt,name=swap"`
}
// NodeSwapStatus represents swap memory information.
type NodeSwapStatus struct {
// Total amount of swap memory in bytes.
// +optional
Capacity *int64 `json:"capacity,omitempty" protobuf:"varint,1,opt,name=capacity"`
}
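For illustration, a minimal sketch of reading the new field from a Node; the nil checks matter because both the struct and the capacity are optional pointers, and the helper name is illustrative.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// printSwapCapacity prints the node's reported swap capacity, if any.
func printSwapCapacity(node *corev1.Node) {
	if swap := node.Status.NodeInfo.Swap; swap != nil && swap.Capacity != nil {
		fmt.Printf("node %s: %d bytes of swap\n", node.Name, *swap.Capacity)
	}
}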
// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.
@ -7267,6 +7415,9 @@ const (
ResourceQuotaScopePriorityClass ResourceQuotaScope = "PriorityClass"
// Match all pod objects that have cross-namespace pod (anti)affinity mentioned.
ResourceQuotaScopeCrossNamespacePodAffinity ResourceQuotaScope = "CrossNamespacePodAffinity"
// Match all pvc objects that have volume attributes class mentioned.
ResourceQuotaScopeVolumeAttributesClass ResourceQuotaScope = "VolumeAttributesClass"
)
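A hedged sketch of a quota object using the new scope; only the shape of the object is shown, and whether the API server accepts this scope in "scopes" rather than "scopeSelector" is not asserted here.
package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// pvcQuotaForVolumeAttributesClass limits PVCs that reference a volume
// attributes class, using the new ResourceQuotaScopeVolumeAttributesClass scope.
func pvcQuotaForVolumeAttributesClass() corev1.ResourceQuota {
	return corev1.ResourceQuota{
		Spec: corev1.ResourceQuotaSpec{
			Hard:   corev1.ResourceList{corev1.ResourcePersistentVolumeClaims: resource.MustParse("10")},
			Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeVolumeAttributesClass},
		},
	}
}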
// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.


@ -474,6 +474,7 @@ var map_ContainerStatus = map[string]string{
"volumeMounts": "Status of volume mounts.",
"user": "User represents user identity information initially attached to the first process of the container",
"allocatedResourcesStatus": "AllocatedResourcesStatus represents the status of various resources allocated for this Pod.",
"stopSignal": "StopSignal reports the effective stop signal for this container",
}
func (ContainerStatus) SwaggerDoc() map[string]string {
@ -540,7 +541,7 @@ func (EmptyDirVolumeSource) SwaggerDoc() map[string]string {
}
var map_EndpointAddress = map[string]string{
"": "EndpointAddress is a tuple that describes single IP address.",
"": "EndpointAddress is a tuple that describes single IP address. Deprecated: This API is deprecated in v1.33+.",
"ip": "The IP of this endpoint. May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), or link-local multicast (224.0.0.0/24 or ff02::/16).",
"hostname": "The Hostname of this endpoint",
"nodeName": "Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.",
@ -552,7 +553,7 @@ func (EndpointAddress) SwaggerDoc() map[string]string {
}
var map_EndpointPort = map[string]string{
"": "EndpointPort is a tuple that describes a single port.",
"": "EndpointPort is a tuple that describes a single port. Deprecated: This API is deprecated in v1.33+.",
"name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.",
"port": "The port number of the endpoint.",
"protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
@ -564,7 +565,7 @@ func (EndpointPort) SwaggerDoc() map[string]string {
}
var map_EndpointSubset = map[string]string{
"": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n\n\t{\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t}\n\nThe resulting set of endpoints can be viewed as:\n\n\ta: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n\tb: [ 10.10.1.1:309, 10.10.2.2:309 ]",
"": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n\n\t{\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t}\n\nThe resulting set of endpoints can be viewed as:\n\n\ta: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n\tb: [ 10.10.1.1:309, 10.10.2.2:309 ]\n\nDeprecated: This API is deprecated in v1.33+.",
"addresses": "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.",
"notReadyAddresses": "IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check.",
"ports": "Port numbers available on the related IP addresses.",
@ -575,7 +576,7 @@ func (EndpointSubset) SwaggerDoc() map[string]string {
}
var map_Endpoints = map[string]string{
"": "Endpoints is a collection of endpoints that implement the actual service. Example:\n\n\t Name: \"mysvc\",\n\t Subsets: [\n\t {\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t },\n\t {\n\t Addresses: [{\"ip\": \"10.10.3.3\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n\t },\n\t]",
"": "Endpoints is a collection of endpoints that implement the actual service. Example:\n\n\t Name: \"mysvc\",\n\t Subsets: [\n\t {\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t },\n\t {\n\t Addresses: [{\"ip\": \"10.10.3.3\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n\t },\n\t]\n\nEndpoints is a legacy API and does not contain information about all Service features. Use discoveryv1.EndpointSlice for complete information about Service endpoints.\n\nDeprecated: This API is deprecated in v1.33+. Use discoveryv1.EndpointSlice.",
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"subsets": "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.",
}
@ -585,7 +586,7 @@ func (Endpoints) SwaggerDoc() map[string]string {
}
var map_EndpointsList = map[string]string{
"": "EndpointsList is a list of endpoints.",
"": "EndpointsList is a list of endpoints. Deprecated: This API is deprecated in v1.33+.",
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
"items": "List of endpoints.",
}
@ -595,8 +596,8 @@ func (EndpointsList) SwaggerDoc() map[string]string {
}
var map_EnvFromSource = map[string]string{
"": "EnvFromSource represents the source of a set of ConfigMaps",
"prefix": "An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.",
"": "EnvFromSource represents the source of a set of ConfigMaps or Secrets",
"prefix": "Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER.",
"configMapRef": "The ConfigMap to select from",
"secretRef": "The Secret to select from",
}
@ -957,9 +958,10 @@ func (KeyToPath) SwaggerDoc() map[string]string {
}
var map_Lifecycle = map[string]string{
"": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.",
"postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
"preStop": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
"": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.",
"postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
"preStop": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
"stopSignal": "StopSignal defines which signal will be sent to a container when it is being stopped. If not specified, the default is defined by the container runtime in use. StopSignal can only be set for Pods with a non-empty .spec.os.name",
}
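For illustration, a minimal sketch of setting the new stopSignal hook; the signal value is an arbitrary example, and per the description above it only applies to Pods with a non-empty spec.os.name.
package main

import corev1 "k8s.io/api/core/v1"

// containerWithStopSignal asks the runtime to stop the container with SIGUSR1
// instead of its default stop signal.
func containerWithStopSignal() corev1.Container {
	sig := corev1.Signal("SIGUSR1") // illustrative value
	return corev1.Container{
		Name:      "app",
		Image:     "registry.example/app:latest",
		Lifecycle: &corev1.Lifecycle{StopSignal: &sig},
	}
}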
func (Lifecycle) SwaggerDoc() map[string]string {
@ -1335,6 +1337,15 @@ func (NodeStatus) SwaggerDoc() map[string]string {
return map_NodeStatus
}
var map_NodeSwapStatus = map[string]string{
"": "NodeSwapStatus represents swap memory information.",
"capacity": "Total amount of swap memory in bytes.",
}
func (NodeSwapStatus) SwaggerDoc() map[string]string {
return map_NodeSwapStatus
}
var map_NodeSystemInfo = map[string]string{
"": "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.",
"machineID": "MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html",
@ -1347,6 +1358,7 @@ var map_NodeSystemInfo = map[string]string{
"kubeProxyVersion": "Deprecated: KubeProxy Version reported by the node.",
"operatingSystem": "The Operating System reported by the node",
"architecture": "The Architecture reported by the node",
"swap": "Swap Info reported by the node.",
}
func (NodeSystemInfo) SwaggerDoc() map[string]string {
@ -1583,8 +1595,8 @@ var map_PodAffinityTerm = map[string]string{
"namespaces": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".",
"topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.",
"namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces.",
"matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).",
"mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).",
"matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set.",
"mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set.",
}
func (PodAffinityTerm) SwaggerDoc() map[string]string {
@ -1617,6 +1629,7 @@ func (PodAttachOptions) SwaggerDoc() map[string]string {
var map_PodCondition = map[string]string{
"": "PodCondition contains details for the current condition of this pod.",
"type": "Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
"observedGeneration": "If set, this represents the .metadata.generation that the pod condition was set based upon. This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.",
"status": "Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
"lastProbeTime": "Last time we probed the condition.",
"lastTransitionTime": "Last time the condition transitioned from one status to another.",
@ -1799,7 +1812,7 @@ func (PodSignature) SwaggerDoc() map[string]string {
var map_PodSpec = map[string]string{
"": "PodSpec is a description of a pod.",
"volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes",
"initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/",
"initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/",
"containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.",
"ephemeralContainers": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.",
"restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy",
@ -1846,6 +1859,7 @@ func (PodSpec) SwaggerDoc() map[string]string {
var map_PodStatus = map[string]string{
"": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.",
"observedGeneration": "If set, this represents the .metadata.generation that the pod status was set based upon. This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.",
"phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase",
"conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
"message": "A human readable message indicating details about why the pod is in this condition.",
@ -1860,7 +1874,7 @@ var map_PodStatus = map[string]string{
"containerStatuses": "Statuses of containers in this pod. Each container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
"qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes",
"ephemeralContainerStatuses": "Statuses for any ephemeral containers that have run in this pod. Each ephemeral container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
"resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\"",
"resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\" Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress. PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources. PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.",
"resourceClaimStatuses": "Status of resource claims.",
}
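A hedged sketch of consuming the replacement conditions named in the resize deprecation note above; only the condition type strings quoted there are assumed, not their reason or status semantics.
package main

import corev1 "k8s.io/api/core/v1"

// resizeState reports whether a resize is pending or in progress by looking for
// the pod conditions that replace status.resize.
func resizeState(pod *corev1.Pod) (pending, inProgress bool) {
	for _, cond := range pod.Status.Conditions {
		switch string(cond.Type) {
		case "PodResizePending":
			pending = true
		case "PodResizeInProgress":
			inProgress = true
		}
	}
	return pending, inProgress
}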
@ -2487,7 +2501,7 @@ var map_ServiceSpec = map[string]string{
"allocateLoadBalancerNodePorts": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type.",
"loadBalancerClass": "loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \"internal-vip\" or \"example.com/internal-vip\". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.",
"internalTrafficPolicy": "InternalTrafficPolicy describes how nodes distribute service traffic they receive on the ClusterIP. If set to \"Local\", the proxy will assume that pods only want to talk to endpoints of the service on the same node as the pod, dropping the traffic if there are no local endpoints. The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features).",
"trafficDistribution": "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are topologically close (e.g., same zone). This is a beta field and requires enabling ServiceTrafficDistribution feature.",
"trafficDistribution": "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are in the same zone.",
}
func (ServiceSpec) SwaggerDoc() map[string]string {
@ -2619,8 +2633,8 @@ var map_TopologySpreadConstraint = map[string]string{
"whenUnsatisfiable": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ",
"labelSelector": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.",
"minDomains": "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: ",
"nodeAffinityPolicy": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
"nodeTaintsPolicy": "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
"nodeAffinityPolicy": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy.",
"nodeTaintsPolicy": "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy.",
"matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.\n\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).",
}
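For illustration, a minimal same-zone spread constraint using the fields described above; the label selector value is an arbitrary example.
package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// zoneSpread spreads matching pods across zones with at most one pod of skew,
// honoring the pod's nodeAffinity/nodeSelector when computing the skew.
func zoneSpread() corev1.TopologySpreadConstraint {
	honor := corev1.NodeInclusionPolicyHonor
	return corev1.TopologySpreadConstraint{
		MaxSkew:            1,
		TopologyKey:        "topology.kubernetes.io/zone",
		WhenUnsatisfiable:  corev1.DoNotSchedule,
		LabelSelector:      &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
		NodeAffinityPolicy: &honor,
	}
}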
@ -2760,7 +2774,7 @@ var map_VolumeSource = map[string]string{
"storageos": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.",
"csi": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.",
"ephemeral": "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.",
"image": "image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. The volume is resolved at pod startup depending on which PullPolicy value is provided:\n\n- Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.\n\nThe volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.",
"image": "image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. The volume is resolved at pod startup depending on which PullPolicy value is provided:\n\n- Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.\n\nThe volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.",
}
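For illustration, a minimal image volume as described in the entry above; the reference and pull policy are example values.
package main

import corev1 "k8s.io/api/core/v1"

// modelVolume mounts an OCI artifact read-only into the pod via the image
// volume source described above.
func modelVolume() corev1.Volume {
	return corev1.Volume{
		Name: "model",
		VolumeSource: corev1.VolumeSource{
			Image: &corev1.ImageVolumeSource{
				Reference:  "registry.example/models/demo:latest",
				PullPolicy: corev1.PullIfNotPresent,
			},
		},
	}
}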
func (VolumeSource) SwaggerDoc() map[string]string {


@ -1055,6 +1055,11 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.StopSignal != nil {
in, out := &in.StopSignal, &out.StopSignal
*out = new(Signal)
**out = **in
}
return
}
@ -2101,6 +2106,11 @@ func (in *Lifecycle) DeepCopyInto(out *Lifecycle) {
*out = new(LifecycleHandler)
(*in).DeepCopyInto(*out)
}
if in.StopSignal != nil {
in, out := &in.StopSignal, &out.StopSignal
*out = new(Signal)
**out = **in
}
return
}
@ -3002,7 +3012,7 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
copy(*out, *in)
}
out.DaemonEndpoints = in.DaemonEndpoints
out.NodeInfo = in.NodeInfo
in.NodeInfo.DeepCopyInto(&out.NodeInfo)
if in.Images != nil {
in, out := &in.Images, &out.Images
*out = make([]ContainerImage, len(*in))
@ -3050,9 +3060,35 @@ func (in *NodeStatus) DeepCopy() *NodeStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSwapStatus) DeepCopyInto(out *NodeSwapStatus) {
*out = *in
if in.Capacity != nil {
in, out := &in.Capacity, &out.Capacity
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSwapStatus.
func (in *NodeSwapStatus) DeepCopy() *NodeSwapStatus {
if in == nil {
return nil
}
out := new(NodeSwapStatus)
in.DeepCopyInto(out)
return out
}
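A small usage note on the generated copier, as a sketch: Capacity is cloned into a fresh allocation, so mutating the copy leaves the original untouched.
package main

import corev1 "k8s.io/api/core/v1"

// copyIsIndependent demonstrates that DeepCopy clones the Capacity pointer.
func copyIsIndependent() bool {
	capacity := int64(1 << 30)
	orig := corev1.NodeSwapStatus{Capacity: &capacity}
	cp := orig.DeepCopy()
	*cp.Capacity = 0
	return *orig.Capacity == 1<<30 // true
}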
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeSystemInfo) DeepCopyInto(out *NodeSystemInfo) {
*out = *in
if in.Swap != nil {
in, out := &in.Swap, &out.Swap
*out = new(NodeSwapStatus)
(*in).DeepCopyInto(*out)
}
return
}


@ -20,4 +20,4 @@ limitations under the License.
// +k8s:prerelease-lifecycle-gen=true
// +groupName=rbac.authorization.k8s.io
package v1 // import "k8s.io/api/rbac/v1"
package v1


@ -20,4 +20,4 @@ limitations under the License.
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
package v1 // import "k8s.io/api/storage/v1"
package v1


@ -609,111 +609,114 @@ func init() {
}
var fileDescriptor_662262cc70094b41 = []byte{
// 1655 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xbd, 0x6f, 0x1b, 0xc9,
0x15, 0xd7, 0x8a, 0xd4, 0xd7, 0x50, 0xb2, 0xa4, 0x91, 0xe4, 0x30, 0x2a, 0x48, 0x61, 0xed, 0x24,
0xb2, 0x13, 0x2f, 0x6d, 0xd9, 0x31, 0x0c, 0x07, 0x2e, 0xb4, 0x12, 0x1d, 0x0b, 0x11, 0x25, 0x65,
0xa8, 0x18, 0x46, 0x90, 0x04, 0x1e, 0xed, 0x8e, 0xa8, 0xb1, 0xb8, 0x1f, 0xde, 0x19, 0x2a, 0x62,
0xaa, 0xa4, 0x49, 0x17, 0x20, 0x69, 0xf3, 0x57, 0x24, 0x40, 0xd2, 0x5c, 0x79, 0xc5, 0xc1, 0xd7,
0x19, 0x57, 0xb9, 0x22, 0xce, 0xbc, 0xfa, 0xae, 0xbc, 0x42, 0xd5, 0x61, 0x66, 0x87, 0xdc, 0x0f,
0x2e, 0x65, 0xa9, 0x61, 0xc7, 0x99, 0xf7, 0xde, 0xef, 0xbd, 0x99, 0xf7, 0xde, 0x6f, 0xde, 0x12,
0xfc, 0xe4, 0xf4, 0x09, 0x33, 0xa8, 0x57, 0xc1, 0x3e, 0xad, 0x30, 0xee, 0x05, 0xb8, 0x41, 0x2a,
0x67, 0x0f, 0x2a, 0x0d, 0xe2, 0x92, 0x00, 0x73, 0x62, 0x1b, 0x7e, 0xe0, 0x71, 0x0f, 0xae, 0x84,
0x6a, 0x06, 0xf6, 0xa9, 0xa1, 0xd4, 0x8c, 0xb3, 0x07, 0xab, 0xf7, 0x1a, 0x94, 0x9f, 0xb4, 0x8e,
0x0c, 0xcb, 0x73, 0x2a, 0x0d, 0xaf, 0xe1, 0x55, 0xa4, 0xf6, 0x51, 0xeb, 0x58, 0xae, 0xe4, 0x42,
0xfe, 0x0a, 0x51, 0x56, 0xf5, 0x98, 0x33, 0xcb, 0x0b, 0xb2, 0x3c, 0xad, 0x3e, 0x8a, 0x74, 0x1c,
0x6c, 0x9d, 0x50, 0x97, 0x04, 0xed, 0x8a, 0x7f, 0xda, 0x90, 0x46, 0x01, 0x61, 0x5e, 0x2b, 0xb0,
0xc8, 0xb5, 0xac, 0x58, 0xc5, 0x21, 0x1c, 0x67, 0xf9, 0xaa, 0x0c, 0xb3, 0x0a, 0x5a, 0x2e, 0xa7,
0xce, 0xa0, 0x9b, 0xc7, 0x9f, 0x32, 0x60, 0xd6, 0x09, 0x71, 0x70, 0xda, 0x4e, 0xff, 0xbf, 0x06,
0x66, 0xb6, 0xea, 0x3b, 0xdb, 0x01, 0x3d, 0x23, 0x01, 0x7c, 0x0d, 0xa6, 0x45, 0x44, 0x36, 0xe6,
0xb8, 0xa8, 0xad, 0x69, 0xeb, 0x85, 0x8d, 0xfb, 0x46, 0x74, 0xbf, 0x7d, 0x60, 0xc3, 0x3f, 0x6d,
0x88, 0x0d, 0x66, 0x08, 0x6d, 0xe3, 0xec, 0x81, 0xb1, 0x7f, 0xf4, 0x86, 0x58, 0xbc, 0x46, 0x38,
0x36, 0xe1, 0xbb, 0x4e, 0x79, 0xac, 0xdb, 0x29, 0x83, 0x68, 0x0f, 0xf5, 0x51, 0xe1, 0x73, 0x90,
0x67, 0x3e, 0xb1, 0x8a, 0xe3, 0x12, 0xfd, 0xb6, 0x91, 0x99, 0x3d, 0xa3, 0x1f, 0x51, 0xdd, 0x27,
0x96, 0x39, 0xab, 0x10, 0xf3, 0x62, 0x85, 0xa4, 0xbd, 0xfe, 0x3f, 0x0d, 0xcc, 0xf5, 0xb5, 0x76,
0x29, 0xe3, 0xf0, 0x0f, 0x03, 0xb1, 0x1b, 0x57, 0x8b, 0x5d, 0x58, 0xcb, 0xc8, 0x17, 0x94, 0x9f,
0xe9, 0xde, 0x4e, 0x2c, 0xee, 0x2a, 0x98, 0xa0, 0x9c, 0x38, 0xac, 0x38, 0xbe, 0x96, 0x5b, 0x2f,
0x6c, 0xac, 0x7d, 0x2a, 0x70, 0x73, 0x4e, 0x81, 0x4d, 0xec, 0x08, 0x33, 0x14, 0x5a, 0xeb, 0x5f,
0xe5, 0x63, 0x61, 0x8b, 0xe3, 0xc0, 0xa7, 0xe0, 0x06, 0xe6, 0x1c, 0x5b, 0x27, 0x88, 0xbc, 0x6d,
0xd1, 0x80, 0xd8, 0x32, 0xf8, 0x69, 0x13, 0x76, 0x3b, 0xe5, 0x1b, 0x9b, 0x09, 0x09, 0x4a, 0x69,
0x0a, 0x5b, 0xdf, 0xb3, 0x77, 0xdc, 0x63, 0x6f, 0xdf, 0xad, 0x79, 0x2d, 0x97, 0xcb, 0x6b, 0x55,
0xb6, 0x07, 0x09, 0x09, 0x4a, 0x69, 0x42, 0x0b, 0x2c, 0x9f, 0x79, 0xcd, 0x96, 0x43, 0x76, 0xe9,
0x31, 0xb1, 0xda, 0x56, 0x93, 0xd4, 0x3c, 0x9b, 0xb0, 0x62, 0x6e, 0x2d, 0xb7, 0x3e, 0x63, 0x56,
0xba, 0x9d, 0xf2, 0xf2, 0xcb, 0x0c, 0xf9, 0x45, 0xa7, 0xbc, 0x94, 0xb1, 0x8f, 0x32, 0xc1, 0xe0,
0x33, 0x30, 0xaf, 0x2e, 0x67, 0x0b, 0xfb, 0xd8, 0xa2, 0xbc, 0x5d, 0xcc, 0xcb, 0x08, 0x97, 0xba,
0x9d, 0xf2, 0x7c, 0x3d, 0x29, 0x42, 0x69, 0x5d, 0xf8, 0x02, 0xcc, 0x1d, 0xb3, 0x5f, 0x07, 0x5e,
0xcb, 0x3f, 0xf0, 0x9a, 0xd4, 0x6a, 0x17, 0x27, 0xd6, 0xb4, 0xf5, 0x19, 0x53, 0xef, 0x76, 0xca,
0x73, 0xcf, 0xeb, 0x31, 0xc1, 0x45, 0x7a, 0x03, 0x25, 0x0d, 0xe1, 0x6b, 0x30, 0xc7, 0xbd, 0x53,
0xe2, 0x8a, 0xab, 0x23, 0x8c, 0xb3, 0xe2, 0xa4, 0x4c, 0xe3, 0xad, 0x21, 0x69, 0x3c, 0x8c, 0xe9,
0x9a, 0x2b, 0x2a, 0x93, 0x73, 0xf1, 0x5d, 0x86, 0x92, 0x80, 0x70, 0x0b, 0x2c, 0x06, 0x61, 0x5e,
0x18, 0x22, 0x7e, 0xeb, 0xa8, 0x49, 0xd9, 0x49, 0x71, 0x4a, 0x1e, 0x76, 0xa5, 0xdb, 0x29, 0x2f,
0xa2, 0xb4, 0x10, 0x0d, 0xea, 0xc3, 0x47, 0x60, 0x96, 0x91, 0x5d, 0xea, 0xb6, 0xce, 0xc3, 0x74,
0x4e, 0x4b, 0xfb, 0x85, 0x6e, 0xa7, 0x3c, 0x5b, 0xaf, 0x46, 0xfb, 0x28, 0xa1, 0xa5, 0xff, 0x57,
0x03, 0x53, 0x5b, 0xf5, 0x9d, 0x3d, 0xcf, 0x26, 0x23, 0xe8, 0xe0, 0xed, 0x44, 0x07, 0xeb, 0xc3,
0x1b, 0x41, 0xc4, 0x33, 0xb4, 0x7f, 0xbf, 0x0b, 0xfb, 0x57, 0xe8, 0x28, 0xee, 0x59, 0x03, 0x79,
0x17, 0x3b, 0x44, 0x46, 0x3d, 0x13, 0xd9, 0xec, 0x61, 0x87, 0x20, 0x29, 0x81, 0x3f, 0x05, 0x93,
0xae, 0x67, 0x93, 0x9d, 0x6d, 0xe9, 0x7b, 0xc6, 0xbc, 0xa1, 0x74, 0x26, 0xf7, 0xe4, 0x2e, 0x52,
0x52, 0x71, 0x8b, 0xdc, 0xf3, 0xbd, 0xa6, 0xd7, 0x68, 0xff, 0x86, 0xb4, 0x7b, 0x25, 0x2d, 0x6f,
0xf1, 0x30, 0xb6, 0x8f, 0x12, 0x5a, 0xf0, 0x8f, 0xa0, 0x80, 0x9b, 0x4d, 0xcf, 0xc2, 0x1c, 0x1f,
0x35, 0x89, 0xac, 0xd3, 0xc2, 0xc6, 0xdd, 0x21, 0xc7, 0x0b, 0x5b, 0x40, 0xf8, 0x45, 0x8a, 0xf8,
0x99, 0x39, 0xdf, 0xed, 0x94, 0x0b, 0x9b, 0x11, 0x04, 0x8a, 0xe3, 0xe9, 0xff, 0xd1, 0x40, 0x41,
0x1d, 0x78, 0x04, 0x74, 0xb5, 0x95, 0xa4, 0xab, 0xd2, 0xe5, 0x59, 0x1a, 0x42, 0x56, 0x7f, 0xea,
0x47, 0x2c, 0x99, 0x6a, 0x1f, 0x4c, 0xd9, 0x32, 0x55, 0xac, 0xa8, 0x49, 0xd4, 0xdb, 0x97, 0xa3,
0x2a, 0x22, 0x9c, 0x57, 0xd8, 0x53, 0xe1, 0x9a, 0xa1, 0x1e, 0x8a, 0xfe, 0x7d, 0x0e, 0xc0, 0xad,
0xfa, 0x4e, 0x8a, 0x06, 0x46, 0x50, 0xc2, 0x14, 0xcc, 0x8a, 0x52, 0xe9, 0x15, 0x83, 0x2a, 0xe5,
0x87, 0x57, 0xbc, 0x7f, 0x7c, 0x44, 0x9a, 0x75, 0xd2, 0x24, 0x16, 0xf7, 0x82, 0xb0, 0xaa, 0xf6,
0x62, 0x60, 0x28, 0x01, 0x0d, 0xb7, 0xc1, 0x42, 0x8f, 0xd5, 0x9a, 0x98, 0x31, 0x51, 0xcd, 0xc5,
0x9c, 0xac, 0xde, 0xa2, 0x0a, 0x71, 0xa1, 0x9e, 0x92, 0xa3, 0x01, 0x0b, 0xf8, 0x0a, 0x4c, 0x5b,
0x71, 0x02, 0xfd, 0x44, 0xb1, 0x18, 0xbd, 0x69, 0xc4, 0xf8, 0x6d, 0x0b, 0xbb, 0x9c, 0xf2, 0xb6,
0x39, 0x2b, 0x0a, 0xa5, 0xcf, 0xb4, 0x7d, 0x34, 0xc8, 0xc0, 0xa2, 0x83, 0xcf, 0xa9, 0xd3, 0x72,
0xc2, 0x92, 0xae, 0xd3, 0xbf, 0x10, 0x49, 0xb3, 0xd7, 0x77, 0x21, 0x69, 0xae, 0x96, 0x06, 0x43,
0x83, 0xf8, 0xfa, 0x17, 0x1a, 0xb8, 0x39, 0x98, 0xf8, 0x11, 0xb4, 0xc5, 0x5e, 0xb2, 0x2d, 0xee,
0x0c, 0x2f, 0xe0, 0x54, 0x6c, 0x43, 0x3a, 0xe4, 0x1f, 0x93, 0x60, 0x36, 0x9e, 0xbe, 0x11, 0xd4,
0xee, 0x2f, 0x41, 0xc1, 0x0f, 0xbc, 0x33, 0xca, 0xa8, 0xe7, 0x92, 0x40, 0x31, 0xe1, 0x92, 0x32,
0x29, 0x1c, 0x44, 0x22, 0x14, 0xd7, 0x83, 0x0d, 0x00, 0x7c, 0x1c, 0x60, 0x87, 0x70, 0xd1, 0xbf,
0x39, 0x79, 0xfc, 0x87, 0x43, 0x8e, 0x1f, 0x3f, 0x91, 0x71, 0xd0, 0xb7, 0xaa, 0xba, 0x3c, 0x68,
0x47, 0xd1, 0x45, 0x02, 0x14, 0x83, 0x86, 0xa7, 0x60, 0x2e, 0x20, 0x56, 0x13, 0x53, 0x47, 0xbd,
0xd9, 0x79, 0x19, 0x61, 0x55, 0x3c, 0xa0, 0x28, 0x2e, 0xb8, 0xe8, 0x94, 0xef, 0x0f, 0x4e, 0xdd,
0xc6, 0x01, 0x09, 0x18, 0x65, 0x9c, 0xb8, 0x3c, 0x2c, 0x98, 0x84, 0x0d, 0x4a, 0x62, 0x0b, 0xa6,
0x77, 0xc4, 0x13, 0xb8, 0xef, 0x73, 0xea, 0xb9, 0xac, 0x38, 0x11, 0x31, 0x7d, 0x2d, 0xb6, 0x8f,
0x12, 0x5a, 0x70, 0x17, 0x2c, 0x0b, 0x66, 0xfe, 0x73, 0xe8, 0xa0, 0x7a, 0xee, 0x63, 0x57, 0xdc,
0x52, 0x71, 0x52, 0xbe, 0xb6, 0x45, 0x31, 0xfa, 0x6c, 0x66, 0xc8, 0x51, 0xa6, 0x15, 0x7c, 0x05,
0x16, 0xc3, 0xd9, 0xc7, 0xa4, 0xae, 0x4d, 0xdd, 0x86, 0x98, 0x7c, 0xe4, 0xc3, 0x3f, 0x63, 0xde,
0x15, 0x1d, 0xf1, 0x32, 0x2d, 0xbc, 0xc8, 0xda, 0x44, 0x83, 0x20, 0xf0, 0x2d, 0x58, 0x94, 0x1e,
0x89, 0xad, 0xe8, 0x84, 0x12, 0x56, 0x9c, 0x96, 0xa9, 0x5b, 0x8f, 0xa7, 0x4e, 0x5c, 0x5d, 0x38,
0xb5, 0x84, 0xa4, 0xd3, 0x23, 0xa7, 0x43, 0x12, 0x38, 0xe6, 0x8f, 0x55, 0xbe, 0x16, 0x37, 0xd3,
0x50, 0x68, 0x10, 0x7d, 0xf5, 0x19, 0x98, 0x4f, 0x25, 0x1c, 0x2e, 0x80, 0xdc, 0x29, 0x69, 0x87,
0xcf, 0x32, 0x12, 0x3f, 0xe1, 0x32, 0x98, 0x38, 0xc3, 0xcd, 0x16, 0x09, 0x8b, 0x0f, 0x85, 0x8b,
0xa7, 0xe3, 0x4f, 0x34, 0xfd, 0x33, 0x0d, 0x24, 0xe8, 0x6c, 0x04, 0x2d, 0xfd, 0x22, 0xd9, 0xd2,
0xb7, 0xae, 0x50, 0xd3, 0x43, 0x9a, 0xf9, 0x6f, 0x1a, 0x98, 0x8d, 0x8f, 0x78, 0xf0, 0x17, 0x60,
0x1a, 0xb7, 0x6c, 0x4a, 0x5c, 0xab, 0x37, 0x95, 0xf4, 0x03, 0xd9, 0x54, 0xfb, 0xa8, 0xaf, 0x21,
0x06, 0x40, 0x72, 0xee, 0xd3, 0x00, 0x8b, 0x22, 0xab, 0x13, 0xcb, 0x73, 0x6d, 0x26, 0x6f, 0x28,
0x17, 0x32, 0x63, 0x35, 0x2d, 0x44, 0x83, 0xfa, 0xfa, 0xbf, 0xc7, 0xc1, 0x42, 0x58, 0x1b, 0xe1,
0xe8, 0xef, 0x10, 0x97, 0x8f, 0x80, 0x54, 0x6a, 0x89, 0x99, 0xee, 0xe7, 0x97, 0x0e, 0x3d, 0x51,
0x60, 0xc3, 0x86, 0x3b, 0xf8, 0x3b, 0x30, 0xc9, 0x38, 0xe6, 0x2d, 0x26, 0x9f, 0xba, 0xc2, 0xc6,
0xbd, 0xab, 0x02, 0x4a, 0xa3, 0x68, 0xae, 0x0b, 0xd7, 0x48, 0x81, 0xe9, 0x9f, 0x6b, 0x60, 0x39,
0x6d, 0x32, 0x82, 0x0a, 0xdb, 0x4d, 0x56, 0xd8, 0xcf, 0xae, 0x78, 0x98, 0x61, 0x5f, 0x80, 0x1a,
0xb8, 0x39, 0x70, 0x6e, 0xf9, 0x92, 0x0a, 0x5e, 0xf2, 0x53, 0xec, 0xb7, 0x17, 0x4d, 0xc4, 0x92,
0x97, 0x0e, 0x32, 0xe4, 0x28, 0xd3, 0x0a, 0xbe, 0x01, 0x0b, 0xd4, 0x6d, 0x52, 0x97, 0xa8, 0x87,
0x37, 0xca, 0x6f, 0x26, 0x79, 0xa4, 0x91, 0x65, 0x72, 0x97, 0xc5, 0x7c, 0xb2, 0x93, 0x42, 0x41,
0x03, 0xb8, 0xfa, 0x97, 0x19, 0x99, 0x91, 0x33, 0xa3, 0x68, 0x21, 0xb9, 0x43, 0x82, 0x81, 0x16,
0x52, 0xfb, 0xa8, 0xaf, 0x21, 0xeb, 0x46, 0x5e, 0x85, 0x0a, 0xf4, 0xca, 0x75, 0x23, 0x8d, 0x62,
0x75, 0x23, 0xd7, 0x48, 0x81, 0x89, 0x20, 0xc4, 0x4c, 0x16, 0x9b, 0xbd, 0xfa, 0x41, 0xec, 0xa9,
0x7d, 0xd4, 0xd7, 0xd0, 0xbf, 0xcd, 0x65, 0x24, 0x48, 0x16, 0x60, 0xec, 0x34, 0xbd, 0xaf, 0xf4,
0xf4, 0x69, 0xec, 0xfe, 0x69, 0x6c, 0xf8, 0x2f, 0x0d, 0x40, 0xdc, 0x87, 0xa8, 0xf5, 0x0a, 0x34,
0xac, 0xa2, 0xea, 0xb5, 0x5a, 0xc2, 0xd8, 0x1c, 0xc0, 0x09, 0x5f, 0xe3, 0x55, 0xe5, 0x1f, 0x0e,
0x2a, 0xa0, 0x0c, 0xe7, 0xd0, 0x06, 0x85, 0x70, 0xb7, 0x1a, 0x04, 0x5e, 0xa0, 0xda, 0x53, 0xbf,
0x34, 0x16, 0xa9, 0x69, 0x96, 0xe4, 0xc7, 0x4d, 0x64, 0x7a, 0xd1, 0x29, 0x17, 0x62, 0x72, 0x14,
0x87, 0x15, 0x5e, 0x6c, 0x12, 0x79, 0xc9, 0x5f, 0xcf, 0xcb, 0x36, 0x19, 0xee, 0x25, 0x06, 0xbb,
0x5a, 0x05, 0x3f, 0x1a, 0x72, 0x2d, 0xd7, 0x7a, 0xb3, 0xfe, 0xae, 0x81, 0xb8, 0x0f, 0xb8, 0x0b,
0xf2, 0x9c, 0xaa, 0xae, 0x4b, 0x7e, 0x00, 0x5e, 0x42, 0x24, 0x87, 0xd4, 0x21, 0x11, 0x15, 0x8a,
0x15, 0x92, 0x28, 0xf0, 0x0e, 0x98, 0x72, 0x08, 0x63, 0xb8, 0xa1, 0x3c, 0x47, 0x9f, 0x43, 0xb5,
0x70, 0x1b, 0xf5, 0xe4, 0xfa, 0x63, 0xb0, 0x94, 0xf1, 0x59, 0x09, 0xcb, 0x60, 0xc2, 0x92, 0x7f,
0x06, 0x88, 0x80, 0x26, 0xcc, 0x19, 0xc1, 0x28, 0x5b, 0xf2, 0x5f, 0x80, 0x70, 0xdf, 0xfc, 0xd5,
0xbb, 0x8f, 0xa5, 0xb1, 0xf7, 0x1f, 0x4b, 0x63, 0x1f, 0x3e, 0x96, 0xc6, 0xfe, 0xda, 0x2d, 0x69,
0xef, 0xba, 0x25, 0xed, 0x7d, 0xb7, 0xa4, 0x7d, 0xe8, 0x96, 0xb4, 0xaf, 0xbb, 0x25, 0xed, 0x9f,
0xdf, 0x94, 0xc6, 0x7e, 0xbf, 0x92, 0xf9, 0x77, 0xea, 0x0f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7a,
0x55, 0x95, 0x9f, 0x66, 0x15, 0x00, 0x00,
// 1711 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x73, 0x2b, 0x47,
0x15, 0xf6, 0x58, 0x96, 0x6d, 0xb5, 0xac, 0x6b, 0xbb, 0xaf, 0x1d, 0x06, 0x2f, 0x24, 0xd7, 0x24,
0x04, 0x27, 0x21, 0xa3, 0x5c, 0x27, 0xa4, 0x52, 0xa1, 0xb2, 0xf0, 0xc8, 0x0a, 0x71, 0x61, 0xd9,
0xa6, 0xe5, 0xa4, 0x52, 0x14, 0x50, 0x69, 0xcf, 0xb4, 0xe5, 0x8e, 0x35, 0x8f, 0x4c, 0xb7, 0x84,
0xc5, 0x0a, 0x7e, 0x00, 0x55, 0xb0, 0xe5, 0x57, 0x40, 0x01, 0x1b, 0x96, 0x2c, 0xa8, 0x0b, 0xab,
0x14, 0xab, 0xbb, 0x52, 0x71, 0xc5, 0x1a, 0x96, 0x2c, 0xbc, 0x4a, 0x75, 0x4f, 0x4b, 0xf3, 0xd0,
0xc8, 0x8f, 0x8d, 0x76, 0xea, 0xf3, 0xf8, 0xce, 0xe9, 0x3e, 0xa7, 0xbf, 0x3e, 0x23, 0xf0, 0x9d,
0xeb, 0x0f, 0x98, 0x49, 0xfd, 0x3a, 0x0e, 0x68, 0x9d, 0x71, 0x3f, 0xc4, 0x1d, 0x52, 0xef, 0x3f,
0xab, 0x77, 0x88, 0x47, 0x42, 0xcc, 0x89, 0x63, 0x06, 0xa1, 0xcf, 0x7d, 0xb8, 0x1d, 0x99, 0x99,
0x38, 0xa0, 0xa6, 0x32, 0x33, 0xfb, 0xcf, 0x76, 0xde, 0xee, 0x50, 0x7e, 0xd5, 0xbb, 0x30, 0x6d,
0xdf, 0xad, 0x77, 0xfc, 0x8e, 0x5f, 0x97, 0xd6, 0x17, 0xbd, 0x4b, 0xb9, 0x92, 0x0b, 0xf9, 0x2b,
0x42, 0xd9, 0x31, 0x12, 0xc1, 0x6c, 0x3f, 0xcc, 0x8b, 0xb4, 0xf3, 0x5e, 0x6c, 0xe3, 0x62, 0xfb,
0x8a, 0x7a, 0x24, 0x1c, 0xd4, 0x83, 0xeb, 0x8e, 0x74, 0x0a, 0x09, 0xf3, 0x7b, 0xa1, 0x4d, 0x1e,
0xe5, 0xc5, 0xea, 0x2e, 0xe1, 0x38, 0x2f, 0x56, 0x7d, 0x96, 0x57, 0xd8, 0xf3, 0x38, 0x75, 0xa7,
0xc3, 0xbc, 0x7f, 0x9f, 0x03, 0xb3, 0xaf, 0x88, 0x8b, 0xb3, 0x7e, 0xc6, 0x5f, 0x34, 0x50, 0x6a,
0xb4, 0x8f, 0x0e, 0x43, 0xda, 0x27, 0x21, 0xfc, 0x02, 0xac, 0x8a, 0x8c, 0x1c, 0xcc, 0xb1, 0xae,
0xed, 0x6a, 0x7b, 0xe5, 0xfd, 0x77, 0xcc, 0xf8, 0x7c, 0x27, 0xc0, 0x66, 0x70, 0xdd, 0x11, 0x02,
0x66, 0x0a, 0x6b, 0xb3, 0xff, 0xcc, 0x3c, 0xbd, 0xf8, 0x92, 0xd8, 0xbc, 0x45, 0x38, 0xb6, 0xe0,
0xf3, 0x61, 0x6d, 0x61, 0x34, 0xac, 0x81, 0x58, 0x86, 0x26, 0xa8, 0xf0, 0x63, 0xb0, 0xc4, 0x02,
0x62, 0xeb, 0x8b, 0x12, 0xfd, 0x35, 0x33, 0xb7, 0x7a, 0xe6, 0x24, 0xa3, 0x76, 0x40, 0x6c, 0x6b,
0x4d, 0x21, 0x2e, 0x89, 0x15, 0x92, 0xfe, 0xc6, 0x9f, 0x35, 0x50, 0x99, 0x58, 0x1d, 0x53, 0xc6,
0xe1, 0x4f, 0xa7, 0x72, 0x37, 0x1f, 0x96, 0xbb, 0xf0, 0x96, 0x99, 0x6f, 0xa8, 0x38, 0xab, 0x63,
0x49, 0x22, 0xef, 0x26, 0x28, 0x52, 0x4e, 0x5c, 0xa6, 0x2f, 0xee, 0x16, 0xf6, 0xca, 0xfb, 0xbb,
0xf7, 0x25, 0x6e, 0x55, 0x14, 0x58, 0xf1, 0x48, 0xb8, 0xa1, 0xc8, 0xdb, 0xf8, 0x67, 0x31, 0x91,
0xb6, 0xd8, 0x0e, 0xfc, 0x10, 0x3c, 0xc1, 0x9c, 0x63, 0xfb, 0x0a, 0x91, 0xaf, 0x7a, 0x34, 0x24,
0x8e, 0x4c, 0x7e, 0xd5, 0x82, 0xa3, 0x61, 0xed, 0xc9, 0x41, 0x4a, 0x83, 0x32, 0x96, 0xc2, 0x37,
0xf0, 0x9d, 0x23, 0xef, 0xd2, 0x3f, 0xf5, 0x5a, 0x7e, 0xcf, 0xe3, 0xf2, 0x58, 0x95, 0xef, 0x59,
0x4a, 0x83, 0x32, 0x96, 0xd0, 0x06, 0x5b, 0x7d, 0xbf, 0xdb, 0x73, 0xc9, 0x31, 0xbd, 0x24, 0xf6,
0xc0, 0xee, 0x92, 0x96, 0xef, 0x10, 0xa6, 0x17, 0x76, 0x0b, 0x7b, 0x25, 0xab, 0x3e, 0x1a, 0xd6,
0xb6, 0x3e, 0xcb, 0xd1, 0xdf, 0x0e, 0x6b, 0x4f, 0x73, 0xe4, 0x28, 0x17, 0x0c, 0x7e, 0x04, 0xd6,
0xd5, 0xe1, 0x34, 0x70, 0x80, 0x6d, 0xca, 0x07, 0xfa, 0x92, 0xcc, 0xf0, 0xe9, 0x68, 0x58, 0x5b,
0x6f, 0xa7, 0x55, 0x28, 0x6b, 0x0b, 0x3f, 0x01, 0x95, 0x4b, 0xf6, 0xc3, 0xd0, 0xef, 0x05, 0x67,
0x7e, 0x97, 0xda, 0x03, 0xbd, 0xb8, 0xab, 0xed, 0x95, 0x2c, 0x63, 0x34, 0xac, 0x55, 0x3e, 0x6e,
0x27, 0x14, 0xb7, 0x59, 0x01, 0x4a, 0x3b, 0xc2, 0x2f, 0x40, 0x85, 0xfb, 0xd7, 0xc4, 0x13, 0x47,
0x47, 0x18, 0x67, 0xfa, 0xb2, 0x2c, 0xe3, 0xab, 0x33, 0xca, 0x78, 0x9e, 0xb0, 0xb5, 0xb6, 0x55,
0x25, 0x2b, 0x49, 0x29, 0x43, 0x69, 0x40, 0xd8, 0x00, 0x9b, 0x61, 0x54, 0x17, 0x86, 0x48, 0xd0,
0xbb, 0xe8, 0x52, 0x76, 0xa5, 0xaf, 0xc8, 0xcd, 0x6e, 0x8f, 0x86, 0xb5, 0x4d, 0x94, 0x55, 0xa2,
0x69, 0x7b, 0xf8, 0x1e, 0x58, 0x63, 0xe4, 0x98, 0x7a, 0xbd, 0x9b, 0xa8, 0x9c, 0xab, 0xd2, 0x7f,
0x63, 0x34, 0xac, 0xad, 0xb5, 0x9b, 0xb1, 0x1c, 0xa5, 0xac, 0x60, 0x1f, 0x18, 0x9e, 0xef, 0x90,
0x83, 0x6e, 0xd7, 0xb7, 0x31, 0xc7, 0x17, 0x5d, 0xf2, 0x69, 0xe0, 0x60, 0x4e, 0xce, 0x48, 0x48,
0x7d, 0xa7, 0x4d, 0x6c, 0xdf, 0x73, 0x98, 0x5e, 0xda, 0xd5, 0xf6, 0x0a, 0xd6, 0xeb, 0xa3, 0x61,
0xcd, 0x38, 0xb9, 0xd7, 0x1a, 0x3d, 0x00, 0xd1, 0xf8, 0xa3, 0x06, 0x56, 0x1a, 0xed, 0x23, 0x81,
0x36, 0x07, 0xe6, 0x38, 0x4c, 0x31, 0x87, 0x31, 0xfb, 0x02, 0x8a, 0x7c, 0x66, 0xf2, 0xc6, 0xff,
0x22, 0xde, 0x10, 0x36, 0x8a, 0xf3, 0x76, 0xc1, 0x92, 0x87, 0x5d, 0x22, 0xb3, 0x2e, 0xc5, 0x3e,
0x27, 0xd8, 0x25, 0x48, 0x6a, 0xe0, 0xeb, 0x60, 0x59, 0x9c, 0xc6, 0xd1, 0xa1, 0x8c, 0x5d, 0xb2,
0x9e, 0x28, 0x9b, 0xe5, 0x13, 0x29, 0x45, 0x4a, 0x2b, 0xaa, 0xc7, 0xfd, 0xc0, 0xef, 0xfa, 0x9d,
0xc1, 0x8f, 0xc8, 0x60, 0x7c, 0x95, 0x64, 0xf5, 0xce, 0x13, 0x72, 0x94, 0xb2, 0x82, 0x3f, 0x03,
0x65, 0x1c, 0x9f, 0xb3, 0xbc, 0x1f, 0xe5, 0xfd, 0x37, 0x67, 0x6c, 0x2f, 0xba, 0x7a, 0x22, 0x2e,
0x52, 0x0f, 0x0e, 0xb3, 0xd6, 0x47, 0xc3, 0x5a, 0x39, 0x51, 0x2a, 0x94, 0xc4, 0x33, 0xfe, 0xa0,
0x81, 0xb2, 0xda, 0xf0, 0x1c, 0x68, 0xb2, 0x91, 0xa6, 0xc9, 0xea, 0xdd, 0x55, 0x9a, 0x41, 0x92,
0x3f, 0x9f, 0x64, 0x2c, 0x19, 0xf2, 0x14, 0xac, 0x38, 0xb2, 0x54, 0x4c, 0xd7, 0x24, 0xea, 0x6b,
0x77, 0xa3, 0x2a, 0x02, 0x5e, 0x57, 0xd8, 0x2b, 0xd1, 0x9a, 0xa1, 0x31, 0x8a, 0xf1, 0xff, 0x02,
0x80, 0x8d, 0xf6, 0x51, 0x86, 0x7e, 0xe6, 0xd0, 0xc2, 0x14, 0xac, 0x89, 0x56, 0x19, 0x37, 0x83,
0x6a, 0xe5, 0x77, 0x1f, 0x78, 0xfe, 0xf8, 0x82, 0x74, 0xdb, 0xa4, 0x4b, 0x6c, 0xee, 0x87, 0x51,
0x57, 0x9d, 0x24, 0xc0, 0x50, 0x0a, 0x1a, 0x1e, 0x82, 0x8d, 0x31, 0x9b, 0x76, 0x31, 0x63, 0xa2,
0x9b, 0xf5, 0x82, 0xec, 0x5e, 0x5d, 0xa5, 0xb8, 0xd1, 0xce, 0xe8, 0xd1, 0x94, 0x07, 0xfc, 0x1c,
0xac, 0xda, 0x49, 0xe2, 0xbe, 0xa7, 0x59, 0xcc, 0xf1, 0x14, 0x64, 0xfe, 0xb8, 0x87, 0x3d, 0x4e,
0xf9, 0xc0, 0x5a, 0x13, 0x8d, 0x32, 0x61, 0xf8, 0x09, 0x1a, 0x64, 0x60, 0xd3, 0xc5, 0x37, 0xd4,
0xed, 0xb9, 0x51, 0x4b, 0xb7, 0xe9, 0x2f, 0x89, 0xa4, 0xf7, 0xc7, 0x87, 0x90, 0xf4, 0xda, 0xca,
0x82, 0xa1, 0x69, 0x7c, 0xe3, 0xef, 0x1a, 0x78, 0x65, 0xba, 0xf0, 0x73, 0xb8, 0x16, 0x27, 0xe9,
0x6b, 0xf1, 0xc6, 0xec, 0x06, 0xce, 0xe4, 0x36, 0xe3, 0x86, 0xfc, 0x66, 0x19, 0xac, 0x25, 0xcb,
0x37, 0x87, 0xde, 0xfd, 0x3e, 0x28, 0x07, 0xa1, 0xdf, 0xa7, 0x8c, 0xfa, 0x1e, 0x09, 0x15, 0x13,
0x3e, 0x55, 0x2e, 0xe5, 0xb3, 0x58, 0x85, 0x92, 0x76, 0xb0, 0x03, 0x40, 0x80, 0x43, 0xec, 0x12,
0x2e, 0xee, 0x6f, 0x41, 0x6e, 0xff, 0xdd, 0x19, 0xdb, 0x4f, 0xee, 0xc8, 0x3c, 0x9b, 0x78, 0x35,
0x3d, 0x1e, 0x0e, 0xe2, 0xec, 0x62, 0x05, 0x4a, 0x40, 0xc3, 0x6b, 0x50, 0x09, 0x89, 0xdd, 0xc5,
0xd4, 0x55, 0xb3, 0xc2, 0x92, 0xcc, 0xb0, 0x29, 0x1e, 0x6e, 0x94, 0x54, 0xdc, 0x0e, 0x6b, 0xef,
0x4c, 0x4f, 0xfb, 0xe6, 0x19, 0x09, 0x19, 0x65, 0x9c, 0x78, 0x3c, 0x6a, 0x98, 0x94, 0x0f, 0x4a,
0x63, 0x0b, 0xa6, 0x77, 0xc5, 0xd3, 0x7b, 0x1a, 0x70, 0xea, 0x7b, 0x4c, 0x2f, 0xc6, 0x4c, 0xdf,
0x4a, 0xc8, 0x51, 0xca, 0x0a, 0x1e, 0x83, 0x2d, 0xc1, 0xcc, 0xbf, 0x88, 0x02, 0x34, 0x6f, 0x02,
0xec, 0x89, 0x53, 0xd2, 0x97, 0xe5, 0x2b, 0xaf, 0x8b, 0x91, 0xeb, 0x20, 0x47, 0x8f, 0x72, 0xbd,
0xe0, 0xe7, 0x60, 0x33, 0x9a, 0xb9, 0x2c, 0xea, 0x39, 0xd4, 0xeb, 0x88, 0x89, 0x4b, 0x0e, 0x1c,
0x25, 0xeb, 0x4d, 0x71, 0x23, 0x3e, 0xcb, 0x2a, 0x6f, 0xf3, 0x84, 0x68, 0x1a, 0x04, 0x7e, 0x05,
0x36, 0x65, 0x44, 0xe2, 0x28, 0x3a, 0xa1, 0x84, 0xe9, 0xab, 0xb2, 0x74, 0x7b, 0xc9, 0xd2, 0x89,
0xa3, 0x8b, 0xa6, 0xa5, 0x88, 0x74, 0xc6, 0xe4, 0x74, 0x4e, 0x42, 0xd7, 0xfa, 0xb6, 0xaa, 0xd7,
0xe6, 0x41, 0x16, 0x0a, 0x4d, 0xa3, 0xef, 0x7c, 0x04, 0xd6, 0x33, 0x05, 0x87, 0x1b, 0xa0, 0x70,
0x4d, 0x06, 0xd1, 0xb3, 0x8c, 0xc4, 0x4f, 0xb8, 0x05, 0x8a, 0x7d, 0xdc, 0xed, 0x91, 0xa8, 0xf9,
0x50, 0xb4, 0xf8, 0x70, 0xf1, 0x03, 0xcd, 0xf8, 0xab, 0x06, 0x52, 0x74, 0x36, 0x87, 0x2b, 0xfd,
0x49, 0xfa, 0x4a, 0xbf, 0xfa, 0x80, 0x9e, 0x9e, 0x71, 0x99, 0x7f, 0xad, 0x81, 0xb5, 0xe4, 0x68,
0x09, 0xbf, 0x07, 0x56, 0x71, 0xcf, 0xa1, 0xc4, 0xb3, 0xc7, 0x53, 0xc9, 0x24, 0x91, 0x03, 0x25,
0x47, 0x13, 0x0b, 0x31, 0x78, 0x92, 0x9b, 0x80, 0x86, 0x58, 0x34, 0xd9, 0x78, 0xd8, 0x5b, 0x94,
0xc3, 0x9e, 0x64, 0xc6, 0x66, 0x56, 0x89, 0xa6, 0xed, 0x8d, 0xdf, 0x2f, 0x82, 0x8d, 0xa8, 0x37,
0xa2, 0x4f, 0x0e, 0x97, 0x78, 0x7c, 0x0e, 0xa4, 0xd2, 0x4a, 0xcd, 0x74, 0x6f, 0xdd, 0x39, 0xf4,
0xc4, 0x89, 0xcd, 0x1a, 0xee, 0xe0, 0xa7, 0x60, 0x99, 0x71, 0xcc, 0x7b, 0x4c, 0x3e, 0x75, 0xe5,
0xfd, 0xb7, 0x1f, 0x0a, 0x28, 0x9d, 0xe2, 0xb9, 0x2e, 0x5a, 0x23, 0x05, 0x66, 0xfc, 0x4d, 0x03,
0x5b, 0x59, 0x97, 0x39, 0x74, 0xd8, 0x71, 0xba, 0xc3, 0xbe, 0xfb, 0xc0, 0xcd, 0xcc, 0xe8, 0xb2,
0x7f, 0x69, 0xe0, 0x95, 0xa9, 0x7d, 0xcb, 0x97, 0x54, 0xf0, 0x52, 0x90, 0x61, 0xbf, 0x93, 0x78,
0x22, 0x96, 0xbc, 0x74, 0x96, 0xa3, 0x47, 0xb9, 0x5e, 0xf0, 0x4b, 0xb0, 0x41, 0xbd, 0x2e, 0xf5,
0x88, 0x7a, 0x78, 0xe3, 0xfa, 0xe6, 0x92, 0x47, 0x16, 0x59, 0x16, 0x77, 0x4b, 0xcc, 0x27, 0x47,
0x19, 0x14, 0x34, 0x85, 0x6b, 0xfc, 0x23, 0xa7, 0x32, 0x72, 0x66, 0x14, 0x57, 0x48, 0x4a, 0x48,
0x38, 0x75, 0x85, 0x94, 0x1c, 0x4d, 0x2c, 0x64, 0xdf, 0xc8, 0xa3, 0x50, 0x89, 0x3e, 0xb8, 0x6f,
0xa4, 0x53, 0xa2, 0x6f, 0xe4, 0x1a, 0x29, 0x30, 0x91, 0x84, 0x98, 0xc9, 0x12, 0xb3, 0xd7, 0x24,
0x89, 0x13, 0x25, 0x47, 0x13, 0x0b, 0xe3, 0xbf, 0x85, 0x9c, 0x02, 0xc9, 0x06, 0x4c, 0xec, 0x66,
0xfc, 0xef, 0x40, 0x76, 0x37, 0xce, 0x64, 0x37, 0x0e, 0xfc, 0x9d, 0x06, 0x20, 0x9e, 0x40, 0xb4,
0xc6, 0x0d, 0x1a, 0x75, 0x51, 0xf3, 0x51, 0x57, 0xc2, 0x3c, 0x98, 0xc2, 0x89, 0x5e, 0xe3, 0x1d,
0x15, 0x1f, 0x4e, 0x1b, 0xa0, 0x9c, 0xe0, 0xd0, 0x01, 0xe5, 0x48, 0xda, 0x0c, 0x43, 0x3f, 0x54,
0xd7, 0xd3, 0xb8, 0x33, 0x17, 0x69, 0x69, 0x55, 0xe5, 0xc7, 0x4d, 0xec, 0x7a, 0x3b, 0xac, 0x95,
0x13, 0x7a, 0x94, 0x84, 0x15, 0x51, 0x1c, 0x12, 0x47, 0x59, 0x7a, 0x5c, 0x94, 0x43, 0x32, 0x3b,
0x4a, 0x02, 0x76, 0xa7, 0x09, 0xbe, 0x35, 0xe3, 0x58, 0x1e, 0xf5, 0x66, 0xfd, 0x49, 0x03, 0xc9,
0x18, 0xf0, 0x18, 0x2c, 0x71, 0xaa, 0x6e, 0x5d, 0xfa, 0x03, 0xf0, 0x0e, 0x22, 0x39, 0xa7, 0x2e,
0x89, 0xa9, 0x50, 0xac, 0x90, 0x44, 0x81, 0x6f, 0x80, 0x15, 0x97, 0x30, 0x86, 0x3b, 0x2a, 0x72,
0xfc, 0x39, 0xd4, 0x8a, 0xc4, 0x68, 0xac, 0x87, 0x6f, 0x81, 0x12, 0x11, 0x19, 0x34, 0xc4, 0x00,
0x21, 0x2a, 0x53, 0xb4, 0x2a, 0xa3, 0x61, 0xad, 0xd4, 0x1c, 0x0b, 0x51, 0xac, 0x37, 0xde, 0x07,
0x4f, 0x73, 0xbe, 0x41, 0x61, 0x0d, 0x14, 0x6d, 0xf9, 0x8f, 0x85, 0x26, 0xfd, 0x4b, 0x82, 0x7e,
0x1a, 0xf2, 0xaf, 0x8a, 0x48, 0x6e, 0xfd, 0xe0, 0xf9, 0xcb, 0xea, 0xc2, 0xd7, 0x2f, 0xab, 0x0b,
0x2f, 0x5e, 0x56, 0x17, 0x7e, 0x35, 0xaa, 0x6a, 0xcf, 0x47, 0x55, 0xed, 0xeb, 0x51, 0x55, 0x7b,
0x31, 0xaa, 0x6a, 0xff, 0x1e, 0x55, 0xb5, 0xdf, 0xfe, 0xa7, 0xba, 0xf0, 0x93, 0xed, 0xdc, 0xff,
0x7c, 0xbf, 0x09, 0x00, 0x00, 0xff, 0xff, 0x39, 0x5a, 0x51, 0xe9, 0x0b, 0x16, 0x00, 0x00,
}
func (m *CSIDriver) Marshal() (dAtA []byte, err error) {
@ -826,6 +829,11 @@ func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if m.NodeAllocatableUpdatePeriodSeconds != nil {
i = encodeVarintGenerated(dAtA, i, uint64(*m.NodeAllocatableUpdatePeriodSeconds))
i--
dAtA[i] = 0x48
}
if m.SELinuxMount != nil {
i--
if *m.SELinuxMount {
@ -1684,6 +1692,11 @@ func (m *VolumeError) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if m.ErrorCode != nil {
i = encodeVarintGenerated(dAtA, i, uint64(*m.ErrorCode))
i--
dAtA[i] = 0x18
}
i -= len(m.Message)
copy(dAtA[i:], m.Message)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
@ -1808,6 +1821,9 @@ func (m *CSIDriverSpec) Size() (n int) {
if m.SELinuxMount != nil {
n += 2
}
if m.NodeAllocatableUpdatePeriodSeconds != nil {
n += 1 + sovGenerated(uint64(*m.NodeAllocatableUpdatePeriodSeconds))
}
return n
}
@ -2096,6 +2112,9 @@ func (m *VolumeError) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Message)
n += 1 + l + sovGenerated(uint64(l))
if m.ErrorCode != nil {
n += 1 + sovGenerated(uint64(*m.ErrorCode))
}
return n
}
@ -2162,6 +2181,7 @@ func (this *CSIDriverSpec) String() string {
`TokenRequests:` + repeatedStringForTokenRequests + `,`,
`RequiresRepublish:` + valueToStringGenerated(this.RequiresRepublish) + `,`,
`SELinuxMount:` + valueToStringGenerated(this.SELinuxMount) + `,`,
`NodeAllocatableUpdatePeriodSeconds:` + valueToStringGenerated(this.NodeAllocatableUpdatePeriodSeconds) + `,`,
`}`,
}, "")
return s
@ -2391,6 +2411,7 @@ func (this *VolumeError) String() string {
s := strings.Join([]string{`&VolumeError{`,
`Time:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
`ErrorCode:` + valueToStringGenerated(this.ErrorCode) + `,`,
`}`,
}, "")
return s
@ -2879,6 +2900,26 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error {
}
b := bool(v != 0)
m.SELinuxMount = &b
case 9:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field NodeAllocatableUpdatePeriodSeconds", wireType)
}
var v int64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.NodeAllocatableUpdatePeriodSeconds = &v
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@ -5248,6 +5289,26 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error {
}
m.Message = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ErrorCode", wireType)
}
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.ErrorCode = &v
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])

View File

@ -212,6 +212,20 @@ message CSIDriverSpec {
// +featureGate=SELinuxMountReadWriteOncePod
// +optional
optional bool seLinuxMount = 8;
// nodeAllocatableUpdatePeriodSeconds specifies the interval between periodic updates of
// the CSINode allocatable capacity for this driver. When set, both periodic updates and
// updates triggered by capacity-related failures are enabled. If not set, no updates
// occur (neither periodic nor upon detecting capacity-related failures), and the
// allocatable.count remains static. The minimum allowed value for this field is 10 seconds.
//
// This is an alpha feature and requires the MutableCSINodeAllocatableCount feature gate to be enabled.
//
// This field is mutable.
//
// +featureGate=MutableCSINodeAllocatableCount
// +optional
optional int64 nodeAllocatableUpdatePeriodSeconds = 9;
}
// CSINode holds information about all CSI drivers installed on a node.
@ -561,6 +575,14 @@ message VolumeError {
// information.
// +optional
optional string message = 2;
// errorCode is a numeric gRPC code representing the error encountered during Attach or Detach operations.
//
// This is an optional, alpha field that requires the MutableCSINodeAllocatableCount feature gate being enabled to be set.
//
// +featureGate=MutableCSINodeAllocatableCount
// +optional
optional int32 errorCode = 3;
}
// VolumeNodeResources is a set of resource limits for scheduling of volumes.

View File

@ -226,6 +226,14 @@ type VolumeError struct {
// information.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
// errorCode is a numeric gRPC code representing the error encountered during Attach or Detach operations.
//
// This is an optional, alpha field that requires the MutableCSINodeAllocatableCount feature gate being enabled to be set.
//
// +featureGate=MutableCSINodeAllocatableCount
// +optional
ErrorCode *int32 `json:"errorCode,omitempty" protobuf:"varint,3,opt,name=errorCode"`
}
// +genclient
@ -422,6 +430,20 @@ type CSIDriverSpec struct {
// +featureGate=SELinuxMountReadWriteOncePod
// +optional
SELinuxMount *bool `json:"seLinuxMount,omitempty" protobuf:"varint,8,opt,name=seLinuxMount"`
// nodeAllocatableUpdatePeriodSeconds specifies the interval between periodic updates of
// the CSINode allocatable capacity for this driver. When set, both periodic updates and
// updates triggered by capacity-related failures are enabled. If not set, no updates
// occur (neither periodic nor upon detecting capacity-related failures), and the
// allocatable.count remains static. The minimum allowed value for this field is 10 seconds.
//
// This is an alpha feature and requires the MutableCSINodeAllocatableCount feature gate to be enabled.
//
// This field is mutable.
//
// +featureGate=MutableCSINodeAllocatableCount
// +optional
NodeAllocatableUpdatePeriodSeconds *int64 `json:"nodeAllocatableUpdatePeriodSeconds,omitempty" protobuf:"varint,9,opt,name=nodeAllocatableUpdatePeriodSeconds"`
}
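
For illustration, a minimal sketch (not part of this change) of how a program building manifests might set the two new alpha fields, assuming the vendored types are the storage v1 versions (k8s.io/api/storage/v1); the driver name, period, and error code are hypothetical examples:

package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Alpha: requires the MutableCSINodeAllocatableCount feature gate.
	period := int64(60) // must be >= 10 per the field documentation above
	driver := storagev1.CSIDriver{
		ObjectMeta: metav1.ObjectMeta{Name: "rbd.csi.ceph.com"}, // hypothetical driver name
		Spec: storagev1.CSIDriverSpec{
			NodeAllocatableUpdatePeriodSeconds: &period,
		},
	}

	code := int32(8) // gRPC RESOURCE_EXHAUSTED, as an example capacity-related failure
	volErr := storagev1.VolumeError{
		Message:   "attach failed: no allocatable slots left on node",
		ErrorCode: &code,
	}

	fmt.Println(driver.Name, *driver.Spec.NodeAllocatableUpdatePeriodSeconds, *volErr.ErrorCode)
}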
// FSGroupPolicy specifies if a CSI Driver supports modifying

View File

@ -48,15 +48,16 @@ func (CSIDriverList) SwaggerDoc() map[string]string {
}
var map_CSIDriverSpec = map[string]string{
"": "CSIDriverSpec is the specification of a CSIDriver.",
"attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.",
"podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field was immutable in Kubernetes < 1.29 and now is mutable.",
"volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is beta. This field is immutable.",
"storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.",
"fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field was immutable in Kubernetes < 1.29 and now is mutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.",
"tokenRequests": "tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"<audience>\": {\n \"token\": <token>,\n \"expirationTimestamp\": <expiration timestamp in RFC3339>,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.",
"requiresRepublish": "requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.",
"seLinuxMount": "seLinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".",
"": "CSIDriverSpec is the specification of a CSIDriver.",
"attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.",
"podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field was immutable in Kubernetes < 1.29 and now is mutable.",
"volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is beta. This field is immutable.",
"storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.",
"fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field was immutable in Kubernetes < 1.29 and now is mutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.",
"tokenRequests": "tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"<audience>\": {\n \"token\": <token>,\n \"expirationTimestamp\": <expiration timestamp in RFC3339>,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.",
"requiresRepublish": "requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.",
"seLinuxMount": "seLinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".",
"nodeAllocatableUpdatePeriodSeconds": "nodeAllocatableUpdatePeriodSeconds specifies the interval between periodic updates of the CSINode allocatable capacity for this driver. When set, both periodic updates and updates triggered by capacity-related failures are enabled. If not set, no updates occur (neither periodic nor upon detecting capacity-related failures), and the allocatable.count remains static. The minimum allowed value for this field is 10 seconds.\n\nThis is an alpha feature and requires the MutableCSINodeAllocatableCount feature gate to be enabled.\n\nThis field is mutable.",
}
func (CSIDriverSpec) SwaggerDoc() map[string]string {
@ -217,9 +218,10 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string {
}
var map_VolumeError = map[string]string{
"": "VolumeError captures an error encountered during a volume operation.",
"time": "time represents the time the error was encountered.",
"message": "message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.",
"": "VolumeError captures an error encountered during a volume operation.",
"time": "time represents the time the error was encountered.",
"message": "message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.",
"errorCode": "errorCode is a numeric gRPC code representing the error encountered during Attach or Detach operations.\n\nThis is an optional, alpha field that requires the MutableCSINodeAllocatableCount feature gate being enabled to be set.",
}
func (VolumeError) SwaggerDoc() map[string]string {

View File

@ -132,6 +132,11 @@ func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) {
*out = new(bool)
**out = **in
}
if in.NodeAllocatableUpdatePeriodSeconds != nil {
in, out := &in.NodeAllocatableUpdatePeriodSeconds, &out.NodeAllocatableUpdatePeriodSeconds
*out = new(int64)
**out = **in
}
return
}
@ -583,6 +588,11 @@ func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus {
func (in *VolumeError) DeepCopyInto(out *VolumeError) {
*out = *in
in.Time.DeepCopyInto(&out.Time)
if in.ErrorCode != nil {
in, out := &in.ErrorCode, &out.ErrorCode
*out = new(int32)
**out = **in
}
return
}

View File

@ -0,0 +1,56 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package operation
import "k8s.io/apimachinery/pkg/util/sets"
// Operation provides contextual information about a validation request and the API
// operation being validated.
// This type is intended for use with generated validation code and may be enhanced
// in the future to include other information needed to validate requests.
type Operation struct {
// Type is the category of operation being validated. This does not
// differentiate between HTTP verbs like PUT and PATCH, but rather merges
// those into a single "Update" category.
Type Type
// Options declare the options enabled for validation.
//
// Options should be set according to a resource validation strategy before validation
// is performed, and must be treated as read-only during validation.
//
// Options are identified by string names. Option string names may match the name of a feature
// gate, in which case the presence of the name in the set indicates that the feature is
// considered enabled for the resource being validated. Note that a resource may have a
// feature enabled even when the feature gate is disabled. This can happen when feature is
// already in-use by a resource, often because the feature gate was enabled when the
// resource first began using the feature.
//
// Unset options are disabled/false.
Options sets.Set[string]
}
// Type is the request operation to be validated.
type Type uint32
const (
// Create indicates the request being validated is for a resource create operation.
Create Type = iota
// Update indicates the request being validated is for a resource update operation.
Update
)
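
For illustration, a minimal sketch (not part of this change) of constructing an Operation for declarative validation; the option name used here is an assumption chosen to mirror a feature gate:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/operation"
	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	op := operation.Operation{
		Type: operation.Update,
		// Option names may match feature gate names; unset options are treated as disabled.
		Options: sets.New[string]("MutableCSINodeAllocatableCount"),
	}
	fmt.Println(op.Type == operation.Update, op.Options.Has("MutableCSINodeAllocatableCount"))
}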

View File

@ -21,4 +21,4 @@ limitations under the License.
// +groupName=meta.k8s.io
package v1 // import "k8s.io/apimachinery/pkg/apis/meta/v1"
package v1

View File

@ -20,21 +20,22 @@ limitations under the License.
package v1
import (
"math/rand"
"time"
fuzz "github.com/google/gofuzz"
"sigs.k8s.io/randfill"
)
// Fuzz satisfies fuzz.Interface.
func (t *MicroTime) Fuzz(c fuzz.Continue) {
// Fuzz satisfies randfill.SimpleSelfFiller.
func (t *MicroTime) RandFill(r *rand.Rand) {
if t == nil {
return
}
// Allow for about 1000 years of randomness. Accurate to a tenth of
// micro second. Leave off nanoseconds because JSON doesn't
// represent them so they can't round-trip properly.
t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 1000*c.Rand.Int63n(1000000))
t.Time = time.Unix(r.Int63n(1000*365*24*60*60), 1000*r.Int63n(1000000))
}
// ensure MicroTime implements fuzz.Interface
var _ fuzz.Interface = &MicroTime{}
// ensure MicroTime implements randfill.Interface
var _ randfill.SimpleSelfFiller = &MicroTime{}

View File

@ -20,21 +20,22 @@ limitations under the License.
package v1
import (
"math/rand"
"time"
fuzz "github.com/google/gofuzz"
"sigs.k8s.io/randfill"
)
// Fuzz satisfies fuzz.Interface.
func (t *Time) Fuzz(c fuzz.Continue) {
// Fuzz satisfies randfill.SimpleSelfFiller.
func (t *Time) RandFill(r *rand.Rand) {
if t == nil {
return
}
// Allow for about 1000 years of randomness. Leave off nanoseconds
// because JSON doesn't represent them so they can't round-trip
// properly.
t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 0)
t.Time = time.Unix(r.Int63n(1000*365*24*60*60), 0)
}
// ensure Time implements fuzz.Interface
var _ fuzz.Interface = &Time{}
// ensure Time implements randfill.SimpleSelfFiller
var _ randfill.SimpleSelfFiller = &Time{}
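
For illustration, a minimal sketch (not part of this change) of the SimpleSelfFiller pattern that Time and MicroTime now follow after the gofuzz-to-randfill migration; the myID type is hypothetical:

package main

import (
	"fmt"
	"math/rand"

	"sigs.k8s.io/randfill"
)

// myID is a hypothetical type that constrains its own random values.
type myID struct{ n int64 }

// RandFill satisfies randfill.SimpleSelfFiller, mirroring MicroTime.RandFill above.
func (id *myID) RandFill(r *rand.Rand) {
	if id == nil {
		return
	}
	id.n = r.Int63n(1 << 20) // keep values in a range that round-trips cleanly
}

var _ randfill.SimpleSelfFiller = &myID{}

func main() {
	id := &myID{}
	id.RandFill(rand.New(rand.NewSource(1)))
	fmt.Println(id.n)
}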

View File

@ -21,4 +21,4 @@ limitations under the License.
// but for the fields which did not change, copying is automated. This makes it
// easy to modify the structures you use in memory without affecting the format
// you store on disk or respond to in your external API calls.
package conversion // import "k8s.io/apimachinery/pkg/conversion"
package conversion

View File

@ -16,4 +16,4 @@ limitations under the License.
// Package queryparams provides conversion from versioned
// runtime objects to URL query values
package queryparams // import "k8s.io/apimachinery/pkg/conversion/queryparams"
package queryparams

View File

@ -16,4 +16,4 @@ limitations under the License.
// Package fields implements a simple field system, parsing and matching
// selectors with sets of fields.
package fields // import "k8s.io/apimachinery/pkg/fields"
package fields

View File

@ -16,4 +16,4 @@ limitations under the License.
// Package labels implements a simple label system, parsing and matching
// selectors with sets of labels.
package labels // import "k8s.io/apimachinery/pkg/labels"
package labels

View File

@ -48,4 +48,4 @@ limitations under the License.
//
// As a bonus, a few common types useful from all api objects and versions
// are provided in types.go.
package runtime // import "k8s.io/apimachinery/pkg/runtime"
package runtime

View File

@ -259,6 +259,7 @@ type ObjectDefaulter interface {
type ObjectVersioner interface {
ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error)
PrioritizedVersionsForGroup(group string) []schema.GroupVersion
}
// ObjectConvertor converts an object to a different version.

View File

@ -17,15 +17,18 @@ limitations under the License.
package runtime
import (
"context"
"fmt"
"reflect"
"strings"
"k8s.io/apimachinery/pkg/api/operation"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/naming"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
)
// Scheme defines methods for serializing and deserializing API objects, a type
@ -68,6 +71,12 @@ type Scheme struct {
// the provided object must be a pointer.
defaulterFuncs map[reflect.Type]func(interface{})
// validationFuncs is a map to funcs to be called with an object to perform validation.
// The provided object must be a pointer.
// If oldObject is non-nil, update validation is performed and may perform additional
// validation such as transition rules and immutability checks.
validationFuncs map[reflect.Type]func(ctx context.Context, op operation.Operation, object, oldObject interface{}, subresources ...string) field.ErrorList
// converter stores all registered conversion functions. It also has
// default converting behavior.
converter *conversion.Converter
@ -96,6 +105,7 @@ func NewScheme() *Scheme {
unversionedKinds: map[string]reflect.Type{},
fieldLabelConversionFuncs: map[schema.GroupVersionKind]FieldLabelConversionFunc{},
defaulterFuncs: map[reflect.Type]func(interface{}){},
validationFuncs: map[reflect.Type]func(ctx context.Context, op operation.Operation, object, oldObject interface{}, subresource ...string) field.ErrorList{},
versionPriority: map[string][]string{},
schemeName: naming.GetNameFromCallsite(internalPackages...),
}
@ -347,6 +357,35 @@ func (s *Scheme) Default(src Object) {
}
}
// AddValidationFunc registers a function that can validate the object, and
// oldObject. These functions will be invoked when Validate() or ValidateUpdate()
// is called. The function will never be called unless the validated object
// matches srcType. If this function is invoked twice with the same srcType, the
// fn passed to the later call will be used instead.
func (s *Scheme) AddValidationFunc(srcType Object, fn func(ctx context.Context, op operation.Operation, object, oldObject interface{}, subresources ...string) field.ErrorList) {
s.validationFuncs[reflect.TypeOf(srcType)] = fn
}
// Validate validates the provided Object according to the generated declarative validation code.
// WARNING: This does not validate all objects! The handwritten validation code in validation.go
// is not run when this is called. Only the generated zz_generated.validations.go validation code is run.
func (s *Scheme) Validate(ctx context.Context, options sets.Set[string], object Object, subresources ...string) field.ErrorList {
if fn, ok := s.validationFuncs[reflect.TypeOf(object)]; ok {
return fn(ctx, operation.Operation{Type: operation.Create, Options: options}, object, nil, subresources...)
}
return nil
}
// ValidateUpdate validates the provided object and oldObject according to the generated declarative validation code.
// WARNING: This does not validate all objects! The handwritten validation code in validation.go
// is not run when this is called. Only the generated zz_generated.validations.go validation code is run.
func (s *Scheme) ValidateUpdate(ctx context.Context, options sets.Set[string], object, oldObject Object, subresources ...string) field.ErrorList {
if fn, ok := s.validationFuncs[reflect.TypeOf(object)]; ok {
return fn(ctx, operation.Operation{Type: operation.Update, Options: options}, object, oldObject, subresources...)
}
return nil
}
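
For illustration, a minimal sketch (not part of this change) of registering and running a declarative-style validation function through the new hooks; CSIDriver is used purely as an example object and the check itself is hypothetical:

package main

import (
	"context"
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/api/operation"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	scheme := runtime.NewScheme()
	scheme.AddValidationFunc(&storagev1.CSIDriver{}, func(ctx context.Context, op operation.Operation, object, oldObject interface{}, subresources ...string) field.ErrorList {
		driver := object.(*storagev1.CSIDriver)
		if driver.Name == "" {
			return field.ErrorList{field.Required(field.NewPath("metadata", "name"), "")}
		}
		return nil
	})

	// Create-time validation: oldObject is nil and the operation Type is Create.
	errs := scheme.Validate(context.Background(), sets.New[string](), &storagev1.CSIDriver{})
	fmt.Println(errs.ToAggregate())
}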
// Convert will attempt to convert in into out. Both must be pointers. For easy
// testing of conversion functions. Returns an error if the conversion isn't
// possible. You can call this with types that haven't been registered (for example,

View File

@ -140,7 +140,7 @@ func (cache *checkers) getCheckerInternal(rt reflect.Type, parent *path) (c chec
var wg sync.WaitGroup
wg.Add(1)
defer wg.Done()
c = checker{
placeholder := checker{
safe: func() bool {
wg.Wait()
return c.safe()
@ -150,7 +150,7 @@ func (cache *checkers) getCheckerInternal(rt reflect.Type, parent *path) (c chec
return c.check(rv, depth)
},
}
if actual, loaded := cache.m.LoadOrStore(rt, &c); loaded {
if actual, loaded := cache.m.LoadOrStore(rt, &placeholder); loaded {
// Someone else stored an entry for this type, use it.
return *actual.(*checker)
}

View File

@ -18,6 +18,7 @@ package runtime
import (
"fmt"
"io"
)
type ProtobufMarshaller interface {
@ -28,6 +29,124 @@ type ProtobufReverseMarshaller interface {
MarshalToSizedBuffer(data []byte) (int, error)
}
const (
typeMetaTag = 0xa
rawTag = 0x12
contentEncodingTag = 0x1a
contentTypeTag = 0x22
// max length of a varint for a uint64
maxUint64VarIntLength = 10
)
// MarshalToWriter allows a caller to provide a streaming writer for raw bytes,
// instead of populating them inside the Unknown struct.
// rawSize is the number of bytes writeRaw will write in a success case.
// writeRaw is called when it is time to write the raw bytes. It must return `rawSize, nil` or an error.
func (m *Unknown) MarshalToWriter(w io.Writer, rawSize int, writeRaw func(io.Writer) (int, error)) (int, error) {
size := 0
// reuse the buffer for varint marshaling
varintBuffer := make([]byte, maxUint64VarIntLength)
writeVarint := func(i int) (int, error) {
offset := encodeVarintGenerated(varintBuffer, len(varintBuffer), uint64(i))
return w.Write(varintBuffer[offset:])
}
// TypeMeta
{
n, err := w.Write([]byte{typeMetaTag})
size += n
if err != nil {
return size, err
}
typeMetaBytes, err := m.TypeMeta.Marshal()
if err != nil {
return size, err
}
n, err = writeVarint(len(typeMetaBytes))
size += n
if err != nil {
return size, err
}
n, err = w.Write(typeMetaBytes)
size += n
if err != nil {
return size, err
}
}
// Raw, delegating write to writeRaw()
{
n, err := w.Write([]byte{rawTag})
size += n
if err != nil {
return size, err
}
n, err = writeVarint(rawSize)
size += n
if err != nil {
return size, err
}
n, err = writeRaw(w)
size += n
if err != nil {
return size, err
}
if n != int(rawSize) {
return size, fmt.Errorf("the size value was %d, but encoding wrote %d bytes to data", rawSize, n)
}
}
// ContentEncoding
{
n, err := w.Write([]byte{contentEncodingTag})
size += n
if err != nil {
return size, err
}
n, err = writeVarint(len(m.ContentEncoding))
size += n
if err != nil {
return size, err
}
n, err = w.Write([]byte(m.ContentEncoding))
size += n
if err != nil {
return size, err
}
}
// ContentType
{
n, err := w.Write([]byte{contentTypeTag})
size += n
if err != nil {
return size, err
}
n, err = writeVarint(len(m.ContentType))
size += n
if err != nil {
return size, err
}
n, err = w.Write([]byte(m.ContentType))
size += n
if err != nil {
return size, err
}
}
return size, nil
}
// NestedMarshalTo allows a caller to avoid extra allocations during serialization of an Unknown
// that will contain an object that implements ProtobufMarshaller or ProtobufReverseMarshaller.
func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64) (int, error) {
@ -43,12 +162,12 @@ func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64
copy(data[i:], m.ContentType)
i = encodeVarintGenerated(data, i, uint64(len(m.ContentType)))
i--
data[i] = 0x22
data[i] = contentTypeTag
i -= len(m.ContentEncoding)
copy(data[i:], m.ContentEncoding)
i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding)))
i--
data[i] = 0x1a
data[i] = contentEncodingTag
if b != nil {
if r, ok := b.(ProtobufReverseMarshaller); ok {
n1, err := r.MarshalToSizedBuffer(data[:i])
@ -75,7 +194,7 @@ func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64
}
i = encodeVarintGenerated(data, i, size)
i--
data[i] = 0x12
data[i] = rawTag
}
n2, err := m.TypeMeta.MarshalToSizedBuffer(data[:i])
if err != nil {
@ -84,6 +203,6 @@ func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64
i -= n2
i = encodeVarintGenerated(data, i, uint64(n2))
i--
data[i] = 0xa
data[i] = typeMetaTag
return msgSize - i, nil
}
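
For illustration, a minimal sketch (not part of this change) of streaming the Raw payload of a runtime.Unknown through the new MarshalToWriter helper instead of buffering it in the struct; the JSON payload is a made-up example:

package main

import (
	"bytes"
	"fmt"
	"io"

	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	raw := []byte(`{"kind":"CSIDriver","apiVersion":"storage.k8s.io/v1"}`)
	u := &runtime.Unknown{ContentType: "application/json"}

	var buf bytes.Buffer
	// writeRaw is invoked exactly when the Raw field's bytes are due and must
	// report that it wrote rawSize bytes on success.
	n, err := u.MarshalToWriter(&buf, len(raw), func(w io.Writer) (int, error) {
		return w.Write(raw)
	})
	fmt.Println(n, err, buf.Len())
}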

View File

@ -15,4 +15,4 @@ limitations under the License.
*/
// Package types implements various generic types used throughout kubernetes.
package types // import "k8s.io/apimachinery/pkg/types"
package types

View File

@ -15,4 +15,4 @@ limitations under the License.
*/
// Package errors implements various utility functions and types around errors.
package errors // import "k8s.io/apimachinery/pkg/util/errors"
package errors

View File

@ -20,24 +20,24 @@ limitations under the License.
package intstr
import (
fuzz "github.com/google/gofuzz"
"sigs.k8s.io/randfill"
)
// Fuzz satisfies fuzz.Interface
func (intstr *IntOrString) Fuzz(c fuzz.Continue) {
// RandFill satisfies randfill.NativeSelfFiller
func (intstr *IntOrString) RandFill(c randfill.Continue) {
if intstr == nil {
return
}
if c.RandBool() {
if c.Bool() {
intstr.Type = Int
c.Fuzz(&intstr.IntVal)
c.Fill(&intstr.IntVal)
intstr.StrVal = ""
} else {
intstr.Type = String
intstr.IntVal = 0
c.Fuzz(&intstr.StrVal)
c.Fill(&intstr.StrVal)
}
}
// ensure IntOrString implements fuzz.Interface
var _ fuzz.Interface = &IntOrString{}
var _ randfill.NativeSelfFiller = &IntOrString{}

View File

@ -36,6 +36,11 @@ var (
)
// PanicHandlers is a list of functions which will be invoked when a panic happens.
//
// The code invoking these handlers prepares a contextual logger so that
// klog.FromContext(ctx) already skips over the panic handler itself and
// several other intermediate functions, ideally such that the log output
// is attributed to the code which triggered the panic.
var PanicHandlers = []func(context.Context, interface{}){logPanic}
// HandleCrash simply catches a crash and logs an error. Meant to be called via
@ -45,7 +50,7 @@ var PanicHandlers = []func(context.Context, interface{}){logPanic}
//
// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully.
//
// Contextual logging: HandleCrashWithContext should be used instead of HandleCrash in code which supports contextual logging.
// Contextual logging: HandleCrashWithContext or HandleCrashWithLogger should be used instead of HandleCrash in code which supports contextual logging.
func HandleCrash(additionalHandlers ...func(interface{})) {
if r := recover(); r != nil {
additionalHandlersWithContext := make([]func(context.Context, interface{}), len(additionalHandlers))
@ -74,10 +79,30 @@ func HandleCrashWithContext(ctx context.Context, additionalHandlers ...func(cont
}
}
// handleCrash is the common implementation of HandleCrash and HandleCrashWithContext.
// HandleCrashWithLogger simply catches a crash and logs an error. Meant to be called via
// defer. Additional context-specific handlers can be provided, and will be
// called in case of panic. HandleCrash actually crashes, after calling the
// handlers and logging the panic message.
//
// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully.
func HandleCrashWithLogger(logger klog.Logger, additionalHandlers ...func(context.Context, interface{})) {
if r := recover(); r != nil {
ctx := klog.NewContext(context.Background(), logger)
handleCrash(ctx, r, additionalHandlers...)
}
}
// handleCrash is the common implementation of the HandleCrash* variants.
// Having those call a common implementation ensures that the stack depth
// is the same regardless through which path the handlers get invoked.
func handleCrash(ctx context.Context, r any, additionalHandlers ...func(context.Context, interface{})) {
// We don't really know how many call frames to skip because the Go
// panic handler is between us and the code where the panic occurred.
// If it's one function (as in Go 1.21), then skipping four levels
// gets us to the function which called the `defer HandleCrashWithContext(...)`.
logger := klog.FromContext(ctx).WithCallDepth(4)
ctx = klog.NewContext(ctx, logger)
for _, fn := range PanicHandlers {
fn(ctx, r)
}
@ -106,11 +131,7 @@ func logPanic(ctx context.Context, r interface{}) {
stacktrace := make([]byte, size)
stacktrace = stacktrace[:runtime.Stack(stacktrace, false)]
// We don't really know how many call frames to skip because the Go
// panic handler is between us and the code where the panic occurred.
// If it's one function (as in Go 1.21), then skipping four levels
// gets us to the function which called the `defer HandleCrashWithContext(...)`.
logger := klog.FromContext(ctx).WithCallDepth(4)
logger := klog.FromContext(ctx)
// For backwards compatibility, conversion to string
// is handled here instead of defering to the logging
@ -176,12 +197,19 @@ func HandleError(err error) {
// and key/value pairs.
//
// This variant should be used instead of HandleError because it supports
// structured, contextual logging.
// structured, contextual logging. Alternatively, [HandleErrorWithLogger] can
// be used if a logger is available instead of a context.
func HandleErrorWithContext(ctx context.Context, err error, msg string, keysAndValues ...interface{}) {
handleError(ctx, err, msg, keysAndValues...)
}
// handleError is the common implementation of HandleError and HandleErrorWithContext.
// HandleErrorWithLogger is an alternative to [HandleErrorWithContext] which accepts
// a logger for contextual logging.
func HandleErrorWithLogger(logger klog.Logger, err error, msg string, keysAndValues ...interface{}) {
handleError(klog.NewContext(context.Background(), logger), err, msg, keysAndValues...)
}
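
For illustration, a minimal sketch (not part of this change) of the new logger-based variants; the worker function and error are hypothetical:

package main

import (
	"errors"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/klog/v2"
)

func doStep() error { return errors.New("simulated failure") }

func riskyWork(logger klog.Logger) {
	// Recovers from panics in this call chain and logs them via the supplied logger.
	defer utilruntime.HandleCrashWithLogger(logger)

	if err := doStep(); err != nil {
		// Structured error reporting when a logger is handy but a context is not.
		utilruntime.HandleErrorWithLogger(logger, err, "step failed", "step", "attach")
	}
}

func main() {
	riskyWork(klog.Background())
}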
// handleError is the common implementation of the HandleError* variants.
// Using this common implementation ensures that the stack depth
// is the same regardless through which path the handlers get invoked.
func handleError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) {

View File

@ -16,4 +16,4 @@ limitations under the License.
// Package sets has generic set and specified sets. Generic set will
// replace specified ones over time. And specific ones are deprecated.
package sets // import "k8s.io/apimachinery/pkg/util/sets"
package sets

View File

@ -0,0 +1,212 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package field
import (
"fmt"
"reflect"
"regexp"
"strings"
)
// ErrorMatcher is a helper for comparing Error objects.
type ErrorMatcher struct {
// TODO(thockin): consider whether type is ever NOT required, maybe just
// assume it.
matchType bool
// TODO(thockin): consider whether field could be assumed - if the
// "want" error has a nil field, don't match on field.
matchField bool
// TODO(thockin): consider whether value could be assumed - if the
// "want" error has a nil value, don't match on field.
matchValue bool
matchOrigin bool
matchDetail func(want, got string) bool
requireOriginWhenInvalid bool
}
// Matches returns true if the two Error objects match according to the
// configured criteria.
func (m ErrorMatcher) Matches(want, got *Error) bool {
if m.matchType && want.Type != got.Type {
return false
}
if m.matchField && want.Field != got.Field {
return false
}
if m.matchValue && !reflect.DeepEqual(want.BadValue, got.BadValue) {
return false
}
if m.matchOrigin {
if want.Origin != got.Origin {
return false
}
if m.requireOriginWhenInvalid && want.Type == ErrorTypeInvalid {
if want.Origin == "" || got.Origin == "" {
return false
}
}
}
if m.matchDetail != nil && !m.matchDetail(want.Detail, got.Detail) {
return false
}
return true
}
// Render returns a string representation of the specified Error object,
// according to the criteria configured in the ErrorMatcher.
func (m ErrorMatcher) Render(e *Error) string {
buf := strings.Builder{}
comma := func() {
if buf.Len() > 0 {
buf.WriteString(", ")
}
}
if m.matchType {
comma()
buf.WriteString(fmt.Sprintf("Type=%q", e.Type))
}
if m.matchField {
comma()
buf.WriteString(fmt.Sprintf("Field=%q", e.Field))
}
if m.matchValue {
comma()
buf.WriteString(fmt.Sprintf("Value=%v", e.BadValue))
}
if m.matchOrigin || m.requireOriginWhenInvalid && e.Type == ErrorTypeInvalid {
comma()
buf.WriteString(fmt.Sprintf("Origin=%q", e.Origin))
}
if m.matchDetail != nil {
comma()
buf.WriteString(fmt.Sprintf("Detail=%q", e.Detail))
}
return "{" + buf.String() + "}"
}
// Exactly returns a derived ErrorMatcher which matches all fields exactly.
func (m ErrorMatcher) Exactly() ErrorMatcher {
return m.ByType().ByField().ByValue().ByOrigin().ByDetailExact()
}
// ByType returns a derived ErrorMatcher which also matches by type.
func (m ErrorMatcher) ByType() ErrorMatcher {
m.matchType = true
return m
}
// ByField returns a derived ErrorMatcher which also matches by field path.
func (m ErrorMatcher) ByField() ErrorMatcher {
m.matchField = true
return m
}
// ByValue returns a derived ErrorMatcher which also matches by the errant
// value.
func (m ErrorMatcher) ByValue() ErrorMatcher {
m.matchValue = true
return m
}
// ByOrigin returns a derived ErrorMatcher which also matches by the origin.
func (m ErrorMatcher) ByOrigin() ErrorMatcher {
m.matchOrigin = true
return m
}
// RequireOriginWhenInvalid returns a derived ErrorMatcher which also requires
// the Origin field to be set when the Type is Invalid and the matcher is
// matching by Origin.
func (m ErrorMatcher) RequireOriginWhenInvalid() ErrorMatcher {
m.requireOriginWhenInvalid = true
return m
}
// ByDetailExact returns a derived ErrorMatcher which also matches errors by
// the exact detail string.
func (m ErrorMatcher) ByDetailExact() ErrorMatcher {
m.matchDetail = func(want, got string) bool {
return got == want
}
return m
}
// ByDetailSubstring returns a derived ErrorMatcher which also matches errors
// by a substring of the detail string.
func (m ErrorMatcher) ByDetailSubstring() ErrorMatcher {
m.matchDetail = func(want, got string) bool {
return strings.Contains(got, want)
}
return m
}
// ByDetailRegexp returns a derived ErrorMatcher which also matches errors by a
// regular expression of the detail string, where the "want" string is assumed
// to be a valid regular expression.
func (m ErrorMatcher) ByDetailRegexp() ErrorMatcher {
m.matchDetail = func(want, got string) bool {
return regexp.MustCompile(want).MatchString(got)
}
return m
}
// TestIntf lets users pass a testing.T while not coupling this package to Go's
// testing package.
type TestIntf interface {
Helper()
Errorf(format string, args ...any)
Logf(format string, args ...any)
}
// Test compares two ErrorLists by the criteria configured in this matcher, and
// fails the test if they don't match. If a given "want" error matches multiple
// "got" errors, they will all be consumed. This might be OK (e.g. if there are
// multiple errors on the same field from the same origin) or it might be an
// insufficiently specific matcher, so these will be logged.
func (m ErrorMatcher) Test(tb TestIntf, want, got ErrorList) {
tb.Helper()
remaining := got
for _, w := range want {
tmp := make(ErrorList, 0, len(remaining))
n := 0
for _, g := range remaining {
if m.Matches(w, g) {
n++
} else {
tmp = append(tmp, g)
}
}
if n == 0 {
tb.Errorf("expected an error matching:\n%s", m.Render(w))
} else if n > 1 {
// This is not necessarily an error, but it's worth logging in
// case it's not what the test author intended.
tb.Logf("multiple errors matched:\n%s", m.Render(w))
}
remaining = tmp
}
if len(remaining) > 0 {
for _, e := range remaining {
exactly := m.Exactly() // makes a copy
tb.Errorf("unmatched error:\n%s", exactly.Render(e))
}
}
}
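
For illustration, a minimal sketch (not part of this change) of using ErrorMatcher in a unit test to compare expected and actual validation errors by type, field, and value while ignoring detail strings; the field path and values are hypothetical:

package validation_test

import (
	"testing"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

func TestPeriodTooShort(t *testing.T) {
	got := field.ErrorList{
		field.Invalid(field.NewPath("spec", "nodeAllocatableUpdatePeriodSeconds"), 5, "must be greater than or equal to 10"),
	}
	want := field.ErrorList{
		// Detail is left empty because the matcher below does not compare it.
		field.Invalid(field.NewPath("spec", "nodeAllocatableUpdatePeriodSeconds"), 5, ""),
	}

	matcher := field.ErrorMatcher{}.ByType().ByField().ByValue()
	matcher.Test(t, want, got) // fails the test if a wanted error is not matched
}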

View File

@ -33,13 +33,35 @@ type Error struct {
Field string
BadValue interface{}
Detail string
// Origin uniquely identifies where this error was generated from. It is used in testing to
// compare expected errors against actual errors without relying on exact detail string matching.
// This allows tests to verify the correct validation logic triggered the error
// regardless of how the error message might be formatted or localized.
//
// The value should be either:
// - A simple camelCase identifier (e.g., "maximum", "maxItems")
// - A structured format using "format=<dash-style-identifier>" for validation errors related to specific formats
// (e.g., "format=dns-label", "format=qualified-name")
//
// If the Origin corresponds to an existing declarative validation tag or JSON Schema keyword,
// use that same name for consistency.
//
// Origin should be set in the most deeply nested validation function that
// can still identify the unique source of the error.
Origin string
// CoveredByDeclarative is true when this error is covered by declarative
// validation. This field is to identify errors from imperative validation
// that should also be caught by declarative validation.
CoveredByDeclarative bool
}
var _ error = &Error{}
// Error implements the error interface.
func (v *Error) Error() string {
return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody())
func (e *Error) Error() string {
return fmt.Sprintf("%s: %s", e.Field, e.ErrorBody())
}
type OmitValueType struct{}
@ -48,21 +70,21 @@ var omitValue = OmitValueType{}
// ErrorBody returns the error message without the field name. This is useful
// for building nice-looking higher-level error reporting.
func (v *Error) ErrorBody() string {
func (e *Error) ErrorBody() string {
var s string
switch {
case v.Type == ErrorTypeRequired:
s = v.Type.String()
case v.Type == ErrorTypeForbidden:
s = v.Type.String()
case v.Type == ErrorTypeTooLong:
s = v.Type.String()
case v.Type == ErrorTypeInternal:
s = v.Type.String()
case v.BadValue == omitValue:
s = v.Type.String()
case e.Type == ErrorTypeRequired:
s = e.Type.String()
case e.Type == ErrorTypeForbidden:
s = e.Type.String()
case e.Type == ErrorTypeTooLong:
s = e.Type.String()
case e.Type == ErrorTypeInternal:
s = e.Type.String()
case e.BadValue == omitValue:
s = e.Type.String()
default:
value := v.BadValue
value := e.BadValue
valueType := reflect.TypeOf(value)
if value == nil || valueType == nil {
value = "null"
@ -76,26 +98,38 @@ func (v *Error) ErrorBody() string {
switch t := value.(type) {
case int64, int32, float64, float32, bool:
// use simple printer for simple types
s = fmt.Sprintf("%s: %v", v.Type, value)
s = fmt.Sprintf("%s: %v", e.Type, value)
case string:
s = fmt.Sprintf("%s: %q", v.Type, t)
s = fmt.Sprintf("%s: %q", e.Type, t)
case fmt.Stringer:
// anything that defines String() is better than raw struct
s = fmt.Sprintf("%s: %s", v.Type, t.String())
s = fmt.Sprintf("%s: %s", e.Type, t.String())
default:
// fallback to raw struct
// TODO: internal types have panic guards against json.Marshalling to prevent
// accidental use of internal types in external serialized form. For now, use
// %#v, although it would be better to show a more expressive output in the future
s = fmt.Sprintf("%s: %#v", v.Type, value)
s = fmt.Sprintf("%s: %#v", e.Type, value)
}
}
if len(v.Detail) != 0 {
s += fmt.Sprintf(": %s", v.Detail)
if len(e.Detail) != 0 {
s += fmt.Sprintf(": %s", e.Detail)
}
return s
}
// WithOrigin adds origin information to the FieldError
func (e *Error) WithOrigin(o string) *Error {
e.Origin = o
return e
}
// MarkCoveredByDeclarative marks the error as covered by declarative validation.
func (e *Error) MarkCoveredByDeclarative() *Error {
e.CoveredByDeclarative = true
return e
}
// ErrorType is a machine readable value providing more detail about why
// a field is invalid. These values are expected to match 1-1 with
// CauseType in api/types.go.
@ -169,32 +203,32 @@ func (t ErrorType) String() string {
// TypeInvalid returns a *Error indicating "type is invalid"
func TypeInvalid(field *Path, value interface{}, detail string) *Error {
return &Error{ErrorTypeTypeInvalid, field.String(), value, detail}
return &Error{ErrorTypeTypeInvalid, field.String(), value, detail, "", false}
}
// NotFound returns a *Error indicating "value not found". This is
// used to report failure to find a requested value (e.g. looking up an ID).
func NotFound(field *Path, value interface{}) *Error {
return &Error{ErrorTypeNotFound, field.String(), value, ""}
return &Error{ErrorTypeNotFound, field.String(), value, "", "", false}
}
// Required returns a *Error indicating "value required". This is used
// to report required values that are not provided (e.g. empty strings, null
// values, or empty arrays).
func Required(field *Path, detail string) *Error {
return &Error{ErrorTypeRequired, field.String(), "", detail}
return &Error{ErrorTypeRequired, field.String(), "", detail, "", false}
}
// Duplicate returns a *Error indicating "duplicate value". This is
// used to report collisions of values that must be unique (e.g. names or IDs).
func Duplicate(field *Path, value interface{}) *Error {
return &Error{ErrorTypeDuplicate, field.String(), value, ""}
return &Error{ErrorTypeDuplicate, field.String(), value, "", "", false}
}
// Invalid returns a *Error indicating "invalid value". This is used
// to report malformed values (e.g. failed regex match, too long, out of bounds).
func Invalid(field *Path, value interface{}, detail string) *Error {
return &Error{ErrorTypeInvalid, field.String(), value, detail}
return &Error{ErrorTypeInvalid, field.String(), value, detail, "", false}
}
// NotSupported returns a *Error indicating "unsupported value".
@ -209,7 +243,7 @@ func NotSupported[T ~string](field *Path, value interface{}, validValues []T) *E
}
detail = "supported values: " + strings.Join(quotedValues, ", ")
}
return &Error{ErrorTypeNotSupported, field.String(), value, detail}
return &Error{ErrorTypeNotSupported, field.String(), value, detail, "", false}
}
// Forbidden returns a *Error indicating "forbidden". This is used to
@ -217,7 +251,7 @@ func NotSupported[T ~string](field *Path, value interface{}, validValues []T) *E
// some conditions, but which are not permitted by current conditions (e.g.
// security policy).
func Forbidden(field *Path, detail string) *Error {
return &Error{ErrorTypeForbidden, field.String(), "", detail}
return &Error{ErrorTypeForbidden, field.String(), "", detail, "", false}
}
// TooLong returns a *Error indicating "too long". This is used to report that
@ -231,7 +265,7 @@ func TooLong(field *Path, value interface{}, maxLength int) *Error {
} else {
msg = "value is too long"
}
return &Error{ErrorTypeTooLong, field.String(), "<value omitted>", msg}
return &Error{ErrorTypeTooLong, field.String(), "<value omitted>", msg, "", false}
}
// TooLongMaxLength returns a *Error indicating "too long".
@ -259,14 +293,14 @@ func TooMany(field *Path, actualQuantity, maxQuantity int) *Error {
actual = omitValue
}
return &Error{ErrorTypeTooMany, field.String(), actual, msg}
return &Error{ErrorTypeTooMany, field.String(), actual, msg, "", false}
}
// InternalError returns a *Error indicating "internal error". This is used
// to signal that an error was found that was not directly related to user
// input. The err argument must be non-nil.
func InternalError(field *Path, err error) *Error {
return &Error{ErrorTypeInternal, field.String(), nil, err.Error()}
return &Error{ErrorTypeInternal, field.String(), nil, err.Error(), "", false}
}
// ErrorList holds a set of Errors. It is plausible that we might one day have
@ -285,6 +319,22 @@ func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher {
}
}
// WithOrigin sets the origin for all errors in the list and returns the updated list.
func (list ErrorList) WithOrigin(origin string) ErrorList {
for _, err := range list {
err.Origin = origin
}
return list
}
// MarkCoveredByDeclarative marks all errors in the list as covered by declarative validation.
func (list ErrorList) MarkCoveredByDeclarative() ErrorList {
for _, err := range list {
err.CoveredByDeclarative = true
}
return list
}
// ToAggregate converts the ErrorList into an errors.Aggregate.
func (list ErrorList) ToAggregate() utilerrors.Aggregate {
if len(list) == 0 {
@ -321,3 +371,25 @@ func (list ErrorList) Filter(fns ...utilerrors.Matcher) ErrorList {
// FilterOut takes an Aggregate and returns an Aggregate
return fromAggregate(err.(utilerrors.Aggregate))
}
// ExtractCoveredByDeclarative returns a new ErrorList containing only the errors that should be covered by declarative validation.
func (list ErrorList) ExtractCoveredByDeclarative() ErrorList {
newList := ErrorList{}
for _, err := range list {
if err.CoveredByDeclarative {
newList = append(newList, err)
}
}
return newList
}
// RemoveCoveredByDeclarative returns a new ErrorList containing only the errors that should not be covered by declarative validation.
func (list ErrorList) RemoveCoveredByDeclarative() ErrorList {
newList := ErrorList{}
for _, err := range list {
if !err.CoveredByDeclarative {
newList = append(newList, err)
}
}
return newList
}
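// Illustrative sketch (not part of the upstream file): splitting an ErrorList
// between errors already covered by declarative validation and the remainder.
// The field paths and details below are made up for the example.
func exampleSplitByDeclarative() (covered, handwritten ErrorList) {
	errs := ErrorList{
		Invalid(NewPath("spec", "replicas"), -1, "must be greater than or equal to 0"),
		Required(NewPath("spec", "selector"), ""),
	}
	// Suppose only the replicas check has a declarative counterpart.
	errs[0].CoveredByDeclarative = true
	return errs.ExtractCoveredByDeclarative(), errs.RemoveCoveredByDeclarative()
}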


@ -0,0 +1,278 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"net"
"net/netip"
"slices"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/klog/v2"
netutils "k8s.io/utils/net"
)
func parseIP(fldPath *field.Path, value string, strictValidation bool) (net.IP, field.ErrorList) {
var allErrors field.ErrorList
ip := netutils.ParseIPSloppy(value)
if ip == nil {
allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)"))
return nil, allErrors
}
if strictValidation {
addr, err := netip.ParseAddr(value)
if err != nil {
// If netutils.ParseIPSloppy parsed it, but netip.ParseAddr
// doesn't, then it must have illegal leading 0s.
allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have leading 0s"))
}
if addr.Is4In6() {
allErrors = append(allErrors, field.Invalid(fldPath, value, "must not be an IPv4-mapped IPv6 address"))
}
}
return ip, allErrors
}
// IsValidIPForLegacyField tests that the argument is a valid IP address for a "legacy"
// API field that predates strict IP validation. In particular, this allows IPs that are
// not in canonical form (e.g., "FE80:0:0:0:0:0:0:0abc" instead of "fe80::abc").
//
// If strictValidation is false, this also allows IPs in certain invalid or ambiguous
// formats:
//
// 1. IPv4 IPs are allowed to have leading "0"s in octets (e.g. "010.002.003.004").
// Historically, net.ParseIP (and later netutils.ParseIPSloppy) simply ignored leading
// "0"s in IPv4 addresses, but most libc-based software treats 0-prefixed IPv4 octets
// as octal, meaning different software might interpret the same string as two
// different IPs, potentially leading to security issues. (Current net.ParseIP and
// netip.ParseAddr simply reject inputs with leading "0"s.)
//
// 2. IPv4-mapped IPv6 IPs (e.g. "::ffff:1.2.3.4") are allowed. These can also lead to
// different software interpreting the value in different ways, because they may be
// treated as IPv4 by some software and IPv6 by other software. (net.ParseIP and
// netip.ParseAddr both allow these, but there are no use cases for representing IPv4
// addresses as IPv4-mapped IPv6 addresses in Kubernetes.)
//
// Alternatively, when validating an update to an existing field, you can pass a list of
// IP values from the old object that should be accepted if they appear in the new object
// even if they are not valid.
//
// This function should only be used to validate the existing fields that were
// historically validated in this way, and strictValidation should be true unless the
// StrictIPCIDRValidation feature gate is disabled. Use IsValidIP for parsing new fields.
func IsValidIPForLegacyField(fldPath *field.Path, value string, strictValidation bool, validOldIPs []string) field.ErrorList {
if slices.Contains(validOldIPs, value) {
return nil
}
_, allErrors := parseIP(fldPath, value, strictValidation)
return allErrors.WithOrigin("format=ip-sloppy")
}
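// Illustrative sketch (not part of the upstream file): validating a legacy IP
// field on update, where values carried over from the old object are accepted
// even under strict validation. The field path is made up for the example.
func exampleLegacyIPValidation(oldIPs []string) field.ErrorList {
	fldPath := field.NewPath("spec", "clusterIP")
	// "010.0.0.1" has a leading 0 and would normally fail strict validation,
	// but it passes if it already appeared in the old object.
	return IsValidIPForLegacyField(fldPath, "010.0.0.1", true, oldIPs)
}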
// IsValidIP tests that the argument is a valid IP address, according to current
// Kubernetes standards for IP address validation.
func IsValidIP(fldPath *field.Path, value string) field.ErrorList {
ip, allErrors := parseIP(fldPath, value, true)
if len(allErrors) != 0 {
return allErrors.WithOrigin("format=ip-strict")
}
if value != ip.String() {
allErrors = append(allErrors, field.Invalid(fldPath, value, fmt.Sprintf("must be in canonical form (%q)", ip.String())))
}
return allErrors.WithOrigin("format=ip-strict")
}
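// Illustrative sketch (not part of the upstream file): strict validation also
// rejects non-canonical spellings; the returned error suggests the canonical
// form. The field path is made up for the example.
func exampleStrictIPValidation() field.ErrorList {
	fldPath := field.NewPath("spec", "podIP")
	// Fails: the canonical form of this address is "2001:db8::1".
	return IsValidIP(fldPath, "2001:0db8::0001")
}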
// GetWarningsForIP returns warnings for IP address values in non-standard forms. This
// should only be used with fields that are validated with IsValidIPForLegacyField().
func GetWarningsForIP(fldPath *field.Path, value string) []string {
ip := netutils.ParseIPSloppy(value)
if ip == nil {
klog.ErrorS(nil, "GetWarningsForIP called on value that was not validated with IsValidIPForLegacyField", "field", fldPath, "value", value)
return nil
}
addr, _ := netip.ParseAddr(value)
if !addr.IsValid() || addr.Is4In6() {
// This catches 2 cases: leading 0s (if ParseIPSloppy() accepted it but
// ParseAddr() doesn't) or IPv4-mapped IPv6 (.Is4In6()). Either way,
// re-stringifying the net.IP value will give the preferred form.
return []string{
fmt.Sprintf("%s: non-standard IP address %q will be considered invalid in a future Kubernetes release: use %q", fldPath, value, ip.String()),
}
}
// If ParseIPSloppy() and ParseAddr() both accept it then it's fully valid, though
// it may be non-canonical.
if addr.Is6() && addr.String() != value {
return []string{
fmt.Sprintf("%s: IPv6 address %q should be in RFC 5952 canonical format (%q)", fldPath, value, addr.String()),
}
}
return nil
}
func parseCIDR(fldPath *field.Path, value string, strictValidation bool) (*net.IPNet, field.ErrorList) {
var allErrors field.ErrorList
_, ipnet, err := netutils.ParseCIDRSloppy(value)
if err != nil {
allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid CIDR value, (e.g. 10.9.8.0/24 or 2001:db8::/64)"))
return nil, allErrors
}
if strictValidation {
prefix, err := netip.ParsePrefix(value)
if err != nil {
// If netutils.ParseCIDRSloppy parsed it, but netip.ParsePrefix
// doesn't, then it must have illegal leading 0s (either in the
// IP part or the prefix).
allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have leading 0s in IP or prefix length"))
} else if prefix.Addr().Is4In6() {
allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have an IPv4-mapped IPv6 address"))
} else if prefix.Addr() != prefix.Masked().Addr() {
allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have bits set beyond the prefix length"))
}
}
return ipnet, allErrors
}
// IsValidCIDRForLegacyField tests that the argument is a valid CIDR value for a "legacy"
// API field that predates strict IP validation. In particular, this allows IPs that are
// not in canonical form (e.g., "FE80:0abc:0:0:0:0:0:0/64" instead of "fe80:abc::/64").
//
// If strictValidation is false, this also allows CIDR values in certain invalid or
// ambiguous formats:
//
// 1. The IP part of the CIDR value is parsed as with IsValidIPForLegacyField with
// strictValidation=false.
//
// 2. The CIDR value is allowed to be either a "subnet"/"mask" (with the lower bits after
// the prefix length all being 0), or an "interface address" as with `ip addr` (with a
// complete IP address and associated subnet length). With strict validation, the
// value is required to be in "subnet"/"mask" form.
//
// 3. The prefix length is allowed to have leading 0s.
//
// Alternatively, when validating an update to an existing field, you can pass a list of
// CIDR values from the old object that should be accepted if they appear in the new
// object even if they are not valid.
//
// This function should only be used to validate the existing fields that were
// historically validated in this way, and strictValidation should be true unless the
// StrictIPCIDRValidation feature gate is disabled. Use IsValidCIDR or
// IsValidInterfaceAddress for parsing new fields.
func IsValidCIDRForLegacyField(fldPath *field.Path, value string, strictValidation bool, validOldCIDRs []string) field.ErrorList {
if slices.Contains(validOldCIDRs, value) {
return nil
}
_, allErrors := parseCIDR(fldPath, value, strictValidation)
return allErrors
}
// IsValidCIDR tests that the argument is a valid CIDR value, according to current
// Kubernetes standards for CIDR validation. This function is only for
// "subnet"/"mask"-style CIDR values (e.g., "192.168.1.0/24", with no bits set beyond the
// prefix length). Use IsValidInterfaceAddress for "ifaddr"-style CIDR values.
func IsValidCIDR(fldPath *field.Path, value string) field.ErrorList {
ipnet, allErrors := parseCIDR(fldPath, value, true)
if len(allErrors) != 0 {
return allErrors
}
if value != ipnet.String() {
allErrors = append(allErrors, field.Invalid(fldPath, value, fmt.Sprintf("must be in canonical form (%q)", ipnet.String())))
}
return allErrors
}
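// Illustrative sketch (not part of the upstream file): strict CIDR validation
// only accepts "subnet"/"mask"-style values. The field path is made up for
// the example.
func exampleStrictCIDRValidation() field.ErrorList {
	fldPath := field.NewPath("spec", "podCIDR")
	// Fails: "192.168.1.5/24" has bits set beyond the prefix length;
	// "192.168.1.0/24" would be accepted.
	return IsValidCIDR(fldPath, "192.168.1.5/24")
}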
// GetWarningsForCIDR returns warnings for CIDR values in non-standard forms. This should
// only be used with fields that are validated with IsValidCIDRForLegacyField().
func GetWarningsForCIDR(fldPath *field.Path, value string) []string {
ip, ipnet, err := netutils.ParseCIDRSloppy(value)
if err != nil {
klog.ErrorS(err, "GetWarningsForCIDR called on value that was not validated with IsValidCIDRForLegacyField", "field", fldPath, "value", value)
return nil
}
var warnings []string
// Check for bits set after prefix length
if !ip.Equal(ipnet.IP) {
_, addrlen := ipnet.Mask.Size()
singleIPCIDR := fmt.Sprintf("%s/%d", ip.String(), addrlen)
warnings = append(warnings,
fmt.Sprintf("%s: CIDR value %q is ambiguous in this context (should be %q or %q?)", fldPath, value, ipnet.String(), singleIPCIDR),
)
}
prefix, _ := netip.ParsePrefix(value)
addr := prefix.Addr()
if !prefix.IsValid() || addr.Is4In6() {
// This catches 2 cases: leading 0s (if ParseCIDRSloppy() accepted it but
// ParsePrefix() doesn't) or IPv4-mapped IPv6 (.Is4In6()). Either way,
// re-stringifying the net.IPNet value will give the preferred form.
warnings = append(warnings,
fmt.Sprintf("%s: non-standard CIDR value %q will be considered invalid in a future Kubernetes release: use %q", fldPath, value, ipnet.String()),
)
}
// If ParseCIDRSloppy() and ParsePrefix() both accept it then it's fully valid,
// though it may be non-canonical. But only check this if there are no other
// warnings, since either of the other warnings would also cause a round-trip
// failure.
if len(warnings) == 0 && addr.Is6() && prefix.String() != value {
warnings = append(warnings,
fmt.Sprintf("%s: IPv6 CIDR value %q should be in RFC 5952 canonical format (%q)", fldPath, value, prefix.String()),
)
}
return warnings
}
// IsValidInterfaceAddress tests that the argument is a valid "ifaddr"-style CIDR value in
// canonical form (e.g., "192.168.1.5/24", with a complete IP address and associated
// subnet length). Use IsValidCIDR for "subnet"/"mask"-style CIDR values (e.g.,
// "192.168.1.0/24").
func IsValidInterfaceAddress(fldPath *field.Path, value string) field.ErrorList {
var allErrors field.ErrorList
ip, ipnet, err := netutils.ParseCIDRSloppy(value)
if err != nil {
allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid address in CIDR form, (e.g. 10.9.8.7/24 or 2001:db8::1/64)"))
return allErrors
}
// The canonical form of `value` is not `ipnet.String()`, because `ipnet` doesn't
// include the bits after the prefix. We need to construct the canonical form
// ourselves from `ip` and `ipnet.Mask`.
maskSize, _ := ipnet.Mask.Size()
if netutils.IsIPv4(ip) && maskSize > net.IPv4len*8 {
// "::ffff:192.168.0.1/120" -> "192.168.0.1/24"
maskSize -= (net.IPv6len - net.IPv4len) * 8
}
canonical := fmt.Sprintf("%s/%d", ip.String(), maskSize)
if value != canonical {
allErrors = append(allErrors, field.Invalid(fldPath, value, fmt.Sprintf("must be in canonical form (%q)", canonical)))
}
return allErrors
}
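// Illustrative sketch (not part of the upstream file): "ifaddr"-style values
// keep the host bits, so a value rejected by IsValidCIDR can be valid here.
// The field path is made up for the example.
func exampleInterfaceAddressValidation() field.ErrorList {
	fldPath := field.NewPath("status", "hostCIDR")
	// Accepted: a complete IP address plus its subnet length, in canonical form.
	return IsValidInterfaceAddress(fldPath, "192.168.1.5/24")
}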


@ -24,7 +24,6 @@ import (
"unicode"
"k8s.io/apimachinery/pkg/util/validation/field"
netutils "k8s.io/utils/net"
)
const qnameCharFmt string = "[A-Za-z0-9]"
@ -369,45 +368,6 @@ func IsValidPortName(port string) []string {
return errs
}
// IsValidIP tests that the argument is a valid IP address.
func IsValidIP(fldPath *field.Path, value string) field.ErrorList {
var allErrors field.ErrorList
if netutils.ParseIPSloppy(value) == nil {
allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)"))
}
return allErrors
}
// IsValidIPv4Address tests that the argument is a valid IPv4 address.
func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList {
var allErrors field.ErrorList
ip := netutils.ParseIPSloppy(value)
if ip == nil || ip.To4() == nil {
allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv4 address"))
}
return allErrors
}
// IsValidIPv6Address tests that the argument is a valid IPv6 address.
func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList {
var allErrors field.ErrorList
ip := netutils.ParseIPSloppy(value)
if ip == nil || ip.To4() != nil {
allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv6 address"))
}
return allErrors
}
// IsValidCIDR tests that the argument is a valid CIDR value.
func IsValidCIDR(fldPath *field.Path, value string) field.ErrorList {
var allErrors field.ErrorList
_, _, err := netutils.ParseCIDRSloppy(value)
if err != nil {
allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid CIDR value, (e.g. 10.9.8.0/24 or 2001:db8::/64)"))
}
return allErrors
}
const percentFmt string = "[0-9]+%"
const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'"


@ -16,4 +16,4 @@ limitations under the License.
// Package watch contains a generic watchable interface, and a fake for
// testing code that uses the watch interface.
package watch // import "k8s.io/apimachinery/pkg/watch"
package watch


@ -51,6 +51,7 @@ type Reporter interface {
// StreamWatcher turns any stream for which you can write a Decoder interface
// into a watch.Interface.
type StreamWatcher struct {
logger klog.Logger
sync.Mutex
source Decoder
reporter Reporter
@ -59,8 +60,16 @@ type StreamWatcher struct {
}
// NewStreamWatcher creates a StreamWatcher from the given decoder.
//
// Contextual logging: NewStreamWatcherWithLogger should be used instead of NewStreamWatcher in code which supports contextual logging.
func NewStreamWatcher(d Decoder, r Reporter) *StreamWatcher {
return NewStreamWatcherWithLogger(klog.Background(), d, r)
}
// NewStreamWatcherWithLogger creates a StreamWatcher from the given decoder and logger.
func NewStreamWatcherWithLogger(logger klog.Logger, d Decoder, r Reporter) *StreamWatcher {
sw := &StreamWatcher{
logger: logger,
source: d,
reporter: r,
// It's easy for a consumer to add buffering via an extra
@ -98,7 +107,7 @@ func (sw *StreamWatcher) Stop() {
// receive reads result from the decoder in a loop and sends down the result channel.
func (sw *StreamWatcher) receive() {
defer utilruntime.HandleCrash()
defer utilruntime.HandleCrashWithLogger(sw.logger)
defer close(sw.result)
defer sw.Stop()
for {
@ -108,10 +117,10 @@ func (sw *StreamWatcher) receive() {
case io.EOF:
// watch closed normally
case io.ErrUnexpectedEOF:
klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err)
sw.logger.V(1).Info("Unexpected EOF during watch stream event decoding", "err", err)
default:
if net.IsProbableEOF(err) || net.IsTimeout(err) {
klog.V(5).Infof("Unable to decode an event from the watch stream: %v", err)
sw.logger.V(5).Info("Unable to decode an event from the watch stream", "err", err)
} else {
select {
case <-sw.done:


@ -23,6 +23,7 @@ import (
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/utils/ptr"
)
// Interface can be implemented by anything that knows how to watch and report changes.
@ -103,21 +104,34 @@ func (w emptyWatch) ResultChan() <-chan Event {
// FakeWatcher lets you test anything that consumes a watch.Interface; threadsafe.
type FakeWatcher struct {
logger klog.Logger
result chan Event
stopped bool
sync.Mutex
}
var _ Interface = &FakeWatcher{}
// Contextual logging: NewFakeWithOptions and a logger in the FakeOptions should be used instead in code which supports contextual logging.
func NewFake() *FakeWatcher {
return NewFakeWithOptions(FakeOptions{})
}
// Contextual logging: NewFakeWithOptions and a logger in the FakeOptions should be used instead in code which supports contextual logging.
func NewFakeWithChanSize(size int, blocking bool) *FakeWatcher {
return NewFakeWithOptions(FakeOptions{ChannelSize: size})
}
func NewFakeWithOptions(options FakeOptions) *FakeWatcher {
return &FakeWatcher{
result: make(chan Event),
logger: ptr.Deref(options.Logger, klog.Background()),
result: make(chan Event, options.ChannelSize),
}
}
func NewFakeWithChanSize(size int, blocking bool) *FakeWatcher {
return &FakeWatcher{
result: make(chan Event, size),
}
type FakeOptions struct {
Logger *klog.Logger
ChannelSize int
}
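// Illustrative sketch (not part of the upstream file): constructing a fake
// watcher with a contextual logger and a buffered result channel for tests.
func exampleNewFakeWithOptions(logger klog.Logger) *FakeWatcher {
	return NewFakeWithOptions(FakeOptions{
		Logger:      &logger,
		ChannelSize: 10,
	})
}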
// Stop implements Interface.Stop().
@ -125,7 +139,7 @@ func (f *FakeWatcher) Stop() {
f.Lock()
defer f.Unlock()
if !f.stopped {
klog.V(4).Infof("Stopping fake watcher.")
f.logger.V(4).Info("Stopping fake watcher")
close(f.result)
f.stopped = true
}
@ -176,13 +190,22 @@ func (f *FakeWatcher) Action(action EventType, obj runtime.Object) {
// RaceFreeFakeWatcher lets you test anything that consumes a watch.Interface; threadsafe.
type RaceFreeFakeWatcher struct {
logger klog.Logger
result chan Event
Stopped bool
sync.Mutex
}
var _ Interface = &RaceFreeFakeWatcher{}
// Contextual logging: NewRaceFreeFakeWithLogger should be used instead of NewRaceFreeFake in code which supports contextual logging.
func NewRaceFreeFake() *RaceFreeFakeWatcher {
return NewRaceFreeFakeWithLogger(klog.Background())
}
func NewRaceFreeFakeWithLogger(logger klog.Logger) *RaceFreeFakeWatcher {
return &RaceFreeFakeWatcher{
logger: logger,
result: make(chan Event, DefaultChanSize),
}
}
@ -192,7 +215,7 @@ func (f *RaceFreeFakeWatcher) Stop() {
f.Lock()
defer f.Unlock()
if !f.Stopped {
klog.V(4).Infof("Stopping fake watcher.")
f.logger.V(4).Info("Stopping fake watcher")
close(f.result)
f.Stopped = true
}


@ -14,10 +14,6 @@ github.com/go-logr/logr
## explicit; go 1.15
github.com/gogo/protobuf/proto
github.com/gogo/protobuf/sortkeys
# github.com/google/gofuzz v1.2.0
## explicit; go 1.12
github.com/google/gofuzz
github.com/google/gofuzz/bytesource
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
@ -65,13 +61,14 @@ gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.1
## explicit
gopkg.in/yaml.v3
# k8s.io/api v0.32.3
## explicit; go 1.23.0
# k8s.io/api v0.33.0
## explicit; go 1.24.0
k8s.io/api/core/v1
k8s.io/api/rbac/v1
k8s.io/api/storage/v1
# k8s.io/apimachinery v0.32.3
## explicit; go 1.23.0
# k8s.io/apimachinery v0.33.0
## explicit; go 1.24.0
k8s.io/apimachinery/pkg/api/operation
k8s.io/apimachinery/pkg/api/resource
k8s.io/apimachinery/pkg/apis/meta/v1
k8s.io/apimachinery/pkg/conversion
@ -113,7 +110,11 @@ k8s.io/utils/ptr
## explicit; go 1.21
sigs.k8s.io/json
sigs.k8s.io/json/internal/golang/encoding/json
# sigs.k8s.io/structured-merge-diff/v4 v4.4.2
# sigs.k8s.io/randfill v1.0.0
## explicit; go 1.18
sigs.k8s.io/randfill
sigs.k8s.io/randfill/bytesource
# sigs.k8s.io/structured-merge-diff/v4 v4.6.0
## explicit; go 1.13
sigs.k8s.io/structured-merge-diff/v4/value
# sigs.k8s.io/yaml v1.4.0

api/vendor/sigs.k8s.io/randfill/CONTRIBUTING.md generated vendored Normal file

@ -0,0 +1,43 @@
# Contributing Guidelines
Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://git.k8s.io/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
## Getting Started
We have full documentation on how to get started contributing here:
<!---
If your repo has certain guidelines for contribution, put them here ahead of the general k8s resources
-->
- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
- [Kubernetes Contributor Guide](https://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](https://git.k8s.io/community/contributors/guide#contributing)
- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet) - Common resources for existing developers
## Mentorship
- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
<!---
Custom Information - if you're copying this template for the first time you can add custom content here, for example:
## Contact Information
- [Slack channel](https://kubernetes.slack.com/messages/kubernetes-users) - Replace `kubernetes-users` with your slack channel string, this will send users directly to your channel.
- [Mailing list](URL)
-->
## Project Management
The [maintainers](https://github.com/kubernetes-sigs/randfill/blob/main/OWNERS_ALIASES#L12) of this project (and often others who have official positions on the [contributor ladder](https://github.com/kubernetes-sigs/randfill/blob/main/OWNERS_ALIASES)) are responsible for performing project management which oversees development and maintenance of the API, tests, tools, etc. While we try to be generally flexible when it comes to the management of individual pieces (such as Issues or PRs), we have some rules and guidelines which help us plan, coordinate and reduce waste. In this section you'll find some rules/guidelines for contributors related to project management which may extend or go beyond what you would find in the standard [Kubernetes Contributor Guide](https://git.k8s.io/community/contributors/guide).
### Bumping stale and closed Issues & PRs
Maintainers are ultimately responsible for triaging new issues and PRs, accepting or declining them, deciding priority and fitting them into milestones intended for future releases. Bots are responsible for marking issues and PRs which stagnate as stale, or closing them if progress does not continue for a long period of time. Due to the nature of this community-driven development effort (we do not have dedicated engineering resources, we rely on the community which is effectively "volunteer time") **not all issues can be accepted, prioritized or completed**.
You may find times when an issue you're subscribed to and interested in seems to stagnate, or perhaps gets auto-closed. Prior to bumping or directly re-opening issues yourself, we generally ask that you bring these up for discussion on the agenda for one of our community syncs if possible, or bring them up for discussion in Slack or the mailing list as this gives us a better opportunity to discuss the issue and determine viability and logistics. If feasible we **highly recommend being ready to contribute directly** to any stale or unprioritized effort that you want to see move forward, as **the best way to ensure progress is to engage with the community and personally invest time**.
We (the community) aren't opposed to making exceptions in some cases, but when in doubt please follow the above guidelines before bumping closed or stale issues if you're not ready to personally invest time in them. We are responsible for managing these and without further context or engagement we may set these back to how they were previously organized.


@ -1,4 +1,3 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
@ -179,7 +178,7 @@
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
@ -187,7 +186,8 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Copyright 2014 The gofuzz Authors
Copyright 2025 The Kubernetes Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

api/vendor/sigs.k8s.io/randfill/NOTICE generated vendored Normal file

@ -0,0 +1,24 @@
When donating the randfill project to the CNCF, we could not reach all the
gofuzz contributors to sign the CNCF CLA. As such, according to the CNCF rules
to donate a repository, we must add a NOTICE referencing section 7 of the CLA
with a list of developers who could not be reached.
`7. Should You wish to submit work that is not Your original creation, You may
submit it to the Foundation separately from any Contribution, identifying the
complete details of its source and of any license or other restriction
(including, but not limited to, related patents, trademarks, and license
agreements) of which you are personally aware, and conspicuously marking the
work as "Submitted on behalf of a third-party: [named here]".`
Submitted on behalf of a third-party: @dnephin (Daniel Nephin)
Submitted on behalf of a third-party: @AlekSi (Alexey Palazhchenko)
Submitted on behalf of a third-party: @bbigras (Bruno Bigras)
Submitted on behalf of a third-party: @samirkut (Samir)
Submitted on behalf of a third-party: @posener (Eyal Posener)
Submitted on behalf of a third-party: @Ashikpaul (Ashik Paul)
Submitted on behalf of a third-party: @kwongtailau (Kwongtai)
Submitted on behalf of a third-party: @ericcornelissen (Eric Cornelissen)
Submitted on behalf of a third-party: @eclipseo (Robert-André Mauchin)
Submitted on behalf of a third-party: @yanzhoupan (Andrew Pan)
Submitted on behalf of a third-party: @STRRL (Zhiqiang ZHOU)
Submitted on behalf of a third-party: @disconnect3d (Disconnect3d)

api/vendor/sigs.k8s.io/randfill/OWNERS generated vendored Normal file

@ -0,0 +1,8 @@
# See the OWNERS docs at https://go.k8s.io/owners
# See the OWNERS_ALIASES file at https://github.com/kubernetes-sigs/randfill/blob/main/OWNERS_ALIASES for a list of members for each alias.
approvers:
- sig-testing-leads
- thockin
reviewers: []

api/vendor/sigs.k8s.io/randfill/OWNERS_ALIASES generated vendored Normal file

@ -0,0 +1,14 @@
# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
# This file should be kept in sync with k/org.
aliases:
# Reference: https://github.com/kubernetes/org/blob/main/OWNERS_ALIASES
sig-testing-leads:
- BenTheElder
- alvaroaleman
- aojea
- cjwagner
- jbpratt
- michelle192837
- pohly
- xmcqueen


@ -1,39 +1,46 @@
gofuzz
randfill
======
gofuzz is a library for populating go objects with random values.
randfill is a library for populating go objects with random values.
[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.svg)](https://godoc.org/github.com/google/gofuzz)
[![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz)
This is a fork of github.com/google/gofuzz, which was archived.
NOTE: This repo is supported only for use within Kubernetes. It is not our
intention to support general use. That said, if it works for you, that's
great! If you have a problem, please feel free to file an issue, but be aware
that it may not be a priority for us to fix it unless it is affecting
Kubernetes. PRs are welcome, within reason.
[![GoDoc](https://godoc.org/sigs.k8s.io/randfill?status.svg)](https://godoc.org/sigs.k8s.io/randfill)
This is useful for testing:
* Do your project's objects really serialize/unserialize correctly in all cases?
* Is there an incorrectly formatted object that will cause your project to panic?
Import with ```import "github.com/google/gofuzz"```
Import with ```import "sigs.k8s.io/randfill"```
You can use it on single variables:
```go
f := fuzz.New()
f := randfill.New()
var myInt int
f.Fuzz(&myInt) // myInt gets a random value.
f.Fill(&myInt) // myInt gets a random value.
```
You can use it on maps:
```go
f := fuzz.New().NilChance(0).NumElements(1, 1)
f := randfill.New().NilChance(0).NumElements(1, 1)
var myMap map[ComplexKeyType]string
f.Fuzz(&myMap) // myMap will have exactly one element.
f.Fill(&myMap) // myMap will have exactly one element.
```
Customize the chance of getting a nil pointer:
```go
f := fuzz.New().NilChance(.5)
f := randfill.New().NilChance(.5)
var fancyStruct struct {
A, B, C, D *string
}
f.Fuzz(&fancyStruct) // About half the pointers should be set.
f.Fill(&fancyStruct) // About half the pointers should be set.
```
You can even customize the randomization completely if needed:
@ -49,25 +56,27 @@ type MyInfo struct {
BInfo *string
}
f := fuzz.New().NilChance(0).Funcs(
func(e *MyInfo, c fuzz.Continue) {
f := randfill.New().NilChance(0).Funcs(
func(e *MyInfo, c randfill.Continue) {
switch c.Intn(2) {
case 0:
e.Type = A
c.Fuzz(&e.AInfo)
c.Fill(&e.AInfo)
case 1:
e.Type = B
c.Fuzz(&e.BInfo)
c.Fill(&e.BInfo)
}
},
)
var myObject MyInfo
f.Fuzz(&myObject) // Type will correspond to whether A or B info is set.
f.Fill(&myObject) // Type will correspond to whether A or B info is set.
```
See more examples in ```example_test.go```.
## dvyukov/go-fuzz integration
You can use this library for easier [go-fuzz](https://github.com/dvyukov/go-fuzz)ing.
go-fuzz provides the user a byte-slice, which should be converted to different inputs
for the tested function. This library can help convert the byte slice. Consider for
@ -76,11 +85,11 @@ example a fuzz test for a the function `mypackage.MyFunc` that takes an int argu
// +build gofuzz
package mypackage
import fuzz "github.com/google/gofuzz"
import "sigs.k8s.io/randfill"
func Fuzz(data []byte) int {
var i int
fuzz.NewFromGoFuzz(data).Fuzz(&i)
randfill.NewFromGoFuzz(data).Fill(&i)
MyFunc(i)
return 0
}

api/vendor/sigs.k8s.io/randfill/SECURITY_CONTACTS generated vendored Normal file

@ -0,0 +1,16 @@
# Defined below are the security contacts for this repo.
#
# They are the contact point for the Product Security Committee to reach out
# to for triaging and handling of incoming issues.
#
# The below names agree to abide by the
# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy)
# and will be removed and replaced if they violate that agreement.
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/
thockin
BenTheElder
aojea
pohly

api/vendor/sigs.k8s.io/randfill/code-of-conduct.md generated vendored Normal file

@ -0,0 +1,3 @@
# Kubernetes Community Code of Conduct
Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)

api/vendor/sigs.k8s.io/randfill/randfill.go generated vendored Normal file

@ -0,0 +1,682 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Copyright 2014 The gofuzz Authors.
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package randfill is a library for populating go objects with random values.
package randfill
import (
"fmt"
"math/rand"
"reflect"
"regexp"
"sync"
"time"
"unsafe"
"strings"
"sigs.k8s.io/randfill/bytesource"
)
// funcMap is a map from a type to a function that randfills that type. The
// function is a reflect.Value because the type being filled is different for
// each func.
type funcMap map[reflect.Type]reflect.Value
// Filler knows how to fill any object with random fields.
type Filler struct {
customFuncs funcMap
defaultFuncs funcMap
r *rand.Rand
nilChance float64
minElements int
maxElements int
maxDepth int
allowUnexportedFields bool
skipFieldPatterns []*regexp.Regexp
lock sync.Mutex
}
// New returns a new Filler. Customize your Filler further by calling Funcs,
// RandSource, NilChance, or NumElements in any order.
func New() *Filler {
return NewWithSeed(time.Now().UnixNano())
}
func NewWithSeed(seed int64) *Filler {
f := &Filler{
defaultFuncs: funcMap{
reflect.TypeOf(&time.Time{}): reflect.ValueOf(randfillTime),
},
customFuncs: funcMap{},
r: rand.New(rand.NewSource(seed)),
nilChance: .2,
minElements: 1,
maxElements: 10,
maxDepth: 100,
allowUnexportedFields: false,
}
return f
}
// NewFromGoFuzz is a helper function that enables using randfill (this
// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous
// fuzzing. Essentially, it enables translating the fuzzing bytes from
// go-fuzz to any Go object using this library.
//
// This implementation promises a constant translation from a given slice of
// bytes to the fuzzed objects. This promise will remain over future
// versions of Go and of this library.
//
// Note: the returned Filler should not be shared between multiple goroutines,
// as its deterministic output will no longer be available.
//
// Example: use go-fuzz to test the function `MyFunc(int)` in the package
// `mypackage`. Add the file: "mypackage_fuzz.go" with the content:
//
// // +build gofuzz
// package mypackage
// import "sigs.k8s.io/randfill"
//
// func Fuzz(data []byte) int {
// var i int
// randfill.NewFromGoFuzz(data).Fill(&i)
// MyFunc(i)
// return 0
// }
func NewFromGoFuzz(data []byte) *Filler {
return New().RandSource(bytesource.New(data))
}
// Funcs registers custom fill functions for this Filler.
//
// Each entry in customFuncs must be a function taking two parameters.
// The first parameter must be a pointer or map. It is the variable that
// function will fill with random data. The second parameter must be a
// randfill.Continue, which will provide a source of randomness and a way
// to automatically continue filling smaller pieces of the first parameter.
//
// These functions are called sensibly, e.g., if you wanted custom string
// filling, the function `func(s *string, c randfill.Continue)` would get
// called and passed the address of strings. Maps and pointers will always
// be made/new'd for you, ignoring the NilChance option. For slices, it
// doesn't make much sense to pre-create them--Filler doesn't know how
// long you want your slice--so take a pointer to a slice, and make it
// yourself. (If you don't want your map/pointer type pre-made, take a
// pointer to it, and make it yourself.) See the examples for a range of
// custom functions.
//
// If a function is already registered for a type, and a new function is
// provided, the previous function will be replaced with the new one.
func (f *Filler) Funcs(customFuncs ...interface{}) *Filler {
for i := range customFuncs {
v := reflect.ValueOf(customFuncs[i])
if v.Kind() != reflect.Func {
panic("Filler.Funcs: all arguments must be functions")
}
t := v.Type()
if t.NumIn() != 2 || t.NumOut() != 0 {
panic("Filler.Funcs: all customFuncs must have 2 arguments and 0 returns")
}
argT := t.In(0)
switch argT.Kind() {
case reflect.Ptr, reflect.Map:
default:
panic("Filler.Funcs: customFuncs' first argument must be a pointer or map type")
}
if t.In(1) != reflect.TypeOf(Continue{}) {
panic("Filler.Funcs: customFuncs' second argument must be a randfill.Continue")
}
f.customFuncs[argT] = v
}
return f
}
// RandSource causes this Filler to get values from the given source of
// randomness. Use this if you want deterministic filling.
func (f *Filler) RandSource(s rand.Source) *Filler {
f.r = rand.New(s)
return f
}
// NilChance sets the probability of creating a nil pointer, map, or slice to
// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive.
func (f *Filler) NilChance(p float64) *Filler {
if p < 0 || p > 1 {
panic("Filler.NilChance: p must be between 0 and 1, inclusive")
}
f.nilChance = p
return f
}
// NumElements sets the minimum and maximum number of elements that will be
// added to a non-nil map or slice.
func (f *Filler) NumElements(min, max int) *Filler {
if min < 0 {
panic("Filler.NumElements: min must be >= 0")
}
if min > max {
panic("Filler.NumElements: min must be <= max")
}
f.minElements = min
f.maxElements = max
return f
}
func (f *Filler) genElementCount() int {
if f.minElements == f.maxElements {
return f.minElements
}
return f.minElements + f.r.Intn(f.maxElements-f.minElements+1)
}
func (f *Filler) genShouldFill() bool {
return f.r.Float64() >= f.nilChance
}
// MaxDepth sets the maximum number of recursive fill calls that will be made
// before stopping. This includes struct members, pointers, and map and slice
// elements.
func (f *Filler) MaxDepth(d int) *Filler {
f.maxDepth = d
return f
}
// AllowUnexportedFields defines whether to fill unexported fields.
func (f *Filler) AllowUnexportedFields(flag bool) *Filler {
f.allowUnexportedFields = flag
return f
}
// SkipFieldsWithPattern tells this Filler to skip any field whose name matches
// the supplied pattern. Call this multiple times if needed. This is useful to
// skip XXX_ fields generated by protobuf.
func (f *Filler) SkipFieldsWithPattern(pattern *regexp.Regexp) *Filler {
f.skipFieldPatterns = append(f.skipFieldPatterns, pattern)
return f
}
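// Illustrative sketch (not part of the upstream file): skipping protobuf
// generated XXX_ fields while filling a struct.
func exampleSkipProtobufFields() *Filler {
	return New().SkipFieldsWithPattern(regexp.MustCompile(`^XXX_`))
}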
// SimpleSelfFiller represents an object that knows how to randfill itself.
//
// Unlike NativeSelfFiller, this interface does not cause the type in question
// to depend on the randfill package. This is most useful for simple types. For
// more complex types, consider using NativeSelfFiller.
type SimpleSelfFiller interface {
// RandFill fills the current object with random data.
RandFill(r *rand.Rand)
}
// NativeSelfFiller represents an object that knows how to randfill itself.
//
// Unlike SimpleSelfFiller, this interface allows for recursive filling of
// child objects with the same rules as the parent Filler.
type NativeSelfFiller interface {
// RandFill fills the current object with random data.
RandFill(c Continue)
}
// Fill recursively fills all of obj's fields with something random. First
// this tries to find a custom fill function (see Funcs). If there is no
// custom function, this tests whether the object implements SimpleSelfFiller
// or NativeSelfFiller and if so, calls RandFill on it to fill itself. If that
// fails, this will see if there is a default fill function provided by this
// package. If all of that fails, this will generate random values for all
// primitive fields and then recurse for all non-primitives.
//
// This is safe for cyclic or tree-like structs, up to a limit. Use the
// MaxDepth method to adjust how deep you need it to recurse.
//
// obj must be a pointer. Exported (public) fields can always be set, and if
// the AllowUnexportedFields() modifier was called it can try to set unexported
// (private) fields, too.
//
// This is intended for tests, so will panic on bad input or unimplemented
// types. This method takes a lock for the whole Filler, so it is not
// reentrant. See Continue.
func (f *Filler) Fill(obj interface{}) {
f.lock.Lock()
defer f.lock.Unlock()
v := reflect.ValueOf(obj)
if v.Kind() != reflect.Ptr {
panic("Filler.Fill: obj must be a pointer")
}
v = v.Elem()
f.fillWithContext(v, 0)
}
// FillNoCustom is just like Fill, except that any custom fill function for
// obj's type will not be called and obj will not be tested for
// SimpleSelfFiller or NativeSelfFiller. This applies only to obj and not other
// instances of obj's type or to obj's child fields.
//
// obj must be a pointer. Exported (public) fields can always be set, and if
// the AllowUnexportedFields() modifier was called it can try to set unexported
// (private) fields, too.
//
// This is intended for tests, so will panic on bad input or unimplemented
// types. This method takes a lock for the whole Filler, so it is not
// reentrant. See Continue.
func (f *Filler) FillNoCustom(obj interface{}) {
f.lock.Lock()
defer f.lock.Unlock()
v := reflect.ValueOf(obj)
if v.Kind() != reflect.Ptr {
panic("Filler.FillNoCustom: obj must be a pointer")
}
v = v.Elem()
f.fillWithContext(v, flagNoCustomFill)
}
const (
// Do not try to find a custom fill function. Does not apply recursively.
flagNoCustomFill uint64 = 1 << iota
)
func (f *Filler) fillWithContext(v reflect.Value, flags uint64) {
fc := &fillerContext{filler: f}
fc.doFill(v, flags)
}
// fillerContext carries context about a single filling run, which lets Filler
// be thread-safe.
type fillerContext struct {
filler *Filler
curDepth int
}
func (fc *fillerContext) doFill(v reflect.Value, flags uint64) {
if fc.curDepth >= fc.filler.maxDepth {
return
}
fc.curDepth++
defer func() { fc.curDepth-- }()
if !v.CanSet() {
if !fc.filler.allowUnexportedFields || !v.CanAddr() {
return
}
v = reflect.NewAt(v.Type(), unsafe.Pointer(v.UnsafeAddr())).Elem()
}
if flags&flagNoCustomFill == 0 {
// Check for both pointer and non-pointer custom functions.
if v.CanAddr() && fc.tryCustom(v.Addr()) {
return
}
if fc.tryCustom(v) {
return
}
}
if fn, ok := fillFuncMap[v.Kind()]; ok {
fn(v, fc.filler.r)
return
}
switch v.Kind() {
case reflect.Map:
if fc.filler.genShouldFill() {
v.Set(reflect.MakeMap(v.Type()))
n := fc.filler.genElementCount()
for i := 0; i < n; i++ {
key := reflect.New(v.Type().Key()).Elem()
fc.doFill(key, 0)
val := reflect.New(v.Type().Elem()).Elem()
fc.doFill(val, 0)
v.SetMapIndex(key, val)
}
return
}
v.Set(reflect.Zero(v.Type()))
case reflect.Ptr:
if fc.filler.genShouldFill() {
v.Set(reflect.New(v.Type().Elem()))
fc.doFill(v.Elem(), 0)
return
}
v.Set(reflect.Zero(v.Type()))
case reflect.Slice:
if fc.filler.genShouldFill() {
n := fc.filler.genElementCount()
v.Set(reflect.MakeSlice(v.Type(), n, n))
for i := 0; i < n; i++ {
fc.doFill(v.Index(i), 0)
}
return
}
v.Set(reflect.Zero(v.Type()))
case reflect.Array:
if fc.filler.genShouldFill() {
n := v.Len()
for i := 0; i < n; i++ {
fc.doFill(v.Index(i), 0)
}
return
}
v.Set(reflect.Zero(v.Type()))
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
skipField := false
fieldName := v.Type().Field(i).Name
for _, pattern := range fc.filler.skipFieldPatterns {
if pattern.MatchString(fieldName) {
skipField = true
break
}
}
if !skipField {
fc.doFill(v.Field(i), 0)
}
}
case reflect.Chan:
fallthrough
case reflect.Func:
fallthrough
case reflect.Interface:
fallthrough
default:
panic(fmt.Sprintf("can't fill type %v, kind %v", v.Type(), v.Kind()))
}
}
// tryCustom searches for custom handlers, and returns true iff it finds a match
// and successfully randomizes v.
func (fc *fillerContext) tryCustom(v reflect.Value) bool {
// First: see if we have a fill function for it.
doCustom, ok := fc.filler.customFuncs[v.Type()]
if !ok {
// Second: see if it can fill itself.
if v.CanInterface() {
intf := v.Interface()
if fillable, ok := intf.(SimpleSelfFiller); ok {
fillable.RandFill(fc.filler.r)
return true
}
if fillable, ok := intf.(NativeSelfFiller); ok {
fillable.RandFill(Continue{fc: fc, Rand: fc.filler.r})
return true
}
}
// Finally: see if there is a default fill function.
doCustom, ok = fc.filler.defaultFuncs[v.Type()]
if !ok {
return false
}
}
switch v.Kind() {
case reflect.Ptr:
if v.IsNil() {
if !v.CanSet() {
return false
}
v.Set(reflect.New(v.Type().Elem()))
}
case reflect.Map:
if v.IsNil() {
if !v.CanSet() {
return false
}
v.Set(reflect.MakeMap(v.Type()))
}
default:
return false
}
doCustom.Call([]reflect.Value{
v,
reflect.ValueOf(Continue{
fc: fc,
Rand: fc.filler.r,
}),
})
return true
}
// Continue can be passed to custom fill functions to allow them to use
// the correct source of randomness and to continue filling their members.
type Continue struct {
fc *fillerContext
// For convenience, Continue implements rand.Rand via embedding.
// Use this for generating any randomness if you want your filling
// to be repeatable for a given seed.
*rand.Rand
}
// Fill continues filling obj. obj must be a pointer or a reflect.Value of a
// pointer. See Filler.Fill.
func (c Continue) Fill(obj interface{}) {
v, ok := obj.(reflect.Value)
if !ok {
v = reflect.ValueOf(obj)
}
if v.Kind() != reflect.Ptr {
panic("Continue.Fill: obj must be a pointer")
}
v = v.Elem()
c.fc.doFill(v, 0)
}
// FillNoCustom continues filling obj, except that any custom fill function for
// obj's type will not be called and obj will not be tested for
// SimpleSelfFiller or NativeSelfFiller. See Filler.FillNoCustom.
func (c Continue) FillNoCustom(obj interface{}) {
v, ok := obj.(reflect.Value)
if !ok {
v = reflect.ValueOf(obj)
}
if v.Kind() != reflect.Ptr {
panic("Continue.FillNoCustom: obj must be a pointer")
}
v = v.Elem()
c.fc.doFill(v, flagNoCustomFill)
}
const defaultStringMaxLen = 20
// String makes a random string up to n characters long. If n is 0, the default
// size range is [0-20). The returned string may include a variety of (valid)
// UTF-8 encodings.
func (c Continue) String(n int) string {
return randString(c.Rand, n)
}
// Uint64 makes random 64 bit numbers.
// Weirdly, rand doesn't have a function that gives you 64 random bits.
func (c Continue) Uint64() uint64 {
return randUint64(c.Rand)
}
// Bool returns true or false randomly.
func (c Continue) Bool() bool {
return randBool(c.Rand)
}
func fillInt(v reflect.Value, r *rand.Rand) {
v.SetInt(int64(randUint64(r)))
}
func fillUint(v reflect.Value, r *rand.Rand) {
v.SetUint(randUint64(r))
}
func randfillTime(t *time.Time, c Continue) {
var sec, nsec int64
// Allow for about 1000 years of random time values, which keeps things
// like JSON parsing reasonably happy.
sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60)
// Nanosecond values greater than 1Bn are technically allowed but result in
// time.Time values with invalid timezone offsets.
nsec = c.Rand.Int63n(999999999)
*t = time.Unix(sec, nsec)
}
var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
reflect.Bool: func(v reflect.Value, r *rand.Rand) {
v.SetBool(randBool(r))
},
reflect.Int: fillInt,
reflect.Int8: fillInt,
reflect.Int16: fillInt,
reflect.Int32: fillInt,
reflect.Int64: fillInt,
reflect.Uint: fillUint,
reflect.Uint8: fillUint,
reflect.Uint16: fillUint,
reflect.Uint32: fillUint,
reflect.Uint64: fillUint,
reflect.Uintptr: fillUint,
reflect.Float32: func(v reflect.Value, r *rand.Rand) {
v.SetFloat(float64(r.Float32()))
},
reflect.Float64: func(v reflect.Value, r *rand.Rand) {
v.SetFloat(r.Float64())
},
reflect.Complex64: func(v reflect.Value, r *rand.Rand) {
v.SetComplex(complex128(complex(r.Float32(), r.Float32())))
},
reflect.Complex128: func(v reflect.Value, r *rand.Rand) {
v.SetComplex(complex(r.Float64(), r.Float64()))
},
reflect.String: func(v reflect.Value, r *rand.Rand) {
v.SetString(randString(r, 0))
},
reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) {
panic("filling of UnsafePointers is not implemented")
},
}
// randBool returns true or false randomly.
func randBool(r *rand.Rand) bool {
return r.Int31()&(1<<30) == 0
}
type int63nPicker interface {
Int63n(int64) int64
}
// UnicodeRange describes a sequential range of unicode characters.
// Last must be numerically greater than First.
type UnicodeRange struct {
First, Last rune
}
// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters.
// To be useful, each range must have at least one character (First <= Last) and
// there must be at least one range.
type UnicodeRanges []UnicodeRange
// choose returns a random unicode character from the given range, using the
// given randomness source.
func (ur UnicodeRange) choose(r int63nPicker) rune {
count := int64(ur.Last - ur.First + 1)
return ur.First + rune(r.Int63n(count))
}
// CustomStringFillFunc constructs a FillFunc which produces random strings.
// Each character is selected from the range ur. If there are no characters
// in the range (ur.Last < ur.First), this will panic.
func (ur UnicodeRange) CustomStringFillFunc(n int) func(s *string, c Continue) {
ur.check()
return func(s *string, c Continue) {
*s = ur.randString(c.Rand, n)
}
}
// check panics if the range is invalid, i.e. if ur.Last is numerically less
// than ur.First.
func (ur UnicodeRange) check() {
if ur.Last < ur.First {
panic("UnicodeRange.check: the last encoding must be greater than the first")
}
}
// randString of UnicodeRange makes a random string up to 20 characters long.
// Each character is selected from ur (UnicodeRange).
func (ur UnicodeRange) randString(r *rand.Rand, max int) string {
if max == 0 {
max = defaultStringMaxLen
}
n := r.Intn(max)
sb := strings.Builder{}
sb.Grow(n)
for i := 0; i < n; i++ {
sb.WriteRune(ur.choose(r))
}
return sb.String()
}
// defaultUnicodeRanges sets a default unicode range when users do not set
// CustomStringFillFunc() but want to fill strings.
var defaultUnicodeRanges = UnicodeRanges{
{' ', '~'}, // ASCII characters
{'\u00a0', '\u02af'}, // Multi-byte encoded characters
{'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
}
// CustomStringFillFunc constructs a FillFunc which produces random strings.
// Each character is selected from one of the ranges of ur(UnicodeRanges).
// Each range has an equal probability of being chosen. If there are no ranges,
// or a selected range has no characters (.Last < .First), this will panic.
// Do not modify any of the ranges in ur after calling this function.
func (ur UnicodeRanges) CustomStringFillFunc(n int) func(s *string, c Continue) {
// Check unicode ranges slice is empty.
if len(ur) == 0 {
panic("UnicodeRanges is empty")
}
// if not empty, each range should be checked.
for i := range ur {
ur[i].check()
}
return func(s *string, c Continue) {
*s = ur.randString(c.Rand, n)
}
}
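// Illustrative sketch (not part of the upstream file): restricting filled
// strings to lowercase ASCII letters and digits via a custom string func.
func exampleLowercaseASCIIStrings() *Filler {
	ranges := UnicodeRanges{{'a', 'z'}, {'0', '9'}}
	return New().Funcs(ranges.CustomStringFillFunc(12))
}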
// randString of UnicodeRanges makes a random string up to 20 characters long.
// Each character is selected from one of the ranges of ur (UnicodeRanges),
// and each range has an equal probability of being chosen.
func (ur UnicodeRanges) randString(r *rand.Rand, max int) string {
if max == 0 {
max = defaultStringMaxLen
}
n := r.Intn(max)
sb := strings.Builder{}
sb.Grow(n)
for i := 0; i < n; i++ {
sb.WriteRune(ur[r.Intn(len(ur))].choose(r))
}
return sb.String()
}
// randString makes a random string up to 20 characters long. The returned string
// may include a variety of (valid) UTF-8 encodings.
func randString(r *rand.Rand, max int) string {
return defaultUnicodeRanges.randString(r, max)
}
// randUint64 makes random 64 bit numbers.
// Weirdly, rand doesn't have a function that gives you 64 random bits.
func randUint64(r *rand.Rand) uint64 {
return uint64(r.Uint32())<<32 | uint64(r.Uint32())
}


@ -43,7 +43,7 @@ func IntCompare(lhs, rhs int64) int {
func BoolCompare(lhs, rhs bool) int {
if lhs == rhs {
return 0
} else if lhs == false {
} else if !lhs {
return -1
}
return 1