rebase: bump the golang-dependencies group with 1 update

Bumps the golang-dependencies group with 1 update: [golang.org/x/crypto](https://github.com/golang/crypto).


Updates `golang.org/x/crypto` from 0.16.0 to 0.17.0
- [Commits](https://github.com/golang/crypto/compare/v0.16.0...v0.17.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Authored by dependabot[bot] on 2023-12-18 20:31:00 +00:00, committed by mergify[bot]
parent 1ad79314f9
commit e5d9b68d36
398 changed files with 33924 additions and 10753 deletions


@ -0,0 +1,100 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sysctl
import (
"os"
"path"
"strconv"
"strings"
)
const (
sysctlBase = "/proc/sys"
// VMOvercommitMemory refers to the sysctl variable responsible for defining
// the memory over-commit policy used by kernel.
VMOvercommitMemory = "vm/overcommit_memory"
// VMPanicOnOOM refers to the sysctl variable responsible for defining
// the OOM behavior used by kernel.
VMPanicOnOOM = "vm/panic_on_oom"
// KernelPanic refers to the sysctl variable responsible for defining
// the timeout after a panic for the kernel to reboot.
KernelPanic = "kernel/panic"
// KernelPanicOnOops refers to the sysctl variable responsible for defining
// the kernel behavior when an oops or BUG is encountered.
KernelPanicOnOops = "kernel/panic_on_oops"
// RootMaxKeys refers to the sysctl variable responsible for defining
// the maximum number of keys that the root user (UID 0 in the root user namespace) may own.
RootMaxKeys = "kernel/keys/root_maxkeys"
// RootMaxBytes refers to the sysctl variable responsible for defining
// the maximum number of bytes of data that the root user (UID 0 in the root user namespace)
// can hold in the payloads of the keys owned by root.
RootMaxBytes = "kernel/keys/root_maxbytes"
// VMOvercommitMemoryAlways represents that kernel performs no memory over-commit handling.
VMOvercommitMemoryAlways = 1
// VMPanicOnOOMInvokeOOMKiller represents that kernel calls the oom_killer function when OOM occurs.
VMPanicOnOOMInvokeOOMKiller = 0
// KernelPanicOnOopsAlways represents that kernel panics on kernel oops.
KernelPanicOnOopsAlways = 1
// KernelPanicRebootTimeout is the timeout seconds after a panic for the kernel to reboot.
KernelPanicRebootTimeout = 10
// RootMaxKeysSetting is the maximum number of keys that the root user (UID 0 in the root user namespace) may own.
// Needed since docker creates a new key per container.
RootMaxKeysSetting = 1000000
// RootMaxBytesSetting is the maximum number of bytes of data that the root user (UID 0 in the root user namespace)
// can hold in the payloads of the keys owned by root.
// Allocate 25 bytes per key * number of MaxKeys.
RootMaxBytesSetting = RootMaxKeysSetting * 25
)
// Interface is an injectable interface for running sysctl commands.
type Interface interface {
// GetSysctl returns the value for the specified sysctl setting
GetSysctl(sysctl string) (int, error)
// SetSysctl modifies the specified sysctl flag to the new value
SetSysctl(sysctl string, newVal int) error
}
// New returns a new Interface for accessing sysctl
func New() Interface {
return &procSysctl{}
}
// procSysctl implements Interface by reading and writing files under /proc/sys
type procSysctl struct {
}
// GetSysctl returns the value for the specified sysctl setting
func (*procSysctl) GetSysctl(sysctl string) (int, error) {
data, err := os.ReadFile(path.Join(sysctlBase, sysctl))
if err != nil {
return -1, err
}
val, err := strconv.Atoi(strings.Trim(string(data), " \n"))
if err != nil {
return -1, err
}
return val, nil
}
// SetSysctl modifies the specified sysctl flag to the new value
func (*procSysctl) SetSysctl(sysctl string, newVal int) error {
return os.WriteFile(path.Join(sysctlBase, sysctl), []byte(strconv.Itoa(newVal)), 0640)
}
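
For orientation (not part of this commit): a minimal sketch of how a caller might use the interface above. The import path is an assumption; only identifiers defined in the vendored file are relied on.

```go
package main

import (
	"fmt"

	// Assumed import path for the vendored package above.
	"k8s.io/component-helpers/node/util/sysctl"
)

func main() {
	s := sysctl.New()

	// Read the current over-commit policy from /proc/sys/vm/overcommit_memory.
	val, err := s.GetSysctl(sysctl.VMOvercommitMemory)
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Println("vm/overcommit_memory =", val)

	// Writing requires permission to modify /proc/sys (typically root).
	if err := s.SetSysctl(sysctl.VMOvercommitMemory, sysctl.VMOvercommitMemoryAlways); err != nil {
		fmt.Println("write failed:", err)
	}
}
```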


@ -156,7 +156,9 @@ func (o *openAPI) buildRequestBody(parameters []common.Parameter, consumes []str
}
r := &spec3.RequestBody{
RequestBodyProps: spec3.RequestBodyProps{
Content: map[string]*spec3.MediaType{},
Content: map[string]*spec3.MediaType{},
Description: param.Description(),
Required: param.Required(),
},
}
for _, consume := range consumes {
@ -172,9 +174,9 @@ func (o *openAPI) buildRequestBody(parameters []common.Parameter, consumes []str
return nil, nil
}
func newOpenAPI(config *common.Config) openAPI {
func newOpenAPI(config *common.OpenAPIV3Config) openAPI {
o := openAPI{
config: common.ConvertConfigToV3(config),
config: config,
spec: &spec3.OpenAPI{
Version: "3.0.0",
Info: config.Info,
@ -313,12 +315,12 @@ func (o *openAPI) buildOpenAPISpec(webServices []common.RouteContainer) error {
// BuildOpenAPISpec builds OpenAPI v3 spec given a list of route containers and common.Config to customize it.
//
// Deprecated: BuildOpenAPISpecFromRoutes should be used instead.
func BuildOpenAPISpec(webServices []*restful.WebService, config *common.Config) (*spec3.OpenAPI, error) {
func BuildOpenAPISpec(webServices []*restful.WebService, config *common.OpenAPIV3Config) (*spec3.OpenAPI, error) {
return BuildOpenAPISpecFromRoutes(restfuladapter.AdaptWebServices(webServices), config)
}
// BuildOpenAPISpecFromRoutes builds OpenAPI v3 spec given a list of route containers and common.Config to customize it.
func BuildOpenAPISpecFromRoutes(webServices []common.RouteContainer, config *common.Config) (*spec3.OpenAPI, error) {
func BuildOpenAPISpecFromRoutes(webServices []common.RouteContainer, config *common.OpenAPIV3Config) (*spec3.OpenAPI, error) {
a := newOpenAPI(config)
err := a.buildOpenAPISpec(webServices)
if err != nil {
@ -330,7 +332,7 @@ func BuildOpenAPISpecFromRoutes(webServices []common.RouteContainer, config *com
// BuildOpenAPIDefinitionsForResource builds a partial OpenAPI spec given a sample object and common.Config to customize it.
// BuildOpenAPIDefinitionsForResources returns the OpenAPI spec which includes the definitions for the
// passed type names.
func BuildOpenAPIDefinitionsForResources(config *common.Config, names ...string) (map[string]*spec.Schema, error) {
func BuildOpenAPIDefinitionsForResources(config *common.OpenAPIV3Config, names ...string) (map[string]*spec.Schema, error) {
o := newOpenAPI(config)
// We can discard the return value of toSchema because all we care about is the side effect of calling it.
// All the models created for this resource get added to o.swagger.Definitions
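
For orientation (not part of this commit): with this change, callers construct a *common.OpenAPIV3Config directly instead of a *common.Config that used to be converted internally. A hedged sketch of an updated caller; the builder3 package path, the placeholder Info, and the empty definitions provider are assumptions.

```go
package main

import (
	"fmt"

	restful "github.com/emicklei/go-restful/v3"

	"k8s.io/kube-openapi/pkg/builder3" // assumed import path of the builder shown above
	"k8s.io/kube-openapi/pkg/common"
	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	config := &common.OpenAPIV3Config{
		Info: &spec.Info{InfoProps: spec.InfoProps{Title: "example", Version: "v1"}},
		// Placeholder: a real caller returns its generated OpenAPI definitions here.
		GetDefinitions: func(common.ReferenceCallback) map[string]common.OpenAPIDefinition {
			return map[string]common.OpenAPIDefinition{}
		},
	}

	ws := new(restful.WebService).Path("/apis/example.dev/v1")

	// Deprecated wrapper kept by this diff; BuildOpenAPISpecFromRoutes is the replacement.
	openapi, err := builder3.BuildOpenAPISpec([]*restful.WebService{ws}, config)
	fmt.Println(openapi != nil, err)
}
```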


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cache provides a cache mechanism based on etags to lazily
// Package cached provides a cache mechanism based on etags to lazily
// build, and/or cache results from expensive operation such that those
// operations are not repeated unnecessarily. The operations can be
// created as a tree, and replaced dynamically as needed.
@ -25,16 +25,18 @@ limitations under the License.
//
// This package uses a source/transform/sink model of caches to build
// the dependency tree, and can be used as follows:
// - [NewSource]: A source cache that recomputes the content every time.
// - [NewStaticSource]: A source cache that always produces the
// - [Func]: A source cache that recomputes the content every time.
// - [Once]: A source cache that always produces the
// same content, it is only called once.
// - [NewTransformer]: A cache that transforms data from one format to
// - [Transform]: A cache that transforms data from one format to
// another. It's only refreshed when the source changes.
// - [NewMerger]: A cache that aggregates multiple caches into one.
// - [Merge]: A cache that aggregates multiple caches in a map into one.
// It's only refreshed when the source changes.
// - [Replaceable]: A cache adapter that can be atomically
// replaced with a new one, and saves the previous results in case an
// error pops-up.
// - [MergeList]: A cache that aggregates multiple caches in a list into one.
// It's only refreshed when the source changes.
// - [Atomic]: A cache adapter that atomically replaces the source with a new one.
// - [LastSuccess]: A cache adapter that caches the last successful result and
// returns it if the next call fails. It extends [Atomic].
//
// # Etags
//
@ -54,61 +56,45 @@ import (
"sync/atomic"
)
// Result is the content returned from a call to a cache. It can either
// be created with [NewResultOK] if the call was a success, or
// [NewResultErr] if the call resulted in an error.
// Value is wrapping a value behind a getter for lazy evaluation.
type Value[T any] interface {
Get() (value T, etag string, err error)
}
// Result is wrapping T and error into a struct for cases where a tuple is more
// convenient or necessary in Golang.
type Result[T any] struct {
Data T
Etag string
Err error
Value T
Etag string
Err error
}
// NewResultOK creates a new [Result] for a successful operation.
func NewResultOK[T any](data T, etag string) Result[T] {
return Result[T]{
Data: data,
Etag: etag,
}
func (r Result[T]) Get() (T, string, error) {
return r.Value, r.Etag, r.Err
}
// NewResultErr creates a new [Result] when an error has happened.
func NewResultErr[T any](err error) Result[T] {
return Result[T]{
Err: err,
}
// Func wraps a (thread-safe) function as a Value[T].
func Func[T any](fn func() (T, string, error)) Value[T] {
return valueFunc[T](fn)
}
// Result can be treated as a [Data] if necessary.
func (r Result[T]) Get() Result[T] {
return r
type valueFunc[T any] func() (T, string, error)
func (c valueFunc[T]) Get() (T, string, error) {
return c()
}
// Data is a cache that performs an action whose result data will be
// cached. It also returns an "etag" identifier to version the cache, so
// that the caller can know if they have the most recent version of the
// cache (and can decide to cache some operation based on that).
//
// The [NewMerger] and [NewTransformer] automatically handle
// that for you by checking if the etag is updated before calling the
// merging or transforming function.
type Data[T any] interface {
// Returns the cached data, as well as an "etag" to identify the
// version of the cache, or an error if something happened.
Get() Result[T]
// Static returns constant values.
func Static[T any](value T, etag string) Value[T] {
return Result[T]{Value: value, Etag: etag}
}
// NewMerger creates a new merge cache, a cache that merges the result
// of other caches. The function only gets called if any of the
// dependency has changed.
// Merge merges a map of cached values. The merge function only gets called if
// any of the dependencies has changed.
//
// If any of the dependency returned an error before, or any of the
// dependency returned an error this time, or if the mergeFn failed
// before, then the function is reran.
//
// The caches and results are mapped by K so that associated data can be
// retrieved. The map of dependencies can not be modified after
// creation, and a new merger should be created (and probably replaced
// using a [Replaceable]).
// before, then the function is run again.
//
// Note that this assumes there is no "partial" merge, the merge
// function will remerge all the dependencies together everytime. Since
@ -118,18 +104,19 @@ type Data[T any] interface {
// Also note that Golang map iteration is not stable. If the mergeFn
// depends on the order iteration to be stable, it will need to
// implement its own sorting or iteration order.
func NewMerger[K comparable, T, V any](mergeFn func(results map[K]Result[T]) Result[V], caches map[K]Data[T]) Data[V] {
listCaches := make([]Data[T], 0, len(caches))
// maps from index to key
func Merge[K comparable, T, V any](mergeFn func(results map[K]Result[T]) (V, string, error), caches map[K]Value[T]) Value[V] {
list := make([]Value[T], 0, len(caches))
// map from index to key
indexes := make(map[int]K, len(caches))
i := 0
for k := range caches {
listCaches = append(listCaches, caches[k])
list = append(list, caches[k])
indexes[i] = k
i++
}
return NewListMerger(func(results []Result[T]) Result[V] {
return MergeList(func(results []Result[T]) (V, string, error) {
if len(results) != len(indexes) {
panic(fmt.Errorf("invalid result length %d, expected %d", len(results), len(indexes)))
}
@ -138,20 +125,11 @@ func NewMerger[K comparable, T, V any](mergeFn func(results map[K]Result[T]) Res
m[indexes[i]] = results[i]
}
return mergeFn(m)
}, listCaches)
}, list)
}
type listMerger[T, V any] struct {
lock sync.Mutex
mergeFn func([]Result[T]) Result[V]
caches []Data[T]
cacheResults []Result[T]
result Result[V]
}
// NewListMerger creates a new merge cache that merges the results of
// other caches in list form. The function only gets called if any of
// the dependency has changed.
// MergeList merges a list of cached values. The function only gets called if
// any of the dependencies has changed.
//
// The benefit of ListMerger over the basic Merger is that caches are
// stored in an ordered list so the order of the cache will be
@ -165,31 +143,37 @@ type listMerger[T, V any] struct {
// function will remerge all the dependencies together everytime. Since
// the list of dependencies is constant, there is no way to save some
// partial merge information either.
func NewListMerger[T, V any](mergeFn func(results []Result[T]) Result[V], caches []Data[T]) Data[V] {
func MergeList[T, V any](mergeFn func(results []Result[T]) (V, string, error), delegates []Value[T]) Value[V] {
return &listMerger[T, V]{
mergeFn: mergeFn,
caches: caches,
mergeFn: mergeFn,
delegates: delegates,
}
}
type listMerger[T, V any] struct {
lock sync.Mutex
mergeFn func([]Result[T]) (V, string, error)
delegates []Value[T]
cache []Result[T]
result Result[V]
}
func (c *listMerger[T, V]) prepareResultsLocked() []Result[T] {
cacheResults := make([]Result[T], len(c.caches))
cacheResults := make([]Result[T], len(c.delegates))
ch := make(chan struct {
int
Result[T]
}, len(c.caches))
for i := range c.caches {
}, len(c.delegates))
for i := range c.delegates {
go func(index int) {
value, etag, err := c.delegates[index].Get()
ch <- struct {
int
Result[T]
}{
index,
c.caches[index].Get(),
}
}{index, Result[T]{Value: value, Etag: etag, Err: err}}
}(i)
}
for i := 0; i < len(c.caches); i++ {
for i := 0; i < len(c.delegates); i++ {
res := <-ch
cacheResults[res.int] = res.Result
}
@ -197,16 +181,16 @@ func (c *listMerger[T, V]) prepareResultsLocked() []Result[T] {
}
func (c *listMerger[T, V]) needsRunningLocked(results []Result[T]) bool {
if c.cacheResults == nil {
if c.cache == nil {
return true
}
if c.result.Err != nil {
return true
}
if len(results) != len(c.cacheResults) {
panic(fmt.Errorf("invalid number of results: %v (expected %v)", len(results), len(c.cacheResults)))
if len(results) != len(c.cache) {
panic(fmt.Errorf("invalid number of results: %v (expected %v)", len(results), len(c.cache)))
}
for i, oldResult := range c.cacheResults {
for i, oldResult := range c.cache {
newResult := results[i]
if newResult.Etag != oldResult.Etag || newResult.Err != nil || oldResult.Err != nil {
return true
@ -215,98 +199,92 @@ func (c *listMerger[T, V]) needsRunningLocked(results []Result[T]) bool {
return false
}
func (c *listMerger[T, V]) Get() Result[V] {
func (c *listMerger[T, V]) Get() (V, string, error) {
c.lock.Lock()
defer c.lock.Unlock()
cacheResults := c.prepareResultsLocked()
if c.needsRunningLocked(cacheResults) {
c.cacheResults = cacheResults
c.result = c.mergeFn(c.cacheResults)
c.cache = cacheResults
c.result.Value, c.result.Etag, c.result.Err = c.mergeFn(c.cache)
}
return c.result
return c.result.Value, c.result.Etag, c.result.Err
}
// NewTransformer creates a new cache that transforms the result of
// another cache. The transformFn will only be called if the source
// cache has updated the output, otherwise, the cached result will be
// returned.
// Transform the result of another cached value. The transformFn will only be called
// if the source has updated, otherwise, the result will be returned.
//
// If the dependency returned an error before, or it returns an error
// this time, or if the transformerFn failed before, the function is
// reran.
func NewTransformer[T, V any](transformerFn func(Result[T]) Result[V], source Data[T]) Data[V] {
return NewListMerger(func(caches []Result[T]) Result[V] {
if len(caches) != 1 {
panic(fmt.Errorf("invalid cache for transformer cache: %v", caches))
func Transform[T, V any](transformerFn func(T, string, error) (V, string, error), source Value[T]) Value[V] {
return MergeList(func(delegates []Result[T]) (V, string, error) {
if len(delegates) != 1 {
panic(fmt.Errorf("invalid cache for transformer cache: %v", delegates))
}
return transformerFn(caches[0])
}, []Data[T]{source})
return transformerFn(delegates[0].Value, delegates[0].Etag, delegates[0].Err)
}, []Value[T]{source})
}
// NewSource creates a new cache that generates some data. This
// will always be called since we don't know the origin of the data and
// if it needs to be updated or not. sourceFn MUST be thread-safe.
func NewSource[T any](sourceFn func() Result[T]) Data[T] {
c := source[T](sourceFn)
return &c
}
type source[T any] func() Result[T]
func (c *source[T]) Get() Result[T] {
return (*c)()
}
// NewStaticSource creates a new cache that always generates the
// same data. This will only be called once (lazily).
func NewStaticSource[T any](staticFn func() Result[T]) Data[T] {
return &static[T]{
fn: staticFn,
// Once calls Value[T].Get() lazily and only once, even in case of an error result.
func Once[T any](d Value[T]) Value[T] {
return &once[T]{
data: d,
}
}
type static[T any] struct {
type once[T any] struct {
once sync.Once
fn func() Result[T]
data Value[T]
result Result[T]
}
func (c *static[T]) Get() Result[T] {
func (c *once[T]) Get() (T, string, error) {
c.once.Do(func() {
c.result = c.fn()
c.result.Value, c.result.Etag, c.result.Err = c.data.Get()
})
return c.result
return c.result.Value, c.result.Etag, c.result.Err
}
// Replaceable is a cache that carries the result even when the cache is
// replaced. This is the type that should typically be stored in
// structs.
type Replaceable[T any] struct {
cache atomic.Pointer[Data[T]]
result atomic.Pointer[Result[T]]
// Replaceable extends the Value[T] interface with the ability to change the
// underlying Value[T] after construction.
type Replaceable[T any] interface {
Value[T]
Store(Value[T])
}
// Get retrieves the data from the underlying source. [Replaceable]
// implements the [Data] interface itself. This is a pass-through
// that calls the most recent underlying cache. If the cache fails but
// previously had returned a success, that success will be returned
// instead. If the cache fails but we never returned a success, that
// failure is returned.
func (c *Replaceable[T]) Get() Result[T] {
result := (*c.cache.Load()).Get()
// Atomic wraps a Value[T] as an atomic value that can be replaced. It implements
// Replaceable[T].
type Atomic[T any] struct {
value atomic.Pointer[Value[T]]
}
for {
cResult := c.result.Load()
if result.Err != nil && cResult != nil && cResult.Err == nil {
return *cResult
}
if c.result.CompareAndSwap(cResult, &result) {
return result
var _ Replaceable[[]byte] = &Atomic[[]byte]{}
func (x *Atomic[T]) Store(val Value[T]) { x.value.Store(&val) }
func (x *Atomic[T]) Get() (T, string, error) { return (*x.value.Load()).Get() }
// LastSuccess calls Value[T].Get(), but hides errors by returning the last
// success if there has been any.
type LastSuccess[T any] struct {
Atomic[T]
success atomic.Pointer[Result[T]]
}
var _ Replaceable[[]byte] = &LastSuccess[[]byte]{}
func (c *LastSuccess[T]) Get() (T, string, error) {
success := c.success.Load()
value, etag, err := c.Atomic.Get()
if err == nil {
if success == nil {
c.success.CompareAndSwap(nil, &Result[T]{Value: value, Etag: etag, Err: err})
}
return value, etag, err
}
}
// Replace changes the cache.
func (c *Replaceable[T]) Replace(cache Data[T]) {
c.cache.Swap(&cache)
if success != nil {
return success.Value, success.Etag, success.Err
}
return value, etag, err
}
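
For orientation (not part of this commit): the renamed primitives compose the same way as before, just with tuple-returning getters instead of Result-returning ones. A self-contained sketch using only the API shown above; the payloads and etags are made up.

```go
package main

import (
	"fmt"
	"strings"

	"k8s.io/kube-openapi/pkg/cached"
)

func main() {
	// A source that is re-evaluated on every Get; the etag tells dependents
	// whether anything actually changed.
	source := cached.Func(func() ([]byte, string, error) {
		return []byte("hello"), "etag-1", nil
	})

	// Transform only re-runs its function when the source etag changes.
	upper := cached.Transform(func(b []byte, etag string, err error) (string, string, error) {
		if err != nil {
			return "", "", err
		}
		return strings.ToUpper(string(b)), etag, nil
	}, source)

	// LastSuccess can be re-pointed with Store and keeps returning the last
	// good value if a later Get fails; Once evaluates its delegate only once.
	var last cached.LastSuccess[string]
	last.Store(cached.Once(upper))

	value, etag, err := last.Get()
	fmt.Println(value, etag, err)
}
```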


@ -22,7 +22,6 @@ import (
"github.com/emicklei/go-restful/v3"
"k8s.io/kube-openapi/pkg/openapiconv"
"k8s.io/kube-openapi/pkg/spec3"
"k8s.io/kube-openapi/pkg/validation/spec"
)
@ -172,43 +171,6 @@ type OpenAPIV3Config struct {
DefaultSecurity []map[string][]string
}
// ConvertConfigToV3 converts a Config object to an OpenAPIV3Config object
func ConvertConfigToV3(config *Config) *OpenAPIV3Config {
if config == nil {
return nil
}
v3Config := &OpenAPIV3Config{
Info: config.Info,
IgnorePrefixes: config.IgnorePrefixes,
GetDefinitions: config.GetDefinitions,
GetOperationIDAndTags: config.GetOperationIDAndTags,
GetOperationIDAndTagsFromRoute: config.GetOperationIDAndTagsFromRoute,
GetDefinitionName: config.GetDefinitionName,
Definitions: config.Definitions,
SecuritySchemes: make(spec3.SecuritySchemes),
DefaultSecurity: config.DefaultSecurity,
DefaultResponse: openapiconv.ConvertResponse(config.DefaultResponse, []string{"application/json"}),
CommonResponses: make(map[int]*spec3.Response),
ResponseDefinitions: make(map[string]*spec3.Response),
}
if config.SecurityDefinitions != nil {
for s, securityScheme := range *config.SecurityDefinitions {
v3Config.SecuritySchemes[s] = openapiconv.ConvertSecurityScheme(securityScheme)
}
}
for k, commonResponse := range config.CommonResponses {
v3Config.CommonResponses[k] = openapiconv.ConvertResponse(&commonResponse, []string{"application/json"})
}
for k, responseDefinition := range config.ResponseDefinitions {
v3Config.ResponseDefinitions[k] = openapiconv.ConvertResponse(&responseDefinition, []string{"application/json"})
}
return v3Config
}
type typeInfo struct {
name string
format string


@ -30,6 +30,7 @@ import (
openapi_v2 "github.com/google/gnostic-models/openapiv2"
"github.com/google/uuid"
"github.com/munnerz/goautoneg"
klog "k8s.io/klog/v2"
"k8s.io/kube-openapi/pkg/builder"
"k8s.io/kube-openapi/pkg/cached"
@ -59,52 +60,52 @@ type timedSpec struct {
// OpenAPIService is the service responsible for serving OpenAPI spec. It has
// the ability to safely change the spec while serving it.
type OpenAPIService struct {
specCache cached.Replaceable[*spec.Swagger]
jsonCache cached.Data[timedSpec]
protoCache cached.Data[timedSpec]
specCache cached.LastSuccess[*spec.Swagger]
jsonCache cached.Value[timedSpec]
protoCache cached.Value[timedSpec]
}
// NewOpenAPIService builds an OpenAPIService starting with the given spec.
func NewOpenAPIService(swagger *spec.Swagger) *OpenAPIService {
return NewOpenAPIServiceLazy(cached.NewResultOK(swagger, uuid.New().String()))
return NewOpenAPIServiceLazy(cached.Static(swagger, uuid.New().String()))
}
// NewOpenAPIServiceLazy builds an OpenAPIService from lazy spec.
func NewOpenAPIServiceLazy(swagger cached.Data[*spec.Swagger]) *OpenAPIService {
func NewOpenAPIServiceLazy(swagger cached.Value[*spec.Swagger]) *OpenAPIService {
o := &OpenAPIService{}
o.UpdateSpecLazy(swagger)
o.jsonCache = cached.NewTransformer[*spec.Swagger](func(result cached.Result[*spec.Swagger]) cached.Result[timedSpec] {
if result.Err != nil {
return cached.NewResultErr[timedSpec](result.Err)
}
json, err := result.Data.MarshalJSON()
o.jsonCache = cached.Transform[*spec.Swagger](func(spec *spec.Swagger, etag string, err error) (timedSpec, string, error) {
if err != nil {
return cached.NewResultErr[timedSpec](err)
return timedSpec{}, "", err
}
return cached.NewResultOK(timedSpec{spec: json, lastModified: time.Now()}, computeETag(json))
json, err := spec.MarshalJSON()
if err != nil {
return timedSpec{}, "", err
}
return timedSpec{spec: json, lastModified: time.Now()}, computeETag(json), nil
}, &o.specCache)
o.protoCache = cached.NewTransformer(func(result cached.Result[timedSpec]) cached.Result[timedSpec] {
if result.Err != nil {
return cached.NewResultErr[timedSpec](result.Err)
}
proto, err := ToProtoBinary(result.Data.spec)
o.protoCache = cached.Transform(func(ts timedSpec, etag string, err error) (timedSpec, string, error) {
if err != nil {
return cached.NewResultErr[timedSpec](err)
return timedSpec{}, "", err
}
proto, err := ToProtoBinary(ts.spec)
if err != nil {
return timedSpec{}, "", err
}
// We can re-use the same etag as json because of the Vary header.
return cached.NewResultOK(timedSpec{spec: proto, lastModified: result.Data.lastModified}, result.Etag)
return timedSpec{spec: proto, lastModified: ts.lastModified}, etag, nil
}, o.jsonCache)
return o
}
func (o *OpenAPIService) UpdateSpec(swagger *spec.Swagger) error {
o.UpdateSpecLazy(cached.NewResultOK(swagger, uuid.New().String()))
o.UpdateSpecLazy(cached.Static(swagger, uuid.New().String()))
return nil
}
func (o *OpenAPIService) UpdateSpecLazy(swagger cached.Data[*spec.Swagger]) {
o.specCache.Replace(swagger)
func (o *OpenAPIService) UpdateSpecLazy(swagger cached.Value[*spec.Swagger]) {
o.specCache.Store(swagger)
}
func ToProtoBinary(json []byte) ([]byte, error) {
@ -130,7 +131,7 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl
Type string
SubType string
ReturnedContentType string
GetDataAndEtag cached.Data[timedSpec]
GetDataAndEtag cached.Value[timedSpec]
}{
{"application", subTypeJSON, "application/" + subTypeJSON, o.jsonCache},
{"application", subTypeProtobufDeprecated, "application/" + subTypeProtobuf, o.protoCache},
@ -154,11 +155,11 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl
continue
}
// serve the first matching media type in the sorted clause list
result := accepts.GetDataAndEtag.Get()
if result.Err != nil {
klog.Errorf("Error in OpenAPI handler: %s", result.Err)
ts, etag, err := accepts.GetDataAndEtag.Get()
if err != nil {
klog.Errorf("Error in OpenAPI handler: %s", err)
// only return a 503 if we have no older cache data to serve
if result.Data.spec == nil {
if ts.spec == nil {
w.WriteHeader(http.StatusServiceUnavailable)
return
}
@ -167,9 +168,9 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl
w.Header().Set("Content-Type", accepts.ReturnedContentType)
// ETag must be enclosed in double quotes: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag
w.Header().Set("Etag", strconv.Quote(result.Etag))
w.Header().Set("Etag", strconv.Quote(etag))
// ServeContent will take care of caching using eTag.
http.ServeContent(w, r, servePath, result.Data.lastModified, bytes.NewReader(result.Data.spec))
http.ServeContent(w, r, servePath, ts.lastModified, bytes.NewReader(ts.spec))
return
}
}
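
For orientation (not part of this commit): a hedged sketch of how a consumer wires this v2 handler after the rename. The registration call's exact signature is not fully shown in this diff, so treat it as an assumption; *http.ServeMux is used as the path handler.

```go
package main

import (
	"net/http"

	"github.com/google/uuid"

	"k8s.io/kube-openapi/pkg/cached"
	"k8s.io/kube-openapi/pkg/handler"
	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	// Lazy variant: the spec is only computed when first requested.
	svc := handler.NewOpenAPIServiceLazy(cached.Func(func() (*spec.Swagger, string, error) {
		return &spec.Swagger{}, uuid.New().String(), nil // placeholder spec
	}))

	// Later updates swap in a static value, mirroring what UpdateSpec does internally.
	_ = svc.UpdateSpec(&spec.Swagger{})

	mux := http.NewServeMux()
	// Assumed signature: a serve path plus anything with a ServeMux-style Handle method.
	svc.RegisterOpenAPIVersionedService("/openapi/v2", mux)
	_ = http.ListenAndServe(":8080", mux)
}
```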


@ -33,6 +33,7 @@ import (
openapi_v3 "github.com/google/gnostic-models/openapiv3"
"github.com/google/uuid"
"github.com/munnerz/goautoneg"
"k8s.io/klog/v2"
"k8s.io/kube-openapi/pkg/cached"
"k8s.io/kube-openapi/pkg/common"
@ -73,38 +74,38 @@ type timedSpec struct {
// This type is protected by the lock on OpenAPIService.
type openAPIV3Group struct {
specCache cached.Replaceable[*spec3.OpenAPI]
pbCache cached.Data[timedSpec]
jsonCache cached.Data[timedSpec]
specCache cached.LastSuccess[*spec3.OpenAPI]
pbCache cached.Value[timedSpec]
jsonCache cached.Value[timedSpec]
}
func newOpenAPIV3Group() *openAPIV3Group {
o := &openAPIV3Group{}
o.jsonCache = cached.NewTransformer[*spec3.OpenAPI](func(result cached.Result[*spec3.OpenAPI]) cached.Result[timedSpec] {
if result.Err != nil {
return cached.NewResultErr[timedSpec](result.Err)
}
json, err := json.Marshal(result.Data)
o.jsonCache = cached.Transform[*spec3.OpenAPI](func(spec *spec3.OpenAPI, etag string, err error) (timedSpec, string, error) {
if err != nil {
return cached.NewResultErr[timedSpec](err)
return timedSpec{}, "", err
}
return cached.NewResultOK(timedSpec{spec: json, lastModified: time.Now()}, computeETag(json))
json, err := json.Marshal(spec)
if err != nil {
return timedSpec{}, "", err
}
return timedSpec{spec: json, lastModified: time.Now()}, computeETag(json), nil
}, &o.specCache)
o.pbCache = cached.NewTransformer(func(result cached.Result[timedSpec]) cached.Result[timedSpec] {
if result.Err != nil {
return cached.NewResultErr[timedSpec](result.Err)
}
proto, err := ToV3ProtoBinary(result.Data.spec)
o.pbCache = cached.Transform(func(ts timedSpec, etag string, err error) (timedSpec, string, error) {
if err != nil {
return cached.NewResultErr[timedSpec](err)
return timedSpec{}, "", err
}
return cached.NewResultOK(timedSpec{spec: proto, lastModified: result.Data.lastModified}, result.Etag)
proto, err := ToV3ProtoBinary(ts.spec)
if err != nil {
return timedSpec{}, "", err
}
return timedSpec{spec: proto, lastModified: ts.lastModified}, etag, nil
}, o.jsonCache)
return o
}
func (o *openAPIV3Group) UpdateSpec(openapi cached.Data[*spec3.OpenAPI]) {
o.specCache.Replace(openapi)
func (o *openAPIV3Group) UpdateSpec(openapi cached.Value[*spec3.OpenAPI]) {
o.specCache.Store(openapi)
}
// OpenAPIService is the service responsible for serving OpenAPI spec. It has
@ -114,7 +115,7 @@ type OpenAPIService struct {
mutex sync.Mutex
v3Schema map[string]*openAPIV3Group
discoveryCache cached.Replaceable[timedSpec]
discoveryCache cached.LastSuccess[timedSpec]
}
func computeETag(data []byte) string {
@ -137,20 +138,20 @@ func NewOpenAPIService() *OpenAPIService {
o := &OpenAPIService{}
o.v3Schema = make(map[string]*openAPIV3Group)
// We're not locked because we haven't shared the structure yet.
o.discoveryCache.Replace(o.buildDiscoveryCacheLocked())
o.discoveryCache.Store(o.buildDiscoveryCacheLocked())
return o
}
func (o *OpenAPIService) buildDiscoveryCacheLocked() cached.Data[timedSpec] {
caches := make(map[string]cached.Data[timedSpec], len(o.v3Schema))
func (o *OpenAPIService) buildDiscoveryCacheLocked() cached.Value[timedSpec] {
caches := make(map[string]cached.Value[timedSpec], len(o.v3Schema))
for gvName, group := range o.v3Schema {
caches[gvName] = group.jsonCache
}
return cached.NewMerger(func(results map[string]cached.Result[timedSpec]) cached.Result[timedSpec] {
return cached.Merge(func(results map[string]cached.Result[timedSpec]) (timedSpec, string, error) {
discovery := &OpenAPIV3Discovery{Paths: make(map[string]OpenAPIV3DiscoveryGroupVersion)}
for gvName, result := range results {
if result.Err != nil {
return cached.NewResultErr[timedSpec](result.Err)
return timedSpec{}, "", result.Err
}
discovery.Paths[gvName] = OpenAPIV3DiscoveryGroupVersion{
ServerRelativeURL: constructServerRelativeURL(gvName, result.Etag),
@ -158,9 +159,9 @@ func (o *OpenAPIService) buildDiscoveryCacheLocked() cached.Data[timedSpec] {
}
j, err := json.Marshal(discovery)
if err != nil {
return cached.NewResultErr[timedSpec](err)
return timedSpec{}, "", err
}
return cached.NewResultOK(timedSpec{spec: j, lastModified: time.Now()}, computeETag(j))
return timedSpec{spec: j, lastModified: time.Now()}, computeETag(j), nil
}, caches)
}
@ -171,32 +172,32 @@ func (o *OpenAPIService) getSingleGroupBytes(getType string, group string) ([]by
if !ok {
return nil, "", time.Now(), fmt.Errorf("Cannot find CRD group %s", group)
}
result := cached.Result[timedSpec]{}
switch getType {
case subTypeJSON:
result = v.jsonCache.Get()
ts, etag, err := v.jsonCache.Get()
return ts.spec, etag, ts.lastModified, err
case subTypeProtobuf, subTypeProtobufDeprecated:
result = v.pbCache.Get()
ts, etag, err := v.pbCache.Get()
return ts.spec, etag, ts.lastModified, err
default:
return nil, "", time.Now(), fmt.Errorf("Invalid accept clause %s", getType)
}
return result.Data.spec, result.Etag, result.Data.lastModified, result.Err
}
// UpdateGroupVersionLazy adds or updates an existing group with the new cached.
func (o *OpenAPIService) UpdateGroupVersionLazy(group string, openapi cached.Data[*spec3.OpenAPI]) {
func (o *OpenAPIService) UpdateGroupVersionLazy(group string, openapi cached.Value[*spec3.OpenAPI]) {
o.mutex.Lock()
defer o.mutex.Unlock()
if _, ok := o.v3Schema[group]; !ok {
o.v3Schema[group] = newOpenAPIV3Group()
// Since there is a new item, we need to re-build the cache map.
o.discoveryCache.Replace(o.buildDiscoveryCacheLocked())
o.discoveryCache.Store(o.buildDiscoveryCacheLocked())
}
o.v3Schema[group].UpdateSpec(openapi)
}
func (o *OpenAPIService) UpdateGroupVersion(group string, openapi *spec3.OpenAPI) {
o.UpdateGroupVersionLazy(group, cached.NewResultOK(openapi, uuid.New().String()))
o.UpdateGroupVersionLazy(group, cached.Static(openapi, uuid.New().String()))
}
func (o *OpenAPIService) DeleteGroupVersion(group string) {
@ -204,19 +205,19 @@ func (o *OpenAPIService) DeleteGroupVersion(group string) {
defer o.mutex.Unlock()
delete(o.v3Schema, group)
// Rebuild the merge cache map since the items have changed.
o.discoveryCache.Replace(o.buildDiscoveryCacheLocked())
o.discoveryCache.Store(o.buildDiscoveryCacheLocked())
}
func (o *OpenAPIService) HandleDiscovery(w http.ResponseWriter, r *http.Request) {
result := o.discoveryCache.Get()
if result.Err != nil {
klog.Errorf("Error serving discovery: %s", result.Err)
ts, etag, err := o.discoveryCache.Get()
if err != nil {
klog.Errorf("Error serving discovery: %s", err)
w.WriteHeader(http.StatusInternalServerError)
return
}
w.Header().Set("Etag", strconv.Quote(result.Etag))
w.Header().Set("Etag", strconv.Quote(etag))
w.Header().Set("Content-Type", "application/json")
http.ServeContent(w, r, "/openapi/v3", result.Data.lastModified, bytes.NewReader(result.Data.spec))
http.ServeContent(w, r, "/openapi/v3", ts.lastModified, bytes.NewReader(ts.spec))
}
func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Request) {
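
For orientation (not part of this commit): the v3 service keeps one group of caches per group/version and rebuilds the discovery merge whenever the set changes. A hedged sketch; the group name and spec contents are placeholders.

```go
package main

import (
	"net/http"

	"k8s.io/kube-openapi/pkg/handler3"
	"k8s.io/kube-openapi/pkg/spec3"
)

func main() {
	svc := handler3.NewOpenAPIService()

	// Each group/version gets its own spec; UpdateGroupVersionLazy accepts a
	// cached.Value for lazily built specs.
	svc.UpdateGroupVersion("apis/example.dev/v1", &spec3.OpenAPI{Version: "3.0.0"})

	mux := http.NewServeMux()
	mux.HandleFunc("/openapi/v3", svc.HandleDiscovery)
	mux.HandleFunc("/openapi/v3/", svc.HandleGroupVersion)
	_ = http.ListenAndServe(":8080", mux)
}
```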


@ -22,3 +22,4 @@ var UseOptimizedJSONUnmarshalingV3 bool = true
// Used by tests to selectively disable experimental JSON marshaler
var UseOptimizedJSONMarshaling bool = true
var UseOptimizedJSONMarshalingV3 bool = true


@ -1,322 +0,0 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package openapiconv
import (
"strings"
klog "k8s.io/klog/v2"
builderutil "k8s.io/kube-openapi/pkg/builder3/util"
"k8s.io/kube-openapi/pkg/spec3"
"k8s.io/kube-openapi/pkg/validation/spec"
)
var OpenAPIV2DefPrefix = "#/definitions/"
var OpenAPIV3DefPrefix = "#/components/schemas/"
// ConvertV2ToV3 converts an OpenAPI V2 object into V3.
// Certain references may be shared between the V2 and V3 objects in the conversion.
func ConvertV2ToV3(v2Spec *spec.Swagger) *spec3.OpenAPI {
v3Spec := &spec3.OpenAPI{
Version: "3.0.0",
Info: v2Spec.Info,
ExternalDocs: ConvertExternalDocumentation(v2Spec.ExternalDocs),
Paths: ConvertPaths(v2Spec.Paths),
Components: ConvertComponents(v2Spec.SecurityDefinitions, v2Spec.Definitions, v2Spec.Responses, v2Spec.Produces),
}
return v3Spec
}
func ConvertExternalDocumentation(v2ED *spec.ExternalDocumentation) *spec3.ExternalDocumentation {
if v2ED == nil {
return nil
}
return &spec3.ExternalDocumentation{
ExternalDocumentationProps: spec3.ExternalDocumentationProps{
Description: v2ED.Description,
URL: v2ED.URL,
},
}
}
func ConvertComponents(v2SecurityDefinitions spec.SecurityDefinitions, v2Definitions spec.Definitions, v2Responses map[string]spec.Response, produces []string) *spec3.Components {
components := &spec3.Components{}
if v2Definitions != nil {
components.Schemas = make(map[string]*spec.Schema)
}
for s, schema := range v2Definitions {
components.Schemas[s] = ConvertSchema(&schema)
}
if v2SecurityDefinitions != nil {
components.SecuritySchemes = make(spec3.SecuritySchemes)
}
for s, securityScheme := range v2SecurityDefinitions {
components.SecuritySchemes[s] = ConvertSecurityScheme(securityScheme)
}
if v2Responses != nil {
components.Responses = make(map[string]*spec3.Response)
}
for r, response := range v2Responses {
components.Responses[r] = ConvertResponse(&response, produces)
}
return components
}
func ConvertSchema(v2Schema *spec.Schema) *spec.Schema {
if v2Schema == nil {
return nil
}
v3Schema := spec.Schema{
VendorExtensible: v2Schema.VendorExtensible,
SchemaProps: v2Schema.SchemaProps,
SwaggerSchemaProps: v2Schema.SwaggerSchemaProps,
ExtraProps: v2Schema.ExtraProps,
}
if refString := v2Schema.Ref.String(); refString != "" {
if idx := strings.Index(refString, OpenAPIV2DefPrefix); idx != -1 {
v3Schema.Ref = spec.MustCreateRef(OpenAPIV3DefPrefix + refString[idx+len(OpenAPIV2DefPrefix):])
} else {
klog.Errorf("Error: Swagger V2 Ref %s does not contain #/definitions\n", refString)
}
}
if v2Schema.Properties != nil {
v3Schema.Properties = make(map[string]spec.Schema)
for key, property := range v2Schema.Properties {
v3Schema.Properties[key] = *ConvertSchema(&property)
}
}
if v2Schema.Items != nil {
v3Schema.Items = &spec.SchemaOrArray{
Schema: ConvertSchema(v2Schema.Items.Schema),
Schemas: ConvertSchemaList(v2Schema.Items.Schemas),
}
}
if v2Schema.AdditionalProperties != nil {
v3Schema.AdditionalProperties = &spec.SchemaOrBool{
Schema: ConvertSchema(v2Schema.AdditionalProperties.Schema),
Allows: v2Schema.AdditionalProperties.Allows,
}
}
if v2Schema.AdditionalItems != nil {
v3Schema.AdditionalItems = &spec.SchemaOrBool{
Schema: ConvertSchema(v2Schema.AdditionalItems.Schema),
Allows: v2Schema.AdditionalItems.Allows,
}
}
return builderutil.WrapRefs(&v3Schema)
}
func ConvertSchemaList(v2SchemaList []spec.Schema) []spec.Schema {
if v2SchemaList == nil {
return nil
}
v3SchemaList := []spec.Schema{}
for _, s := range v2SchemaList {
v3SchemaList = append(v3SchemaList, *ConvertSchema(&s))
}
return v3SchemaList
}
func ConvertSecurityScheme(v2securityScheme *spec.SecurityScheme) *spec3.SecurityScheme {
if v2securityScheme == nil {
return nil
}
securityScheme := &spec3.SecurityScheme{
VendorExtensible: v2securityScheme.VendorExtensible,
SecuritySchemeProps: spec3.SecuritySchemeProps{
Description: v2securityScheme.Description,
Type: v2securityScheme.Type,
Name: v2securityScheme.Name,
In: v2securityScheme.In,
},
}
if v2securityScheme.Flow != "" {
securityScheme.Flows = make(map[string]*spec3.OAuthFlow)
securityScheme.Flows[v2securityScheme.Flow] = &spec3.OAuthFlow{
OAuthFlowProps: spec3.OAuthFlowProps{
AuthorizationUrl: v2securityScheme.AuthorizationURL,
TokenUrl: v2securityScheme.TokenURL,
Scopes: v2securityScheme.Scopes,
},
}
}
return securityScheme
}
func ConvertPaths(v2Paths *spec.Paths) *spec3.Paths {
if v2Paths == nil {
return nil
}
paths := &spec3.Paths{
VendorExtensible: v2Paths.VendorExtensible,
}
if v2Paths.Paths != nil {
paths.Paths = make(map[string]*spec3.Path)
}
for k, v := range v2Paths.Paths {
paths.Paths[k] = ConvertPathItem(v)
}
return paths
}
func ConvertPathItem(v2pathItem spec.PathItem) *spec3.Path {
path := &spec3.Path{
Refable: v2pathItem.Refable,
PathProps: spec3.PathProps{
Get: ConvertOperation(v2pathItem.Get),
Put: ConvertOperation(v2pathItem.Put),
Post: ConvertOperation(v2pathItem.Post),
Delete: ConvertOperation(v2pathItem.Delete),
Options: ConvertOperation(v2pathItem.Options),
Head: ConvertOperation(v2pathItem.Head),
Patch: ConvertOperation(v2pathItem.Patch),
},
VendorExtensible: v2pathItem.VendorExtensible,
}
for _, param := range v2pathItem.Parameters {
path.Parameters = append(path.Parameters, ConvertParameter(param))
}
return path
}
func ConvertOperation(v2Operation *spec.Operation) *spec3.Operation {
if v2Operation == nil {
return nil
}
operation := &spec3.Operation{
VendorExtensible: v2Operation.VendorExtensible,
OperationProps: spec3.OperationProps{
Description: v2Operation.Description,
ExternalDocs: ConvertExternalDocumentation(v2Operation.OperationProps.ExternalDocs),
Tags: v2Operation.Tags,
Summary: v2Operation.Summary,
Deprecated: v2Operation.Deprecated,
OperationId: v2Operation.ID,
},
}
for _, param := range v2Operation.Parameters {
if param.ParamProps.Name == "body" && param.ParamProps.Schema != nil {
operation.OperationProps.RequestBody = &spec3.RequestBody{
RequestBodyProps: spec3.RequestBodyProps{},
}
if v2Operation.Consumes != nil {
operation.RequestBody.Content = make(map[string]*spec3.MediaType)
}
for _, consumer := range v2Operation.Consumes {
operation.RequestBody.Content[consumer] = &spec3.MediaType{
MediaTypeProps: spec3.MediaTypeProps{
Schema: ConvertSchema(param.ParamProps.Schema),
},
}
}
} else {
operation.Parameters = append(operation.Parameters, ConvertParameter(param))
}
}
operation.Responses = &spec3.Responses{ResponsesProps: spec3.ResponsesProps{
Default: ConvertResponse(v2Operation.Responses.Default, v2Operation.Produces),
},
VendorExtensible: v2Operation.Responses.VendorExtensible,
}
if v2Operation.Responses.StatusCodeResponses != nil {
operation.Responses.StatusCodeResponses = make(map[int]*spec3.Response)
}
for k, v := range v2Operation.Responses.StatusCodeResponses {
operation.Responses.StatusCodeResponses[k] = ConvertResponse(&v, v2Operation.Produces)
}
return operation
}
func ConvertResponse(v2Response *spec.Response, produces []string) *spec3.Response {
if v2Response == nil {
return nil
}
response := &spec3.Response{
Refable: ConvertRefableResponse(v2Response.Refable),
VendorExtensible: v2Response.VendorExtensible,
ResponseProps: spec3.ResponseProps{
Description: v2Response.Description,
},
}
if v2Response.Schema != nil {
if produces != nil {
response.Content = make(map[string]*spec3.MediaType)
}
for _, producer := range produces {
response.ResponseProps.Content[producer] = &spec3.MediaType{
MediaTypeProps: spec3.MediaTypeProps{
Schema: ConvertSchema(v2Response.Schema),
},
}
}
}
return response
}
func ConvertParameter(v2Param spec.Parameter) *spec3.Parameter {
param := &spec3.Parameter{
Refable: ConvertRefableParameter(v2Param.Refable),
VendorExtensible: v2Param.VendorExtensible,
ParameterProps: spec3.ParameterProps{
Name: v2Param.Name,
Description: v2Param.Description,
In: v2Param.In,
Required: v2Param.Required,
Schema: ConvertSchema(v2Param.Schema),
AllowEmptyValue: v2Param.AllowEmptyValue,
},
}
// Convert SimpleSchema into Schema
if param.Schema == nil {
param.Schema = &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{v2Param.Type},
Format: v2Param.Format,
UniqueItems: v2Param.UniqueItems,
},
}
}
return param
}
func ConvertRefableParameter(refable spec.Refable) spec.Refable {
if refable.Ref.String() != "" {
return spec.Refable{Ref: spec.MustCreateRef(strings.Replace(refable.Ref.String(), "#/parameters/", "#/components/parameters/", 1))}
}
return refable
}
func ConvertRefableResponse(refable spec.Refable) spec.Refable {
if refable.Ref.String() != "" {
return spec.Refable{Ref: spec.MustCreateRef(strings.Replace(refable.Ref.String(), "#/responses/", "#/components/responses/", 1))}
}
return refable
}


@ -32,6 +32,9 @@ type Encoding struct {
// MarshalJSON is a custom marshal function that knows how to encode Encoding as JSON
func (e *Encoding) MarshalJSON() ([]byte, error) {
if internal.UseOptimizedJSONMarshalingV3 {
return internal.DeterministicMarshal(e)
}
b1, err := json.Marshal(e.EncodingProps)
if err != nil {
return nil, err
@ -43,6 +46,16 @@ func (e *Encoding) MarshalJSON() ([]byte, error) {
return swag.ConcatJSON(b1, b2), nil
}
func (e *Encoding) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
var x struct {
EncodingProps encodingPropsOmitZero `json:",inline"`
spec.Extensions
}
x.Extensions = internal.SanitizeExtensions(e.Extensions)
x.EncodingProps = encodingPropsOmitZero(e.EncodingProps)
return opts.MarshalNext(enc, x)
}
func (e *Encoding) UnmarshalJSON(data []byte) error {
if internal.UseOptimizedJSONUnmarshalingV3 {
return jsonv2.Unmarshal(data, e)
@ -82,3 +95,11 @@ type EncodingProps struct {
// AllowReserved determines whether the parameter value SHOULD allow reserved characters, as defined by RFC3986
AllowReserved bool `json:"allowReserved,omitempty"`
}
type encodingPropsOmitZero struct {
ContentType string `json:"contentType,omitempty"`
Headers map[string]*Header `json:"headers,omitempty"`
Style string `json:"style,omitempty"`
Explode bool `json:"explode,omitzero"`
AllowReserved bool `json:"allowReserved,omitzero"`
}
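
The same shadow-struct pattern repeats for the other spec3 types below: MarshalJSON defers to the deterministic marshaler when the gate is on, and a marshaling-only *OmitZero twin switches boolean fields from omitempty to omitzero, since the json/v2-style encoder treats those options differently and the twin keeps zero-valued booleans out of the output. A hedged, self-contained sketch with hypothetical types; note that standard encoding/json only understands omitzero from Go 1.24 on.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// exampleProps stands in for one of the exported *Props structs.
type exampleProps struct {
	Style   string `json:"style,omitempty"`
	Explode bool   `json:"explode,omitempty"`
}

// examplePropsOmitZero is the marshaling-only twin: same fields, different tags.
type examplePropsOmitZero struct {
	Style   string `json:"style,omitempty"`
	Explode bool   `json:"explode,omitzero"`
}

func main() {
	p := exampleProps{Style: "form"} // Explode is false
	// Identical field sets allow a direct struct conversion; only the tags differ.
	b, _ := json.Marshal(examplePropsOmitZero(p))
	// With an omitzero-aware encoder (json/v2, or encoding/json in Go 1.24+)
	// the false boolean is dropped: {"style":"form"}
	fmt.Println(string(b))
}
```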


@ -36,6 +36,9 @@ type Example struct {
// MarshalJSON is a custom marshal function that knows how to encode RequestBody as JSON
func (e *Example) MarshalJSON() ([]byte, error) {
if internal.UseOptimizedJSONMarshalingV3 {
return internal.DeterministicMarshal(e)
}
b1, err := json.Marshal(e.Refable)
if err != nil {
return nil, err
@ -50,6 +53,17 @@ func (e *Example) MarshalJSON() ([]byte, error) {
}
return swag.ConcatJSON(b1, b2, b3), nil
}
func (e *Example) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
var x struct {
Ref string `json:"$ref,omitempty"`
ExampleProps `json:",inline"`
spec.Extensions
}
x.Ref = e.Refable.Ref.String()
x.Extensions = internal.SanitizeExtensions(e.Extensions)
x.ExampleProps = e.ExampleProps
return opts.MarshalNext(enc, x)
}
func (e *Example) UnmarshalJSON(data []byte) error {
if internal.UseOptimizedJSONUnmarshalingV3 {


@ -39,6 +39,9 @@ type ExternalDocumentationProps struct {
// MarshalJSON is a custom marshal function that knows how to encode Responses as JSON
func (e *ExternalDocumentation) MarshalJSON() ([]byte, error) {
if internal.UseOptimizedJSONMarshalingV3 {
return internal.DeterministicMarshal(e)
}
b1, err := json.Marshal(e.ExternalDocumentationProps)
if err != nil {
return nil, err
@ -50,6 +53,16 @@ func (e *ExternalDocumentation) MarshalJSON() ([]byte, error) {
return swag.ConcatJSON(b1, b2), nil
}
func (e *ExternalDocumentation) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
var x struct {
ExternalDocumentationProps `json:",inline"`
spec.Extensions
}
x.Extensions = internal.SanitizeExtensions(e.Extensions)
x.ExternalDocumentationProps = e.ExternalDocumentationProps
return opts.MarshalNext(enc, x)
}
func (e *ExternalDocumentation) UnmarshalJSON(data []byte) error {
if internal.UseOptimizedJSONUnmarshalingV3 {
return jsonv2.Unmarshal(data, e)


@ -35,6 +35,18 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{
func(o *OpenAPI, c fuzz.Continue) {
c.FuzzNoCustom(o)
o.Version = "3.0.0"
for i, val := range o.SecurityRequirement {
if val == nil {
o.SecurityRequirement[i] = make(map[string][]string)
}
for k, v := range val {
if v == nil {
val[k] = make([]string, 0)
}
}
}
},
func(r *interface{}, c fuzz.Continue) {
switch c.Intn(3) {
@ -169,6 +181,21 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{
c.Fuzz(&v.ResponseProps)
c.Fuzz(&v.VendorExtensible)
},
func(v *Operation, c fuzz.Continue) {
c.FuzzNoCustom(v)
// Do not fuzz null values into the array.
for i, val := range v.SecurityRequirement {
if val == nil {
v.SecurityRequirement[i] = make(map[string][]string)
}
for k, v := range val {
if v == nil {
val[k] = make([]string, 0)
}
}
}
},
func(v *spec.Extensions, c fuzz.Continue) {
numChildren := c.Intn(5)
for i := 0; i < numChildren; i++ {


@ -36,6 +36,9 @@ type Header struct {
// MarshalJSON is a custom marshal function that knows how to encode Header as JSON
func (h *Header) MarshalJSON() ([]byte, error) {
if internal.UseOptimizedJSONMarshalingV3 {
return internal.DeterministicMarshal(h)
}
b1, err := json.Marshal(h.Refable)
if err != nil {
return nil, err
@ -51,6 +54,18 @@ func (h *Header) MarshalJSON() ([]byte, error) {
return swag.ConcatJSON(b1, b2, b3), nil
}
func (h *Header) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
var x struct {
Ref string `json:"$ref,omitempty"`
HeaderProps headerPropsOmitZero `json:",inline"`
spec.Extensions
}
x.Ref = h.Refable.Ref.String()
x.Extensions = internal.SanitizeExtensions(h.Extensions)
x.HeaderProps = headerPropsOmitZero(h.HeaderProps)
return opts.MarshalNext(enc, x)
}
func (h *Header) UnmarshalJSON(data []byte) error {
if internal.UseOptimizedJSONUnmarshalingV3 {
return jsonv2.Unmarshal(data, h)
@ -109,3 +124,19 @@ type HeaderProps struct {
// Examples of the header
Examples map[string]*Example `json:"examples,omitempty"`
}
// Marshaling structure only, always edit along with corresponding
// struct (or compilation will fail).
type headerPropsOmitZero struct {
Description string `json:"description,omitempty"`
Required bool `json:"required,omitzero"`
Deprecated bool `json:"deprecated,omitzero"`
AllowEmptyValue bool `json:"allowEmptyValue,omitzero"`
Style string `json:"style,omitempty"`
Explode bool `json:"explode,omitzero"`
AllowReserved bool `json:"allowReserved,omitzero"`
Schema *spec.Schema `json:"schema,omitzero"`
Content map[string]*MediaType `json:"content,omitempty"`
Example interface{} `json:"example,omitempty"`
Examples map[string]*Example `json:"examples,omitempty"`
}


@ -35,6 +35,9 @@ type MediaType struct {
// MarshalJSON is a custom marshal function that knows how to encode MediaType as JSON
func (m *MediaType) MarshalJSON() ([]byte, error) {
if internal.UseOptimizedJSONMarshalingV3 {
return internal.DeterministicMarshal(m)
}
b1, err := json.Marshal(m.MediaTypeProps)
if err != nil {
return nil, err
@ -46,6 +49,16 @@ func (m *MediaType) MarshalJSON() ([]byte, error) {
return swag.ConcatJSON(b1, b2), nil
}
func (e *MediaType) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
var x struct {
MediaTypeProps mediaTypePropsOmitZero `json:",inline"`
spec.Extensions
}
x.Extensions = internal.SanitizeExtensions(e.Extensions)
x.MediaTypeProps = mediaTypePropsOmitZero(e.MediaTypeProps)
return opts.MarshalNext(enc, x)
}
func (m *MediaType) UnmarshalJSON(data []byte) error {
if internal.UseOptimizedJSONUnmarshalingV3 {
return jsonv2.Unmarshal(data, m)
@ -84,3 +97,10 @@ type MediaTypeProps struct {
// A map between a property name and its encoding information. The key, being the property name, MUST exist in the schema as a property. The encoding object SHALL only apply to requestBody objects when the media type is multipart or application/x-www-form-urlencoded
Encoding map[string]*Encoding `json:"encoding,omitempty"`
}
type mediaTypePropsOmitZero struct {
Schema *spec.Schema `json:"schema,omitzero"`
Example interface{} `json:"example,omitempty"`
Examples map[string]*Example `json:"examples,omitempty"`
Encoding map[string]*Encoding `json:"encoding,omitempty"`
}


@ -35,6 +35,9 @@ type Operation struct {
// MarshalJSON is a custom marshal function that knows how to encode Operation as JSON
func (o *Operation) MarshalJSON() ([]byte, error) {
if internal.UseOptimizedJSONMarshalingV3 {
return internal.DeterministicMarshal(o)
}
b1, err := json.Marshal(o.OperationProps)
if err != nil {
return nil, err
@ -46,6 +49,16 @@ func (o *Operation) MarshalJSON() ([]byte, error) {
return swag.ConcatJSON(b1, b2), nil
}
func (o *Operation) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
var x struct {
spec.Extensions
OperationProps operationPropsOmitZero `json:",inline"`
}
x.Extensions = internal.SanitizeExtensions(o.Extensions)
x.OperationProps = operationPropsOmitZero(o.OperationProps)
return opts.MarshalNext(enc, x)
}
// UnmarshalJSON hydrates this items instance with the data from JSON
func (o *Operation) UnmarshalJSON(data []byte) error {
if internal.UseOptimizedJSONUnmarshalingV3 {
@ -95,3 +108,17 @@ type OperationProps struct {
// Servers contains an alternative server array to service this operation
Servers []*Server `json:"servers,omitempty"`
}
type operationPropsOmitZero struct {
Tags []string `json:"tags,omitempty"`
Summary string `json:"summary,omitempty"`
Description string `json:"description,omitempty"`
ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"`
OperationId string `json:"operationId,omitempty"`
Parameters []*Parameter `json:"parameters,omitempty"`
RequestBody *RequestBody `json:"requestBody,omitzero"`
Responses *Responses `json:"responses,omitzero"`
Deprecated bool `json:"deprecated,omitzero"`
SecurityRequirement []map[string][]string `json:"security,omitempty"`
Servers []*Server `json:"servers,omitempty"`
}


@ -36,6 +36,9 @@ type Parameter struct {
// MarshalJSON is a custom marshal function that knows how to encode Parameter as JSON
func (p *Parameter) MarshalJSON() ([]byte, error) {
if internal.UseOptimizedJSONMarshalingV3 {
return internal.DeterministicMarshal(p)
}
b1, err := json.Marshal(p.Refable)
if err != nil {
return nil, err
@ -51,6 +54,18 @@ func (p *Parameter) MarshalJSON() ([]byte, error) {
return swag.ConcatJSON(b1, b2, b3), nil
}
func (p *Parameter) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
var x struct {
Ref string `json:"$ref,omitempty"`
ParameterProps parameterPropsOmitZero `json:",inline"`
spec.Extensions
}
x.Ref = p.Refable.Ref.String()
x.Extensions = internal.SanitizeExtensions(p.Extensions)
x.ParameterProps = parameterPropsOmitZero(p.ParameterProps)
return opts.MarshalNext(enc, x)
}
func (p *Parameter) UnmarshalJSON(data []byte) error {
if internal.UseOptimizedJSONUnmarshalingV3 {
return jsonv2.Unmarshal(data, p)
@ -114,3 +129,19 @@ type ParameterProps struct {
// Examples of the parameter's potential value. Each example SHOULD contain a value in the correct format as specified in the parameter encoding
Examples map[string]*Example `json:"examples,omitempty"`
}
type parameterPropsOmitZero struct {
Name string `json:"name,omitempty"`
In string `json:"in,omitempty"`
Description string `json:"description,omitempty"`
Required bool `json:"required,omitzero"`
Deprecated bool `json:"deprecated,omitzero"`
AllowEmptyValue bool `json:"allowEmptyValue,omitzero"`
Style string `json:"style,omitempty"`
Explode bool `json:"explode,omitzero"`
AllowReserved bool `json:"allowReserved,omitzero"`
Schema *spec.Schema `json:"schema,omitzero"`
Content map[string]*MediaType `json:"content,omitempty"`
Example interface{} `json:"example,omitempty"`
Examples map[string]*Example `json:"examples,omitempty"`
}


@ -35,15 +35,41 @@ type Paths struct {
// MarshalJSON is a custom marshal function that knows how to encode Paths as JSON
func (p *Paths) MarshalJSON() ([]byte, error) {
b1, err := json.Marshal(p.Paths)
if internal.UseOptimizedJSONMarshalingV3 {
return internal.DeterministicMarshal(p)
}
b1, err := json.Marshal(p.VendorExtensible)
if err != nil {
return nil, err
}
b2, err := json.Marshal(p.VendorExtensible)
pths := make(map[string]*Path)
for k, v := range p.Paths {
if strings.HasPrefix(k, "/") {
pths[k] = v
}
}
b2, err := json.Marshal(pths)
if err != nil {
return nil, err
}
return swag.ConcatJSON(b1, b2), nil
concated := swag.ConcatJSON(b1, b2)
return concated, nil
}
func (p *Paths) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
m := make(map[string]any, len(p.Extensions)+len(p.Paths))
for k, v := range p.Extensions {
if internal.IsExtensionKey(k) {
m[k] = v
}
}
for k, v := range p.Paths {
if strings.HasPrefix(k, "/") {
m[k] = v
}
}
return opts.MarshalNext(enc, m)
}
// UnmarshalJSON hydrates this items instance with the data from JSON
@ -144,6 +170,9 @@ type Path struct {
// MarshalJSON is a custom marshal function that knows how to encode Path as JSON
func (p *Path) MarshalJSON() ([]byte, error) {
if internal.UseOptimizedJSONMarshalingV3 {
return internal.DeterministicMarshal(p)
}
b1, err := json.Marshal(p.Refable)
if err != nil {
return nil, err
@ -159,6 +188,18 @@ func (p *Path) MarshalJSON() ([]byte, error) {
return swag.ConcatJSON(b1, b2, b3), nil
}
func (p *Path) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
var x struct {
Ref string `json:"$ref,omitempty"`
spec.Extensions
PathProps
}
x.Ref = p.Refable.Ref.String()
x.Extensions = internal.SanitizeExtensions(p.Extensions)
x.PathProps = p.PathProps
return opts.MarshalNext(enc, x)
}
func (p *Path) UnmarshalJSON(data []byte) error {
if internal.UseOptimizedJSONUnmarshalingV3 {
return jsonv2.Unmarshal(data, p)


@ -36,6 +36,9 @@ type RequestBody struct {
// MarshalJSON is a custom marshal function that knows how to encode RequestBody as JSON
func (r *RequestBody) MarshalJSON() ([]byte, error) {
if internal.UseOptimizedJSONMarshalingV3 {
return internal.DeterministicMarshal(r)
}
b1, err := json.Marshal(r.Refable)
if err != nil {
return nil, err
@ -51,6 +54,18 @@ func (r *RequestBody) MarshalJSON() ([]byte, error) {
return swag.ConcatJSON(b1, b2, b3), nil
}
func (r *RequestBody) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
var x struct {
Ref string `json:"$ref,omitempty"`
RequestBodyProps requestBodyPropsOmitZero `json:",inline"`
spec.Extensions
}
x.Ref = r.Refable.Ref.String()
x.Extensions = internal.SanitizeExtensions(r.Extensions)
x.RequestBodyProps = requestBodyPropsOmitZero(r.RequestBodyProps)
return opts.MarshalNext(enc, x)
}
func (r *RequestBody) UnmarshalJSON(data []byte) error {
if internal.UseOptimizedJSONUnmarshalingV3 {
return jsonv2.Unmarshal(data, r)
@ -77,6 +92,12 @@ type RequestBodyProps struct {
Required bool `json:"required,omitempty"`
}
type requestBodyPropsOmitZero struct {
Description string `json:"description,omitempty"`
Content map[string]*MediaType `json:"content,omitempty"`
Required bool `json:"required,omitzero"`
}
func (r *RequestBody) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error {
var x struct {
spec.Extensions


@ -37,6 +37,9 @@ type Responses struct {
// MarshalJSON is a custom marshal function that knows how to encode Responses as JSON
func (r *Responses) MarshalJSON() ([]byte, error) {
if internal.UseOptimizedJSONMarshalingV3 {
return internal.DeterministicMarshal(r)
}
b1, err := json.Marshal(r.ResponsesProps)
if err != nil {
return nil, err
@ -48,6 +51,25 @@ func (r *Responses) MarshalJSON() ([]byte, error) {
return swag.ConcatJSON(b1, b2), nil
}
func (r Responses) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
type ArbitraryKeys map[string]interface{}
var x struct {
ArbitraryKeys
Default *Response `json:"default,omitzero"`
}
x.ArbitraryKeys = make(map[string]any, len(r.Extensions)+len(r.StatusCodeResponses))
for k, v := range r.Extensions {
if internal.IsExtensionKey(k) {
x.ArbitraryKeys[k] = v
}
}
for k, v := range r.StatusCodeResponses {
x.ArbitraryKeys[strconv.Itoa(k)] = v
}
x.Default = r.Default
return opts.MarshalNext(enc, x)
}
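The method above flattens two differently-keyed maps into one JSON object: extension keys pass an IsExtensionKey check, integer status codes are stringified with strconv.Itoa, and "default" stays a typed field. A self-contained sketch of that merge; the stand-in isExtensionKey helper and the sample data are assumptions, not the package's code.

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
)

// isExtensionKey is a stand-in for internal.IsExtensionKey.
func isExtensionKey(k string) bool {
	return strings.HasPrefix(strings.ToLower(k), "x-")
}

func main() {
	extensions := map[string]any{"x-audit": true, "note": "dropped"}
	statusCodes := map[int]string{200: "ok", 404: "not found"}

	merged := make(map[string]any, len(extensions)+len(statusCodes)+1)
	for k, v := range extensions {
		if isExtensionKey(k) {
			merged[k] = v
		}
	}
	for code, body := range statusCodes {
		merged[strconv.Itoa(code)] = body // keys become "200", "404"
	}
	merged["default"] = "fallback response"

	out, _ := json.Marshal(merged)
	fmt.Println(string(out))
}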
func (r *Responses) UnmarshalJSON(data []byte) error {
if internal.UseOptimizedJSONUnmarshalingV3 {
return jsonv2.Unmarshal(data, r)
@ -179,6 +201,9 @@ type Response struct {
// MarshalJSON is a custom marshal function that knows how to encode Response as JSON
func (r *Response) MarshalJSON() ([]byte, error) {
if internal.UseOptimizedJSONMarshalingV3 {
return internal.DeterministicMarshal(r)
}
b1, err := json.Marshal(r.Refable)
if err != nil {
return nil, err
@ -194,6 +219,18 @@ func (r *Response) MarshalJSON() ([]byte, error) {
return swag.ConcatJSON(b1, b2, b3), nil
}
func (r Response) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
var x struct {
Ref string `json:"$ref,omitempty"`
spec.Extensions
ResponseProps `json:",inline"`
}
x.Ref = r.Refable.Ref.String()
x.Extensions = internal.SanitizeExtensions(r.Extensions)
x.ResponseProps = r.ResponseProps
return opts.MarshalNext(enc, x)
}
func (r *Response) UnmarshalJSON(data []byte) error {
if internal.UseOptimizedJSONUnmarshalingV3 {
return jsonv2.Unmarshal(data, r)
@ -247,6 +284,9 @@ type Link struct {
// MarshalJSON is a custom marshal function that knows how to encode Link as JSON
func (r *Link) MarshalJSON() ([]byte, error) {
if internal.UseOptimizedJSONMarshalingV3 {
return internal.DeterministicMarshal(r)
}
b1, err := json.Marshal(r.Refable)
if err != nil {
return nil, err
@ -262,6 +302,18 @@ func (r *Link) MarshalJSON() ([]byte, error) {
return swag.ConcatJSON(b1, b2, b3), nil
}
func (r *Link) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
var x struct {
Ref string `json:"$ref,omitempty"`
spec.Extensions
LinkProps `json:",inline"`
}
x.Ref = r.Refable.Ref.String()
x.Extensions = internal.SanitizeExtensions(r.Extensions)
x.LinkProps = r.LinkProps
return opts.MarshalNext(enc, x)
}
func (r *Link) UnmarshalJSON(data []byte) error {
if internal.UseOptimizedJSONUnmarshalingV3 {
return jsonv2.Unmarshal(data, r)


@ -20,6 +20,8 @@ import (
"encoding/json"
"github.com/go-openapi/swag"
"k8s.io/kube-openapi/pkg/internal"
jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
"k8s.io/kube-openapi/pkg/validation/spec"
)
@ -32,6 +34,9 @@ type SecurityScheme struct {
// MarshalJSON is a custom marshal function that knows how to encode SecurityScheme as JSON
func (s *SecurityScheme) MarshalJSON() ([]byte, error) {
if internal.UseOptimizedJSONMarshalingV3 {
return internal.DeterministicMarshal(s)
}
b1, err := json.Marshal(s.SecuritySchemeProps)
if err != nil {
return nil, err
@ -47,6 +52,18 @@ func (s *SecurityScheme) MarshalJSON() ([]byte, error) {
return swag.ConcatJSON(b1, b2, b3), nil
}
func (s *SecurityScheme) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
var x struct {
Ref string `json:"$ref,omitempty"`
SecuritySchemeProps `json:",inline"`
spec.Extensions
}
x.Ref = s.Refable.Ref.String()
x.Extensions = internal.SanitizeExtensions(s.Extensions)
x.SecuritySchemeProps = s.SecuritySchemeProps
return opts.MarshalNext(enc, x)
}
// UnmarshalJSON hydrates this items instance with the data from JSON
func (s *SecurityScheme) UnmarshalJSON(data []byte) error {
if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil {


@ -41,6 +41,9 @@ type ServerProps struct {
// MarshalJSON is a custom marshal function that knows how to encode Responses as JSON
func (s *Server) MarshalJSON() ([]byte, error) {
if internal.UseOptimizedJSONMarshalingV3 {
return internal.DeterministicMarshal(s)
}
b1, err := json.Marshal(s.ServerProps)
if err != nil {
return nil, err
@ -52,6 +55,16 @@ func (s *Server) MarshalJSON() ([]byte, error) {
return swag.ConcatJSON(b1, b2), nil
}
func (s *Server) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
var x struct {
ServerProps `json:",inline"`
spec.Extensions
}
x.Extensions = internal.SanitizeExtensions(s.Extensions)
x.ServerProps = s.ServerProps
return opts.MarshalNext(enc, x)
}
func (s *Server) UnmarshalJSON(data []byte) error {
if internal.UseOptimizedJSONUnmarshalingV3 {
return jsonv2.Unmarshal(data, s)
@ -96,6 +109,9 @@ type ServerVariableProps struct {
// MarshalJSON is a custom marshal function that knows how to encode Responses as JSON
func (s *ServerVariable) MarshalJSON() ([]byte, error) {
if internal.UseOptimizedJSONMarshalingV3 {
return internal.DeterministicMarshal(s)
}
b1, err := json.Marshal(s.ServerVariableProps)
if err != nil {
return nil, err
@ -107,6 +123,16 @@ func (s *ServerVariable) MarshalJSON() ([]byte, error) {
return swag.ConcatJSON(b1, b2), nil
}
func (s *ServerVariable) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
var x struct {
ServerVariableProps `json:",inline"`
spec.Extensions
}
x.Extensions = internal.SanitizeExtensions(s.Extensions)
x.ServerVariableProps = s.ServerVariableProps
return opts.MarshalNext(enc, x)
}
func (s *ServerVariable) UnmarshalJSON(data []byte) error {
if internal.UseOptimizedJSONUnmarshalingV3 {
return jsonv2.Unmarshal(data, s)


@ -36,6 +36,8 @@ type OpenAPI struct {
Servers []*Server `json:"servers,omitempty"`
// Components hold various schemas for the specification
Components *Components `json:"components,omitempty"`
// SecurityRequirement holds a declaration of which security mechanisms can be used across the API
SecurityRequirement []map[string][]string `json:"security,omitempty"`
// ExternalDocs holds additional external documentation
ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
}
@ -48,3 +50,26 @@ func (o *OpenAPI) UnmarshalJSON(data []byte) error {
}
return json.Unmarshal(data, &p)
}
func (o *OpenAPI) MarshalJSON() ([]byte, error) {
if internal.UseOptimizedJSONMarshalingV3 {
return internal.DeterministicMarshal(o)
}
type OpenAPIWithNoFunctions OpenAPI
p := (*OpenAPIWithNoFunctions)(o)
return json.Marshal(&p)
}
func (o *OpenAPI) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error {
type OpenAPIOmitZero struct {
Version string `json:"openapi"`
Info *spec.Info `json:"info"`
Paths *Paths `json:"paths,omitzero"`
Servers []*Server `json:"servers,omitempty"`
Components *Components `json:"components,omitzero"`
SecurityRequirement []map[string][]string `json:"security,omitempty"`
ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"`
}
x := (*OpenAPIOmitZero)(o)
return opts.MarshalNext(enc, x)
}
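OpenAPI.MarshalJSON above relies on the usual Go trick for falling back to encoding/json without infinite recursion: converting the receiver to a locally defined type with identical fields but no MarshalJSON method. A minimal self-contained illustration of that trick; the Doc type is hypothetical.

package main

import (
	"encoding/json"
	"fmt"
)

type Doc struct {
	Version string `json:"openapi"`
}

func (d *Doc) MarshalJSON() ([]byte, error) {
	type docNoMethods Doc // same layout, no MarshalJSON method, so no recursion
	return json.Marshal((*docNoMethods)(d))
}

func main() {
	out, _ := json.Marshal(&Doc{Version: "3.0.0"})
	fmt.Println(string(out)) // {"openapi":"3.0.0"}
}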


@ -1,502 +0,0 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spec
import (
"github.com/go-openapi/jsonreference"
"github.com/google/go-cmp/cmp"
fuzz "github.com/google/gofuzz"
)
var SwaggerFuzzFuncs []interface{} = []interface{}{
func(v *Responses, c fuzz.Continue) {
c.FuzzNoCustom(v)
if v.Default != nil {
// Check if we hit maxDepth and left an incomplete value
if v.Default.Description == "" {
v.Default = nil
v.StatusCodeResponses = nil
}
}
// conversion has no way to discern empty statusCodeResponses from
// nil, since "default" is always included in the map.
// So avoid empty responses list
if len(v.StatusCodeResponses) == 0 {
v.StatusCodeResponses = nil
}
},
func(v *Operation, c fuzz.Continue) {
c.FuzzNoCustom(v)
if v != nil {
// force non-nil
v.Responses = &Responses{}
c.Fuzz(v.Responses)
v.Schemes = nil
if c.RandBool() {
v.Schemes = append(v.Schemes, "http")
}
if c.RandBool() {
v.Schemes = append(v.Schemes, "https")
}
if c.RandBool() {
v.Schemes = append(v.Schemes, "ws")
}
if c.RandBool() {
v.Schemes = append(v.Schemes, "wss")
}
// Gnostic unconditionally makes security values non-null
// So do not fuzz null values into the array.
for i, val := range v.Security {
if val == nil {
v.Security[i] = make(map[string][]string)
}
for k, v := range val {
if v == nil {
val[k] = make([]string, 0)
}
}
}
}
},
func(v map[int]Response, c fuzz.Continue) {
n := 0
c.Fuzz(&n)
if n == 0 {
// Test that fuzzer is not at maxDepth so we do not
// end up with empty elements
return
}
// Prevent negative numbers
num := c.Intn(4)
for i := 0; i < num+2; i++ {
val := Response{}
c.Fuzz(&val)
val.Description = c.RandString() + "x"
v[100*(i+1)+c.Intn(100)] = val
}
},
func(v map[string]PathItem, c fuzz.Continue) {
n := 0
c.Fuzz(&n)
if n == 0 {
// Test that fuzzer is not at maxDepth so we do not
// end up with empty elements
return
}
num := c.Intn(5)
for i := 0; i < num+2; i++ {
val := PathItem{}
c.Fuzz(&val)
// Ref params are only allowed in certain locations, so
// possibly add a few to PathItems
numRefsToAdd := c.Intn(5)
for i := 0; i < numRefsToAdd; i++ {
theRef := Parameter{}
c.Fuzz(&theRef.Refable)
val.Parameters = append(val.Parameters, theRef)
}
v["/"+c.RandString()] = val
}
},
func(v *SchemaOrArray, c fuzz.Continue) {
*v = SchemaOrArray{}
// gnostic parser just doesn't support more
// than one Schema here
v.Schema = &Schema{}
c.Fuzz(&v.Schema)
},
func(v *SchemaOrBool, c fuzz.Continue) {
*v = SchemaOrBool{}
if c.RandBool() {
v.Allows = c.RandBool()
} else {
v.Schema = &Schema{}
v.Allows = true
c.Fuzz(&v.Schema)
}
},
func(v map[string]Response, c fuzz.Continue) {
n := 0
c.Fuzz(&n)
if n == 0 {
// Test that fuzzer is not at maxDepth so we do not
// end up with empty elements
return
}
// Response definitions are not allowed to
// be refs
for i := 0; i < c.Intn(5)+1; i++ {
resp := &Response{}
c.Fuzz(resp)
resp.Ref = Ref{}
resp.Description = c.RandString() + "x"
// Response refs are not vendor extensible by gnostic
resp.VendorExtensible.Extensions = nil
v[c.RandString()+"x"] = *resp
}
},
func(v *Header, c fuzz.Continue) {
if v != nil {
c.FuzzNoCustom(v)
// descendant Items of Header may not be refs
cur := v.Items
for cur != nil {
cur.Ref = Ref{}
cur = cur.Items
}
}
},
func(v *Ref, c fuzz.Continue) {
*v = Ref{}
v.Ref, _ = jsonreference.New("http://asd.com/" + c.RandString())
},
func(v *Response, c fuzz.Continue) {
*v = Response{}
if c.RandBool() {
v.Ref = Ref{}
v.Ref.Ref, _ = jsonreference.New("http://asd.com/" + c.RandString())
} else {
c.Fuzz(&v.VendorExtensible)
c.Fuzz(&v.Schema)
c.Fuzz(&v.ResponseProps)
v.Headers = nil
v.Ref = Ref{}
n := 0
c.Fuzz(&n)
if n != 0 {
// Test that fuzzer is not at maxDepth so we do not
// end up with empty elements
num := c.Intn(4)
for i := 0; i < num; i++ {
if v.Headers == nil {
v.Headers = make(map[string]Header)
}
hdr := Header{}
c.Fuzz(&hdr)
if hdr.Type == "" {
// hit maxDepth, just abort trying to make headers
v.Headers = nil
break
}
v.Headers[c.RandString()+"x"] = hdr
}
} else {
v.Headers = nil
}
}
v.Description = c.RandString() + "x"
// Gnostic parses empty as nil, so avoid keeping an empty Headers map
if len(v.Headers) == 0 {
v.Headers = nil
}
},
func(v **Info, c fuzz.Continue) {
// Info is never nil
*v = &Info{}
c.FuzzNoCustom(*v)
(*v).Title = c.RandString() + "x"
},
func(v *Extensions, c fuzz.Continue) {
// gnostic parser only picks up x- vendor extensions
numChildren := c.Intn(5)
for i := 0; i < numChildren; i++ {
if *v == nil {
*v = Extensions{}
}
(*v)["x-"+c.RandString()] = c.RandString()
}
},
func(v *Swagger, c fuzz.Continue) {
c.FuzzNoCustom(v)
if v.Paths == nil {
// Force paths non-nil since it does not have omitempty in json tag.
// This means a perfect roundtrip (via json) is impossible,
// since we can't tell the difference between empty/unspecified paths
v.Paths = &Paths{}
c.Fuzz(v.Paths)
}
v.Swagger = "2.0"
// Gnostic does not support serializing ID at all,
// so this is unavoidable data loss
v.ID = ""
v.Schemes = nil
if c.RandUint64()%2 == 1 {
v.Schemes = append(v.Schemes, "http")
}
if c.RandUint64()%2 == 1 {
v.Schemes = append(v.Schemes, "https")
}
if c.RandUint64()%2 == 1 {
v.Schemes = append(v.Schemes, "ws")
}
if c.RandUint64()%2 == 1 {
v.Schemes = append(v.Schemes, "wss")
}
// Gnostic unconditionally makes security values non-null
// So do not fuzz null values into the array.
for i, val := range v.Security {
if val == nil {
v.Security[i] = make(map[string][]string)
}
for k, v := range val {
if v == nil {
val[k] = make([]string, 0)
}
}
}
},
func(v *SecurityScheme, c fuzz.Continue) {
v.Description = c.RandString() + "x"
c.Fuzz(&v.VendorExtensible)
switch c.Intn(3) {
case 0:
v.Type = "basic"
case 1:
v.Type = "apiKey"
switch c.Intn(2) {
case 0:
v.In = "header"
case 1:
v.In = "query"
default:
panic("unreachable")
}
v.Name = "x" + c.RandString()
case 2:
v.Type = "oauth2"
switch c.Intn(4) {
case 0:
v.Flow = "accessCode"
v.TokenURL = "https://" + c.RandString()
v.AuthorizationURL = "https://" + c.RandString()
case 1:
v.Flow = "application"
v.TokenURL = "https://" + c.RandString()
case 2:
v.Flow = "implicit"
v.AuthorizationURL = "https://" + c.RandString()
case 3:
v.Flow = "password"
v.TokenURL = "https://" + c.RandString()
default:
panic("unreachable")
}
c.Fuzz(&v.Scopes)
default:
panic("unreachable")
}
},
func(v *interface{}, c fuzz.Continue) {
*v = c.RandString() + "x"
},
func(v *string, c fuzz.Continue) {
*v = c.RandString() + "x"
},
func(v *ExternalDocumentation, c fuzz.Continue) {
v.Description = c.RandString() + "x"
v.URL = c.RandString() + "x"
},
func(v *SimpleSchema, c fuzz.Continue) {
c.FuzzNoCustom(v)
switch c.Intn(5) {
case 0:
v.Type = "string"
case 1:
v.Type = "number"
case 2:
v.Type = "boolean"
case 3:
v.Type = "integer"
case 4:
v.Type = "array"
default:
panic("unreachable")
}
switch c.Intn(5) {
case 0:
v.CollectionFormat = "csv"
case 1:
v.CollectionFormat = "ssv"
case 2:
v.CollectionFormat = "tsv"
case 3:
v.CollectionFormat = "pipes"
case 4:
v.CollectionFormat = ""
default:
panic("unreachable")
}
// None of the types which include SimpleSchema in our definitions
// actually support "example" in the official spec
v.Example = nil
// unsupported by openapi
v.Nullable = false
},
func(v *int64, c fuzz.Continue) {
c.Fuzz(v)
// Gnostic does not differentiate between 0 and non-specified
// so avoid using 0 for fuzzer
if *v == 0 {
*v = 1
}
},
func(v *float64, c fuzz.Continue) {
c.Fuzz(v)
// Gnostic does not differentiate between 0 and non-specified
// so avoid using 0 for fuzzer
if *v == 0.0 {
*v = 1.0
}
},
func(v *Parameter, c fuzz.Continue) {
if v == nil {
return
}
c.Fuzz(&v.VendorExtensible)
if c.RandBool() {
// body param
v.Description = c.RandString() + "x"
v.Name = c.RandString() + "x"
v.In = "body"
c.Fuzz(&v.Description)
c.Fuzz(&v.Required)
v.Schema = &Schema{}
c.Fuzz(&v.Schema)
} else {
c.Fuzz(&v.SimpleSchema)
c.Fuzz(&v.CommonValidations)
v.AllowEmptyValue = false
v.Description = c.RandString() + "x"
v.Name = c.RandString() + "x"
switch c.Intn(4) {
case 0:
// Header param
v.In = "header"
case 1:
// Form data param
v.In = "formData"
v.AllowEmptyValue = c.RandBool()
case 2:
// Query param
v.In = "query"
v.AllowEmptyValue = c.RandBool()
case 3:
// Path param
v.In = "path"
v.Required = true
default:
panic("unreachable")
}
// descendant Items of Parameter may not be refs
cur := v.Items
for cur != nil {
cur.Ref = Ref{}
cur = cur.Items
}
}
},
func(v *Schema, c fuzz.Continue) {
if c.RandBool() {
// file schema
c.Fuzz(&v.Default)
c.Fuzz(&v.Description)
c.Fuzz(&v.Example)
c.Fuzz(&v.ExternalDocs)
c.Fuzz(&v.Format)
c.Fuzz(&v.ReadOnly)
c.Fuzz(&v.Required)
c.Fuzz(&v.Title)
v.Type = StringOrArray{"file"}
} else {
// normal schema
c.Fuzz(&v.SchemaProps)
c.Fuzz(&v.SwaggerSchemaProps)
c.Fuzz(&v.VendorExtensible)
// c.Fuzz(&v.ExtraProps)
// ExtraProps will not roundtrip - gnostic throws out
// unrecognized keys
}
// Not supported by official openapi v2 spec
// and stripped by k8s apiserver
v.ID = ""
v.AnyOf = nil
v.OneOf = nil
v.Not = nil
v.Nullable = false
v.AdditionalItems = nil
v.Schema = ""
v.PatternProperties = nil
v.Definitions = nil
v.Dependencies = nil
},
}
var SwaggerDiffOptions = []cmp.Option{
// cmp.Diff panics on Ref since jsonreference.Ref uses unexported fields
cmp.Comparer(func(a Ref, b Ref) bool {
return a.String() == b.String()
}),
}
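The cmp.Comparer at the end of these deleted fuzz helpers exists because jsonreference.Ref carries unexported state, which makes cmp.Diff panic without a custom comparison; comparing the rendered string is sufficient for roundtrip tests. A small sketch of the same option used on its own, assuming compatible module versions of go-cmp and jsonreference.

package main

import (
	"fmt"

	"github.com/go-openapi/jsonreference"
	"github.com/google/go-cmp/cmp"
)

func main() {
	a, _ := jsonreference.New("http://example.com/#/definitions/Pet")
	b, _ := jsonreference.New("http://example.com/#/definitions/Pet")

	// Compare Refs by their string form instead of their unexported fields.
	refCmp := cmp.Comparer(func(x, y jsonreference.Ref) bool {
		return x.String() == y.String()
	})
	fmt.Println(cmp.Diff(a, b, refCmp)) // empty diff: the refs are considered equal
}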


@ -67,6 +67,13 @@ func GetLoadBalancerSourceRanges(service *api.Service) (utilnet.IPNetSet, error)
return ipnets, nil
}
// ExternallyAccessible checks if service is externally accessible.
func ExternallyAccessible(service *api.Service) bool {
return service.Spec.Type == api.ServiceTypeLoadBalancer ||
service.Spec.Type == api.ServiceTypeNodePort ||
(service.Spec.Type == api.ServiceTypeClusterIP && len(service.Spec.ExternalIPs) > 0)
}
// RequestsOnlyLocalTraffic checks if service requests OnlyLocal traffic.
func RequestsOnlyLocalTraffic(service *api.Service) bool {
if service.Spec.Type != api.ServiceTypeLoadBalancer &&

vendor/k8s.io/kubernetes/pkg/api/v1/service/util.go (generated, vendored, new file, 99 lines)

@ -0,0 +1,99 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
utilnet "k8s.io/utils/net"
)
const (
defaultLoadBalancerSourceRanges = "0.0.0.0/0"
)
// IsAllowAll checks whether the utilnet.IPNet allows traffic from 0.0.0.0/0
func IsAllowAll(ipnets utilnet.IPNetSet) bool {
for _, s := range ipnets.StringSlice() {
if s == "0.0.0.0/0" {
return true
}
}
return false
}
// GetLoadBalancerSourceRanges first tries to parse and verify the LoadBalancerSourceRanges field of a service.
// If the field is not specified, it falls back to parsing and verifying the AnnotationLoadBalancerSourceRangesKey annotation on the service,
// extracting the source ranges to allow; if that is also absent, it returns a default (allow-all) value.
func GetLoadBalancerSourceRanges(service *v1.Service) (utilnet.IPNetSet, error) {
var ipnets utilnet.IPNetSet
var err error
// if SourceRange field is specified, ignore sourceRange annotation
if len(service.Spec.LoadBalancerSourceRanges) > 0 {
specs := service.Spec.LoadBalancerSourceRanges
ipnets, err = utilnet.ParseIPNets(specs...)
if err != nil {
return nil, fmt.Errorf("service.Spec.LoadBalancerSourceRanges: %v is not valid. Expecting a list of IP ranges. For example, 10.0.0.0/24. Error msg: %v", specs, err)
}
} else {
val := service.Annotations[v1.AnnotationLoadBalancerSourceRangesKey]
val = strings.TrimSpace(val)
if val == "" {
val = defaultLoadBalancerSourceRanges
}
specs := strings.Split(val, ",")
ipnets, err = utilnet.ParseIPNets(specs...)
if err != nil {
return nil, fmt.Errorf("%s: %s is not valid. Expecting a comma-separated list of source IP ranges. For example, 10.0.0.0/24,192.168.2.0/24", v1.AnnotationLoadBalancerSourceRangesKey, val)
}
}
return ipnets, nil
}
// ExternallyAccessible checks if service is externally accessible.
func ExternallyAccessible(service *v1.Service) bool {
return service.Spec.Type == v1.ServiceTypeLoadBalancer ||
service.Spec.Type == v1.ServiceTypeNodePort ||
(service.Spec.Type == v1.ServiceTypeClusterIP && len(service.Spec.ExternalIPs) > 0)
}
// ExternalPolicyLocal checks if service is externally accessible and has ETP = Local.
func ExternalPolicyLocal(service *v1.Service) bool {
if !ExternallyAccessible(service) {
return false
}
return service.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyLocal
}
// InternalPolicyLocal checks if service has ITP = Local.
func InternalPolicyLocal(service *v1.Service) bool {
if service.Spec.InternalTrafficPolicy == nil {
return false
}
return *service.Spec.InternalTrafficPolicy == v1.ServiceInternalTrafficPolicyLocal
}
// NeedsHealthCheck checks if service needs health check.
func NeedsHealthCheck(service *v1.Service) bool {
if service.Spec.Type != v1.ServiceTypeLoadBalancer {
return false
}
return ExternalPolicyLocal(service)
}
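A hedged usage sketch of the helpers in this new vendored file: a LoadBalancer Service with externalTrafficPolicy Local is both externally accessible and "external policy local". The check is inlined here so the snippet only needs the public k8s.io/api/core/v1 types rather than the vendored k8s.io/kubernetes package.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// externallyAccessible mirrors the ExternallyAccessible helper above.
func externallyAccessible(svc *v1.Service) bool {
	return svc.Spec.Type == v1.ServiceTypeLoadBalancer ||
		svc.Spec.Type == v1.ServiceTypeNodePort ||
		(svc.Spec.Type == v1.ServiceTypeClusterIP && len(svc.Spec.ExternalIPs) > 0)
}

func main() {
	svc := &v1.Service{
		Spec: v1.ServiceSpec{
			Type:                  v1.ServiceTypeLoadBalancer,
			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
		},
	}
	// ExternalPolicyLocal == externally accessible AND external traffic policy Local.
	fmt.Println(externallyAccessible(svc) && svc.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyLocal)
}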


@ -129,7 +129,7 @@ const (
// This is an action which might be taken on a pod failure - mark the
// Job's index as failed to avoid restarts within this index. This action
// can only be used when backoffLimitPerIndex is set.
// This value is alpha-level.
// This value is beta-level.
PodFailurePolicyActionFailIndex PodFailurePolicyAction = "FailIndex"
// This is an action which might be taken on a pod failure - the counter towards
@ -306,8 +306,8 @@ type JobSpec struct {
// batch.kubernetes.io/job-index-failure-count annotation. It can only
// be set when Job's completionMode=Indexed, and the Pod's restart
// policy is Never. The field is immutable.
// This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex`
// feature gate is enabled (disabled by default).
// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
// feature gate is enabled (enabled by default).
// +optional
BackoffLimitPerIndex *int32
@ -319,8 +319,8 @@ type JobSpec struct {
// It can only be specified when backoffLimitPerIndex is set.
// It can be null or up to completions. It is required and must be
// less than or equal to 10^4 when completions is greater than 10^5.
// This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex`
// feature gate is enabled (disabled by default).
// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
// feature gate is enabled (enabled by default).
// +optional
MaxFailedIndexes *int32
@ -405,7 +405,8 @@ type JobSpec struct {
//
// When using podFailurePolicy, Failed is the only allowed value.
// TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.
// This is an alpha field. Enable JobPodReplacementPolicy to be able to use this field.
// This is a beta field. To use this, enable the JobPodReplacementPolicy feature gate,
// which is on by default.
// +optional
PodReplacementPolicy *PodReplacementPolicy
}
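A hedged usage sketch of the per-index failure handling fields promoted to beta here, written against the public k8s.io/api/batch/v1 mirror of this internal type (the field names match; the ptr helper and sample values are assumptions): an Indexed Job that allows one retry per index and is marked failed once more than two indexes fail.

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/utils/ptr"
)

func main() {
	spec := batchv1.JobSpec{
		CompletionMode:       ptr.To(batchv1.IndexedCompletion),
		Completions:          ptr.To[int32](10),
		BackoffLimitPerIndex: ptr.To[int32](1), // retries allowed within each index
		MaxFailedIndexes:     ptr.To[int32](2), // tolerated failed indexes before the Job fails
	}
	fmt.Println(*spec.BackoffLimitPerIndex, *spec.MaxFailedIndexes)
}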
@ -443,15 +444,12 @@ type JobStatus struct {
// The number of pods which are terminating (in phase Pending or Running
// and have a deletionTimestamp).
//
// This field is alpha-level. The job controller populates the field when
// the feature gate JobPodReplacementPolicy is enabled (disabled by default).
// This field is beta-level. The job controller populates the field when
// the feature gate JobPodReplacementPolicy is enabled (enabled by default).
// +optional
Terminating *int32
// The number of active pods which have a Ready condition.
//
// This field is beta-level. The job controller populates the field when
// the feature gate JobReadyPods is enabled (enabled by default).
// +optional
Ready *int32
@ -481,8 +479,8 @@ type JobStatus struct {
// last element of the series, separated by a hyphen.
// For example, if the failed indexes are 1, 3, 4, 5 and 7, they are
// represented as "1,3-5,7".
// This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex`
// feature gate is enabled (disabled by default).
// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
// feature gate is enabled (enabled by default).
// +optional
FailedIndexes *string


@ -113,34 +113,34 @@ var Semantic = conversion.EqualitiesOrDie(
},
)
var standardResourceQuotaScopes = sets.NewString(
string(core.ResourceQuotaScopeTerminating),
string(core.ResourceQuotaScopeNotTerminating),
string(core.ResourceQuotaScopeBestEffort),
string(core.ResourceQuotaScopeNotBestEffort),
string(core.ResourceQuotaScopePriorityClass),
var standardResourceQuotaScopes = sets.New(
core.ResourceQuotaScopeTerminating,
core.ResourceQuotaScopeNotTerminating,
core.ResourceQuotaScopeBestEffort,
core.ResourceQuotaScopeNotBestEffort,
core.ResourceQuotaScopePriorityClass,
)
// IsStandardResourceQuotaScope returns true if the scope is a standard value
func IsStandardResourceQuotaScope(str string) bool {
return standardResourceQuotaScopes.Has(str) || str == string(core.ResourceQuotaScopeCrossNamespacePodAffinity)
func IsStandardResourceQuotaScope(scope core.ResourceQuotaScope) bool {
return standardResourceQuotaScopes.Has(scope) || scope == core.ResourceQuotaScopeCrossNamespacePodAffinity
}
var podObjectCountQuotaResources = sets.NewString(
string(core.ResourcePods),
var podObjectCountQuotaResources = sets.New(
core.ResourcePods,
)
var podComputeQuotaResources = sets.NewString(
string(core.ResourceCPU),
string(core.ResourceMemory),
string(core.ResourceLimitsCPU),
string(core.ResourceLimitsMemory),
string(core.ResourceRequestsCPU),
string(core.ResourceRequestsMemory),
var podComputeQuotaResources = sets.New(
core.ResourceCPU,
core.ResourceMemory,
core.ResourceLimitsCPU,
core.ResourceLimitsMemory,
core.ResourceRequestsCPU,
core.ResourceRequestsMemory,
)
// IsResourceQuotaScopeValidForResource returns true if the resource applies to the specified scope
func IsResourceQuotaScopeValidForResource(scope core.ResourceQuotaScope, resource string) bool {
func IsResourceQuotaScopeValidForResource(scope core.ResourceQuotaScope, resource core.ResourceName) bool {
switch scope {
case core.ResourceQuotaScopeTerminating, core.ResourceQuotaScopeNotTerminating, core.ResourceQuotaScopeNotBestEffort,
core.ResourceQuotaScopePriorityClass, core.ResourceQuotaScopeCrossNamespacePodAffinity:
@ -152,16 +152,16 @@ func IsResourceQuotaScopeValidForResource(scope core.ResourceQuotaScope, resourc
}
}
var standardContainerResources = sets.NewString(
string(core.ResourceCPU),
string(core.ResourceMemory),
string(core.ResourceEphemeralStorage),
var standardContainerResources = sets.New(
core.ResourceCPU,
core.ResourceMemory,
core.ResourceEphemeralStorage,
)
// IsStandardContainerResourceName returns true if the container can make a resource request
// for the specified resource
func IsStandardContainerResourceName(str string) bool {
return standardContainerResources.Has(str) || IsHugePageResourceName(core.ResourceName(str))
func IsStandardContainerResourceName(name core.ResourceName) bool {
return standardContainerResources.Has(name) || IsHugePageResourceName(name)
}
// IsExtendedResourceName returns true if:
@ -196,88 +196,88 @@ func IsOvercommitAllowed(name core.ResourceName) bool {
!IsHugePageResourceName(name)
}
var standardLimitRangeTypes = sets.NewString(
string(core.LimitTypePod),
string(core.LimitTypeContainer),
string(core.LimitTypePersistentVolumeClaim),
var standardLimitRangeTypes = sets.New(
core.LimitTypePod,
core.LimitTypeContainer,
core.LimitTypePersistentVolumeClaim,
)
// IsStandardLimitRangeType returns true if the type is Pod or Container
func IsStandardLimitRangeType(str string) bool {
return standardLimitRangeTypes.Has(str)
func IsStandardLimitRangeType(value core.LimitType) bool {
return standardLimitRangeTypes.Has(value)
}
var standardQuotaResources = sets.NewString(
string(core.ResourceCPU),
string(core.ResourceMemory),
string(core.ResourceEphemeralStorage),
string(core.ResourceRequestsCPU),
string(core.ResourceRequestsMemory),
string(core.ResourceRequestsStorage),
string(core.ResourceRequestsEphemeralStorage),
string(core.ResourceLimitsCPU),
string(core.ResourceLimitsMemory),
string(core.ResourceLimitsEphemeralStorage),
string(core.ResourcePods),
string(core.ResourceQuotas),
string(core.ResourceServices),
string(core.ResourceReplicationControllers),
string(core.ResourceSecrets),
string(core.ResourcePersistentVolumeClaims),
string(core.ResourceConfigMaps),
string(core.ResourceServicesNodePorts),
string(core.ResourceServicesLoadBalancers),
var standardQuotaResources = sets.New(
core.ResourceCPU,
core.ResourceMemory,
core.ResourceEphemeralStorage,
core.ResourceRequestsCPU,
core.ResourceRequestsMemory,
core.ResourceRequestsStorage,
core.ResourceRequestsEphemeralStorage,
core.ResourceLimitsCPU,
core.ResourceLimitsMemory,
core.ResourceLimitsEphemeralStorage,
core.ResourcePods,
core.ResourceQuotas,
core.ResourceServices,
core.ResourceReplicationControllers,
core.ResourceSecrets,
core.ResourcePersistentVolumeClaims,
core.ResourceConfigMaps,
core.ResourceServicesNodePorts,
core.ResourceServicesLoadBalancers,
)
// IsStandardQuotaResourceName returns true if the resource is known to
// the quota tracking system
func IsStandardQuotaResourceName(str string) bool {
return standardQuotaResources.Has(str) || IsQuotaHugePageResourceName(core.ResourceName(str))
func IsStandardQuotaResourceName(name core.ResourceName) bool {
return standardQuotaResources.Has(name) || IsQuotaHugePageResourceName(name)
}
var standardResources = sets.NewString(
string(core.ResourceCPU),
string(core.ResourceMemory),
string(core.ResourceEphemeralStorage),
string(core.ResourceRequestsCPU),
string(core.ResourceRequestsMemory),
string(core.ResourceRequestsEphemeralStorage),
string(core.ResourceLimitsCPU),
string(core.ResourceLimitsMemory),
string(core.ResourceLimitsEphemeralStorage),
string(core.ResourcePods),
string(core.ResourceQuotas),
string(core.ResourceServices),
string(core.ResourceReplicationControllers),
string(core.ResourceSecrets),
string(core.ResourceConfigMaps),
string(core.ResourcePersistentVolumeClaims),
string(core.ResourceStorage),
string(core.ResourceRequestsStorage),
string(core.ResourceServicesNodePorts),
string(core.ResourceServicesLoadBalancers),
var standardResources = sets.New(
core.ResourceCPU,
core.ResourceMemory,
core.ResourceEphemeralStorage,
core.ResourceRequestsCPU,
core.ResourceRequestsMemory,
core.ResourceRequestsEphemeralStorage,
core.ResourceLimitsCPU,
core.ResourceLimitsMemory,
core.ResourceLimitsEphemeralStorage,
core.ResourcePods,
core.ResourceQuotas,
core.ResourceServices,
core.ResourceReplicationControllers,
core.ResourceSecrets,
core.ResourceConfigMaps,
core.ResourcePersistentVolumeClaims,
core.ResourceStorage,
core.ResourceRequestsStorage,
core.ResourceServicesNodePorts,
core.ResourceServicesLoadBalancers,
)
// IsStandardResourceName returns true if the resource is known to the system
func IsStandardResourceName(str string) bool {
return standardResources.Has(str) || IsQuotaHugePageResourceName(core.ResourceName(str))
func IsStandardResourceName(name core.ResourceName) bool {
return standardResources.Has(name) || IsQuotaHugePageResourceName(name)
}
var integerResources = sets.NewString(
string(core.ResourcePods),
string(core.ResourceQuotas),
string(core.ResourceServices),
string(core.ResourceReplicationControllers),
string(core.ResourceSecrets),
string(core.ResourceConfigMaps),
string(core.ResourcePersistentVolumeClaims),
string(core.ResourceServicesNodePorts),
string(core.ResourceServicesLoadBalancers),
var integerResources = sets.New(
core.ResourcePods,
core.ResourceQuotas,
core.ResourceServices,
core.ResourceReplicationControllers,
core.ResourceSecrets,
core.ResourceConfigMaps,
core.ResourcePersistentVolumeClaims,
core.ResourceServicesNodePorts,
core.ResourceServicesLoadBalancers,
)
// IsIntegerResourceName returns true if the resource is measured in integer values
func IsIntegerResourceName(str string) bool {
return integerResources.Has(str) || IsExtendedResourceName(core.ResourceName(str))
func IsIntegerResourceName(name core.ResourceName) bool {
return integerResources.Has(name) || IsExtendedResourceName(name)
}
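The mechanical change running through this file is sets.NewString(string(...)) becoming the generic sets.New(...), which keeps the typed keys and drops the string conversions at every call site. A minimal sketch of the difference; ResourceName here is a local stand-in for core.ResourceName.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

type ResourceName string // stand-in for core.ResourceName

func main() {
	old := sets.NewString(string(ResourceName("cpu")), string(ResourceName("memory")))
	generic := sets.New(ResourceName("cpu"), ResourceName("memory"))

	fmt.Println(old.Has("cpu"))                   // string key required
	fmt.Println(generic.Has(ResourceName("cpu"))) // typed key, no conversion
}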
// IsServiceIPSet aims to check if the service's ClusterIP is set or not
@ -289,7 +289,7 @@ func IsServiceIPSet(service *core.Service) bool {
service.Spec.ClusterIP != core.ClusterIPNone
}
var standardFinalizers = sets.NewString(
var standardFinalizers = sets.New(
string(core.FinalizerKubernetes),
metav1.FinalizerOrphanDependents,
metav1.FinalizerDeleteDependents,


@ -30,12 +30,22 @@ func isSupportedQoSComputeResource(name core.ResourceName) bool {
return supportedQoSComputeResources.Has(string(name))
}
// GetPodQOS returns the QoS class of a pod.
// GetPodQOS returns the QoS class of a pod persisted in the PodStatus.QOSClass field.
// If PodStatus.QOSClass is empty, it returns the value of ComputePodQOS(), which evaluates the pod's QoS class.
func GetPodQOS(pod *core.Pod) core.PodQOSClass {
if pod.Status.QOSClass != "" {
return pod.Status.QOSClass
}
return ComputePodQOS(pod)
}
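A hedged sketch of the new fast path: GetPodQOS now trusts a previously persisted Status.QOSClass and only falls back to the container walk in ComputePodQOS when that field is empty. The sketch uses the public k8s.io/api/core/v1 types and a stubbed compute function, so it is illustrative rather than the vendored code itself.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func getPodQOS(pod *v1.Pod, compute func(*v1.Pod) v1.PodQOSClass) v1.PodQOSClass {
	if pod.Status.QOSClass != "" {
		return pod.Status.QOSClass // persisted value wins; no recomputation
	}
	return compute(pod)
}

func main() {
	pod := &v1.Pod{Status: v1.PodStatus{QOSClass: v1.PodQOSBurstable}}
	// Prints Burstable: the stored class short-circuits the (stubbed) computation.
	fmt.Println(getPodQOS(pod, func(*v1.Pod) v1.PodQOSClass { return v1.PodQOSBestEffort }))
}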
// ComputePodQOS evaluates the list of containers to determine a pod's QoS class. This function is more
// expensive than GetPodQOS which should be used for pods having a non-empty .Status.QOSClass.
// A pod is besteffort if none of its containers have specified any requests or limits.
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
// A pod is burstable if limits and requests do not match across all containers.
// When this function is updated please also update staging/src/k8s.io/kubectl/pkg/util/qos/qos.go
func GetPodQOS(pod *core.Pod) core.PodQOSClass {
func ComputePodQOS(pod *core.Pod) core.PodQOSClass {
requests := core.ResourceList{}
limits := core.ResourceList{}
zeroQuantity := resource.MustParse("0")


@ -335,6 +335,16 @@ type PersistentVolumeSpec struct {
// This field influences the scheduling of pods that use this volume.
// +optional
NodeAffinity *VolumeNodeAffinity
// Name of VolumeAttributesClass to which this persistent volume belongs. Empty value
// is not allowed. When this field is not set, it indicates that this volume does not belong to any
// VolumeAttributesClass. This field is mutable and can be changed by the CSI driver
// after a volume has been updated successfully to a new class.
// For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound
// PersistentVolumeClaims during the binding process.
// This is an alpha field and requires enabling VolumeAttributesClass feature.
// +featureGate=VolumeAttributesClass
// +optional
VolumeAttributesClassName *string
}
// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.
@ -440,7 +450,7 @@ type PersistentVolumeClaimSpec struct {
// that are lower than previous value but must still be higher than capacity recorded in the
// status field of the claim.
// +optional
Resources ResourceRequirements
Resources VolumeResourceRequirements
// VolumeName is the binding reference to the PersistentVolume backing this
// claim. When set to non-empty value Selector is not evaluated
// +optional
@ -488,6 +498,21 @@ type PersistentVolumeClaimSpec struct {
// (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
// +optional
DataSourceRef *TypedObjectReference
// volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
// If specified, the CSI driver will create or update the volume with the attributes defined
// in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
// it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
// will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
// If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
// will be set by the persistentvolume controller if it exists.
// If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
// set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
// exists.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass
// (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
// +featureGate=VolumeAttributesClass
// +optional
VolumeAttributesClassName *string
}
type TypedObjectReference struct {
@ -518,6 +543,11 @@ const (
PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
// PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node
PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
// Applying the target VolumeAttributesClass encountered an error
PersistentVolumeClaimVolumeModifyVolumeError PersistentVolumeClaimConditionType = "ModifyVolumeError"
// Volume is being modified
PersistentVolumeClaimVolumeModifyingVolume PersistentVolumeClaimConditionType = "ModifyingVolume"
)
// +enum
@ -544,6 +574,38 @@ const (
PersistentVolumeClaimNodeResizeFailed ClaimResourceStatus = "NodeResizeFailed"
)
// +enum
// New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately
type PersistentVolumeClaimModifyVolumeStatus string
const (
// Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as
// the specified VolumeAttributesClass not existing
PersistentVolumeClaimModifyVolumePending PersistentVolumeClaimModifyVolumeStatus = "Pending"
// InProgress indicates that the volume is being modified
PersistentVolumeClaimModifyVolumeInProgress PersistentVolumeClaimModifyVolumeStatus = "InProgress"
// Infeasible indicates that the request has been rejected as invalid by the CSI driver. To
// resolve the error, a valid VolumeAttributesClass needs to be specified
PersistentVolumeClaimModifyVolumeInfeasible PersistentVolumeClaimModifyVolumeStatus = "Infeasible"
)
// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation
type ModifyVolumeStatus struct {
// targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled
TargetVolumeAttributesClassName string
// status is the status of the ControllerModifyVolume operation. It can be in any of following states:
// - Pending
// Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as
// the specified VolumeAttributesClass not existing.
// - InProgress
// InProgress indicates that the volume is being modified.
// - Infeasible
// Infeasible indicates that the request has been rejected as invalid by the CSI driver. To
// resolve the error, a valid VolumeAttributesClass needs to be specified.
// Note: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately.
Status PersistentVolumeClaimModifyVolumeStatus
}
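A hedged sketch of the new VolumeAttributesClass surfaces on a claim, using the public k8s.io/api/core/v1 mirror of the internal types shown above (field and constant names are assumed to match the 1.29 API): the spec names the desired class, and status.modifyVolumeStatus reports how far the ControllerModifyVolume operation has progressed.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	class := "gold"
	pvc := v1.PersistentVolumeClaim{
		Spec: v1.PersistentVolumeClaimSpec{VolumeAttributesClassName: &class},
		Status: v1.PersistentVolumeClaimStatus{
			ModifyVolumeStatus: &v1.ModifyVolumeStatus{
				TargetVolumeAttributesClassName: "gold",
				Status:                          v1.PersistentVolumeClaimModifyVolumePending,
			},
		},
	}
	fmt.Println(*pvc.Spec.VolumeAttributesClassName, pvc.Status.ModifyVolumeStatus.Status)
}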
// PersistentVolumeClaimCondition represents the current condition of PV claim
type PersistentVolumeClaimCondition struct {
Type PersistentVolumeClaimConditionType
@ -635,6 +697,18 @@ type PersistentVolumeClaimStatus struct {
// +mapType=granular
// +optional
AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus
// currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
// When unset, there is no VolumeAttributesClass applied to this PersistentVolumeClaim.
// This is an alpha field and requires enabling VolumeAttributesClass feature.
// +featureGate=VolumeAttributesClass
// +optional
CurrentVolumeAttributesClassName *string
// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
// When this is unset, there is no ModifyVolume operation being attempted.
// This is an alpha field and requires enabling VolumeAttributesClass feature.
// +featureGate=VolumeAttributesClass
// +optional
ModifyVolumeStatus *ModifyVolumeStatus
}
// PersistentVolumeAccessMode defines various access modes for PV.
@ -1685,6 +1759,29 @@ type ServiceAccountTokenProjection struct {
Path string
}
// ClusterTrustBundleProjection allows a pod to access the
// `.spec.trustBundle` field of a ClusterTrustBundle object in an auto-updating
// file.
type ClusterTrustBundleProjection struct {
// Select a single ClusterTrustBundle by object name. Mutually-exclusive
// with SignerName and LabelSelector.
Name *string
// Select all ClusterTrustBundles for this signer that match LabelSelector.
// Mutually-exclusive with Name.
SignerName *string
// Select all ClusterTrustBundles that match this LabelSelector.
// Mutually-exclusive with Name.
LabelSelector *metav1.LabelSelector
// Block pod startup if the selected ClusterTrustBundle(s) aren't available?
Optional *bool
// Relative path from the volume root to write the bundle.
Path string
}
// ProjectedVolumeSource represents a projected volume source
type ProjectedVolumeSource struct {
// list of volume projections
@ -1710,6 +1807,8 @@ type VolumeProjection struct {
ConfigMap *ConfigMapProjection
// information about the serviceAccountToken data to project
ServiceAccountToken *ServiceAccountTokenProjection
// information about the ClusterTrustBundle data to project
ClusterTrustBundle *ClusterTrustBundleProjection
}
// KeyToPath maps a string key to a path within a volume.
@ -1805,10 +1904,8 @@ type CSIPersistentVolumeSource struct {
// NodeExpandSecretRef is a reference to the secret object containing
// sensitive information to pass to the CSI driver to complete the CSI
// NodeExpandVolume call.
// This is a beta field which is enabled default by CSINodeExpandSecret feature gate.
// This field is optional, may be omitted if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +featureGate=CSINodeExpandSecret
// +optional
NodeExpandSecretRef *SecretReference
}
@ -2150,6 +2247,12 @@ type ExecAction struct {
Command []string
}
// SleepAction describes a "sleep" action.
type SleepAction struct {
// Seconds is the number of seconds to sleep.
Seconds int64
}
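A short usage sketch of the new SleepAction lifecycle handler, written against the public k8s.io/api/core/v1 types (on a real cluster this also requires the PodLifecycleSleepAction feature gate): a preStop hook that simply waits five seconds before termination proceeds.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	c := v1.Container{
		Name:  "web",
		Image: "nginx",
		Lifecycle: &v1.Lifecycle{
			PreStop: &v1.LifecycleHandler{
				Sleep: &v1.SleepAction{Seconds: 5}, // pause before shutdown continues
			},
		},
	}
	fmt.Println(c.Lifecycle.PreStop.Sleep.Seconds)
}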
// Probe describes a health check to be performed against a container to determine whether it is
// alive or ready to receive traffic.
type Probe struct {
@ -2282,6 +2385,18 @@ type ResourceRequirements struct {
Claims []ResourceClaim
}
// VolumeResourceRequirements describes the storage resource requirements for a volume.
type VolumeResourceRequirements struct {
// Limits describes the maximum amount of compute resources allowed.
// +optional
Limits ResourceList
// Requests describes the minimum amount of compute resources required.
// If Request is omitted for a container, it defaults to Limits if that is explicitly specified,
// otherwise to an implementation-defined value
// +optional
Requests ResourceList
}
// ResourceClaim references one entry in PodSpec.ResourceClaims.
type ResourceClaim struct {
// Name must match the name of one entry in pod.spec.resourceClaims of
@ -2420,6 +2535,10 @@ type LifecycleHandler struct {
// lifecycle hooks will fail in runtime when tcp handler is specified.
// +optional
TCPSocket *TCPSocketAction
// Sleep represents the duration that the container should sleep before being terminated.
// +featureGate=PodLifecycleSleepAction
// +optional
Sleep *SleepAction
}
type GRPCAction struct {
@ -2617,12 +2736,6 @@ const (
PodReady PodConditionType = "Ready"
// PodInitialized means that all init containers in the pod have started successfully.
PodInitialized PodConditionType = "Initialized"
// PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
// can't schedule the pod right now, for example due to insufficient resources in the cluster.
PodReasonUnschedulable = "Unschedulable"
// PodReasonSchedulingGated reason in PodScheduled PodCondition means that the scheduler
// skips scheduling the pod because one or more scheduling gates are still present.
PodReasonSchedulingGated = "SchedulingGated"
// ContainersReady indicates whether all containers in the pod are ready.
ContainersReady PodConditionType = "ContainersReady"
// DisruptionTarget indicates the pod is about to be terminated due to a
@ -2886,6 +2999,7 @@ type WeightedPodAffinityTerm struct {
// a pod of the set of pods is running.
type PodAffinityTerm struct {
// A label query over a set of resources, in this case pods.
// If it's null, this PodAffinityTerm matches with no Pods.
// +optional
LabelSelector *metav1.LabelSelector
// namespaces specifies a static list of namespace names that the term applies to.
@ -2907,6 +3021,24 @@ type PodAffinityTerm struct {
// An empty selector ({}) matches all namespaces.
// +optional
NamespaceSelector *metav1.LabelSelector
// MatchLabelKeys is a set of pod label keys to select which pods will
// be taken into consideration. The keys are used to lookup values from the
// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`
// to select the group of existing pods which pods will be taken into consideration
// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
// pod labels will be ignored. The default value is empty.
// +listType=atomic
// +optional
MatchLabelKeys []string
// MismatchLabelKeys is a set of pod label keys to select which pods will
// be taken into consideration. The keys are used to lookup values from the
// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)`
// to select the group of existing pods which pods will be taken into consideration
// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
// pod labels will be ignored. The default value is empty.
// +listType=atomic
// +optional
MismatchLabelKeys []string
}
// NodeAffinity is a group of node affinity scheduling rules.
@ -4074,6 +4206,15 @@ type LoadBalancerIngress struct {
// +optional
Hostname string
// IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified.
// Setting this to "VIP" indicates that traffic is delivered to the node with
// the destination set to the load-balancer's IP and port.
// Setting this to "Proxy" indicates that traffic is delivered to the node or pod with
// the destination set to the node's IP and node port or the pod's IP and port.
// Service implementations may use this information to adjust traffic routing.
// +optional
IPMode *LoadBalancerIPMode
// Ports is a list of records of service ports
// If used, every port defined in the service should have an entry in it
// +optional
@ -4310,7 +4451,7 @@ type ServicePort struct {
// RFC-6335 and https://www.iana.org/assignments/service-names).
//
// * Kubernetes-defined prefixed names:
// * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540
// * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
// * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
// * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
//
@ -4475,7 +4616,7 @@ type EndpointPort struct {
// RFC-6335 and https://www.iana.org/assignments/service-names).
//
// * Kubernetes-defined prefixed names:
// * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540
// * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
// * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
// * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
//
@ -4596,7 +4737,7 @@ type NodeSystemInfo struct {
ContainerRuntimeVersion string
// Kubelet Version reported by the node.
KubeletVersion string
// KubeProxy Version reported by the node.
// Deprecated: KubeProxy Version reported by the node.
KubeProxyVersion string
// The Operating System reported by the node
OperatingSystem string
@ -6146,3 +6287,15 @@ type PortStatus struct {
// +kubebuilder:validation:MaxLength=316
Error *string
}
// LoadBalancerIPMode represents the mode of the LoadBalancer ingress IP
type LoadBalancerIPMode string
const (
// LoadBalancerIPModeVIP indicates that traffic is delivered to the node with
// the destination set to the load-balancer's IP and port.
LoadBalancerIPModeVIP LoadBalancerIPMode = "VIP"
// LoadBalancerIPModeProxy indicates that traffic is delivered to the node or pod with
// the destination set to the node's IP and port or the pod's IP and port.
LoadBalancerIPModeProxy LoadBalancerIPMode = "Proxy"
)


@ -23,6 +23,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/api/v1/service"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/parsers"
"k8s.io/utils/pointer"
@ -122,11 +123,9 @@ func SetDefaults_Service(obj *v1.Service) {
sp.TargetPort = intstr.FromInt32(sp.Port)
}
}
// Defaults ExternalTrafficPolicy field for NodePort / LoadBalancer service
// Defaults ExternalTrafficPolicy field for externally-accessible service
// to Global for consistency.
if (obj.Spec.Type == v1.ServiceTypeNodePort ||
obj.Spec.Type == v1.ServiceTypeLoadBalancer) &&
obj.Spec.ExternalTrafficPolicy == "" {
if service.ExternallyAccessible(obj) && obj.Spec.ExternalTrafficPolicy == "" {
obj.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyCluster
}
@ -142,6 +141,19 @@ func SetDefaults_Service(obj *v1.Service) {
obj.Spec.AllocateLoadBalancerNodePorts = pointer.Bool(true)
}
}
if obj.Spec.Type == v1.ServiceTypeLoadBalancer {
if utilfeature.DefaultFeatureGate.Enabled(features.LoadBalancerIPMode) {
ipMode := v1.LoadBalancerIPModeVIP
for i, ing := range obj.Status.LoadBalancer.Ingress {
if ing.IP != "" && ing.IPMode == nil {
obj.Status.LoadBalancer.Ingress[i].IPMode = &ipMode
}
}
}
}
}
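A self-contained sketch of the new LoadBalancerIPMode defaulting step added above: every status ingress entry that has an IP but no IPMode gets VIP. The feature-gate check from the vendored code is omitted here, and the public k8s.io/api/core/v1 types stand in for the versioned ones.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func defaultIPMode(svc *v1.Service) {
	ipMode := v1.LoadBalancerIPModeVIP
	for i, ing := range svc.Status.LoadBalancer.Ingress {
		if ing.IP != "" && ing.IPMode == nil {
			svc.Status.LoadBalancer.Ingress[i].IPMode = &ipMode
		}
	}
}

func main() {
	svc := &v1.Service{
		Spec: v1.ServiceSpec{Type: v1.ServiceTypeLoadBalancer},
		Status: v1.ServiceStatus{LoadBalancer: v1.LoadBalancerStatus{
			Ingress: []v1.LoadBalancerIngress{{IP: "203.0.113.10"}},
		}},
	}
	defaultIPMode(svc)
	fmt.Println(*svc.Status.LoadBalancer.Ingress[0].IPMode) // VIP
}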
func SetDefaults_Pod(obj *v1.Pod) {
// If limits are specified, but requests are not, default requests to limits


@ -212,6 +212,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ClusterTrustBundleProjection)(nil), (*core.ClusterTrustBundleProjection)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection(a.(*v1.ClusterTrustBundleProjection), b.(*core.ClusterTrustBundleProjection), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ClusterTrustBundleProjection)(nil), (*v1.ClusterTrustBundleProjection)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection(a.(*core.ClusterTrustBundleProjection), b.(*v1.ClusterTrustBundleProjection), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ComponentCondition)(nil), (*core.ComponentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ComponentCondition_To_core_ComponentCondition(a.(*v1.ComponentCondition), b.(*core.ComponentCondition), scope)
}); err != nil {
@ -882,6 +892,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ModifyVolumeStatus)(nil), (*core.ModifyVolumeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus(a.(*v1.ModifyVolumeStatus), b.(*core.ModifyVolumeStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ModifyVolumeStatus)(nil), (*v1.ModifyVolumeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus(a.(*core.ModifyVolumeStatus), b.(*v1.ModifyVolumeStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.NFSVolumeSource)(nil), (*core.NFSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NFSVolumeSource_To_core_NFSVolumeSource(a.(*v1.NFSVolumeSource), b.(*core.NFSVolumeSource), scope)
}); err != nil {
@ -1937,6 +1957,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.SleepAction)(nil), (*core.SleepAction)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SleepAction_To_core_SleepAction(a.(*v1.SleepAction), b.(*core.SleepAction), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.SleepAction)(nil), (*v1.SleepAction)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_SleepAction_To_v1_SleepAction(a.(*core.SleepAction), b.(*v1.SleepAction), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.StorageOSPersistentVolumeSource)(nil), (*core.StorageOSPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(a.(*v1.StorageOSPersistentVolumeSource), b.(*core.StorageOSPersistentVolumeSource), scope)
}); err != nil {
@ -2087,6 +2117,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.VolumeResourceRequirements)(nil), (*core.VolumeResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(a.(*v1.VolumeResourceRequirements), b.(*core.VolumeResourceRequirements), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.VolumeResourceRequirements)(nil), (*v1.VolumeResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(a.(*core.VolumeResourceRequirements), b.(*v1.VolumeResourceRequirements), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.VolumeSource)(nil), (*core.VolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_VolumeSource_To_core_VolumeSource(a.(*v1.VolumeSource), b.(*core.VolumeSource), scope)
}); err != nil {
@ -2735,6 +2775,34 @@ func Convert_core_ClientIPConfig_To_v1_ClientIPConfig(in *core.ClientIPConfig, o
return autoConvert_core_ClientIPConfig_To_v1_ClientIPConfig(in, out, s)
}
func autoConvert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection(in *v1.ClusterTrustBundleProjection, out *core.ClusterTrustBundleProjection, s conversion.Scope) error {
out.Name = (*string)(unsafe.Pointer(in.Name))
out.SignerName = (*string)(unsafe.Pointer(in.SignerName))
out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
out.Path = in.Path
return nil
}
// Convert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection is an autogenerated conversion function.
func Convert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection(in *v1.ClusterTrustBundleProjection, out *core.ClusterTrustBundleProjection, s conversion.Scope) error {
return autoConvert_v1_ClusterTrustBundleProjection_To_core_ClusterTrustBundleProjection(in, out, s)
}
func autoConvert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection(in *core.ClusterTrustBundleProjection, out *v1.ClusterTrustBundleProjection, s conversion.Scope) error {
out.Name = (*string)(unsafe.Pointer(in.Name))
out.SignerName = (*string)(unsafe.Pointer(in.SignerName))
out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
out.Optional = (*bool)(unsafe.Pointer(in.Optional))
out.Path = in.Path
return nil
}
// Convert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection is an autogenerated conversion function.
func Convert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection(in *core.ClusterTrustBundleProjection, out *v1.ClusterTrustBundleProjection, s conversion.Scope) error {
return autoConvert_core_ClusterTrustBundleProjection_To_v1_ClusterTrustBundleProjection(in, out, s)
}
func autoConvert_v1_ComponentCondition_To_core_ComponentCondition(in *v1.ComponentCondition, out *core.ComponentCondition, s conversion.Scope) error {
out.Type = core.ComponentConditionType(in.Type)
out.Status = core.ConditionStatus(in.Status)
@ -4315,6 +4383,7 @@ func autoConvert_v1_LifecycleHandler_To_core_LifecycleHandler(in *v1.LifecycleHa
out.Exec = (*core.ExecAction)(unsafe.Pointer(in.Exec))
out.HTTPGet = (*core.HTTPGetAction)(unsafe.Pointer(in.HTTPGet))
out.TCPSocket = (*core.TCPSocketAction)(unsafe.Pointer(in.TCPSocket))
out.Sleep = (*core.SleepAction)(unsafe.Pointer(in.Sleep))
return nil
}
@ -4327,6 +4396,7 @@ func autoConvert_core_LifecycleHandler_To_v1_LifecycleHandler(in *core.Lifecycle
out.Exec = (*v1.ExecAction)(unsafe.Pointer(in.Exec))
out.HTTPGet = (*v1.HTTPGetAction)(unsafe.Pointer(in.HTTPGet))
out.TCPSocket = (*v1.TCPSocketAction)(unsafe.Pointer(in.TCPSocket))
out.Sleep = (*v1.SleepAction)(unsafe.Pointer(in.Sleep))
return nil
}
@ -4478,6 +4548,7 @@ func Convert_core_List_To_v1_List(in *core.List, out *v1.List, s conversion.Scop
func autoConvert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in *v1.LoadBalancerIngress, out *core.LoadBalancerIngress, s conversion.Scope) error {
out.IP = in.IP
out.Hostname = in.Hostname
out.IPMode = (*core.LoadBalancerIPMode)(unsafe.Pointer(in.IPMode))
out.Ports = *(*[]core.PortStatus)(unsafe.Pointer(&in.Ports))
return nil
}
@ -4490,6 +4561,7 @@ func Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in *v1.LoadBalan
func autoConvert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *core.LoadBalancerIngress, out *v1.LoadBalancerIngress, s conversion.Scope) error {
out.IP = in.IP
out.Hostname = in.Hostname
out.IPMode = (*v1.LoadBalancerIPMode)(unsafe.Pointer(in.IPMode))
out.Ports = *(*[]v1.PortStatus)(unsafe.Pointer(&in.Ports))
return nil
}
@ -4551,6 +4623,28 @@ func Convert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in *core.LocalVolume
return autoConvert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in, out, s)
}
func autoConvert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus(in *v1.ModifyVolumeStatus, out *core.ModifyVolumeStatus, s conversion.Scope) error {
out.TargetVolumeAttributesClassName = in.TargetVolumeAttributesClassName
out.Status = core.PersistentVolumeClaimModifyVolumeStatus(in.Status)
return nil
}
// Convert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus is an autogenerated conversion function.
func Convert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus(in *v1.ModifyVolumeStatus, out *core.ModifyVolumeStatus, s conversion.Scope) error {
return autoConvert_v1_ModifyVolumeStatus_To_core_ModifyVolumeStatus(in, out, s)
}
func autoConvert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus(in *core.ModifyVolumeStatus, out *v1.ModifyVolumeStatus, s conversion.Scope) error {
out.TargetVolumeAttributesClassName = in.TargetVolumeAttributesClassName
out.Status = v1.PersistentVolumeClaimModifyVolumeStatus(in.Status)
return nil
}
// Convert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus is an autogenerated conversion function.
func Convert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus(in *core.ModifyVolumeStatus, out *v1.ModifyVolumeStatus, s conversion.Scope) error {
return autoConvert_core_ModifyVolumeStatus_To_v1_ModifyVolumeStatus(in, out, s)
}
func autoConvert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in *v1.NFSVolumeSource, out *core.NFSVolumeSource, s conversion.Scope) error {
out.Server = in.Server
out.Path = in.Path
@ -5321,7 +5415,7 @@ func Convert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *
func autoConvert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in *v1.PersistentVolumeClaimSpec, out *core.PersistentVolumeClaimSpec, s conversion.Scope) error {
out.AccessModes = *(*[]core.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes))
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := Convert_v1_ResourceRequirements_To_core_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
if err := Convert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
return err
}
out.VolumeName = in.VolumeName
@ -5329,6 +5423,7 @@ func autoConvert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(
out.VolumeMode = (*core.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode))
out.DataSource = (*core.TypedLocalObjectReference)(unsafe.Pointer(in.DataSource))
out.DataSourceRef = (*core.TypedObjectReference)(unsafe.Pointer(in.DataSourceRef))
out.VolumeAttributesClassName = (*string)(unsafe.Pointer(in.VolumeAttributesClassName))
return nil
}
@ -5340,7 +5435,7 @@ func Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in *
func autoConvert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *core.PersistentVolumeClaimSpec, out *v1.PersistentVolumeClaimSpec, s conversion.Scope) error {
out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes))
out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
if err := Convert_core_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
if err := Convert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
return err
}
out.VolumeName = in.VolumeName
@ -5348,6 +5443,7 @@ func autoConvert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(
out.VolumeMode = (*v1.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode))
out.DataSource = (*v1.TypedLocalObjectReference)(unsafe.Pointer(in.DataSource))
out.DataSourceRef = (*v1.TypedObjectReference)(unsafe.Pointer(in.DataSourceRef))
out.VolumeAttributesClassName = (*string)(unsafe.Pointer(in.VolumeAttributesClassName))
return nil
}
@ -5363,6 +5459,8 @@ func autoConvert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimSta
out.Conditions = *(*[]core.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions))
out.AllocatedResources = *(*core.ResourceList)(unsafe.Pointer(&in.AllocatedResources))
out.AllocatedResourceStatuses = *(*map[core.ResourceName]core.ClaimResourceStatus)(unsafe.Pointer(&in.AllocatedResourceStatuses))
out.CurrentVolumeAttributesClassName = (*string)(unsafe.Pointer(in.CurrentVolumeAttributesClassName))
out.ModifyVolumeStatus = (*core.ModifyVolumeStatus)(unsafe.Pointer(in.ModifyVolumeStatus))
return nil
}
@ -5378,6 +5476,8 @@ func autoConvert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimSta
out.Conditions = *(*[]v1.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions))
out.AllocatedResources = *(*v1.ResourceList)(unsafe.Pointer(&in.AllocatedResources))
out.AllocatedResourceStatuses = *(*map[v1.ResourceName]v1.ClaimResourceStatus)(unsafe.Pointer(&in.AllocatedResourceStatuses))
out.CurrentVolumeAttributesClassName = (*string)(unsafe.Pointer(in.CurrentVolumeAttributesClassName))
out.ModifyVolumeStatus = (*v1.ModifyVolumeStatus)(unsafe.Pointer(in.ModifyVolumeStatus))
return nil
}
@ -5550,6 +5650,7 @@ func autoConvert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in *v1.Per
out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions))
out.VolumeMode = (*core.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode))
out.NodeAffinity = (*core.VolumeNodeAffinity)(unsafe.Pointer(in.NodeAffinity))
out.VolumeAttributesClassName = (*string)(unsafe.Pointer(in.VolumeAttributesClassName))
return nil
}
@ -5565,6 +5666,7 @@ func autoConvert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *core.P
out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions))
out.VolumeMode = (*v1.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode))
out.NodeAffinity = (*v1.VolumeNodeAffinity)(unsafe.Pointer(in.NodeAffinity))
out.VolumeAttributesClassName = (*string)(unsafe.Pointer(in.VolumeAttributesClassName))
return nil
}
@ -5665,6 +5767,8 @@ func autoConvert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in *v1.PodAffinityTe
out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces))
out.TopologyKey = in.TopologyKey
out.NamespaceSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector))
out.MatchLabelKeys = *(*[]string)(unsafe.Pointer(&in.MatchLabelKeys))
out.MismatchLabelKeys = *(*[]string)(unsafe.Pointer(&in.MismatchLabelKeys))
return nil
}
@ -5678,6 +5782,8 @@ func autoConvert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in *core.PodAffinity
out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces))
out.TopologyKey = in.TopologyKey
out.NamespaceSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector))
out.MatchLabelKeys = *(*[]string)(unsafe.Pointer(&in.MatchLabelKeys))
out.MismatchLabelKeys = *(*[]string)(unsafe.Pointer(&in.MismatchLabelKeys))
return nil
}
@ -8055,6 +8161,26 @@ func Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *core.Ses
return autoConvert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in, out, s)
}
func autoConvert_v1_SleepAction_To_core_SleepAction(in *v1.SleepAction, out *core.SleepAction, s conversion.Scope) error {
out.Seconds = in.Seconds
return nil
}
// Convert_v1_SleepAction_To_core_SleepAction is an autogenerated conversion function.
func Convert_v1_SleepAction_To_core_SleepAction(in *v1.SleepAction, out *core.SleepAction, s conversion.Scope) error {
return autoConvert_v1_SleepAction_To_core_SleepAction(in, out, s)
}
func autoConvert_core_SleepAction_To_v1_SleepAction(in *core.SleepAction, out *v1.SleepAction, s conversion.Scope) error {
out.Seconds = in.Seconds
return nil
}
// Convert_core_SleepAction_To_v1_SleepAction is an autogenerated conversion function.
func Convert_core_SleepAction_To_v1_SleepAction(in *core.SleepAction, out *v1.SleepAction, s conversion.Scope) error {
return autoConvert_core_SleepAction_To_v1_SleepAction(in, out, s)
}
func autoConvert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in *v1.StorageOSPersistentVolumeSource, out *core.StorageOSPersistentVolumeSource, s conversion.Scope) error {
out.VolumeName = in.VolumeName
out.VolumeNamespace = in.VolumeNamespace
@ -8436,6 +8562,7 @@ func autoConvert_v1_VolumeProjection_To_core_VolumeProjection(in *v1.VolumeProje
} else {
out.ServiceAccountToken = nil
}
out.ClusterTrustBundle = (*core.ClusterTrustBundleProjection)(unsafe.Pointer(in.ClusterTrustBundle))
return nil
}
@ -8457,6 +8584,7 @@ func autoConvert_core_VolumeProjection_To_v1_VolumeProjection(in *core.VolumePro
} else {
out.ServiceAccountToken = nil
}
out.ClusterTrustBundle = (*v1.ClusterTrustBundleProjection)(unsafe.Pointer(in.ClusterTrustBundle))
return nil
}
@ -8465,6 +8593,28 @@ func Convert_core_VolumeProjection_To_v1_VolumeProjection(in *core.VolumeProject
return autoConvert_core_VolumeProjection_To_v1_VolumeProjection(in, out, s)
}
func autoConvert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(in *v1.VolumeResourceRequirements, out *core.VolumeResourceRequirements, s conversion.Scope) error {
out.Limits = *(*core.ResourceList)(unsafe.Pointer(&in.Limits))
out.Requests = *(*core.ResourceList)(unsafe.Pointer(&in.Requests))
return nil
}
// Convert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements is an autogenerated conversion function.
func Convert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(in *v1.VolumeResourceRequirements, out *core.VolumeResourceRequirements, s conversion.Scope) error {
return autoConvert_v1_VolumeResourceRequirements_To_core_VolumeResourceRequirements(in, out, s)
}
func autoConvert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(in *core.VolumeResourceRequirements, out *v1.VolumeResourceRequirements, s conversion.Scope) error {
out.Limits = *(*v1.ResourceList)(unsafe.Pointer(&in.Limits))
out.Requests = *(*v1.ResourceList)(unsafe.Pointer(&in.Requests))
return nil
}
// Convert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements is an autogenerated conversion function.
func Convert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(in *core.VolumeResourceRequirements, out *v1.VolumeResourceRequirements, s conversion.Scope) error {
return autoConvert_core_VolumeResourceRequirements_To_v1_VolumeResourceRequirements(in, out, s)
}
func autoConvert_v1_VolumeSource_To_core_VolumeSource(in *v1.VolumeSource, out *core.VolumeSource, s conversion.Scope) error {
out.HostPath = (*core.HostPathVolumeSource)(unsafe.Pointer(in.HostPath))
out.EmptyDir = (*core.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir))

View File

@ -0,0 +1,132 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"strings"
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
)
// ValidateSignerName checks that signerName is syntactically valid.
//
// ensure signerName is of the form domain.com/something and up to 571 characters.
// This length and format is specified to accommodate signerNames like:
// <fqdn>/<resource-namespace>.<resource-name>.
// The max length of a FQDN is 253 characters (DNS1123Subdomain max length)
// The max length of a namespace name is 63 characters (DNS1123Label max length)
// The max length of a resource name is 253 characters (DNS1123Subdomain max length)
// We then add an additional 2 characters to account for the one '.' and one '/'.
func ValidateSignerName(fldPath *field.Path, signerName string) field.ErrorList {
var el field.ErrorList
if len(signerName) == 0 {
el = append(el, field.Required(fldPath, ""))
return el
}
segments := strings.Split(signerName, "/")
// validate that there is exactly one '/' in the signerName.
// we check this up front so the user gets a single clear error for a malformed value.
if len(segments) != 2 {
el = append(el, field.Invalid(fldPath, signerName, "must be a fully qualified domain and path of the form 'example.com/signer-name'"))
// return early here as we should not continue attempting to validate a missing or malformed path segment
// (i.e. one containing multiple or zero `/`)
return el
}
// validate that segments[0] is at most 253 characters altogether
maxDomainSegmentLength := validation.DNS1123SubdomainMaxLength
if len(segments[0]) > maxDomainSegmentLength {
el = append(el, field.TooLong(fldPath, segments[0], maxDomainSegmentLength))
}
// validate that segments[0] consists of valid DNS1123 labels separated by '.'
domainLabels := strings.Split(segments[0], ".")
for _, lbl := range domainLabels {
// use IsDNS1123Label as we want to ensure the max length of any single label in the domain
// is 63 characters
if errs := validation.IsDNS1123Label(lbl); len(errs) > 0 {
for _, err := range errs {
el = append(el, field.Invalid(fldPath, segments[0], fmt.Sprintf("validating label %q: %s", lbl, err)))
}
// if we encounter any errors whilst parsing the domain segment, break from
// validation as any further error messages will be duplicates and indistinguishable
// from each other, confusing users.
break
}
}
// validate that there is at least one '.' in segments[0]
if len(domainLabels) < 2 {
el = append(el, field.Invalid(fldPath, segments[0], "should be a domain with at least two segments separated by dots"))
}
// validate that segments[1] consists of valid DNS1123 subdomains separated by '.'.
pathLabels := strings.Split(segments[1], ".")
for _, lbl := range pathLabels {
// use IsDNS1123Subdomain because it enforces a length restriction of 253 characters
// which is required in order to fit a full resource name into a single 'label'
if errs := validation.IsDNS1123Subdomain(lbl); len(errs) > 0 {
for _, err := range errs {
el = append(el, field.Invalid(fldPath, segments[1], fmt.Sprintf("validating label %q: %s", lbl, err)))
}
// if we encounter any errors whilst parsing the path segment, break from
// validation as any further error messages will be duplicates and indistinguishable
// from each other, confusing users.
break
}
}
// ensure that segments[1] can accommodate a dns label + dns subdomain + '.'
maxPathSegmentLength := validation.DNS1123SubdomainMaxLength + validation.DNS1123LabelMaxLength + 1
maxSignerNameLength := maxDomainSegmentLength + maxPathSegmentLength + 1
if len(signerName) > maxSignerNameLength {
el = append(el, field.TooLong(fldPath, signerName, maxSignerNameLength))
}
return el
}
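// exampleValidateSignerNameUsage is an illustrative sketch of how ValidateSignerName
// might be called; it is not part of the validation package's API. The field path and
// signer names below are hypothetical examples. The 571-character bound quoted in the
// doc comment follows from the constants used above: 253 for the domain segment,
// 253+63+1 for the path segment, plus the separating '/'.
func exampleValidateSignerNameUsage() []string {
	fldPath := field.NewPath("spec", "signerName")
	var msgs []string
	for _, name := range []string{
		"example.com/my-namespace.my-bundle", // valid: dotted domain, exactly one '/'
		"no-slash-here",                      // invalid: missing the '/' separator
		"example.com/a/b",                    // invalid: more than one '/'
	} {
		for _, err := range ValidateSignerName(fldPath, name) {
			msgs = append(msgs, err.Error())
		}
	}
	return msgs
}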
// ValidateClusterTrustBundleName checks that a ClusterTrustBundle name conforms
// to the rules documented on the type.
func ValidateClusterTrustBundleName(signerName string) func(name string, prefix bool) []string {
return func(name string, isPrefix bool) []string {
if signerName == "" {
if strings.Contains(name, ":") {
return []string{"ClusterTrustBundle without signer name must not have \":\" in its name"}
}
return apimachineryvalidation.NameIsDNSSubdomain(name, isPrefix)
}
requiredPrefix := strings.ReplaceAll(signerName, "/", ":") + ":"
if !strings.HasPrefix(name, requiredPrefix) {
return []string{fmt.Sprintf("ClusterTrustBundle for signerName %s must be named with prefix %s", signerName, requiredPrefix)}
}
return apimachineryvalidation.NameIsDNSSubdomain(strings.TrimPrefix(name, requiredPrefix), isPrefix)
}
}
func extractSignerNameFromClusterTrustBundleName(name string) (string, bool) {
if splitPoint := strings.LastIndex(name, ":"); splitPoint != -1 {
// This looks like it refers to a signerName trustbundle.
return strings.ReplaceAll(name[:splitPoint], ":", "/"), true
} else {
return "", false
}
}
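// exampleClusterTrustBundleNaming is an illustrative sketch of the naming rule enforced
// above, not part of the package's API: a hypothetical signerName "example.com/foo"
// requires bundle names prefixed with "example.com:foo:", and
// extractSignerNameFromClusterTrustBundleName inverts that mapping.
func exampleClusterTrustBundleNaming() (string, bool) {
	validate := ValidateClusterTrustBundleName("example.com/foo")
	_ = validate("example.com:foo:v1", false) // no errors: prefix matches and "v1" is a valid DNS subdomain
	_ = validate("unprefixed-name", false)    // one error: the required "example.com:foo:" prefix is missing
	// Round-trip: recover the signer name from the bundle name ("example.com/foo", true).
	return extractSignerNameFromClusterTrustBundleName("example.com:foo:v1")
}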

File diff suppressed because it is too large

View File

@ -466,6 +466,42 @@ func (in *ClientIPConfig) DeepCopy() *ClientIPConfig {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterTrustBundleProjection) DeepCopyInto(out *ClusterTrustBundleProjection) {
*out = *in
if in.Name != nil {
in, out := &in.Name, &out.Name
*out = new(string)
**out = **in
}
if in.SignerName != nil {
in, out := &in.SignerName, &out.SignerName
*out = new(string)
**out = **in
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.Optional != nil {
in, out := &in.Optional, &out.Optional
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleProjection.
func (in *ClusterTrustBundleProjection) DeepCopy() *ClusterTrustBundleProjection {
if in == nil {
return nil
}
out := new(ClusterTrustBundleProjection)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ComponentCondition) DeepCopyInto(out *ComponentCondition) {
*out = *in
@ -2045,6 +2081,11 @@ func (in *LifecycleHandler) DeepCopyInto(out *LifecycleHandler) {
*out = new(TCPSocketAction)
**out = **in
}
if in.Sleep != nil {
in, out := &in.Sleep, &out.Sleep
*out = new(SleepAction)
**out = **in
}
return
}
@ -2230,6 +2271,11 @@ func (in *List) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) {
*out = *in
if in.IPMode != nil {
in, out := &in.IPMode, &out.IPMode
*out = new(LoadBalancerIPMode)
**out = **in
}
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]PortStatus, len(*in))
@ -2310,6 +2356,22 @@ func (in *LocalVolumeSource) DeepCopy() *LocalVolumeSource {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ModifyVolumeStatus) DeepCopyInto(out *ModifyVolumeStatus) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyVolumeStatus.
func (in *ModifyVolumeStatus) DeepCopy() *ModifyVolumeStatus {
if in == nil {
return nil
}
out := new(ModifyVolumeStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NFSVolumeSource) DeepCopyInto(out *NFSVolumeSource) {
*out = *in
@ -3058,6 +3120,11 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec
*out = new(TypedObjectReference)
(*in).DeepCopyInto(*out)
}
if in.VolumeAttributesClassName != nil {
in, out := &in.VolumeAttributesClassName, &out.VolumeAttributesClassName
*out = new(string)
**out = **in
}
return
}
@ -3107,6 +3174,16 @@ func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimSt
(*out)[key] = val
}
}
if in.CurrentVolumeAttributesClassName != nil {
in, out := &in.CurrentVolumeAttributesClassName, &out.CurrentVolumeAttributesClassName
*out = new(string)
**out = **in
}
if in.ModifyVolumeStatus != nil {
in, out := &in.ModifyVolumeStatus, &out.ModifyVolumeStatus
*out = new(ModifyVolumeStatus)
**out = **in
}
return
}
@ -3349,6 +3426,11 @@ func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) {
*out = new(VolumeNodeAffinity)
(*in).DeepCopyInto(*out)
}
if in.VolumeAttributesClassName != nil {
in, out := &in.VolumeAttributesClassName, &out.VolumeAttributesClassName
*out = new(string)
**out = **in
}
return
}
@ -3474,6 +3556,16 @@ func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) {
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.MatchLabelKeys != nil {
in, out := &in.MatchLabelKeys, &out.MatchLabelKeys
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.MismatchLabelKeys != nil {
in, out := &in.MismatchLabelKeys, &out.MismatchLabelKeys
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
@ -5666,6 +5758,22 @@ func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SleepAction) DeepCopyInto(out *SleepAction) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SleepAction.
func (in *SleepAction) DeepCopy() *SleepAction {
if in == nil {
return nil
}
out := new(SleepAction)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) {
*out = *in
@ -6012,6 +6120,11 @@ func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) {
*out = new(ServiceAccountTokenProjection)
**out = **in
}
if in.ClusterTrustBundle != nil {
in, out := &in.ClusterTrustBundle, &out.ClusterTrustBundle
*out = new(ClusterTrustBundleProjection)
(*in).DeepCopyInto(*out)
}
return
}
@ -6025,6 +6138,36 @@ func (in *VolumeProjection) DeepCopy() *VolumeProjection {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeResourceRequirements) DeepCopyInto(out *VolumeResourceRequirements) {
*out = *in
if in.Limits != nil {
in, out := &in.Limits, &out.Limits
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.Requests != nil {
in, out := &in.Requests, &out.Requests
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeResourceRequirements.
func (in *VolumeResourceRequirements) DeepCopy() *VolumeResourceRequirements {
if in == nil {
return nil
}
out := new(VolumeResourceRequirements)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeSource) DeepCopyInto(out *VolumeSource) {
*out = *in

View File

@ -52,10 +52,10 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&IngressList{},
&IngressClass{},
&IngressClassList{},
&ClusterCIDR{},
&ClusterCIDRList{},
&IPAddress{},
&IPAddressList{},
&ServiceCIDR{},
&ServiceCIDRList{},
)
return nil
}

View File

@ -18,7 +18,6 @@ package networking
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
api "k8s.io/kubernetes/pkg/apis/core"
)
@ -599,71 +598,6 @@ type ServiceBackendPort struct {
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterCIDR represents a single configuration for per-Node Pod CIDR
// allocations when the MultiCIDRRangeAllocator is enabled (see the config for
// kube-controller-manager). A cluster may have any number of ClusterCIDR
// resources, all of which will be considered when allocating a CIDR for a
// Node. A ClusterCIDR is eligible to be used for a given Node when the node
// selector matches the node in question and has free CIDRs to allocate. In
// case of multiple matching ClusterCIDR resources, the allocator will attempt
// to break ties using internal heuristics, but any ClusterCIDR whose node
// selector matches the Node may be used.
type ClusterCIDR struct {
metav1.TypeMeta
metav1.ObjectMeta
Spec ClusterCIDRSpec
}
// ClusterCIDRSpec defines the desired state of ClusterCIDR.
type ClusterCIDRSpec struct {
// nodeSelector defines which nodes the config is applicable to.
// An empty or nil nodeSelector selects all nodes.
// This field is immutable.
// +optional
NodeSelector *api.NodeSelector
// perNodeHostBits defines the number of host bits to be configured per node.
// A subnet mask determines how much of the address is used for network bits
// and host bits. For example an IPv4 address of 192.168.0.0/24, splits the
// address into 24 bits for the network portion and 8 bits for the host portion.
// To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6).
// Minimum value is 4 (16 IPs).
// This field is immutable.
// +required
PerNodeHostBits int32
// ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8").
// At least one of ipv4 and ipv6 must be specified.
// This field is immutable.
// +optional
IPv4 string
// ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64").
// At least one of ipv4 and ipv6 must be specified.
// This field is immutable.
// +optional
IPv6 string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterCIDRList contains a list of ClusterCIDRs.
type ClusterCIDRList struct {
metav1.TypeMeta
// +optional
metav1.ListMeta
// items is the list of ClusterCIDRs.
Items []ClusterCIDR
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs
// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses.
// An IP address can be represented in different formats, to guarantee the uniqueness of the IP,
@ -695,9 +629,6 @@ type ParentReference struct {
Namespace string
// Name is the name of the object being referenced.
Name string
// UID is the uid of the object being referenced.
// +optional
UID types.UID
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@ -711,3 +642,53 @@ type IPAddressList struct {
// Items is the list of IPAddress
Items []IPAddress
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db8::/64).
// This range is used to allocate ClusterIPs to Service objects.
type ServiceCIDR struct {
metav1.TypeMeta
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta
// spec is the desired state of the ServiceCIDR.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Spec ServiceCIDRSpec
// status represents the current state of the ServiceCIDR.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Status ServiceCIDRStatus
}
type ServiceCIDRSpec struct {
// CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
// from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family.
// This field is immutable.
// +optional
CIDRs []string
}
// ServiceCIDRStatus describes the current state of the ServiceCIDR.
type ServiceCIDRStatus struct {
// conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR.
Conditions []metav1.Condition
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.27
// ServiceCIDRList contains a list of ServiceCIDR objects.
type ServiceCIDRList struct {
metav1.TypeMeta
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ListMeta
// items is the list of ServiceCIDRs.
Items []ServiceCIDR
}

View File

@ -28,87 +28,6 @@ import (
core "k8s.io/kubernetes/pkg/apis/core"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterCIDR) DeepCopyInto(out *ClusterCIDR) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDR.
func (in *ClusterCIDR) DeepCopy() *ClusterCIDR {
if in == nil {
return nil
}
out := new(ClusterCIDR)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterCIDR) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterCIDRList) DeepCopyInto(out *ClusterCIDRList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ClusterCIDR, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRList.
func (in *ClusterCIDRList) DeepCopy() *ClusterCIDRList {
if in == nil {
return nil
}
out := new(ClusterCIDRList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterCIDRList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterCIDRSpec) DeepCopyInto(out *ClusterCIDRSpec) {
*out = *in
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(core.NodeSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRSpec.
func (in *ClusterCIDRSpec) DeepCopy() *ClusterCIDRSpec {
if in == nil {
return nil
}
out := new(ClusterCIDRSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) {
*out = *in
@ -904,3 +823,108 @@ func (in *ServiceBackendPort) DeepCopy() *ServiceBackendPort {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR.
func (in *ServiceCIDR) DeepCopy() *ServiceCIDR {
if in == nil {
return nil
}
out := new(ServiceCIDR)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ServiceCIDR) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ServiceCIDR, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList.
func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList {
if in == nil {
return nil
}
out := new(ServiceCIDRList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ServiceCIDRList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) {
*out = *in
if in.CIDRs != nil {
in, out := &in.CIDRs, &out.CIDRs
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec.
func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec {
if in == nil {
return nil
}
out := new(ServiceCIDRSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus.
func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus {
if in == nil {
return nil
}
out := new(ServiceCIDRStatus)
in.DeepCopyInto(out)
return out
}

View File

@ -44,6 +44,13 @@ const (
// Enable usage of Provision of PVCs from snapshots in other namespaces
CrossNamespaceVolumeDataSource featuregate.Feature = "CrossNamespaceVolumeDataSource"
// owner: @thockin
// deprecated: v1.29
//
// Enables Service.status.loadBalancer to be set on
// services of types other than LoadBalancer.
AllowServiceLBStatusOnNonLB featuregate.Feature = "AllowServiceLBStatusOnNonLB"
// owner: @bswartz
// alpha: v1.18
// beta: v1.24
@ -65,6 +72,7 @@ const (
// owner: @danwinship
// alpha: v1.27
// beta: v1.29
//
// Enables dual-stack --node-ip in kubelet with external cloud providers
CloudDualStackNodeIPs featuregate.Feature = "CloudDualStackNodeIPs"
@ -75,6 +83,12 @@ const (
// Enable ClusterTrustBundle objects and Kubelet integration.
ClusterTrustBundle featuregate.Feature = "ClusterTrustBundle"
// owner: @ahmedtd
// alpha: v1.28
//
// Enable ClusterTrustBundle Kubelet projected volumes. Depends on ClusterTrustBundle.
ClusterTrustBundleProjection featuregate.Feature = "ClusterTrustBundleProjection"
// owner: @szuecs
// alpha: v1.12
//
@ -134,7 +148,8 @@ const (
// owner: @mfordjody
// alpha: v1.26
//
// Skip validation Enable in next version
// Bypasses obsolete validation that GCP volumes are read-only when used in
// Deployments.
SkipReadOnlyValidationGCE featuregate.Feature = "SkipReadOnlyValidationGCE"
// owner: @trierra
@ -150,17 +165,11 @@ const (
// Enables the RBD in-tree driver to RBD CSI Driver migration feature.
CSIMigrationRBD featuregate.Feature = "CSIMigrationRBD"
// owner: @divyenpatel
// beta: v1.19 (requires: vSphere vCenter/ESXi Version: 7.0u2, HW Version: VM version 15)
// GA: 1.26
// Enables the vSphere in-tree driver to vSphere CSI Driver migration feature.
CSIMigrationvSphere featuregate.Feature = "CSIMigrationvSphere"
// owner: @humblec, @zhucan
// kep: https://kep.k8s.io/3171
// alpha: v1.25
// beta: v1.27
//
// GA: v1.29
// Enables SecretRef field in CSI NodeExpandVolume request.
CSINodeExpandSecret featuregate.Feature = "CSINodeExpandSecret"
@ -170,6 +179,14 @@ const (
// Enables kubelet to detect CSI volume condition and send the event of the abnormal volume to the corresponding pod that is using it.
CSIVolumeHealth featuregate.Feature = "CSIVolumeHealth"
// owner: @seans3
// kep: http://kep.k8s.io/4006
// alpha: v1.29
//
// Enables StreamTranslator proxy to handle WebSockets upgrade requests for the
// version of the RemoteCommand subprotocol that supports the "close" signal.
TranslateStreamCloseWebsocketRequests featuregate.Feature = "TranslateStreamCloseWebsocketRequests"
// owner: @nckturner
// kep: http://kep.k8s.io/2699
// alpha: v1.27
@ -194,15 +211,6 @@ const (
// Set the scheduled time as an annotation in the job.
CronJobsScheduledAnnotation featuregate.Feature = "CronJobsScheduledAnnotation"
// owner: @deejross, @soltysh
// kep: https://kep.k8s.io/3140
// alpha: v1.24
// beta: v1.25
// GA: 1.27
//
// Enables support for time zones in CronJobs.
CronJobTimeZone featuregate.Feature = "CronJobTimeZone"
// owner: @thockin
// deprecated: v1.28
//
@ -215,29 +223,30 @@ const (
// owner: @elezar
// kep: http://kep.k8s.io/4009
// alpha: v1.28
// beta: v1.29
//
// Add support for CDI Device IDs in the Device Plugin API.
DevicePluginCDIDevices featuregate.Feature = "DevicePluginCDIDevices"
// owner: @andrewsykim
// alpha: v1.22
// beta: v1.29
//
// Disable any functionality in kube-apiserver, kube-controller-manager and kubelet related to the `--cloud-provider` component flag.
DisableCloudProviders featuregate.Feature = "DisableCloudProviders"
// owner: @andrewsykim
// alpha: v1.23
// beta: v1.29
//
// Disable in-tree functionality in kubelet to authenticate to cloud provider container registries for image pull credentials.
DisableKubeletCloudCredentialProviders featuregate.Feature = "DisableKubeletCloudCredentialProviders"
// owner: @derekwaynecarr
// alpha: v1.20
// beta: v1.21 (off by default until 1.22)
// ga: v1.27
//
// Enables usage of hugepages-<size> in downward API.
DownwardAPIHugePages featuregate.Feature = "DownwardAPIHugePages"
// owner: @HirazawaUi
// kep: http://kep.k8s.io/4004
// alpha: v1.29
// DisableNodeKubeProxyVersion disables the status.nodeInfo.kubeProxyVersion field of v1.Node
DisableNodeKubeProxyVersion featuregate.Feature = "DisableNodeKubeProxyVersion"
// owner: @pohly
// kep: http://kep.k8s.io/3063
@ -280,15 +289,6 @@ const (
// This flag used to be needed for dockershim CRI and currently does nothing.
ExperimentalHostUserNamespaceDefaultingGate featuregate.Feature = "ExperimentalHostUserNamespaceDefaulting"
// owner: @yuzhiquan, @bowei, @PxyUp, @SergeyKanzhelev
// kep: https://kep.k8s.io/2727
// alpha: v1.23
// beta: v1.24
// stable: v1.27
//
// Enables GRPC probe method for {Liveness,Readiness,Startup}Probe.
GRPCContainerProbe featuregate.Feature = "GRPCContainerProbe"
// owner: @bobbypage
// alpha: v1.20
// beta: v1.21
@ -385,19 +385,11 @@ const (
// owner: @mimowo
// kep: https://kep.k8s.io/3850
// alpha: v1.28
// beta: v1.29
//
// Allows users to specify counting of failed pods per index.
JobBackoffLimitPerIndex featuregate.Feature = "JobBackoffLimitPerIndex"
// owner: @ahg
// beta: v1.23
// stable: v1.27
//
// Allow updating node scheduling directives in the pod template of jobs. Specifically,
// node affinity, selector and tolerations. This is allowed only for suspended jobs
// that have never been unsuspended before.
JobMutableNodeSchedulingDirectives featuregate.Feature = "JobMutableNodeSchedulingDirectives"
// owner: @mimowo
// kep: https://kep.k8s.io/3329
// alpha: v1.25
@ -410,6 +402,7 @@ const (
// owner: @kannon92
// kep : https://kep.k8s.io/3939
// alpha: v1.28
// beta: v1.29
//
// Allow users to specify recreating pods of a job only when
// pods have fully terminated.
@ -421,17 +414,6 @@ const (
// Track the number of pods with Ready condition in the Job status.
JobReadyPods featuregate.Feature = "JobReadyPods"
// owner: @alculquicondor
// alpha: v1.22
// beta: v1.23
// stable: v1.26
//
// Track Job completion without relying on Pod remaining in the cluster
// indefinitely. Pod finalizers, in addition to a field in the Job status
// allow the Job controller to keep track of Pods that it didn't account for
// yet.
JobTrackingWithFinalizers featuregate.Feature = "JobTrackingWithFinalizers"
// owner: @marquiz
// kep: http://kep.k8s.io/4033
// alpha: v1.28
@ -478,6 +460,12 @@ const (
// Enable POD resources API to return allocatable resources
KubeletPodResourcesGetAllocatable featuregate.Feature = "KubeletPodResourcesGetAllocatable"
// KubeletSeparateDiskGC enables the kubelet to garbage collect images/containers on different filesystems
// owner: @kannon92
// kep: https://kep.k8s.io/4191
// alpha: v1.29
KubeletSeparateDiskGC featuregate.Feature = "KubeletSeparateDiskGC"
// owner: @sallyom
// kep: https://kep.k8s.io/2832
// alpha: v1.25
@ -494,14 +482,6 @@ const (
// `externalTrafficPolicy: Cluster` services.
KubeProxyDrainingTerminatingNodes featuregate.Feature = "KubeProxyDrainingTerminatingNodes"
// owner: @zshihang
// kep: https://kep.k8s.io/2800
// beta: v1.24
// ga: v1.26
//
// Stop auto-generation of secret-based service account tokens.
LegacyServiceAccountTokenNoAutoGeneration featuregate.Feature = "LegacyServiceAccountTokenNoAutoGeneration"
// owner: @zshihang
// kep: http://kep.k8s.io/2800
// alpha: v1.26
@ -513,6 +493,7 @@ const (
// owner: @yt2985
// kep: http://kep.k8s.io/2800
// alpha: v1.28
// beta: v1.29
//
// Enables cleaning up of secret-based service account tokens.
LegacyServiceAccountTokenCleanUp featuregate.Feature = "LegacyServiceAccountTokenCleanUp"
@ -531,6 +512,13 @@ const (
// Enables scaling down replicas via logarithmic comparison of creation/ready timestamps
LogarithmicScaleDown featuregate.Feature = "LogarithmicScaleDown"
// owner: @sanposhiho
// kep: https://kep.k8s.io/3633
// alpha: v1.29
//
// Enables the MatchLabelKeys and MismatchLabelKeys in PodAffinity and PodAntiAffinity.
MatchLabelKeysInPodAffinity featuregate.Feature = "MatchLabelKeysInPodAffinity"
// owner: @denkensk
// kep: https://kep.k8s.io/3243
// alpha: v1.25
@ -574,13 +562,6 @@ const (
// Enables new performance-improving code in kube-proxy iptables mode
MinimizeIPTablesRestore featuregate.Feature = "MinimizeIPTablesRestore"
// owner: @sarveshr7
// kep: https://kep.k8s.io/2593
// alpha: v1.25
//
// Enables the MultiCIDR Range allocator.
MultiCIDRRangeAllocator featuregate.Feature = "MultiCIDRRangeAllocator"
// owner: @aojea
// kep: https://kep.k8s.io/1880
// alpha: v1.27
@ -595,6 +576,13 @@ const (
// Robust VolumeManager reconstruction after kubelet restart.
NewVolumeManagerReconstruction featuregate.Feature = "NewVolumeManagerReconstruction"
// owner: @danwinship
// kep: https://kep.k8s.io/3866
// alpha: v1.29
//
// Allows running kube-proxy with `--mode nftables`.
NFTablesProxyMode featuregate.Feature = "NFTablesProxyMode"
// owner: @aravindhp @LorbusChris
// kep: http://kep.k8s.io/2271
// alpha: v1.27
@ -664,8 +652,9 @@ const (
// Set pod completion index as a pod label for Indexed Jobs.
PodIndexLabel featuregate.Feature = "PodIndexLabel"
// owner: @ddebroy
// owner: @ddebroy, @kannon92
// alpha: v1.25
// beta: v1.29
//
// Enables reporting of PodReadyToStartContainersCondition condition in pod status after pod
// sandbox creation and network configuration completes successfully
@ -674,10 +663,18 @@ const (
// owner: @wzshiming
// kep: http://kep.k8s.io/2681
// alpha: v1.28
// beta: v1.29
//
// Adds pod.status.hostIPs and downward API
PodHostIPs featuregate.Feature = "PodHostIPs"
// owner: @AxeZhan
// kep: http://kep.k8s.io/3960
// alpha: v1.29
//
// Enables SleepAction in container lifecycle hooks
PodLifecycleSleepAction featuregate.Feature = "PodLifecycleSleepAction"
// owner: @Huang-Wei
// kep: https://kep.k8s.io/3521
// alpha: v1.26
@ -686,14 +683,6 @@ const (
// Enable users to specify when a Pod is ready for scheduling.
PodSchedulingReadiness featuregate.Feature = "PodSchedulingReadiness"
// owner: @rphillips
// alpha: v1.21
// beta: v1.22
// ga: v1.28
//
// Allows user to override pod-level terminationGracePeriod for probes
ProbeTerminationGracePeriod featuregate.Feature = "ProbeTerminationGracePeriod"
// owner: @jessfraz
// alpha: v1.12
//
@ -720,6 +709,7 @@ const (
// kep: https://kep.k8s.io/2485
// alpha: v1.22
// beta: v1.27
// GA: v1.29
//
// Enables usage of the ReadWriteOncePod PersistentVolume access mode.
ReadWriteOncePod featuregate.Feature = "ReadWriteOncePod"
@ -731,15 +721,6 @@ const (
// Allow users to recover from volume expansion failure
RecoverVolumeExpansionFailure featuregate.Feature = "RecoverVolumeExpansionFailure"
// owner: @RomanBednar
// kep: https://kep.k8s.io/3333
// alpha: v1.25
// beta: 1.26
// stable: v1.28
//
// Allow assigning StorageClass to unbound PVCs retroactively
RetroactiveDefaultStorageClass featuregate.Feature = "RetroactiveDefaultStorageClass"
// owner: @mikedanese
// alpha: v1.7
// beta: v1.12
@ -749,6 +730,13 @@ const (
// certificate as expiration approaches.
RotateKubeletServerCertificate featuregate.Feature = "RotateKubeletServerCertificate"
// owner: @kiashok
// kep: https://kep.k8s.io/4216
// alpha: v1.29
//
// Adds support to pull images based on the runtime class specified.
RuntimeClassInImageCriAPI featuregate.Feature = "RuntimeClassInImageCriApi"
// owner: @danielvegamyhre
// kep: https://kep.k8s.io/2413
// beta: v1.27
@ -759,22 +747,13 @@ const (
ElasticIndexedJob featuregate.Feature = "ElasticIndexedJob"
// owner: @sanposhiho
// kep: http://kep.k8s.io/3063
// kep: http://kep.k8s.io/4247
// beta: v1.28
//
// Enables the scheduler's enhancement called QueueingHints,
// which helps reduce useless requeueing.
SchedulerQueueingHints featuregate.Feature = "SchedulerQueueingHints"
// owner: @saschagrunert
// kep: https://kep.k8s.io/2413
// alpha: v1.22
// beta: v1.25
// ga: v1.27
//
// Enables the use of `RuntimeDefault` as the default seccomp profile for all workloads.
SeccompDefault featuregate.Feature = "SeccompDefault"
// owner: @mtardy
// alpha: v1.0
//
@ -783,10 +762,48 @@ const (
// https://github.com/kubernetes/kubernetes/issues/111516
SecurityContextDeny featuregate.Feature = "SecurityContextDeny"
// owner: @atosatto @yuanchen8911
// kep: http://kep.k8s.io/3902
// beta: v1.29
//
// Decouples Taint Eviction Controller, performing taint-based Pod eviction, from Node Lifecycle Controller.
SeparateTaintEvictionController featuregate.Feature = "SeparateTaintEvictionController"
// owner: @munnerz
// kep: http://kep.k8s.io/4193
// alpha: v1.29
//
// Controls whether JTIs (UUIDs) are embedded into generated service account tokens, and whether these JTIs are
// recorded into the audit log for future requests made by these tokens.
ServiceAccountTokenJTI featuregate.Feature = "ServiceAccountTokenJTI"
// owner: @munnerz
// kep: http://kep.k8s.io/4193
// alpha: v1.29
//
// Controls whether the apiserver supports binding service account tokens to Node objects.
ServiceAccountTokenNodeBinding featuregate.Feature = "ServiceAccountTokenNodeBinding"
// owner: @munnerz
// kep: http://kep.k8s.io/4193
// alpha: v1.29
//
// Controls whether the apiserver will validate Node claims in service account tokens.
ServiceAccountTokenNodeBindingValidation featuregate.Feature = "ServiceAccountTokenNodeBindingValidation"
// owner: @munnerz
// kep: http://kep.k8s.io/4193
// alpha: v1.29
//
// Controls whether the apiserver embeds the node name and uid for the associated node when issuing
// service account tokens bound to Pod objects.
ServiceAccountTokenPodNodeInfo featuregate.Feature = "ServiceAccountTokenPodNodeInfo"
// owner: @xuzhenglun
// kep: http://kep.k8s.io/3682
// alpha: v1.27
// beta: v1.28
// stable: v1.29
//
// Subdivide the NodePort range for dynamic and static port allocation.
ServiceNodePortStaticSubrange featuregate.Feature = "ServiceNodePortStaticSubrange"
@ -837,14 +854,6 @@ const (
// Enables topology aware hints for EndpointSlices
TopologyAwareHints featuregate.Feature = "TopologyAwareHints"
// owner: @lmdaly, @swatisehgal (for GA graduation)
// alpha: v1.16
// beta: v1.18
// GA: v1.27
//
// Enable resource managers to make NUMA aligned decisions
TopologyManager featuregate.Feature = "TopologyManager"
// owner: @PiotrProkop
// kep: https://kep.k8s.io/3545
// alpha: v1.26
@ -886,6 +895,13 @@ const (
// Enables user namespace support for stateless pods.
UserNamespacesSupport featuregate.Feature = "UserNamespacesSupport"
// owner: @mattcarry, @sunnylovestiramisu
// kep: https://kep.k8s.io/3751
// alpha: v1.29
//
// Enables user-specified volume attributes for persistent volumes, like iops and throughput.
VolumeAttributesClass featuregate.Feature = "VolumeAttributesClass"
// owner: @cofyc
// alpha: v1.21
VolumeCapacityPriority featuregate.Feature = "VolumeCapacityPriority"
@ -934,6 +950,31 @@ const (
//
// Enables In-Place Pod Vertical Scaling
InPlacePodVerticalScaling featuregate.Feature = "InPlacePodVerticalScaling"
// owner: @Sh4d1,@RyanAoh
// kep: http://kep.k8s.io/1860
// alpha: v1.29
// LoadBalancerIPMode enables the IPMode field in the LoadBalancerIngress status of a Service
LoadBalancerIPMode featuregate.Feature = "LoadBalancerIPMode"
// owner: @haircommander
// kep: http://kep.k8s.io/4210
// alpha: v1.29
// ImageMaximumGCAge enables the Kubelet configuration field of the same name, allowing an admin
// to specify the age after which an image will be garbage collected.
ImageMaximumGCAge featuregate.Feature = "ImageMaximumGCAge"
// owner: @saschagrunert
// alpha: v1.28
//
// Enables user namespace support for Pod Security Standards. Enabling this
// feature will modify all Pod Security Standard rules to allow setting:
// spec[.*].securityContext.[runAsNonRoot,runAsUser]
// This feature gate should only be enabled if all nodes in the cluster
// support the user namespace feature and have it enabled. The feature gate
// will not graduate or be enabled by default in future Kubernetes
// releases.
UserNamespacesPodSecurityStandards featuregate.Feature = "UserNamespacesPodSecurityStandards"
)
func init() {
@ -949,16 +990,20 @@ func init() {
var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
CrossNamespaceVolumeDataSource: {Default: false, PreRelease: featuregate.Alpha},
AllowServiceLBStatusOnNonLB: {Default: false, PreRelease: featuregate.Deprecated}, // remove after 1.29
AnyVolumeDataSource: {Default: true, PreRelease: featuregate.Beta}, // on by default in 1.24
APISelfSubjectReview: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.28; remove in 1.30
AppArmor: {Default: true, PreRelease: featuregate.Beta},
CloudDualStackNodeIPs: {Default: false, PreRelease: featuregate.Alpha},
CloudDualStackNodeIPs: {Default: true, PreRelease: featuregate.Beta},
ClusterTrustBundle: {Default: false, PreRelease: featuregate.Alpha},
ClusterTrustBundleProjection: {Default: false, PreRelease: featuregate.Alpha},
CPUCFSQuotaPeriod: {Default: false, PreRelease: featuregate.Alpha},
CPUManager: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.26
@ -975,13 +1020,13 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
CSIMigrationRBD: {Default: false, PreRelease: featuregate.Deprecated}, // deprecated in 1.28, remove in 1.31
CSIMigrationvSphere: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
CSINodeExpandSecret: {Default: true, PreRelease: featuregate.Beta},
CSINodeExpandSecret: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31
CSIVolumeHealth: {Default: false, PreRelease: featuregate.Alpha},
SkipReadOnlyValidationGCE: {Default: false, PreRelease: featuregate.Alpha},
SkipReadOnlyValidationGCE: {Default: true, PreRelease: featuregate.Deprecated}, // remove in 1.31
TranslateStreamCloseWebsocketRequests: {Default: false, PreRelease: featuregate.Alpha},
CloudControllerManagerWebhook: {Default: false, PreRelease: featuregate.Alpha},
@ -991,17 +1036,15 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
CronJobsScheduledAnnotation: {Default: true, PreRelease: featuregate.Beta},
CronJobTimeZone: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
DefaultHostNetworkHostPortsInPodTemplates: {Default: false, PreRelease: featuregate.Deprecated},
DisableCloudProviders: {Default: false, PreRelease: featuregate.Alpha},
DisableCloudProviders: {Default: true, PreRelease: featuregate.Beta},
DisableKubeletCloudCredentialProviders: {Default: false, PreRelease: featuregate.Alpha},
DisableKubeletCloudCredentialProviders: {Default: true, PreRelease: featuregate.Beta},
DevicePluginCDIDevices: {Default: false, PreRelease: featuregate.Alpha},
DisableNodeKubeProxyVersion: {Default: false, PreRelease: featuregate.Alpha},
DownwardAPIHugePages: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in v1.29
DevicePluginCDIDevices: {Default: true, PreRelease: featuregate.Beta},
DynamicResourceAllocation: {Default: false, PreRelease: featuregate.Alpha},
@ -1013,8 +1056,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: featuregate.Deprecated, LockToDefault: true}, // remove in 1.30
GRPCContainerProbe: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, //remove in 1.29
GracefulNodeShutdown: {Default: true, PreRelease: featuregate.Beta},
GracefulNodeShutdownBasedOnPodPriority: {Default: true, PreRelease: featuregate.Beta},
@ -1041,17 +1082,13 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
IPTablesOwnershipCleanup: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.30
JobBackoffLimitPerIndex: {Default: false, PreRelease: featuregate.Alpha},
JobMutableNodeSchedulingDirectives: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
JobBackoffLimitPerIndex: {Default: true, PreRelease: featuregate.Beta},
JobPodFailurePolicy: {Default: true, PreRelease: featuregate.Beta},
JobPodReplacementPolicy: {Default: false, PreRelease: featuregate.Alpha},
JobPodReplacementPolicy: {Default: true, PreRelease: featuregate.Beta},
JobReadyPods: {Default: true, PreRelease: featuregate.Beta},
JobTrackingWithFinalizers: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28
JobReadyPods: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31
KubeletCgroupDriverFromCRI: {Default: false, PreRelease: featuregate.Alpha},
@ -1065,20 +1102,22 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
KubeletPodResourcesGetAllocatable: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.28, remove in 1.30
KubeletSeparateDiskGC: {Default: false, PreRelease: featuregate.Alpha},
KubeletTracing: {Default: true, PreRelease: featuregate.Beta},
KubeProxyDrainingTerminatingNodes: {Default: false, PreRelease: featuregate.Alpha},
LegacyServiceAccountTokenNoAutoGeneration: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
LegacyServiceAccountTokenTracking: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.30
LegacyServiceAccountTokenCleanUp: {Default: false, PreRelease: featuregate.Alpha},
LegacyServiceAccountTokenCleanUp: {Default: true, PreRelease: featuregate.Beta},
LocalStorageCapacityIsolationFSQuotaMonitoring: {Default: false, PreRelease: featuregate.Alpha},
LogarithmicScaleDown: {Default: true, PreRelease: featuregate.Beta},
MatchLabelKeysInPodAffinity: {Default: false, PreRelease: featuregate.Alpha},
MatchLabelKeysInPodTopologySpread: {Default: true, PreRelease: featuregate.Beta},
MaxUnavailableStatefulSet: {Default: false, PreRelease: featuregate.Alpha},
@ -1091,12 +1130,12 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
MinimizeIPTablesRestore: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.30
MultiCIDRRangeAllocator: {Default: false, PreRelease: featuregate.Alpha},
MultiCIDRServiceAllocator: {Default: false, PreRelease: featuregate.Alpha},
NewVolumeManagerReconstruction: {Default: true, PreRelease: featuregate.Beta},
NFTablesProxyMode: {Default: false, PreRelease: featuregate.Alpha},
NodeLogQuery: {Default: false, PreRelease: featuregate.Alpha},
NodeOutOfServiceVolumeDetach: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31
@ -1105,7 +1144,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
PDBUnhealthyPodEvictionPolicy: {Default: true, PreRelease: featuregate.Beta},
PersistentVolumeLastPhaseTransitionTime: {Default: false, PreRelease: featuregate.Alpha},
PersistentVolumeLastPhaseTransitionTime: {Default: true, PreRelease: featuregate.Beta},
PodAndContainerStatsFromCRI: {Default: false, PreRelease: featuregate.Alpha},
@ -1113,39 +1152,47 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
PodDisruptionConditions: {Default: true, PreRelease: featuregate.Beta},
PodReadyToStartContainersCondition: {Default: false, PreRelease: featuregate.Alpha},
PodReadyToStartContainersCondition: {Default: true, PreRelease: featuregate.Beta},
PodHostIPs: {Default: false, PreRelease: featuregate.Alpha},
PodHostIPs: {Default: true, PreRelease: featuregate.Beta},
PodLifecycleSleepAction: {Default: false, PreRelease: featuregate.Alpha},
PodSchedulingReadiness: {Default: true, PreRelease: featuregate.Beta},
ProbeTerminationGracePeriod: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
ProcMountType: {Default: false, PreRelease: featuregate.Alpha},
ProxyTerminatingEndpoints: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.30
QOSReserved: {Default: false, PreRelease: featuregate.Alpha},
ReadWriteOncePod: {Default: true, PreRelease: featuregate.Beta},
ReadWriteOncePod: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31
RecoverVolumeExpansionFailure: {Default: false, PreRelease: featuregate.Alpha},
RetroactiveDefaultStorageClass: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
RotateKubeletServerCertificate: {Default: true, PreRelease: featuregate.Beta},
RuntimeClassInImageCriAPI: {Default: false, PreRelease: featuregate.Alpha},
ElasticIndexedJob: {Default: true, PreRelease: featuregate.Beta},
SchedulerQueueingHints: {Default: true, PreRelease: featuregate.Beta},
SeccompDefault: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
SchedulerQueueingHints: {Default: false, PreRelease: featuregate.Beta},
SecurityContextDeny: {Default: false, PreRelease: featuregate.Alpha},
ServiceNodePortStaticSubrange: {Default: true, PreRelease: featuregate.Beta},
SeparateTaintEvictionController: {Default: true, PreRelease: featuregate.Beta},
SidecarContainers: {Default: false, PreRelease: featuregate.Alpha},
ServiceAccountTokenJTI: {Default: false, PreRelease: featuregate.Alpha},
ServiceAccountTokenPodNodeInfo: {Default: false, PreRelease: featuregate.Alpha},
ServiceAccountTokenNodeBinding: {Default: false, PreRelease: featuregate.Alpha},
ServiceAccountTokenNodeBindingValidation: {Default: false, PreRelease: featuregate.Alpha},
ServiceNodePortStaticSubrange: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.29; remove in 1.31
SidecarContainers: {Default: true, PreRelease: featuregate.Beta},
SizeMemoryBackedVolumes: {Default: true, PreRelease: featuregate.Beta},
@ -1157,8 +1204,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
TopologyAwareHints: {Default: true, PreRelease: featuregate.Beta},
TopologyManager: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.27; remove in 1.29
TopologyManagerPolicyAlphaOptions: {Default: false, PreRelease: featuregate.Alpha},
TopologyManagerPolicyBetaOptions: {Default: true, PreRelease: featuregate.Beta},
@ -1167,6 +1212,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
UnknownVersionInteroperabilityProxy: {Default: false, PreRelease: featuregate.Alpha},
VolumeAttributesClass: {Default: false, PreRelease: featuregate.Alpha},
VolumeCapacityPriority: {Default: false, PreRelease: featuregate.Alpha},
UserNamespacesSupport: {Default: false, PreRelease: featuregate.Alpha},
@ -1185,6 +1232,12 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
PodIndexLabel: {Default: true, PreRelease: featuregate.Beta},
LoadBalancerIPMode: {Default: false, PreRelease: featuregate.Alpha},
ImageMaximumGCAge: {Default: false, PreRelease: featuregate.Alpha},
UserNamespacesPodSecurityStandards: {Default: false, PreRelease: featuregate.Alpha},
// inherited features from generic apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
@ -1192,25 +1245,33 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
genericfeatures.AggregatedDiscoveryEndpoint: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.APIListChunking: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.APIListChunking: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32
genericfeatures.APIPriorityAndFairness: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.APIPriorityAndFairness: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31
genericfeatures.APIResponseCompression: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.KMSv1: {Default: false, PreRelease: featuregate.Deprecated},
genericfeatures.KMSv2: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31
genericfeatures.KMSv2KDF: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31
genericfeatures.ValidatingAdmissionPolicy: {Default: false, PreRelease: featuregate.Beta},
genericfeatures.CustomResourceValidationExpressions: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.CustomResourceValidationExpressions: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31
genericfeatures.OpenAPIEnums: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.OpenAPIV3: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
genericfeatures.ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
genericfeatures.ServerSideFieldValidation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
genericfeatures.UnauthenticatedHTTP2DOSMitigation: {Default: false, PreRelease: featuregate.Beta},
genericfeatures.StructuredAuthorizationConfiguration: {Default: false, PreRelease: featuregate.Alpha},
genericfeatures.UnauthenticatedHTTP2DOSMitigation: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.ZeroLimitedNominalConcurrencyShares: {Default: false, PreRelease: featuregate.Beta},
// inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:

View File

@ -0,0 +1,173 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package filesystem
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"time"
)
// DefaultFs implements Filesystem using same-named functions from "os" and "io"
type DefaultFs struct {
root string
}
var _ Filesystem = &DefaultFs{}
// NewTempFs returns a fake Filesystem in temporary directory, useful for unit tests
func NewTempFs() Filesystem {
path, _ := os.MkdirTemp("", "tmpfs")
return &DefaultFs{
root: path,
}
}
func (fs *DefaultFs) prefix(path string) string {
if len(fs.root) == 0 {
return path
}
return filepath.Join(fs.root, path)
}
// Stat via os.Stat
func (fs *DefaultFs) Stat(name string) (os.FileInfo, error) {
return os.Stat(fs.prefix(name))
}
// Create via os.Create
func (fs *DefaultFs) Create(name string) (File, error) {
file, err := os.Create(fs.prefix(name))
if err != nil {
return nil, err
}
return &defaultFile{file}, nil
}
// Rename via os.Rename
func (fs *DefaultFs) Rename(oldpath, newpath string) error {
if !strings.HasPrefix(oldpath, fs.root) {
oldpath = fs.prefix(oldpath)
}
if !strings.HasPrefix(newpath, fs.root) {
newpath = fs.prefix(newpath)
}
return os.Rename(oldpath, newpath)
}
// MkdirAll via os.MkdirAll
func (fs *DefaultFs) MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(fs.prefix(path), perm)
}
// MkdirAllWithPathCheck checks if path exists already. If not, it creates a directory
// named path, along with any necessary parents, and returns nil, or else returns an error.
// Permission bits perm (before umask) are used for all directories that
// MkdirAllWithPathCheck creates.
// If path is already a directory, MkdirAllWithPathCheck does nothing and returns nil.
// NOTE: On Windows NTFS, mount points are implemented as reparse points
// (similar to symlinks) and do not represent an actual directory. Hence the directory existence
// check for Windows NTFS will NOT check for a dir, but for symlink presence.
func MkdirAllWithPathCheck(path string, perm os.FileMode) error {
if dir, err := os.Lstat(path); err == nil {
// If the path exists already,
// 1. for Unix/Linux OS, check if the path is directory.
// 2. for windows NTFS, check if the path is symlink instead of directory.
if dir.IsDir() ||
(runtime.GOOS == "windows" && (dir.Mode()&os.ModeSymlink != 0)) {
return nil
}
return fmt.Errorf("path %v exists but is not a directory", path)
}
// If existence of path not known, attempt to create it.
if err := os.MkdirAll(path, perm); err != nil {
return err
}
return nil
}
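For illustration only (not part of the vendored diff): a minimal sketch of calling MkdirAllWithPathCheck from the k8s.io/kubernetes/pkg/util/filesystem package shown above. The directory name and permissions are invented for the example.

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"k8s.io/kubernetes/pkg/util/filesystem"
)

func main() {
	// Create a plugin directory if it does not already exist; on Windows NTFS an
	// existing mount point (reparse point) is treated as an existing directory.
	dir := filepath.Join(os.TempDir(), "kubelet-plugins") // illustrative path
	if err := filesystem.MkdirAllWithPathCheck(dir, 0o755); err != nil {
		fmt.Println("mkdir failed:", err)
		return
	}
	fmt.Println("directory ready:", dir)
}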
// Chtimes via os.Chtimes
func (fs *DefaultFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
return os.Chtimes(fs.prefix(name), atime, mtime)
}
// RemoveAll via os.RemoveAll
func (fs *DefaultFs) RemoveAll(path string) error {
return os.RemoveAll(fs.prefix(path))
}
// Remove via os.Remove
func (fs *DefaultFs) Remove(name string) error {
return os.Remove(fs.prefix(name))
}
// ReadFile via os.ReadFile
func (fs *DefaultFs) ReadFile(filename string) ([]byte, error) {
return os.ReadFile(fs.prefix(filename))
}
// TempDir via os.MkdirTemp
func (fs *DefaultFs) TempDir(dir, prefix string) (string, error) {
return os.MkdirTemp(fs.prefix(dir), prefix)
}
// TempFile via os.CreateTemp
func (fs *DefaultFs) TempFile(dir, prefix string) (File, error) {
file, err := os.CreateTemp(fs.prefix(dir), prefix)
if err != nil {
return nil, err
}
return &defaultFile{file}, nil
}
// ReadDir via os.ReadDir
func (fs *DefaultFs) ReadDir(dirname string) ([]os.DirEntry, error) {
return os.ReadDir(fs.prefix(dirname))
}
// Walk via filepath.Walk
func (fs *DefaultFs) Walk(root string, walkFn filepath.WalkFunc) error {
return filepath.Walk(fs.prefix(root), walkFn)
}
// defaultFile implements File using same-named functions from "os"
type defaultFile struct {
file *os.File
}
// Name via os.File.Name
func (file *defaultFile) Name() string {
return file.file.Name()
}
// Write via os.File.Write
func (file *defaultFile) Write(b []byte) (n int, err error) {
return file.file.Write(b)
}
// Sync via os.File.Sync
func (file *defaultFile) Sync() error {
return file.file.Sync()
}
// Close via os.File.Close
func (file *defaultFile) Close() error {
return file.file.Close()
}

View File

@ -0,0 +1,52 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package filesystem
import (
"os"
"path/filepath"
"time"
)
// Filesystem is an interface that we can use to mock various filesystem operations
type Filesystem interface {
// from "os"
Stat(name string) (os.FileInfo, error)
Create(name string) (File, error)
Rename(oldpath, newpath string) error
MkdirAll(path string, perm os.FileMode) error
Chtimes(name string, atime time.Time, mtime time.Time) error
RemoveAll(path string) error
Remove(name string) error
// from "os"
ReadFile(filename string) ([]byte, error)
TempDir(dir, prefix string) (string, error)
TempFile(dir, prefix string) (File, error)
ReadDir(dirname string) ([]os.DirEntry, error)
Walk(root string, walkFn filepath.WalkFunc) error
}
// File is an interface that we can use to mock various filesystem operations typically
// accessed through the File object from the "os" package
type File interface {
// for now, the only os.File methods used are those below, add more as necessary
Name() string
Write(b []byte) (n int, err error)
Sync() error
Close() error
}
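For illustration only: a small test-style sketch of the Filesystem abstraction above, using NewTempFs so that every path is resolved under a throwaway temp root. File names and contents are invented for the example.

package filesystem_test

import (
	"testing"

	"k8s.io/kubernetes/pkg/util/filesystem"
)

func TestTempFsRoundTrip(t *testing.T) {
	fs := filesystem.NewTempFs() // all paths below land under a temp directory

	if err := fs.MkdirAll("config", 0o755); err != nil {
		t.Fatalf("MkdirAll: %v", err)
	}
	f, err := fs.Create("config/settings.yaml")
	if err != nil {
		t.Fatalf("Create: %v", err)
	}
	if _, err := f.Write([]byte("enabled: true\n")); err != nil {
		t.Fatalf("Write: %v", err)
	}
	if err := f.Close(); err != nil {
		t.Fatalf("Close: %v", err)
	}
	data, err := fs.ReadFile("config/settings.yaml")
	if err != nil {
		t.Fatalf("ReadFile: %v", err)
	}
	if string(data) != "enabled: true\n" {
		t.Fatalf("unexpected content: %q", data)
	}
}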

View File

@ -0,0 +1,37 @@
//go:build freebsd || linux || darwin
// +build freebsd linux darwin
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package filesystem
import (
"fmt"
"os"
)
// IsUnixDomainSocket returns whether a given file is an AF_UNIX socket file
func IsUnixDomainSocket(filePath string) (bool, error) {
fi, err := os.Stat(filePath)
if err != nil {
return false, fmt.Errorf("stat file %s failed: %v", filePath, err)
}
if fi.Mode()&os.ModeSocket == 0 {
return false, nil
}
return true, nil
}

View File

@ -0,0 +1,87 @@
//go:build windows
// +build windows
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package filesystem
import (
"fmt"
"net"
"os"
"time"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
)
const (
// Amount of time to wait between attempting to use a Unix domain socket.
// As detailed in https://github.com/kubernetes/kubernetes/issues/104584
// the first attempt will most likely fail, hence the need to retry
socketDialRetryPeriod = 1 * time.Second
// Overall timeout value to dial a Unix domain socket, including retries
socketDialTimeout = 4 * time.Second
)
// IsUnixDomainSocket returns whether a given file is an AF_UNIX socket file
// Note that due to the retry logic inside, it could take up to 4 seconds
// to determine whether or not the file path supplied is a Unix domain socket
func IsUnixDomainSocket(filePath string) (bool, error) {
// Due to the absence of golang support for os.ModeSocket in Windows (https://github.com/golang/go/issues/33357)
// we need to dial the file and check if we receive an error to determine if a file is Unix Domain Socket file.
// Note that querying for the Reparse Points (https://docs.microsoft.com/en-us/windows/win32/fileio/reparse-points)
// for the file (using FSCTL_GET_REPARSE_POINT) and checking for reparse tag: reparseTagSocket
// does NOT work in 1809 if the socket file is created within a bind mounted directory by a container
// and the FSCTL is issued in the host by the kubelet.
// If the file does not exist, it cannot be a Unix domain socket.
if _, err := os.Stat(filePath); os.IsNotExist(err) {
return false, fmt.Errorf("File %s not found. Err: %v", filePath, err)
}
klog.V(6).InfoS("Function IsUnixDomainSocket starts", "filePath", filePath)
// As detailed in https://github.com/kubernetes/kubernetes/issues/104584 we cannot rely
// on the Unix Domain socket working on the very first try, hence the potential need to
// dial multiple times
var lastSocketErr error
err := wait.PollImmediate(socketDialRetryPeriod, socketDialTimeout,
func() (bool, error) {
klog.V(6).InfoS("Dialing the socket", "filePath", filePath)
var c net.Conn
c, lastSocketErr = net.Dial("unix", filePath)
if lastSocketErr == nil {
c.Close()
klog.V(6).InfoS("Socket dialed successfully", "filePath", filePath)
return true, nil
}
klog.V(6).InfoS("Failed the current attempt to dial the socket, so pausing before retry",
"filePath", filePath, "err", lastSocketErr, "socketDialRetryPeriod",
socketDialRetryPeriod)
return false, nil
})
// PollImmediate will return "timed out waiting for the condition" if the function it
// invokes never returns true
if err != nil {
klog.V(2).InfoS("Failed all attempts to dial the socket so marking it as a non-Unix Domain socket. Last socket error along with the error from PollImmediate follow",
"filePath", filePath, "lastSocketErr", lastSocketErr, "err", err)
return false, nil
}
return true, nil
}
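For illustration only: a sketch of calling IsUnixDomainSocket before treating a path as a socket endpoint. The path is invented; on Windows the call may take a few seconds because of the dial retries described above.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/filesystem"
)

func main() {
	path := `C:\var\lib\kubelet\plugins\example.sock` // illustrative path
	isSocket, err := filesystem.IsUnixDomainSocket(path)
	if err != nil {
		fmt.Println("check failed:", err)
		return
	}
	fmt.Println("is unix domain socket:", isSocket)
}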

View File

@ -0,0 +1,89 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package filesystem
import (
"github.com/fsnotify/fsnotify"
)
// FSWatcher is a callback-based filesystem watcher abstraction for fsnotify.
type FSWatcher interface {
// Initializes the watcher with the given watch handlers.
// Called before all other methods.
Init(FSEventHandler, FSErrorHandler) error
// Starts listening for events and errors.
// When an event or error occurs, the corresponding handler is called.
Run()
// Add a filesystem path to watch
AddWatch(path string) error
}
// FSEventHandler is called when a fsnotify event occurs.
type FSEventHandler func(event fsnotify.Event)
// FSErrorHandler is called when a fsnotify error occurs.
type FSErrorHandler func(err error)
type fsnotifyWatcher struct {
watcher *fsnotify.Watcher
eventHandler FSEventHandler
errorHandler FSErrorHandler
}
var _ FSWatcher = &fsnotifyWatcher{}
// NewFsnotifyWatcher returns an implementation of FSWatcher that continuously listens for
// fsnotify events and calls the event handler as soon as an event is received.
func NewFsnotifyWatcher() FSWatcher {
return &fsnotifyWatcher{}
}
func (w *fsnotifyWatcher) AddWatch(path string) error {
return w.watcher.Add(path)
}
func (w *fsnotifyWatcher) Init(eventHandler FSEventHandler, errorHandler FSErrorHandler) error {
var err error
w.watcher, err = fsnotify.NewWatcher()
if err != nil {
return err
}
w.eventHandler = eventHandler
w.errorHandler = errorHandler
return nil
}
func (w *fsnotifyWatcher) Run() {
go func() {
defer w.watcher.Close()
for {
select {
case event := <-w.watcher.Events:
if w.eventHandler != nil {
w.eventHandler(event)
}
case err := <-w.watcher.Errors:
if w.errorHandler != nil {
w.errorHandler(err)
}
}
}
}()
}
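For illustration only: a minimal sketch of wiring up the FSWatcher defined above. Init must be called before AddWatch because it creates the underlying fsnotify watcher; the watched path and handler bodies are placeholders.

package main

import (
	"fmt"
	"os"
	"time"

	"github.com/fsnotify/fsnotify"

	"k8s.io/kubernetes/pkg/util/filesystem"
)

func main() {
	w := filesystem.NewFsnotifyWatcher()
	err := w.Init(
		func(event fsnotify.Event) { fmt.Println("event:", event) }, // FSEventHandler
		func(err error) { fmt.Println("watch error:", err) },        // FSErrorHandler
	)
	if err != nil {
		fmt.Println("init failed:", err)
		os.Exit(1)
	}
	if err := w.AddWatch(os.TempDir()); err != nil { // illustrative path
		fmt.Println("add watch failed:", err)
		os.Exit(1)
	}
	w.Run()                      // dispatches events to the handlers in a goroutine
	time.Sleep(30 * time.Second) // keep the toy program alive long enough to see events
}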

View File

@ -23,7 +23,7 @@ import (
// Import the crypto/sha512 algorithm for the docker image parser to work with 384 and 512 sha hashes
_ "crypto/sha512"
dockerref "github.com/docker/distribution/reference"
dockerref "github.com/distribution/reference"
)
// ParseImageName parses a docker image string into three parts: repo, tag and digest.

View File

@ -333,6 +333,13 @@ type KubeletVolumeHost interface {
WaitForCacheSync() error
// Returns hostutil.HostUtils
GetHostUtil() hostutil.HostUtils
// Returns trust anchors from the named ClusterTrustBundle.
GetTrustAnchorsByName(name string, allowMissing bool) ([]byte, error)
// Returns trust anchors from the ClusterTrustBundles selected by signer
// name and label selector.
GetTrustAnchorsBySigner(signerName string, labelSelector *metav1.LabelSelector, allowMissing bool) ([]byte, error)
}
// AttachDetachVolumeHost is a AttachDetach Controller specific interface that plugins can use
@ -1057,7 +1064,7 @@ func NewPersistentVolumeRecyclerPodTemplate() *v1.Pod {
Containers: []v1.Container{
{
Name: "pv-recycler",
Image: "registry.k8s.io/debian-base:v2.0.0",
Image: "registry.k8s.io/build-image/debian-base:bookworm-v1.0.0",
Command: []string{"/bin/sh"},
Args: []string{"-c", "test -e /scrub && find /scrub -mindepth 1 -delete && test -z \"$(ls -A /scrub)\" || exit 1"},
VolumeMounts: []v1.VolumeMount{

View File

@ -21,12 +21,16 @@ package hostutil
import (
"fmt"
"io/fs"
"os"
"path"
"path/filepath"
"strings"
"syscall"
"golang.org/x/sys/windows"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/util/filesystem"
"k8s.io/mount-utils"
utilpath "k8s.io/utils/path"
)
@ -87,9 +91,28 @@ func (hu *HostUtil) MakeRShared(path string) error {
return nil
}
func isSystemCannotAccessErr(err error) bool {
if fserr, ok := err.(*fs.PathError); ok {
errno, ok := fserr.Err.(syscall.Errno)
return ok && errno == windows.ERROR_CANT_ACCESS_FILE
}
return false
}
// GetFileType checks for sockets/block/character devices
func (hu *HostUtil) GetFileType(pathname string) (FileType, error) {
return getFileType(pathname)
filetype, err := getFileType(pathname)
// os.Stat will return a 1920 error (windows.ERROR_CANT_ACCESS_FILE) if we use it on a Unix Socket
// on Windows. In this case, we need to use a different method to check if it's a Unix Socket.
if isSystemCannotAccessErr(err) {
if isSocket, errSocket := filesystem.IsUnixDomainSocket(pathname); errSocket == nil && isSocket {
return FileTypeSocket, nil
}
}
return filetype, err
}
// PathExists checks whether the path exists

View File

@ -168,10 +168,6 @@ func SupportsSELinuxContextMount(volumeSpec *volume.Spec, volumePluginMgr *volum
// VolumeSupportsSELinuxMount returns true if given volume access mode can support mount with SELinux mount options.
func VolumeSupportsSELinuxMount(volumeSpec *volume.Spec) bool {
// Right now, SELinux mount is supported only for ReadWriteOncePod volumes.
if !utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod) {
return false
}
if !utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
return false
}

View File

@ -0,0 +1,72 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"sort"
storagev1alpha1 "k8s.io/api/storage/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
storagev1alpha1listers "k8s.io/client-go/listers/storage/v1alpha1"
"k8s.io/klog/v2"
)
const (
// AlphaIsDefaultVolumeAttributesClassAnnotation is the alpha version of IsDefaultVolumeAttributesClassAnnotation.
AlphaIsDefaultVolumeAttributesClassAnnotation = "volumeattributesclass.alpha.kubernetes.io/is-default-class"
)
// GetDefaultVolumeAttributesClass returns the default VolumeAttributesClass from the store, or nil.
func GetDefaultVolumeAttributesClass(lister storagev1alpha1listers.VolumeAttributesClassLister, driverName string) (*storagev1alpha1.VolumeAttributesClass, error) {
list, err := lister.List(labels.Everything())
if err != nil {
return nil, err
}
defaultClasses := []*storagev1alpha1.VolumeAttributesClass{}
for _, class := range list {
if IsDefaultVolumeAttributesClassAnnotation(class.ObjectMeta) && class.DriverName == driverName {
defaultClasses = append(defaultClasses, class)
klog.V(4).Infof("GetDefaultVolumeAttributesClass added: %s", class.Name)
}
}
if len(defaultClasses) == 0 {
return nil, nil
}
// Primary sort by creation timestamp, newest first
// Secondary sort by class name, ascending order
sort.Slice(defaultClasses, func(i, j int) bool {
if defaultClasses[i].CreationTimestamp.UnixNano() == defaultClasses[j].CreationTimestamp.UnixNano() {
return defaultClasses[i].Name < defaultClasses[j].Name
}
return defaultClasses[i].CreationTimestamp.UnixNano() > defaultClasses[j].CreationTimestamp.UnixNano()
})
if len(defaultClasses) > 1 {
klog.V(4).Infof("%d default VolumeAttributesClass were found, choosing: %s", len(defaultClasses), defaultClasses[0].Name)
}
return defaultClasses[0], nil
}
// IsDefaultVolumeAttributesClassAnnotation returns a boolean if the default
// volume attributes class annotation is set
func IsDefaultVolumeAttributesClassAnnotation(obj metav1.ObjectMeta) bool {
return obj.Annotations[AlphaIsDefaultVolumeAttributesClassAnnotation] == "true"
}
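For illustration only: a sketch of a storage/v1alpha1 VolumeAttributesClass object that GetDefaultVolumeAttributesClass above would pick as the default for its driver. The class name and driver name are invented; the annotation check mirrors IsDefaultVolumeAttributesClassAnnotation.

package main

import (
	"fmt"

	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	class := &storagev1alpha1.VolumeAttributesClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: "gold", // illustrative name
			Annotations: map[string]string{
				"volumeattributesclass.alpha.kubernetes.io/is-default-class": "true",
			},
		},
		DriverName: "example.csi.k8s.io", // illustrative driver
	}
	isDefault := class.Annotations["volumeattributesclass.alpha.kubernetes.io/is-default-class"] == "true"
	fmt.Printf("%s (driver %s) is default: %v\n", class.Name, class.DriverName, isDefault)
}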

View File

@ -4,21 +4,62 @@ rules:
# The following packages are okay to use:
#
# public API
- selectorRegexp: ^k8s[.]io/(api|apimachinery|client-go|component-base|klog|pod-security-admission|utils)/|^[a-z]+(/|$)|github.com/onsi/(ginkgo|gomega)|^k8s[.]io/kubernetes/test/(e2e/framework/internal/|utils)
- selectorRegexp: ^k8s[.]io/(api|apimachinery|client-go|component-base|klog|pod-security-admission|utils)
allowedPrefixes: [ "" ]
# stdlib
- selectorRegexp: ^[a-z]+(/|$)
allowedPrefixes: [ "" ]
# Ginkgo + Gomega.
- selectorRegexp: github.com/onsi/(ginkgo|gomega)|^k8s[.]io/kubernetes/test/(e2e/framework/internal/|utils)
# stdlib x and proto
- selectorRegexp: ^golang.org/x|^google.golang.org/protobuf
allowedPrefixes: [ "" ]
# Ginkgo + Gomega
- selectorRegexp: ^github.com/onsi/(ginkgo|gomega)
allowedPrefixes: [ "" ]
# kube-openapi
- selectorRegexp: ^k8s.io/kube-openapi
allowedPrefixes: [ "" ]
# Public SIG Repos
- selectorRegexp: ^sigs.k8s.io/(json|yaml|structured-merge-diff)
allowedPrefixes: [ "" ]
# some of the shared test helpers (but not E2E sub-packages!)
- selectorRegexp: ^k8s[.]io/kubernetes/test/(e2e/framework/internal/|utils)
allowedPrefixes: [ "" ]
# Third party deps
- selectorRegexp: ^github.com/|^gopkg.in
allowedPrefixes: [
"gopkg.in/inf.v0",
"gopkg.in/yaml.v2",
"github.com/blang/semver/",
"github.com/davecgh/go-spew/spew",
"github.com/evanphx/json-patch",
"github.com/go-logr/logr",
"github.com/gogo/protobuf/proto",
"github.com/gogo/protobuf/sortkeys",
"github.com/golang/protobuf/proto",
"github.com/google/gnostic-models/openapiv2",
"github.com/google/gnostic-models/openapiv3",
"github.com/google/go-cmp/cmp",
"github.com/google/go-cmp/cmp/cmpopts",
"github.com/google/gofuzz",
"github.com/google/uuid",
"github.com/imdario/mergo",
"github.com/prometheus/client_golang/",
"github.com/prometheus/client_model/",
"github.com/prometheus/common/",
"github.com/prometheus/procfs",
"github.com/spf13/cobra",
"github.com/spf13/pflag",
"github.com/stretchr/testify/assert",
"github.com/stretchr/testify/require"
]
# Everything else isn't.
#
# In particular importing any test/e2e/framework/* package would be a

View File

@ -2,7 +2,6 @@
approvers:
- andrewsykim
- fabriziopandini
- pohly
- oomichi
- neolit123
@ -10,7 +9,6 @@ approvers:
reviewers:
- sig-testing-reviewers
- andrewsykim
- fabriziopandini
- pohly
- oomichi
- neolit123
@ -18,4 +16,5 @@ reviewers:
labels:
- area/e2e-test-framework
emeritus_approvers:
- fabriziopandini
- timothysc

View File

@ -4,7 +4,7 @@ The Kubernetes E2E framework simplifies writing Ginkgo tests suites. It's main
usage is for these tests suites in the Kubernetes repository itself:
- test/e2e: runs as client for a Kubernetes cluster. The e2e.test binary is
used for conformance testing.
- test/e2e_node: runs on the same node as a kublet instance. Used for testing
- test/e2e_node: runs on the same node as a kubelet instance. Used for testing
kubelet.
- test/e2e_kubeadm: test suite for kubeadm.

vendor/k8s.io/kubernetes/test/e2e/framework/bugs.go generated vendored Normal file
View File

@ -0,0 +1,108 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"errors"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"github.com/onsi/ginkgo/v2/types"
)
var (
bugs []Bug
bugMutex sync.Mutex
)
// RecordBug stores information about a bug in the E2E suite source code that
// cannot be reported through ginkgo.Fail because it was found outside of some
// test, for example during test registration.
//
// This can be used instead of raising a panic. Then all bugs can be reported
// together instead of failing after the first one.
func RecordBug(bug Bug) {
bugMutex.Lock()
defer bugMutex.Unlock()
bugs = append(bugs, bug)
}
type Bug struct {
FileName string
LineNumber int
Message string
}
// NewBug creates a new bug with a location that is obtained by skipping a certain number
// of stack frames. Passing zero will record the source code location of the direct caller
// of NewBug.
func NewBug(message string, skip int) Bug {
location := types.NewCodeLocation(skip + 1)
return Bug{FileName: location.FileName, LineNumber: location.LineNumber, Message: message}
}
// FormatBugs produces a report that includes all bugs recorded earlier via
// RecordBug. An error is returned with the report if there have been bugs.
func FormatBugs() error {
bugMutex.Lock()
defer bugMutex.Unlock()
if len(bugs) == 0 {
return nil
}
lines := make([]string, 0, len(bugs))
wd, err := os.Getwd()
if err != nil {
return fmt.Errorf("get current directory: %v", err)
}
// Sort by file name, line number, message. For the sake of simplicity
// this uses the full file name even though the output may use a
// relative path. Usually the result should be the same because full
// paths will all have the same prefix.
sort.Slice(bugs, func(i, j int) bool {
switch strings.Compare(bugs[i].FileName, bugs[j].FileName) {
case -1:
return true
case 1:
return false
}
if bugs[i].LineNumber < bugs[j].LineNumber {
return true
}
if bugs[i].LineNumber > bugs[j].LineNumber {
return false
}
return bugs[i].Message < bugs[j].Message
})
for _, bug := range bugs {
// Use relative paths, if possible.
path := bug.FileName
if wd != "" {
if relpath, err := filepath.Rel(wd, bug.FileName); err == nil {
path = relpath
}
}
lines = append(lines, fmt.Sprintf("ERROR: %s:%d: %s\n", path, bug.LineNumber, strings.TrimSpace(bug.Message)))
}
return errors.New(strings.Join(lines, ""))
}
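For illustration only: a sketch of how a suite could use the helpers above, recording a bug during registration and printing the combined report before the run starts. The bug message is a placeholder.

package main

import (
	"fmt"
	"os"

	"k8s.io/kubernetes/test/e2e/framework"
)

func main() {
	// Pretend a problem was detected while registering tests.
	framework.RecordBug(framework.NewBug("test registered with an empty name", 0)) // illustrative message

	// Report everything that was recorded; the returned error holds the full report.
	if err := framework.FormatBugs(); err != nil {
		fmt.Fprint(os.Stderr, err.Error())
		os.Exit(1)
	}
}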

View File

@ -212,8 +212,9 @@ func newAsyncAssertion(ctx context.Context, args []interface{}, consistently boo
args: args,
// PodStart is used as default because waiting for a pod is the
// most common operation.
timeout: TestContext.timeouts.PodStart,
interval: TestContext.timeouts.Poll,
timeout: TestContext.timeouts.PodStart,
interval: TestContext.timeouts.Poll,
consistently: consistently,
}
}
@ -292,13 +293,6 @@ func (f *FailureError) backtrace() {
// }
var ErrFailure error = FailureError{}
// ExpectEqual expects the specified two are the same, otherwise an exception raises
//
// Deprecated: use gomega.Expect().To(gomega.Equal())
func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) {
gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...)
}
// ExpectNotEqual expects the specified two are not the same, otherwise an exception raises
//
// Deprecated: use gomega.Expect().ToNot(gomega.Equal())
@ -362,24 +356,3 @@ func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
}
Fail(prefix+err.Error(), 1+offset)
}
// ExpectConsistOf expects actual contains precisely the extra elements. The ordering of the elements does not matter.
//
// Deprecated: use gomega.Expect().To(gomega.ConsistOf()) instead
func ExpectConsistOf(actual interface{}, extra interface{}, explain ...interface{}) {
gomega.ExpectWithOffset(1, actual).To(gomega.ConsistOf(extra), explain...)
}
// ExpectHaveKey expects the actual map has the key in the keyset
//
// Deprecated: use gomega.Expect().To(gomega.HaveKey()) instead
func ExpectHaveKey(actual interface{}, key interface{}, explain ...interface{}) {
gomega.ExpectWithOffset(1, actual).To(gomega.HaveKey(key), explain...)
}
// ExpectEmpty expects actual is empty
//
// Deprecated: use gomega.Expect().To(gomega.BeEmpty()) instead
func ExpectEmpty(actual interface{}, explain ...interface{}) {
gomega.ExpectWithOffset(1, actual).To(gomega.BeEmpty(), explain...)
}

View File

@ -17,13 +17,73 @@ limitations under the License.
package framework
import (
"fmt"
"path"
"reflect"
"regexp"
"slices"
"strings"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2/types"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/featuregate"
)
// Feature is the name of a certain feature that the cluster under test must have.
// Such features are different from feature gates.
type Feature string
// Environment is the name for the environment in which a test can run, like
// "Linux" or "Windows".
type Environment string
// NodeFeature is the name of a feature that a node must support. To be
// removed, see
// https://github.com/kubernetes/enhancements/tree/master/keps/sig-testing/3041-node-conformance-and-features#nodefeature.
type NodeFeature string
type Valid[T comparable] struct {
items sets.Set[T]
frozen bool
}
// Add registers a new valid item name. The expected usage is
//
// var SomeFeature = framework.ValidFeatures.Add("Some")
//
// during the init phase of an E2E suite. Individual tests should not register
// their own, to avoid uncontrolled proliferation of new items. E2E suites can,
// but don't have to, enforce that by freezing the set of valid names.
func (v *Valid[T]) Add(item T) T {
if v.frozen {
RecordBug(NewBug(fmt.Sprintf(`registry %T is already frozen, "%v" must not be added anymore`, *v, item), 1))
}
if v.items == nil {
v.items = sets.New[T]()
}
if v.items.Has(item) {
RecordBug(NewBug(fmt.Sprintf(`registry %T already contains "%v", it must not be added again`, *v, item), 1))
}
v.items.Insert(item)
return item
}
func (v *Valid[T]) Freeze() {
v.frozen = true
}
// These variables contain the parameters that [WithFeature], [WithEnvironment]
// and [WithNodeFeatures] accept. The framework itself has no pre-defined
// constants. Test suites and tests may define their own and then add them here
// before calling these With functions.
var (
ValidFeatures Valid[Feature]
ValidEnvironments Valid[Environment]
ValidNodeFeatures Valid[NodeFeature]
)
var errInterface = reflect.TypeOf((*error)(nil)).Elem()
@ -65,8 +125,433 @@ func AnnotatedLocationWithOffset(annotation string, offset int) types.CodeLocati
return codeLocation
}
// ConformanceIt is wrapper function for ginkgo It. Adds "[Conformance]" tag and makes static analysis easier.
func ConformanceIt(text string, args ...interface{}) bool {
args = append(args, ginkgo.Offset(1))
return ginkgo.It(text+" [Conformance]", args...)
// SIGDescribe returns a wrapper function for ginkgo.Describe which injects
// the SIG name as annotation. The parameter should be lowercase with
// no spaces and no sig- or SIG- prefix.
func SIGDescribe(sig string) func(...interface{}) bool {
if !sigRE.MatchString(sig) || strings.HasPrefix(sig, "sig-") {
RecordBug(NewBug(fmt.Sprintf("SIG label must be lowercase, no spaces and no sig- prefix, got instead: %q", sig), 1))
}
return func(args ...interface{}) bool {
args = append([]interface{}{WithLabel("sig-" + sig)}, args...)
return registerInSuite(ginkgo.Describe, args)
}
}
var sigRE = regexp.MustCompile(`^[a-z]+(-[a-z]+)*$`)
// ConformanceIt is wrapper function for ginkgo It. Adds "[Conformance]" tag and makes static analysis easier.
func ConformanceIt(args ...interface{}) bool {
args = append(args, ginkgo.Offset(1), WithConformance())
return It(args...)
}
// It is a wrapper around [ginkgo.It] which supports framework With* labels as
// optional arguments in addition to those already supported by ginkgo itself,
// like [ginkgo.Label] and [ginkgo.Offset].
//
// Text and arguments may be mixed. The final text is a concatenation
// of the text arguments and special tags from the With functions.
func It(args ...interface{}) bool {
return registerInSuite(ginkgo.It, args)
}
// It is a shorthand for the corresponding package function.
func (f *Framework) It(args ...interface{}) bool {
return registerInSuite(ginkgo.It, args)
}
// Describe is a wrapper around [ginkgo.Describe] which supports framework
// With* labels as optional arguments in addition to those already supported by
// ginkgo itself, like [ginkgo.Label] and [ginkgo.Offset].
//
// Text and arguments may be mixed. The final text is a concatenation
// of the text arguments and special tags from the With functions.
func Describe(args ...interface{}) bool {
return registerInSuite(ginkgo.Describe, args)
}
// Describe is a shorthand for the corresponding package function.
func (f *Framework) Describe(args ...interface{}) bool {
return registerInSuite(ginkgo.Describe, args)
}
// Context is a wrapper around [ginkgo.Context] which supports framework With*
// labels as optional arguments in addition to those already supported by
// ginkgo itself, like [ginkgo.Label] and [ginkgo.Offset].
//
// Text and arguments may be mixed. The final text is a concatenation
// of the text arguments and special tags from the With functions.
func Context(args ...interface{}) bool {
return registerInSuite(ginkgo.Context, args)
}
// Context is a shorthand for the corresponding package function.
func (f *Framework) Context(args ...interface{}) bool {
return registerInSuite(ginkgo.Context, args)
}
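For illustration only: a sketch of registering a spec with the wrappers above. The SIG name, test text, and labels are invented, and NewDefaultFramework is assumed to come from the wider framework package rather than this file.

package e2e

import (
	"context"

	"github.com/onsi/ginkgo/v2"

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = framework.SIGDescribe("node")("kubelet restart", func() {
	f := framework.NewDefaultFramework("kubelet-restart") // illustrative base name

	f.It("should keep running pods", f.WithSerial(), f.WithDisruptive(), func(ctx context.Context) {
		ginkgo.By("restarting the kubelet and checking pod status")
		// test body omitted in this sketch
	})
})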
// registerInSuite is the common implementation of all wrapper functions. It
// expects to be called through one intermediate wrapper.
func registerInSuite(ginkgoCall func(string, ...interface{}) bool, args []interface{}) bool {
var ginkgoArgs []interface{}
var offset ginkgo.Offset
var texts []string
addLabel := func(label string) {
texts = append(texts, fmt.Sprintf("[%s]", label))
ginkgoArgs = append(ginkgoArgs, ginkgo.Label(label))
}
haveEmptyStrings := false
for _, arg := range args {
switch arg := arg.(type) {
case label:
fullLabel := strings.Join(arg.parts, ":")
addLabel(fullLabel)
if arg.extra != "" {
addLabel(arg.extra)
}
if fullLabel == "Serial" {
ginkgoArgs = append(ginkgoArgs, ginkgo.Serial)
}
case ginkgo.Offset:
offset = arg
case string:
if arg == "" {
haveEmptyStrings = true
}
texts = append(texts, arg)
default:
ginkgoArgs = append(ginkgoArgs, arg)
}
}
offset += 2 // This function and its direct caller.
// Now that we have the final offset, we can record bugs.
if haveEmptyStrings {
RecordBug(NewBug("empty strings as separators are unnecessary and need to be removed", int(offset)))
}
// Enforce that text snippets do not start or end with spaces because
// those lead to double spaces when concatenating below.
for _, text := range texts {
if strings.HasPrefix(text, " ") || strings.HasSuffix(text, " ") {
RecordBug(NewBug(fmt.Sprintf("trailing or leading spaces are unnecessary and need to be removed: %q", text), int(offset)))
}
}
ginkgoArgs = append(ginkgoArgs, offset)
text := strings.Join(texts, " ")
return ginkgoCall(text, ginkgoArgs...)
}
var (
tagRe = regexp.MustCompile(`\[.*?\]`)
deprecatedTags = sets.New("Conformance", "NodeConformance", "Disruptive", "Serial", "Slow")
deprecatedTagPrefixes = sets.New("Environment", "Feature", "NodeFeature", "FeatureGate")
deprecatedStability = sets.New("Alpha", "Beta")
)
// validateSpecs checks that the test specs were registered as intended.
func validateSpecs(specs types.SpecReports) {
checked := sets.New[call]()
for _, spec := range specs {
for i, text := range spec.ContainerHierarchyTexts {
c := call{
text: text,
location: spec.ContainerHierarchyLocations[i],
}
if checked.Has(c) {
// No need to check the same container more than once.
continue
}
checked.Insert(c)
validateText(c.location, text, spec.ContainerHierarchyLabels[i])
}
c := call{
text: spec.LeafNodeText,
location: spec.LeafNodeLocation,
}
if !checked.Has(c) {
validateText(spec.LeafNodeLocation, spec.LeafNodeText, spec.LeafNodeLabels)
checked.Insert(c)
}
}
}
// call acts as (mostly) unique identifier for a container node call like
// Describe or Context. It's not perfect because theoretically a line might
// have multiple calls with the same text, but that isn't a problem in
// practice.
type call struct {
text string
location types.CodeLocation
}
// validateText checks for some known tags that should not be added through the
// plain text strings anymore. Eventually, all such tags should get replaced
// with the new APIs.
func validateText(location types.CodeLocation, text string, labels []string) {
for _, tag := range tagRe.FindAllString(text, -1) {
if tag == "[]" {
recordTextBug(location, "[] in plain text is invalid")
continue
}
// Strip square brackets.
tag = tag[1 : len(tag)-1]
if slices.Contains(labels, tag) {
// Okay, was also set as label.
continue
}
if deprecatedTags.Has(tag) {
recordTextBug(location, fmt.Sprintf("[%s] in plain text is deprecated and must be added through With%s instead", tag, tag))
}
if deprecatedStability.Has(tag) {
recordTextBug(location, fmt.Sprintf("[%s] in plain text is deprecated and must be added by defining the feature gate through WithFeatureGate instead", tag))
}
if index := strings.Index(tag, ":"); index > 0 {
prefix := tag[:index]
if deprecatedTagPrefixes.Has(prefix) {
recordTextBug(location, fmt.Sprintf("[%s] in plain text is deprecated and must be added through With%s(%s) instead", tag, prefix, tag[index+1:]))
}
}
}
}
func recordTextBug(location types.CodeLocation, message string) {
RecordBug(Bug{FileName: location.FileName, LineNumber: location.LineNumber, Message: message})
}
// WithFeature specifies that a certain test or group of tests only works
// with a feature available. The return value must be passed as additional
// argument to [framework.It], [framework.Describe], [framework.Context].
//
// The feature must be listed in ValidFeatures.
func WithFeature(name Feature) interface{} {
return withFeature(name)
}
// WithFeature is a shorthand for the corresponding package function.
func (f *Framework) WithFeature(name Feature) interface{} {
return withFeature(name)
}
func withFeature(name Feature) interface{} {
if !ValidFeatures.items.Has(name) {
RecordBug(NewBug(fmt.Sprintf("WithFeature: unknown feature %q", name), 2))
}
return newLabel("Feature", string(name))
}
// WithFeatureGate specifies that a certain test or group of tests depends on a
// feature gate being enabled. The return value must be passed as additional
// argument to [framework.It], [framework.Describe], [framework.Context].
//
// The feature gate must be listed in
// [k8s.io/apiserver/pkg/util/feature.DefaultMutableFeatureGate]. Once a
// feature gate gets removed from there, the WithFeatureGate calls using it
// also need to be removed.
func WithFeatureGate(featureGate featuregate.Feature) interface{} {
return withFeatureGate(featureGate)
}
// WithFeatureGate is a shorthand for the corresponding package function.
func (f *Framework) WithFeatureGate(featureGate featuregate.Feature) interface{} {
return withFeatureGate(featureGate)
}
func withFeatureGate(featureGate featuregate.Feature) interface{} {
spec, ok := utilfeature.DefaultMutableFeatureGate.GetAll()[featureGate]
if !ok {
RecordBug(NewBug(fmt.Sprintf("WithFeatureGate: the feature gate %q is unknown", featureGate), 2))
}
// We use mixed case (i.e. Beta instead of BETA). GA feature gates have no level string.
var level string
if spec.PreRelease != "" {
level = string(spec.PreRelease)
level = strings.ToUpper(level[0:1]) + strings.ToLower(level[1:])
}
l := newLabel("FeatureGate", string(featureGate))
l.extra = level
return l
}
// WithEnvironment specifies that a certain test or group of tests only works
// in a certain environment. The return value must be passed as additional
// argument to [framework.It], [framework.Describe], [framework.Context].
//
// The environment must be listed in ValidEnvironments.
func WithEnvironment(name Environment) interface{} {
return withEnvironment(name)
}
// WithEnvironment is a shorthand for the corresponding package function.
func (f *Framework) WithEnvironment(name Environment) interface{} {
return withEnvironment(name)
}
func withEnvironment(name Environment) interface{} {
if !ValidEnvironments.items.Has(name) {
RecordBug(NewBug(fmt.Sprintf("WithEnvironment: unknown environment %q", name), 2))
}
return newLabel("Environment", string(name))
}
// WithNodeFeature specifies that a certain test or group of tests only works
// if the node supports a certain feature. The return value must be passed as
// additional argument to [framework.It], [framework.Describe],
// [framework.Context].
//
// The node feature must be listed in ValidNodeFeatures.
func WithNodeFeature(name NodeFeature) interface{} {
return withNodeFeature(name)
}
// WithNodeFeature is a shorthand for the corresponding package function.
func (f *Framework) WithNodeFeature(name NodeFeature) interface{} {
return withNodeFeature(name)
}
func withNodeFeature(name NodeFeature) interface{} {
if !ValidNodeFeatures.items.Has(name) {
RecordBug(NewBug(fmt.Sprintf("WithNodeFeature: unknown environment %q", name), 2))
}
return newLabel("NodeFeature", string(name))
}
// WithConformance specifies that a certain test or group of tests must pass in
// all conformant Kubernetes clusters. The return value must be passed as
// additional argument to [framework.It], [framework.Describe],
// [framework.Context].
func WithConformance() interface{} {
return withConformance()
}
// WithConformance is a shorthand for the corresponding package function.
func (f *Framework) WithConformance() interface{} {
return withConformance()
}
func withConformance() interface{} {
return newLabel("Conformance")
}
// WithNodeConformance specifies that a certain test or group of tests covers node
// functionality that does not depend on runtime or Kubernetes distro specific
// behavior. The return value must be passed as additional argument to
// [framework.It], [framework.Describe], [framework.Context].
func WithNodeConformance() interface{} {
return withNodeConformance()
}
// WithNodeConformance is a shorthand for the corresponding package function.
func (f *Framework) WithNodeConformance() interface{} {
return withNodeConformance()
}
func withNodeConformance() interface{} {
return newLabel("NodeConformance")
}
// WithDisruptive specifies that a certain test or group of tests temporarily
// affects the functionality of the Kubernetes cluster. The return value must
// be passed as additional argument to [framework.It], [framework.Describe],
// [framework.Context].
func WithDisruptive() interface{} {
return withDisruptive()
}
// WithDisruptive is a shorthand for the corresponding package function.
func (f *Framework) WithDisruptive() interface{} {
return withDisruptive()
}
func withDisruptive() interface{} {
return newLabel("Disruptive")
}
// WithSerial specifies that a certain test or group of tests must not run in
// parallel with other tests. The return value must be passed as additional
// argument to [framework.It], [framework.Describe], [framework.Context].
//
// Starting with ginkgo v2, serial and parallel tests can be executed in the
// same invocation. Ginkgo itself will ensure that the serial tests run
// sequentially.
func WithSerial() interface{} {
return withSerial()
}
// WithSerial is a shorthand for the corresponding package function.
func (f *Framework) WithSerial() interface{} {
return withSerial()
}
func withSerial() interface{} {
return newLabel("Serial")
}
// WithSlow specifies that a certain test or group of tests takes a long time
// to run. The return value must be passed as additional
// argument to [framework.It], [framework.Describe], [framework.Context].
func WithSlow() interface{} {
return withSlow()
}
// WithSlow is a shorthand for the corresponding package function.
func (f *Framework) WithSlow() interface{} {
return withSlow()
}
func withSlow() interface{} {
return newLabel("Slow")
}
// WithLabel is a wrapper around [ginkgo.Label]. Besides adding an arbitrary
// label to a test, it also injects the label in square brackets into the test
// name.
func WithLabel(label string) interface{} {
return withLabel(label)
}
// WithLabel is a shorthand for the corresponding package function.
func (f *Framework) WithLabel(label string) interface{} {
return withLabel(label)
}
func withLabel(label string) interface{} {
return newLabel(label)
}
type label struct {
// parts get concatenated with ":" to build the full label.
parts []string
// extra is an optional fully-formed extra label.
extra string
}
func newLabel(parts ...string) label {
return label{parts: parts}
}
// TagsEqual can be used to check whether two tags are the same.
// It's safe to compare e.g. the result of WithSlow() against the result
// of WithSerial(), the result will be false. False is also returned
// when a parameter is some completely different value.
func TagsEqual(a, b interface{}) bool {
al, ok := a.(label)
if !ok {
return false
}
bl, ok := b.(label)
if !ok {
return false
}
if al.extra != bl.extra {
return false
}
return slices.Equal(al.parts, bl.parts)
}
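For illustration only: a quick sketch of comparing tags with TagsEqual as described above.

package main

import (
	"fmt"

	"k8s.io/kubernetes/test/e2e/framework"
)

func main() {
	serial := framework.WithSerial()
	slow := framework.WithSlow()

	fmt.Println(framework.TagsEqual(serial, framework.WithSerial())) // true: same label
	fmt.Println(framework.TagsEqual(serial, slow))                   // false: different labels
	fmt.Println(framework.TagsEqual(serial, "Serial"))               // false: not a label value
}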

View File

@ -36,6 +36,10 @@ func WriteJUnitReport(report ginkgo.Report, filename string) error {
// both, then tools like kettle and spyglass would concatenate
// the two strings and thus show duplicated information.
OmitFailureMessageAttr: true,
// All labels are also part of the spec texts in inline [] tags,
// so we don't need to write them separately.
OmitSpecLabels: true,
}
return reporters.GenerateJUnitReportWithConfig(report, filename, config)

View File

@ -181,21 +181,34 @@ func (g *Grabber) GrabFromKubelet(ctx context.Context, nodeName string) (Kubelet
return KubeletMetrics{}, fmt.Errorf("Error listing nodes with name %v, got %v", nodeName, nodes.Items)
}
kubeletPort := nodes.Items[0].Status.DaemonEndpoints.KubeletEndpoint.Port
return g.grabFromKubeletInternal(ctx, nodeName, int(kubeletPort))
return g.grabFromKubeletInternal(ctx, nodeName, int(kubeletPort), "metrics")
}
func (g *Grabber) grabFromKubeletInternal(ctx context.Context, nodeName string, kubeletPort int) (KubeletMetrics, error) {
// GrabResourceMetricsFromKubelet returns resource metrics from kubelet
func (g *Grabber) GrabResourceMetricsFromKubelet(ctx context.Context, nodeName string) (KubeletMetrics, error) {
nodes, err := g.client.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": nodeName}.AsSelector().String()})
if err != nil {
return KubeletMetrics{}, err
}
if len(nodes.Items) != 1 {
return KubeletMetrics{}, fmt.Errorf("Error listing nodes with name %v, got %v", nodeName, nodes.Items)
}
kubeletPort := nodes.Items[0].Status.DaemonEndpoints.KubeletEndpoint.Port
return g.grabFromKubeletInternal(ctx, nodeName, int(kubeletPort), "metrics/resource")
}
func (g *Grabber) grabFromKubeletInternal(ctx context.Context, nodeName string, kubeletPort int, pathSuffix string) (KubeletMetrics, error) {
if kubeletPort <= 0 || kubeletPort > 65535 {
return KubeletMetrics{}, fmt.Errorf("Invalid Kubelet port %v. Skipping Kubelet's metrics gathering", kubeletPort)
}
output, err := g.getMetricsFromNode(ctx, nodeName, int(kubeletPort))
output, err := g.getMetricsFromNode(ctx, nodeName, int(kubeletPort), pathSuffix)
if err != nil {
return KubeletMetrics{}, err
}
return parseKubeletMetrics(output)
}
func (g *Grabber) getMetricsFromNode(ctx context.Context, nodeName string, kubeletPort int) (string, error) {
func (g *Grabber) getMetricsFromNode(ctx context.Context, nodeName string, kubeletPort int, pathSuffix string) (string, error) {
// There's a problem with timing out during proxy. Wrapping this in a goroutine to prevent deadlock.
finished := make(chan struct{}, 1)
var err error
@ -205,7 +218,7 @@ func (g *Grabber) getMetricsFromNode(ctx context.Context, nodeName string, kubel
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", nodeName, kubeletPort)).
Suffix("metrics").
Suffix(pathSuffix).
Do(ctx).Raw()
finished <- struct{}{}
}()
@ -239,7 +252,7 @@ func (g *Grabber) GrabFromScheduler(ctx context.Context) (SchedulerMetrics, erro
var lastMetricsFetchErr error
var output string
if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) {
if metricsWaitErr := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
output, lastMetricsFetchErr = g.getSecureMetricsFromPod(ctx, g.kubeScheduler, metav1.NamespaceSystem, kubeSchedulerPort)
return lastMetricsFetchErr == nil, nil
}); metricsWaitErr != nil {
@ -290,7 +303,7 @@ func (g *Grabber) GrabFromControllerManager(ctx context.Context) (ControllerMana
var output string
var lastMetricsFetchErr error
if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) {
if metricsWaitErr := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
output, lastMetricsFetchErr = g.getSecureMetricsFromPod(ctx, g.kubeControllerManager, metav1.NamespaceSystem, kubeControllerManagerPort)
return lastMetricsFetchErr == nil, nil
}); metricsWaitErr != nil {
@ -329,7 +342,7 @@ func (g *Grabber) GrabFromSnapshotController(ctx context.Context, podName string
var output string
var lastMetricsFetchErr error
if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) {
if metricsWaitErr := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
output, lastMetricsFetchErr = g.getMetricsFromPod(ctx, g.client, podName, metav1.NamespaceSystem, port)
return lastMetricsFetchErr == nil, nil
}); metricsWaitErr != nil {
@ -432,7 +445,7 @@ func (g *Grabber) Grab(ctx context.Context) (Collection, error) {
} else {
for _, node := range nodes.Items {
kubeletPort := node.Status.DaemonEndpoints.KubeletEndpoint.Port
metrics, err := g.grabFromKubeletInternal(ctx, node.Name, int(kubeletPort))
metrics, err := g.grabFromKubeletInternal(ctx, node.Name, int(kubeletPort), "metrics")
if err != nil {
errs = append(errs, err)
}


@ -22,6 +22,7 @@ import (
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -63,7 +64,7 @@ func ExpectNodeHasLabel(ctx context.Context, c clientset.Interface, nodeName str
ginkgo.By("verifying the node has the label " + labelKey + " " + labelValue)
node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(node.Labels[labelKey], labelValue)
gomega.Expect(node.Labels).To(gomega.HaveKeyWithValue(labelKey, labelValue))
}
// RemoveLabelOffNode is for cleaning up labels temporarily added to node,
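The replacement of framework.ExpectEqual with direct gomega matchers recurs throughout these files. A small, hedged illustration of the new style in an ordinary Go test (the label key and value are made up):

package example

import (
	"testing"

	"github.com/onsi/gomega"
)

// TestLabelAssertion shows the matcher-based style: HaveKeyWithValue replaces
// the old framework.ExpectEqual(node.Labels[key], value) comparison.
func TestLabelAssertion(t *testing.T) {
	g := gomega.NewWithT(t)
	labels := map[string]string{"topology.kubernetes.io/zone": "zone-a"} // hypothetical data
	g.Expect(labels).To(gomega.HaveKeyWithValue("topology.kubernetes.io/zone", "zone-a"))
	g.Expect(labels).To(gomega.HaveLen(1))
}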
@ -120,7 +121,7 @@ func allNodesReady(ctx context.Context, c clientset.Interface, timeout time.Dura
framework.Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, framework.TestContext.AllowedNotReadyNodes)
var notReady []*v1.Node
err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(ctx, framework.Poll, timeout, true, func(ctx context.Context) (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
@ -141,7 +142,7 @@ func allNodesReady(ctx context.Context, c clientset.Interface, timeout time.Dura
return len(notReady) <= framework.TestContext.AllowedNotReadyNodes, nil
})
if err != nil && err != wait.ErrWaitTimeout {
if err != nil && !wait.Interrupted(err) {
return err
}
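This hunk shows both halves of the polling migration: wait.PollUntilContextTimeout takes an explicit immediate flag (true matches the old PollImmediateWithContext), and wait.Interrupted(err) replaces direct comparison with wait.ErrWaitTimeout. A compact sketch of the pattern with placeholder interval and timeout values:

package example

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForCondition polls check until it returns true, the timeout expires, or
// the context is cancelled. A timeout or cancellation is swallowed here and
// left for the caller to detect through its own state, mirroring the e2e helpers.
func waitForCondition(ctx context.Context, check func(context.Context) (bool, error)) error {
	err := wait.PollUntilContextTimeout(ctx, 2*time.Second, time.Minute, true, check)
	if err != nil && !wait.Interrupted(err) {
		return err
	}
	return nil
}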


@ -36,7 +36,7 @@ func WaitForSSHTunnels(ctx context.Context, namespace string) {
defer e2ekubectl.RunKubectl(namespace, "delete", "pod", "ssh-tunnel-test")
// allow up to a minute for new ssh tunnels to establish
wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, func(ctx context.Context) (bool, error) {
wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, func(ctx context.Context) (bool, error) {
_, err := e2ekubectl.RunKubectl(namespace, "logs", "ssh-tunnel-test")
return err == nil, nil
})


@ -51,7 +51,7 @@ func WaitForTotalHealthy(ctx context.Context, c clientset.Interface, timeout tim
var notReady []v1.Node
var missingPodsPerNode map[string][]string
err := wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(ctx, poll, timeout, true, func(ctx context.Context) (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{ResourceVersion: "0"})
@ -96,7 +96,7 @@ func WaitForTotalHealthy(ctx context.Context, c clientset.Interface, timeout tim
return len(notReady) == 0 && len(missingPodsPerNode) == 0, nil
})
if err != nil && err != wait.ErrWaitTimeout {
if err != nil && !wait.Interrupted(err) {
return err
}
@ -192,7 +192,7 @@ func CheckReady(ctx context.Context, c clientset.Interface, size int, timeout ti
func waitListSchedulableNodes(ctx context.Context, c clientset.Interface) (*v1.NodeList, error) {
var nodes *v1.NodeList
var err error
if wait.PollImmediateWithContext(ctx, poll, singleCallTimeout, func(ctx context.Context) (bool, error) {
if wait.PollUntilContextTimeout(ctx, poll, singleCallTimeout, true, func(ctx context.Context) (bool, error) {
nodes, err = c.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})


@ -24,8 +24,8 @@ import (
)
// Get creates a function which retrieves the pod anew each time the function
// is called. Fatal errors are detected by framework.HandleRetry and cause
// is called. Fatal errors are detected by framework.GetObject and cause
// polling to stop.
func Get(c clientset.Interface, pod framework.NamedObject) framework.GetFunc[*v1.Pod] {
return framework.HandleRetry(framework.GetObject(c.CoreV1().Pods(pod.GetNamespace()).Get, pod.GetName(), metav1.GetOptions{}))
return framework.GetObject(c.CoreV1().Pods(pod.GetNamespace()).Get, pod.GetName(), metav1.GetOptions{})
}


@ -297,7 +297,7 @@ func DeletePVCandValidatePVGroup(ctx context.Context, c clientset.Interface, tim
func createPV(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
var resultPV *v1.PersistentVolume
var lastCreateErr error
err := wait.PollImmediateWithContext(ctx, 29*time.Second, timeouts.PVCreate, func(ctx context.Context) (done bool, err error) {
err := wait.PollUntilContextTimeout(ctx, 29*time.Second, timeouts.PVCreate, true, func(ctx context.Context) (done bool, err error) {
resultPV, lastCreateErr = c.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{})
if lastCreateErr != nil {
// If we hit a quota problem, we are not done and should retry again. This happens to be the quota failure string for GCP.
@ -648,7 +648,7 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P
Spec: v1.PersistentVolumeClaimSpec{
Selector: cfg.Selector,
AccessModes: cfg.AccessModes,
Resources: v1.ResourceRequirements{
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse(cfg.ClaimSize),
},
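The claim spec's Resources field now uses v1.VolumeResourceRequirements, which, unlike ResourceRequirements, has only Requests and Limits. A minimal sketch of building such a claim (the name and size are placeholders):

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// examplePVC builds a claim using the VolumeResourceRequirements type that
// replaces ResourceRequirements in PersistentVolumeClaimSpec.
func examplePVC() *v1.PersistentVolumeClaim {
	return &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "example-claim"},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			Resources: v1.VolumeResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceStorage: resource.MustParse("1Gi"),
				},
			},
		},
	}
}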


@ -86,6 +86,11 @@ func GetSigner(provider string) (ssh.Signer, error) {
if keyfile == "" {
keyfile = "id_rsa"
}
case "azure":
keyfile = os.Getenv("AZURE_SSH_KEY")
if keyfile == "" {
keyfile = "id_rsa"
}
default:
return nil, fmt.Errorf("GetSigner(...) not implemented for %s", provider)
}
@ -422,7 +427,7 @@ func nodeAddresses(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string
func waitListSchedulableNodes(ctx context.Context, c clientset.Interface) (*v1.NodeList, error) {
var nodes *v1.NodeList
var err error
if wait.PollImmediateWithContext(ctx, pollNodeInterval, singleCallTimeout, func(ctx context.Context) (bool, error) {
if wait.PollUntilContextTimeout(ctx, pollNodeInterval, singleCallTimeout, true, func(ctx context.Context) (bool, error) {
nodes, err = c.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})


@ -23,9 +23,11 @@ import (
"errors"
"flag"
"fmt"
"io"
"math"
"os"
"path"
"path/filepath"
"sort"
"strings"
"time"
@ -36,6 +38,7 @@ import (
"github.com/onsi/gomega"
gomegaformat "github.com/onsi/gomega/format"
"k8s.io/apimachinery/pkg/util/sets"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
cliflag "k8s.io/component-base/cli/flag"
@ -53,6 +56,20 @@ const (
DefaultNumNodes = -1
)
var (
// Output is used for output when not running tests, for example in -list-tests.
// Test output should go to ginkgo.GinkgoWriter.
Output io.Writer = os.Stdout
// Exit is called when the framework detects fatal errors or when
// it is done with the execution of e.g. -list-tests.
Exit = os.Exit
// CheckForBugs determines whether the framework bails out when
// test initialization found any bugs.
CheckForBugs = true
)
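These package-level hooks let the framework's own tests and list modes redirect output and intercept exits. A hedged sketch of overriding them, assuming the usual k8s.io/kubernetes/test/e2e/framework import path:

package frameworktest

import (
	"bytes"
	"testing"

	"k8s.io/kubernetes/test/e2e/framework"
)

// TestCaptureExit swaps the hooks so that output lands in a buffer and Exit
// records the code instead of terminating the test process. This is only an
// illustration; real callers restore the hooks afterwards.
func TestCaptureExit(t *testing.T) {
	var buf bytes.Buffer
	exitCode := -1
	framework.Output = &buf
	framework.Exit = func(code int) { exitCode = code }

	// ... exercise code that writes to framework.Output or calls framework.Exit ...

	t.Logf("captured output: %q, exit code: %d", buf.String(), exitCode)
}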
// TestContextType contains test settings and global state. Due to
// historic reasons, it is a mixture of items managed by the test
// framework itself, cloud providers and individual tests.
@ -82,18 +99,21 @@ const (
// Test suite authors can use framework/viper to make all command line
// parameters also configurable via a configuration file.
type TestContextType struct {
KubeConfig string
KubeContext string
KubeAPIContentType string
KubeletRootDir string
CertDir string
Host string
BearerToken string `datapolicy:"token"`
KubeConfig string
KubeContext string
KubeAPIContentType string
KubeletRootDir string
KubeletConfigDropinDir string
CertDir string
Host string
BearerToken string `datapolicy:"token"`
// TODO: Deprecating this over time... instead just use gobindata_util.go , see #23987.
RepoRoot string
// ListImages will list off all images that are used then quit
ListImages bool
listTests, listLabels bool
// ListConformanceTests will list off all conformance tests that are available then quit
ListConformanceTests bool
@ -356,6 +376,8 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
flags.StringVar(&TestContext.NonblockingTaints, "non-blocking-taints", `node-role.kubernetes.io/control-plane`, "Nodes with taints in this comma-delimited list will not block the test framework from starting tests.")
flags.BoolVar(&TestContext.ListImages, "list-images", false, "If true, will show list of images used for running tests.")
flags.BoolVar(&TestContext.listLabels, "list-labels", false, "If true, will show the list of labels that can be used to select tests via -ginkgo.label-filter.")
flags.BoolVar(&TestContext.listTests, "list-tests", false, "If true, will show the full names of all tests (aka specs) that can be used to select test via -ginkgo.focus/skip.")
flags.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.")
flags.StringVar(&TestContext.ProgressReportURL, "progress-report-url", "", "The URL to POST progress updates to as the suite runs to assist in aiding integrations. If empty, no messages sent.")
@ -482,7 +504,7 @@ func AfterReadingAllFlags(t *TestContextType) {
for _, v := range image.GetImageConfigs() {
fmt.Println(v.GetE2EImage())
}
os.Exit(0)
Exit(0)
}
// Reconfigure gomega defaults. The poll interval should be suitable
@ -494,6 +516,20 @@ func AfterReadingAllFlags(t *TestContextType) {
gomega.SetDefaultEventuallyTimeout(t.timeouts.PodStart)
gomega.SetDefaultConsistentlyDuration(t.timeouts.PodStartShort)
// ginkgo.PreviewSpecs will expand all nodes and thus may find new bugs.
report := ginkgo.PreviewSpecs("Kubernetes e2e test statistics")
validateSpecs(report.SpecReports)
if err := FormatBugs(); CheckForBugs && err != nil {
// Refuse to do anything if the E2E suite is buggy.
fmt.Fprint(Output, "ERROR: E2E suite initialization was faulty, these errors must be fixed:")
fmt.Fprint(Output, "\n"+err.Error())
Exit(1)
}
if t.listLabels || t.listTests {
listTestInformation(report)
Exit(0)
}
// Only set a default host if one won't be supplied via kubeconfig
if len(t.Host) == 0 && len(t.KubeConfig) == 0 {
// Check if we can use the in-cluster config
@ -553,7 +589,7 @@ func AfterReadingAllFlags(t *TestContextType) {
} else {
klog.Errorf("Failed to setup provider config for %q: %v", TestContext.Provider, err)
}
os.Exit(1)
Exit(1)
}
if TestContext.ReportDir != "" {
@ -563,13 +599,13 @@ func AfterReadingAllFlags(t *TestContextType) {
// in parallel, so we will get "exists" error in most of them.
if err := os.MkdirAll(TestContext.ReportDir, 0777); err != nil && !os.IsExist(err) {
klog.Errorf("Create report dir: %v", err)
os.Exit(1)
Exit(1)
}
ginkgoDir := path.Join(TestContext.ReportDir, "ginkgo")
if TestContext.ReportCompleteGinkgo || TestContext.ReportCompleteJUnit {
if err := os.MkdirAll(ginkgoDir, 0777); err != nil && !os.IsExist(err) {
klog.Errorf("Create <report-dir>/ginkgo: %v", err)
os.Exit(1)
Exit(1)
}
}
@ -600,3 +636,47 @@ func AfterReadingAllFlags(t *TestContextType) {
})
}
}
func listTestInformation(report ginkgo.Report) {
indent := strings.Repeat(" ", 4)
if TestContext.listLabels {
labels := sets.New[string]()
for _, spec := range report.SpecReports {
if spec.LeafNodeType == types.NodeTypeIt {
labels.Insert(spec.Labels()...)
}
}
fmt.Fprintf(Output, "The following labels can be used with 'gingko run --label-filter':\n%s%s\n\n", indent, strings.Join(sets.List(labels), "\n"+indent))
}
if TestContext.listTests {
leafs := make([][]string, 0, len(report.SpecReports))
wd, _ := os.Getwd()
for _, spec := range report.SpecReports {
if spec.LeafNodeType == types.NodeTypeIt {
leafs = append(leafs, []string{fmt.Sprintf("%s:%d: ", relativePath(wd, spec.LeafNodeLocation.FileName), spec.LeafNodeLocation.LineNumber), spec.FullText()})
}
}
// Sort by test name, not the source code location, because the test
// name is more stable across code refactoring.
sort.Slice(leafs, func(i, j int) bool {
return leafs[i][1] < leafs[j][1]
})
fmt.Fprint(Output, "The following spec names can be used with 'ginkgo run --focus/skip':\n")
for _, leaf := range leafs {
fmt.Fprintf(Output, "%s%s%s\n", indent, leaf[0], leaf[1])
}
fmt.Fprint(Output, "\n")
}
}
func relativePath(wd, path string) string {
if wd == "" {
return path
}
relpath, err := filepath.Rel(wd, path)
if err != nil {
return path
}
return relpath
}


@ -39,6 +39,7 @@ var defaultTimeouts = TimeoutContext{
SystemPodsStartup: 10 * time.Minute,
NodeSchedulable: 30 * time.Minute,
SystemDaemonsetStartup: 5 * time.Minute,
NodeNotReady: 3 * time.Minute,
}
// TimeoutContext contains timeout settings for several actions.
@ -106,6 +107,9 @@ type TimeoutContext struct {
// SystemDaemonsetStartup is how long to wait for all system daemonsets to be ready.
SystemDaemonsetStartup time.Duration
// NodeNotReady is how long to wait for a node to be not ready.
NodeNotReady time.Duration
}
// NewTimeoutContext returns a TimeoutContext with all values set either to


@ -136,7 +136,7 @@ var (
BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
// ProvidersWithSSH are those providers where each node is accessible with SSH
ProvidersWithSSH = []string{"gce", "gke", "aws", "local"}
ProvidersWithSSH = []string{"gce", "gke", "aws", "local", "azure"}
// ServeHostnameImage is a serve hostname image name.
ServeHostnameImage = imageutils.GetE2EImage(imageutils.Agnhost)
@ -352,7 +352,7 @@ func CreateTestingNS(ctx context.Context, baseName string, c clientset.Interface
}
// Be robust about making the namespace creation call.
var got *v1.Namespace
if err := wait.PollImmediateWithContext(ctx, Poll, 30*time.Second, func(ctx context.Context) (bool, error) {
if err := wait.PollUntilContextTimeout(ctx, Poll, 30*time.Second, true, func(ctx context.Context) (bool, error) {
var err error
got, err = c.CoreV1().Namespaces().Create(ctx, namespaceObj, metav1.CreateOptions{})
if err != nil {
@ -808,7 +808,7 @@ retriesLoop:
if errs.Len() > 0 {
Failf("Unexpected error(s): %v", strings.Join(errs.List(), "\n - "))
}
ExpectEqual(totalValidWatchEvents, len(expectedWatchEvents), "Error: there must be an equal amount of total valid watch events (%d) and expected watch events (%d)", totalValidWatchEvents, len(expectedWatchEvents))
gomega.Expect(expectedWatchEvents).To(gomega.HaveLen(totalValidWatchEvents), "Error: there must be an equal amount of total valid watch events (%d) and expected watch events (%d)", totalValidWatchEvents, len(expectedWatchEvents))
break retriesLoop
}
}


@ -238,7 +238,7 @@ func getVolumeHandle(ctx context.Context, cs clientset.Interface, claimName stri
// WaitForVolumeAttachmentTerminated waits for the VolumeAttachment with the passed in attachmentName to be terminated.
func WaitForVolumeAttachmentTerminated(ctx context.Context, attachmentName string, cs clientset.Interface, timeout time.Duration) error {
waitErr := wait.PollImmediateWithContext(ctx, 10*time.Second, timeout, func(ctx context.Context) (bool, error) {
waitErr := wait.PollUntilContextTimeout(ctx, 10*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
_, err := cs.StorageV1().VolumeAttachments().Get(ctx, attachmentName, metav1.GetOptions{})
if err != nil {
// if the volumeattachment object is not found, it means it has been terminated.
@ -697,7 +697,7 @@ func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exi
if err != nil {
if exiterr, ok := err.(clientexec.ExitError); ok {
actualExitCode := exiterr.ExitStatus()
framework.ExpectEqual(actualExitCode, exitCode,
gomega.Expect(actualExitCode).To(gomega.Equal(exitCode),
"%q should fail with exit code %d, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, actualExitCode, exiterr, stdout, stderr)
} else {
@ -706,5 +706,5 @@ func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exi
shExec, exitCode, err, stdout, stderr)
}
}
framework.ExpectError(err, "%q should fail with exit code %d, but exit without error", shExec, exitCode)
gomega.Expect(err).To(gomega.HaveOccurred(), "%q should fail with exit code %d, but exit without error", shExec, exitCode)
}


@ -16,9 +16,7 @@ limitations under the License.
package utils
import "github.com/onsi/ginkgo/v2"
import "k8s.io/kubernetes/test/e2e/framework"
// SIGDescribe annotates the test with the SIG label.
func SIGDescribe(text string, body func()) bool {
return ginkgo.Describe("[sig-storage] "+text, body)
}
var SIGDescribe = framework.SIGDescribe("storage")


@ -70,8 +70,7 @@ func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string
framework.ExpectNoError(err)
framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
fsGroupResult := strings.Fields(stdout)[3]
framework.ExpectEqual(expectedFSGroup, fsGroupResult,
"Expected fsGroup of %s, got %s", expectedFSGroup, fsGroupResult)
gomega.Expect(expectedFSGroup).To(gomega.Equal(fsGroupResult), "Expected fsGroup of %s, got %s", expectedFSGroup, fsGroupResult)
}
// getKubeletMainPid return the Main PID of the Kubelet Process
@ -141,14 +140,14 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(ctx context.Context, c clie
result, err := e2essh.SSH(ctx, fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
if checkSubpath {
ginkgo.By("Expecting the volume subpath mount to be found.")
result, err := e2essh.SSH(ctx, fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
}
ginkgo.By("Writing to the volume.")
@ -201,7 +200,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(ctx context.Context, c clie
result, err := e2essh.SSH(ctx, fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", secondPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error when checking the second pod.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
ginkgo.By("Testing that written file is accessible in the second pod.")
CheckReadFromPath(f, secondPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
@ -262,13 +261,13 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(ctx context.Context, c client
result, err := e2essh.SSH(ctx, podDirectoryCmd, nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
ginkgo.By("Expecting the symlinks from global map path to be found.")
result, err = e2essh.SSH(ctx, globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected find exit code of 0, got %d", result.Code))
gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected find exit code of 0, got %d", result.Code))
// This command is to make sure kubelet is started after test finishes no matter it fails or not.
ginkgo.DeferCleanup(KubeletCommand, KStart, c, clientPod)
@ -699,7 +698,7 @@ func VerifyFilePathGidInPod(f *framework.Framework, filePath, expectedGid string
framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
ll := strings.Fields(stdout)
framework.Logf("stdout split: %v, expected gid: %v", ll, expectedGid)
framework.ExpectEqual(ll[3], expectedGid)
gomega.Expect(ll[3]).To(gomega.Equal(expectedGid))
}
// ChangeFilePathGidInPod changes the GID of the target filepath.


@ -11,6 +11,8 @@ spec:
metadata:
labels:
app: httpd
annotations:
annotations_app: annotations_httpd
spec:
containers:
- name: httpd


@ -31,6 +31,9 @@ spec:
- name: dev
hostPath:
path: /dev
- name: cdi-dir
hostPath:
path: /var/run/cdi
containers:
- image: registry.k8s.io/e2e-test-images/sample-device-plugin:1.3
name: sample-device-plugin
@ -46,5 +49,7 @@ spec:
mountPath: /var/lib/kubelet/plugins_registry
- name: dev
mountPath: /dev
- name: cdi-dir
mountPath: /var/run/cdi
updateStrategy:
type: RollingUpdate


@ -52,8 +52,8 @@ spec:
# Refer to details about the installer in https://cos.googlesource.com/cos/tools/+/refs/heads/master/src/cmd/cos_gpu_installer/
# and the COS release notes (https://cloud.google.com/container-optimized-os/docs/release-notes) to determine version COS GPU installer for a given version of COS.
# Maps to gcr.io/cos-cloud/cos-gpu-installer:v2.0.27 - suitable for COS M97 as per https://cloud.google.com/container-optimized-os/docs/release-notes
- image: gcr.io/cos-cloud/cos-gpu-installer:v2.0.27
# Maps to gcr.io/cos-cloud/cos-gpu-installer:v2.1.9 - suitable for COS M109 as per https://cloud.google.com/container-optimized-os/docs/release-notes
- image: gcr.io/cos-cloud/cos-gpu-installer:v2.1.9
name: nvidia-driver-installer
resources:
requests:


@ -238,11 +238,11 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config
configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"}
configs[APIServer] = Config{list.PromoterE2eRegistry, "sample-apiserver", "1.17.7"}
configs[AppArmorLoader] = Config{list.PromoterE2eRegistry, "apparmor-loader", "1.4"}
configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.29-4"}
configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.36.1-1"}
configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"}
configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.3"}
configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.2.7"}
configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.9-0"}
configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.4.3"}
configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.10-0"}
configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"}
configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"}
configs[InvalidRegistryImage] = Config{list.InvalidRegistry, "alpine", "3.1"}


@ -1502,7 +1502,7 @@ func makeUnboundPersistentVolumeClaim(storageClass string) *v1.PersistentVolumeC
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany},
StorageClassName: &storageClass,
Resources: v1.ResourceRequirements{
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
},


@ -14,12 +14,15 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Deprecated: Use functions in k8s.io/utils/ptr instead: ptr.To to obtain
// a pointer, ptr.Deref to dereference a pointer, ptr.Equal to compare
// dereferenced pointers.
package pointer
import (
"fmt"
"reflect"
"time"
"k8s.io/utils/ptr"
)
// AllPtrFieldsNil tests whether all pointer fields in a struct are nil. This is useful when,
@ -28,383 +31,219 @@ import (
//
// This function is only valid for structs and pointers to structs. Any other
// type will cause a panic. Passing a typed nil pointer will return true.
func AllPtrFieldsNil(obj interface{}) bool {
v := reflect.ValueOf(obj)
if !v.IsValid() {
panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj))
}
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return true
}
v = v.Elem()
}
for i := 0; i < v.NumField(); i++ {
if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() {
return false
}
}
return true
}
//
// Deprecated: Use ptr.AllPtrFieldsNil instead.
var AllPtrFieldsNil = ptr.AllPtrFieldsNil
// Int returns a pointer to an int
func Int(i int) *int {
return &i
}
// Int returns a pointer to an int.
var Int = ptr.To[int]
// IntPtr is a function variable referring to Int.
//
// Deprecated: Use Int instead.
// Deprecated: Use ptr.To instead.
var IntPtr = Int // for back-compat
// IntDeref dereferences the int ptr and returns it if not nil, or else
// returns def.
func IntDeref(ptr *int, def int) int {
if ptr != nil {
return *ptr
}
return def
}
var IntDeref = ptr.Deref[int]
// IntPtrDerefOr is a function variable referring to IntDeref.
//
// Deprecated: Use IntDeref instead.
// Deprecated: Use ptr.Deref instead.
var IntPtrDerefOr = IntDeref // for back-compat
// Int32 returns a pointer to an int32.
func Int32(i int32) *int32 {
return &i
}
var Int32 = ptr.To[int32]
// Int32Ptr is a function variable referring to Int32.
//
// Deprecated: Use Int32 instead.
// Deprecated: Use ptr.To instead.
var Int32Ptr = Int32 // for back-compat
// Int32Deref dereferences the int32 ptr and returns it if not nil, or else
// returns def.
func Int32Deref(ptr *int32, def int32) int32 {
if ptr != nil {
return *ptr
}
return def
}
var Int32Deref = ptr.Deref[int32]
// Int32PtrDerefOr is a function variable referring to Int32Deref.
//
// Deprecated: Use Int32Deref instead.
// Deprecated: Use ptr.Deref instead.
var Int32PtrDerefOr = Int32Deref // for back-compat
// Int32Equal returns true if both arguments are nil or both arguments
// dereference to the same value.
func Int32Equal(a, b *int32) bool {
if (a == nil) != (b == nil) {
return false
}
if a == nil {
return true
}
return *a == *b
}
var Int32Equal = ptr.Equal[int32]
// Uint returns a pointer to an uint
func Uint(i uint) *uint {
return &i
}
var Uint = ptr.To[uint]
// UintPtr is a function variable referring to Uint.
//
// Deprecated: Use Uint instead.
// Deprecated: Use ptr.To instead.
var UintPtr = Uint // for back-compat
// UintDeref dereferences the uint ptr and returns it if not nil, or else
// returns def.
func UintDeref(ptr *uint, def uint) uint {
if ptr != nil {
return *ptr
}
return def
}
var UintDeref = ptr.Deref[uint]
// UintPtrDerefOr is a function variable referring to UintDeref.
//
// Deprecated: Use UintDeref instead.
// Deprecated: Use ptr.Deref instead.
var UintPtrDerefOr = UintDeref // for back-compat
// Uint32 returns a pointer to an uint32.
func Uint32(i uint32) *uint32 {
return &i
}
var Uint32 = ptr.To[uint32]
// Uint32Ptr is a function variable referring to Uint32.
//
// Deprecated: Use Uint32 instead.
// Deprecated: Use ptr.To instead.
var Uint32Ptr = Uint32 // for back-compat
// Uint32Deref dereferences the uint32 ptr and returns it if not nil, or else
// returns def.
func Uint32Deref(ptr *uint32, def uint32) uint32 {
if ptr != nil {
return *ptr
}
return def
}
var Uint32Deref = ptr.Deref[uint32]
// Uint32PtrDerefOr is a function variable referring to Uint32Deref.
//
// Deprecated: Use Uint32Deref instead.
// Deprecated: Use ptr.Deref instead.
var Uint32PtrDerefOr = Uint32Deref // for back-compat
// Uint32Equal returns true if both arguments are nil or both arguments
// dereference to the same value.
func Uint32Equal(a, b *uint32) bool {
if (a == nil) != (b == nil) {
return false
}
if a == nil {
return true
}
return *a == *b
}
var Uint32Equal = ptr.Equal[uint32]
// Int64 returns a pointer to an int64.
func Int64(i int64) *int64 {
return &i
}
var Int64 = ptr.To[int64]
// Int64Ptr is a function variable referring to Int64.
//
// Deprecated: Use Int64 instead.
// Deprecated: Use ptr.To instead.
var Int64Ptr = Int64 // for back-compat
// Int64Deref dereferences the int64 ptr and returns it if not nil, or else
// returns def.
func Int64Deref(ptr *int64, def int64) int64 {
if ptr != nil {
return *ptr
}
return def
}
var Int64Deref = ptr.Deref[int64]
// Int64PtrDerefOr is a function variable referring to Int64Deref.
//
// Deprecated: Use Int64Deref instead.
// Deprecated: Use ptr.Deref instead.
var Int64PtrDerefOr = Int64Deref // for back-compat
// Int64Equal returns true if both arguments are nil or both arguments
// dereference to the same value.
func Int64Equal(a, b *int64) bool {
if (a == nil) != (b == nil) {
return false
}
if a == nil {
return true
}
return *a == *b
}
var Int64Equal = ptr.Equal[int64]
// Uint64 returns a pointer to an uint64.
func Uint64(i uint64) *uint64 {
return &i
}
var Uint64 = ptr.To[uint64]
// Uint64Ptr is a function variable referring to Uint64.
//
// Deprecated: Use Uint64 instead.
// Deprecated: Use ptr.To instead.
var Uint64Ptr = Uint64 // for back-compat
// Uint64Deref dereferences the uint64 ptr and returns it if not nil, or else
// returns def.
func Uint64Deref(ptr *uint64, def uint64) uint64 {
if ptr != nil {
return *ptr
}
return def
}
var Uint64Deref = ptr.Deref[uint64]
// Uint64PtrDerefOr is a function variable referring to Uint64Deref.
//
// Deprecated: Use Uint64Deref instead.
// Deprecated: Use ptr.Deref instead.
var Uint64PtrDerefOr = Uint64Deref // for back-compat
// Uint64Equal returns true if both arguments are nil or both arguments
// dereference to the same value.
func Uint64Equal(a, b *uint64) bool {
if (a == nil) != (b == nil) {
return false
}
if a == nil {
return true
}
return *a == *b
}
var Uint64Equal = ptr.Equal[uint64]
// Bool returns a pointer to a bool.
func Bool(b bool) *bool {
return &b
}
var Bool = ptr.To[bool]
// BoolPtr is a function variable referring to Bool.
//
// Deprecated: Use Bool instead.
// Deprecated: Use ptr.To instead.
var BoolPtr = Bool // for back-compat
// BoolDeref dereferences the bool ptr and returns it if not nil, or else
// returns def.
func BoolDeref(ptr *bool, def bool) bool {
if ptr != nil {
return *ptr
}
return def
}
var BoolDeref = ptr.Deref[bool]
// BoolPtrDerefOr is a function variable referring to BoolDeref.
//
// Deprecated: Use BoolDeref instead.
// Deprecated: Use ptr.Deref instead.
var BoolPtrDerefOr = BoolDeref // for back-compat
// BoolEqual returns true if both arguments are nil or both arguments
// dereference to the same value.
func BoolEqual(a, b *bool) bool {
if (a == nil) != (b == nil) {
return false
}
if a == nil {
return true
}
return *a == *b
}
var BoolEqual = ptr.Equal[bool]
// String returns a pointer to a string.
func String(s string) *string {
return &s
}
var String = ptr.To[string]
// StringPtr is a function variable referring to String.
//
// Deprecated: Use String instead.
// Deprecated: Use ptr.To instead.
var StringPtr = String // for back-compat
// StringDeref dereferences the string ptr and returns it if not nil, or else
// returns def.
func StringDeref(ptr *string, def string) string {
if ptr != nil {
return *ptr
}
return def
}
var StringDeref = ptr.Deref[string]
// StringPtrDerefOr is a function variable referring to StringDeref.
//
// Deprecated: Use StringDeref instead.
// Deprecated: Use ptr.Deref instead.
var StringPtrDerefOr = StringDeref // for back-compat
// StringEqual returns true if both arguments are nil or both arguments
// dereference to the same value.
func StringEqual(a, b *string) bool {
if (a == nil) != (b == nil) {
return false
}
if a == nil {
return true
}
return *a == *b
}
var StringEqual = ptr.Equal[string]
// Float32 returns a pointer to a float32.
func Float32(i float32) *float32 {
return &i
}
var Float32 = ptr.To[float32]
// Float32Ptr is a function variable referring to Float32.
//
// Deprecated: Use Float32 instead.
// Deprecated: Use ptr.To instead.
var Float32Ptr = Float32
// Float32Deref dereferences the float32 ptr and returns it if not nil, or else
// returns def.
func Float32Deref(ptr *float32, def float32) float32 {
if ptr != nil {
return *ptr
}
return def
}
var Float32Deref = ptr.Deref[float32]
// Float32PtrDerefOr is a function variable referring to Float32Deref.
//
// Deprecated: Use Float32Deref instead.
// Deprecated: Use ptr.Deref instead.
var Float32PtrDerefOr = Float32Deref // for back-compat
// Float32Equal returns true if both arguments are nil or both arguments
// dereference to the same value.
func Float32Equal(a, b *float32) bool {
if (a == nil) != (b == nil) {
return false
}
if a == nil {
return true
}
return *a == *b
}
var Float32Equal = ptr.Equal[float32]
// Float64 returns a pointer to a float64.
func Float64(i float64) *float64 {
return &i
}
var Float64 = ptr.To[float64]
// Float64Ptr is a function variable referring to Float64.
//
// Deprecated: Use Float64 instead.
// Deprecated: Use ptr.To instead.
var Float64Ptr = Float64
// Float64Deref dereferences the float64 ptr and returns it if not nil, or else
// returns def.
func Float64Deref(ptr *float64, def float64) float64 {
if ptr != nil {
return *ptr
}
return def
}
var Float64Deref = ptr.Deref[float64]
// Float64PtrDerefOr is a function variable referring to Float64Deref.
//
// Deprecated: Use Float64Deref instead.
// Deprecated: Use ptr.Deref instead.
var Float64PtrDerefOr = Float64Deref // for back-compat
// Float64Equal returns true if both arguments are nil or both arguments
// dereference to the same value.
func Float64Equal(a, b *float64) bool {
if (a == nil) != (b == nil) {
return false
}
if a == nil {
return true
}
return *a == *b
}
var Float64Equal = ptr.Equal[float64]
// Duration returns a pointer to a time.Duration.
func Duration(d time.Duration) *time.Duration {
return &d
}
var Duration = ptr.To[time.Duration]
// DurationDeref dereferences the time.Duration ptr and returns it if not nil, or else
// returns def.
func DurationDeref(ptr *time.Duration, def time.Duration) time.Duration {
if ptr != nil {
return *ptr
}
return def
}
var DurationDeref = ptr.Deref[time.Duration]
// DurationEqual returns true if both arguments are nil or both arguments
// dereference to the same value.
func DurationEqual(a, b *time.Duration) bool {
if (a == nil) != (b == nil) {
return false
}
if a == nil {
return true
}
return *a == *b
}
var DurationEqual = ptr.Equal[time.Duration]
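The whole file above now delegates to the generic k8s.io/utils/ptr package. A brief sketch of the direct replacements (values are arbitrary):

package example

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	// ptr.To replaces the per-type constructors such as pointer.Int32 and pointer.String.
	replicas := ptr.To[int32](3)

	// ptr.Deref replaces pointer.Int32Deref and friends: returns the value, or the default if nil.
	fmt.Println(ptr.Deref(replicas, 1)) // 3

	// ptr.Equal replaces pointer.Int32Equal and friends: nil-safe comparison of pointed-to values.
	fmt.Println(ptr.Equal(replicas, ptr.To[int32](3))) // true
}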

vendor/k8s.io/utils/ptr/OWNERS generated vendored Normal file

@ -0,0 +1,10 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- apelisse
- stewart-yu
- thockin
reviewers:
- apelisse
- stewart-yu
- thockin

vendor/k8s.io/utils/ptr/README.md generated vendored Normal file

@ -0,0 +1,3 @@
# Pointer
This package provides some functions for pointer-based operations.

vendor/k8s.io/utils/ptr/ptr.go generated vendored Normal file

@ -0,0 +1,73 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ptr
import (
"fmt"
"reflect"
)
// AllPtrFieldsNil tests whether all pointer fields in a struct are nil. This is useful when,
// for example, an API struct is handled by plugins which need to distinguish
// "no plugin accepted this spec" from "this spec is empty".
//
// This function is only valid for structs and pointers to structs. Any other
// type will cause a panic. Passing a typed nil pointer will return true.
func AllPtrFieldsNil(obj interface{}) bool {
v := reflect.ValueOf(obj)
if !v.IsValid() {
panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj))
}
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return true
}
v = v.Elem()
}
for i := 0; i < v.NumField(); i++ {
if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() {
return false
}
}
return true
}
// To returns a pointer to the given value.
func To[T any](v T) *T {
return &v
}
// Deref dereferences ptr and returns the value it points to if not nil, or else
// returns def.
func Deref[T any](ptr *T, def T) T {
if ptr != nil {
return *ptr
}
return def
}
// Equal returns true if both arguments are nil or both arguments
// dereference to the same value.
func Equal[T comparable](a, b *T) bool {
if (a == nil) != (b == nil) {
return false
}
if a == nil {
return true
}
return *a == *b
}
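AllPtrFieldsNil is documented above as distinguishing an empty, all-pointer spec from one that a plugin filled in. A small usage sketch with a hypothetical struct:

package example

import (
	"fmt"

	"k8s.io/utils/ptr"
)

// fakeVolumeSource mimics an API struct whose fields are all pointers.
type fakeVolumeSource struct {
	HostPath *string
	NFS      *string
}

func main() {
	fmt.Println(ptr.AllPtrFieldsNil(fakeVolumeSource{}))                          // true: nothing was set
	fmt.Println(ptr.AllPtrFieldsNil(fakeVolumeSource{HostPath: ptr.To("/data")})) // false: one field set
}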