Mirror of https://github.com/ceph/ceph-csi.git
rebase: update k8s.io packages to v0.29.0
Signed-off-by: Niels de Vos <ndevos@ibm.com>
parent 328a264202
commit f080b9e0c9
go.mod (76 lines changed)
@@ -35,13 +35,13 @@ require (
	//
	// when updating k8s.io/kubernetes, make sure to update the replace section too
	//
	k8s.io/api v0.28.4
	k8s.io/apimachinery v0.28.4
	k8s.io/api v0.29.0
	k8s.io/apimachinery v0.29.0
	k8s.io/client-go v12.0.0+incompatible
	k8s.io/cloud-provider v0.28.3
	k8s.io/cloud-provider v0.29.0
	k8s.io/klog/v2 v2.110.1
	k8s.io/kubernetes v1.29.0
	k8s.io/mount-utils v0.28.3
	k8s.io/mount-utils v0.29.0
	k8s.io/pod-security-admission v0.0.0
	k8s.io/utils v0.0.0-20230726121419-3b25d923346b
	sigs.k8s.io/controller-runtime v0.16.3
@@ -89,6 +89,7 @@
	github.com/google/go-cmp v0.6.0 // indirect
	github.com/google/gofuzz v1.2.0 // indirect
	github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
	github.com/gorilla/websocket v1.5.0 // indirect
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect
	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -109,7 +110,7 @@
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/mailru/easyjson v0.7.7 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-isatty v0.0.16 // indirect
	github.com/mattn/go-isatty v0.0.17 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
	github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
	github.com/mitchellh/go-homedir v1.1.0 // indirect
@@ -119,6 +120,7 @@
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
	github.com/opencontainers/go-digest v1.0.0 // indirect
	github.com/opencontainers/selinux v1.11.0 // indirect
	github.com/openshift/api v0.0.0-20230320192226-1fc631efd341 // indirect
@@ -163,11 +165,11 @@
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	k8s.io/apiextensions-apiserver v0.28.3 // indirect
	k8s.io/apiserver v0.28.3 // indirect
	k8s.io/component-base v0.28.3 // indirect
	k8s.io/component-helpers v0.28.3 // indirect
	k8s.io/controller-manager v0.28.3 // indirect
	k8s.io/kms v0.28.3 // indirect
	k8s.io/apiserver v0.29.0 // indirect
	k8s.io/component-base v0.29.0 // indirect
	k8s.io/component-helpers v0.29.0 // indirect
	k8s.io/controller-manager v0.29.0 // indirect
	k8s.io/kms v0.29.0 // indirect
	k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
	k8s.io/kubectl v0.0.0 // indirect
	k8s.io/kubelet v0.0.0 // indirect
@@ -187,33 +189,33 @@ replace (
	//
	// k8s.io/kubernetes depends on these k8s.io packages, but unversioned
	//
	k8s.io/api => k8s.io/api v0.28.3
	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.3
	k8s.io/apimachinery => k8s.io/apimachinery v0.28.3
	k8s.io/apiserver => k8s.io/apiserver v0.28.3
	k8s.io/cli-runtime => k8s.io/cli-runtime v0.28.3
	k8s.io/client-go => k8s.io/client-go v0.28.3
	k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.3
	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.3
	k8s.io/code-generator => k8s.io/code-generator v0.28.3
	k8s.io/component-base => k8s.io/component-base v0.28.3
	k8s.io/component-helpers => k8s.io/component-helpers v0.28.3
	k8s.io/controller-manager => k8s.io/controller-manager v0.28.3
	k8s.io/cri-api => k8s.io/cri-api v0.28.3
	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.3
	k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.28.3
	k8s.io/endpointslice => k8s.io/endpointslice v0.28.3
	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.3
	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.3
	k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.3
	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.3
	k8s.io/kubectl => k8s.io/kubectl v0.28.3
	k8s.io/kubelet => k8s.io/kubelet v0.28.3
	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.3
	k8s.io/metrics => k8s.io/metrics v0.28.3
	k8s.io/mount-utils => k8s.io/mount-utils v0.28.3
	k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.3
	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.3
	k8s.io/api => k8s.io/api v0.29.0
	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.29.0
	k8s.io/apimachinery => k8s.io/apimachinery v0.29.0
	k8s.io/apiserver => k8s.io/apiserver v0.29.0
	k8s.io/cli-runtime => k8s.io/cli-runtime v0.29.0
	k8s.io/client-go => k8s.io/client-go v0.29.0
	k8s.io/cloud-provider => k8s.io/cloud-provider v0.29.0
	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.29.0
	k8s.io/code-generator => k8s.io/code-generator v0.29.0
	k8s.io/component-base => k8s.io/component-base v0.29.0
	k8s.io/component-helpers => k8s.io/component-helpers v0.29.0
	k8s.io/controller-manager => k8s.io/controller-manager v0.29.0
	k8s.io/cri-api => k8s.io/cri-api v0.29.0
	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.29.0
	k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.29.0
	k8s.io/endpointslice => k8s.io/endpointslice v0.29.0
	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.0
	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.0
	k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.0
	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.29.0
	k8s.io/kubectl => k8s.io/kubectl v0.29.0
	k8s.io/kubelet => k8s.io/kubelet v0.29.0
	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.29.0
	k8s.io/metrics => k8s.io/metrics v0.29.0
	k8s.io/mount-utils => k8s.io/mount-utils v0.29.0
	k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.29.0
	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.29.0
	// layeh.com seems to be misbehaving
	layeh.com/radius => github.com/layeh/radius v0.0.0-20190322222518-890bc1058917
)
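The comment in the require block above notes that the replace section must be kept in sync when k8s.io/kubernetes is updated. As a rough illustration of how such a bump is usually performed (these commands are an assumption for illustration, not part of this commit), the require entries, the replace pins, and the vendor tree are refreshed together:

    go get k8s.io/kubernetes@v1.29.0
    # update each k8s.io/* line in the replace block to the matching v0.29.0 tag
    go mod tidy
    go mod vendor

Re-running go mod vendor is what materialises newly required transitive dependencies, such as github.com/gorilla/websocket below, under vendor/.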
vendor/github.com/gorilla/websocket/.gitignore (new file, generated, vendored, 25 lines)
@@ -0,0 +1,25 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe

.idea/
*.iml
vendor/github.com/gorilla/websocket/AUTHORS (new file, generated, vendored, 9 lines)
@@ -0,0 +1,9 @@
# This is the official list of Gorilla WebSocket authors for copyright
# purposes.
#
# Please keep the list sorted.

Gary Burd <gary@beagledreams.com>
Google LLC (https://opensource.google.com/)
Joachim Bauch <mail@joachim-bauch.de>
vendor/github.com/gorilla/websocket/LICENSE (new file, generated, vendored, 22 lines)
@@ -0,0 +1,22 @@
Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
vendor/github.com/gorilla/websocket/README.md (new file, generated, vendored, 39 lines)
@@ -0,0 +1,39 @@
# Gorilla WebSocket

[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket)
[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket)

Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.


---

⚠️ **[The Gorilla WebSocket Package is looking for a new maintainer](https://github.com/gorilla/websocket/issues/370)**

---

### Documentation

* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc)
* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)

### Status

The Gorilla WebSocket package provides a complete and tested implementation of
the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
package API is stable.

### Installation

    go get github.com/gorilla/websocket

### Protocol Compliance

The Gorilla WebSocket package passes the server tests in the [Autobahn Test
Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn
subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
vendor/github.com/gorilla/websocket/client.go (new file, generated, vendored, 422 lines)
@@ -0,0 +1,422 @@
|
||||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptrace"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ErrBadHandshake is returned when the server response to opening handshake is
|
||||
// invalid.
|
||||
var ErrBadHandshake = errors.New("websocket: bad handshake")
|
||||
|
||||
var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
|
||||
|
||||
// NewClient creates a new client connection using the given net connection.
|
||||
// The URL u specifies the host and request URI. Use requestHeader to specify
|
||||
// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
|
||||
// (Cookie). Use the response.Header to get the selected subprotocol
|
||||
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
|
||||
//
|
||||
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
|
||||
// non-nil *http.Response so that callers can handle redirects, authentication,
|
||||
// etc.
|
||||
//
|
||||
// Deprecated: Use Dialer instead.
|
||||
func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
|
||||
d := Dialer{
|
||||
ReadBufferSize: readBufSize,
|
||||
WriteBufferSize: writeBufSize,
|
||||
NetDial: func(net, addr string) (net.Conn, error) {
|
||||
return netConn, nil
|
||||
},
|
||||
}
|
||||
return d.Dial(u.String(), requestHeader)
|
||||
}
|
||||
|
||||
// A Dialer contains options for connecting to WebSocket server.
|
||||
//
|
||||
// It is safe to call Dialer's methods concurrently.
|
||||
type Dialer struct {
|
||||
// NetDial specifies the dial function for creating TCP connections. If
|
||||
// NetDial is nil, net.Dial is used.
|
||||
NetDial func(network, addr string) (net.Conn, error)
|
||||
|
||||
// NetDialContext specifies the dial function for creating TCP connections. If
|
||||
// NetDialContext is nil, NetDial is used.
|
||||
NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error)
|
||||
|
||||
// NetDialTLSContext specifies the dial function for creating TLS/TCP connections. If
|
||||
// NetDialTLSContext is nil, NetDialContext is used.
|
||||
// If NetDialTLSContext is set, Dial assumes the TLS handshake is done there and
|
||||
// TLSClientConfig is ignored.
|
||||
NetDialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error)
|
||||
|
||||
// Proxy specifies a function to return a proxy for a given
|
||||
// Request. If the function returns a non-nil error, the
|
||||
// request is aborted with the provided error.
|
||||
// If Proxy is nil or returns a nil *URL, no proxy is used.
|
||||
Proxy func(*http.Request) (*url.URL, error)
|
||||
|
||||
// TLSClientConfig specifies the TLS configuration to use with tls.Client.
|
||||
// If nil, the default configuration is used.
|
||||
// If either NetDialTLS or NetDialTLSContext are set, Dial assumes the TLS handshake
|
||||
// is done there and TLSClientConfig is ignored.
|
||||
TLSClientConfig *tls.Config
|
||||
|
||||
// HandshakeTimeout specifies the duration for the handshake to complete.
|
||||
HandshakeTimeout time.Duration
|
||||
|
||||
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
|
||||
// size is zero, then a useful default size is used. The I/O buffer sizes
|
||||
// do not limit the size of the messages that can be sent or received.
|
||||
ReadBufferSize, WriteBufferSize int
|
||||
|
||||
// WriteBufferPool is a pool of buffers for write operations. If the value
|
||||
// is not set, then write buffers are allocated to the connection for the
|
||||
// lifetime of the connection.
|
||||
//
|
||||
// A pool is most useful when the application has a modest volume of writes
|
||||
// across a large number of connections.
|
||||
//
|
||||
// Applications should use a single pool for each unique value of
|
||||
// WriteBufferSize.
|
||||
WriteBufferPool BufferPool
|
||||
|
||||
// Subprotocols specifies the client's requested subprotocols.
|
||||
Subprotocols []string
|
||||
|
||||
// EnableCompression specifies if the client should attempt to negotiate
|
||||
// per message compression (RFC 7692). Setting this value to true does not
|
||||
// guarantee that compression will be supported. Currently only "no context
|
||||
// takeover" modes are supported.
|
||||
EnableCompression bool
|
||||
|
||||
// Jar specifies the cookie jar.
|
||||
// If Jar is nil, cookies are not sent in requests and ignored
|
||||
// in responses.
|
||||
Jar http.CookieJar
|
||||
}
|
||||
|
||||
// Dial creates a new client connection by calling DialContext with a background context.
|
||||
func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
|
||||
return d.DialContext(context.Background(), urlStr, requestHeader)
|
||||
}
|
||||
|
||||
var errMalformedURL = errors.New("malformed ws or wss URL")
|
||||
|
||||
func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
|
||||
hostPort = u.Host
|
||||
hostNoPort = u.Host
|
||||
if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
|
||||
hostNoPort = hostNoPort[:i]
|
||||
} else {
|
||||
switch u.Scheme {
|
||||
case "wss":
|
||||
hostPort += ":443"
|
||||
case "https":
|
||||
hostPort += ":443"
|
||||
default:
|
||||
hostPort += ":80"
|
||||
}
|
||||
}
|
||||
return hostPort, hostNoPort
|
||||
}
|
||||
|
||||
// DefaultDialer is a dialer with all fields set to the default values.
|
||||
var DefaultDialer = &Dialer{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
HandshakeTimeout: 45 * time.Second,
|
||||
}
|
||||
|
||||
// nilDialer is dialer to use when receiver is nil.
|
||||
var nilDialer = *DefaultDialer
|
||||
|
||||
// DialContext creates a new client connection. Use requestHeader to specify the
|
||||
// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
|
||||
// Use the response.Header to get the selected subprotocol
|
||||
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
|
||||
//
|
||||
// The context will be used in the request and in the Dialer.
|
||||
//
|
||||
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
|
||||
// non-nil *http.Response so that callers can handle redirects, authentication,
|
||||
// etcetera. The response body may not contain the entire response and does not
|
||||
// need to be closed by the application.
|
||||
func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
|
||||
if d == nil {
|
||||
d = &nilDialer
|
||||
}
|
||||
|
||||
challengeKey, err := generateChallengeKey()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
u, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "ws":
|
||||
u.Scheme = "http"
|
||||
case "wss":
|
||||
u.Scheme = "https"
|
||||
default:
|
||||
return nil, nil, errMalformedURL
|
||||
}
|
||||
|
||||
if u.User != nil {
|
||||
// User name and password are not allowed in websocket URIs.
|
||||
return nil, nil, errMalformedURL
|
||||
}
|
||||
|
||||
req := &http.Request{
|
||||
Method: http.MethodGet,
|
||||
URL: u,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Header: make(http.Header),
|
||||
Host: u.Host,
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
// Set the cookies present in the cookie jar of the dialer
|
||||
if d.Jar != nil {
|
||||
for _, cookie := range d.Jar.Cookies(u) {
|
||||
req.AddCookie(cookie)
|
||||
}
|
||||
}
|
||||
|
||||
// Set the request headers using the capitalization for names and values in
|
||||
// RFC examples. Although the capitalization shouldn't matter, there are
|
||||
// servers that depend on it. The Header.Set method is not used because the
|
||||
// method canonicalizes the header names.
|
||||
req.Header["Upgrade"] = []string{"websocket"}
|
||||
req.Header["Connection"] = []string{"Upgrade"}
|
||||
req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
|
||||
req.Header["Sec-WebSocket-Version"] = []string{"13"}
|
||||
if len(d.Subprotocols) > 0 {
|
||||
req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
|
||||
}
|
||||
for k, vs := range requestHeader {
|
||||
switch {
|
||||
case k == "Host":
|
||||
if len(vs) > 0 {
|
||||
req.Host = vs[0]
|
||||
}
|
||||
case k == "Upgrade" ||
|
||||
k == "Connection" ||
|
||||
k == "Sec-Websocket-Key" ||
|
||||
k == "Sec-Websocket-Version" ||
|
||||
k == "Sec-Websocket-Extensions" ||
|
||||
(k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
|
||||
return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
|
||||
case k == "Sec-Websocket-Protocol":
|
||||
req.Header["Sec-WebSocket-Protocol"] = vs
|
||||
default:
|
||||
req.Header[k] = vs
|
||||
}
|
||||
}
|
||||
|
||||
if d.EnableCompression {
|
||||
req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"}
|
||||
}
|
||||
|
||||
if d.HandshakeTimeout != 0 {
|
||||
var cancel func()
|
||||
ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
// Get network dial function.
|
||||
var netDial func(network, add string) (net.Conn, error)
|
||||
|
||||
switch u.Scheme {
|
||||
case "http":
|
||||
if d.NetDialContext != nil {
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
return d.NetDialContext(ctx, network, addr)
|
||||
}
|
||||
} else if d.NetDial != nil {
|
||||
netDial = d.NetDial
|
||||
}
|
||||
case "https":
|
||||
if d.NetDialTLSContext != nil {
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
return d.NetDialTLSContext(ctx, network, addr)
|
||||
}
|
||||
} else if d.NetDialContext != nil {
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
return d.NetDialContext(ctx, network, addr)
|
||||
}
|
||||
} else if d.NetDial != nil {
|
||||
netDial = d.NetDial
|
||||
}
|
||||
default:
|
||||
return nil, nil, errMalformedURL
|
||||
}
|
||||
|
||||
if netDial == nil {
|
||||
netDialer := &net.Dialer{}
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
return netDialer.DialContext(ctx, network, addr)
|
||||
}
|
||||
}
|
||||
|
||||
// If needed, wrap the dial function to set the connection deadline.
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
forwardDial := netDial
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
c, err := forwardDial(network, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = c.SetDeadline(deadline)
|
||||
if err != nil {
|
||||
c.Close()
|
||||
return nil, err
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
}
|
||||
|
||||
// If needed, wrap the dial function to connect through a proxy.
|
||||
if d.Proxy != nil {
|
||||
proxyURL, err := d.Proxy(req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if proxyURL != nil {
|
||||
dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
netDial = dialer.Dial
|
||||
}
|
||||
}
|
||||
|
||||
hostPort, hostNoPort := hostPortNoPort(u)
|
||||
trace := httptrace.ContextClientTrace(ctx)
|
||||
if trace != nil && trace.GetConn != nil {
|
||||
trace.GetConn(hostPort)
|
||||
}
|
||||
|
||||
netConn, err := netDial("tcp", hostPort)
|
||||
if trace != nil && trace.GotConn != nil {
|
||||
trace.GotConn(httptrace.GotConnInfo{
|
||||
Conn: netConn,
|
||||
})
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if netConn != nil {
|
||||
netConn.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
if u.Scheme == "https" && d.NetDialTLSContext == nil {
|
||||
// If NetDialTLSContext is set, assume that the TLS handshake has already been done
|
||||
|
||||
cfg := cloneTLSConfig(d.TLSClientConfig)
|
||||
if cfg.ServerName == "" {
|
||||
cfg.ServerName = hostNoPort
|
||||
}
|
||||
tlsConn := tls.Client(netConn, cfg)
|
||||
netConn = tlsConn
|
||||
|
||||
if trace != nil && trace.TLSHandshakeStart != nil {
|
||||
trace.TLSHandshakeStart()
|
||||
}
|
||||
err := doHandshake(ctx, tlsConn, cfg)
|
||||
if trace != nil && trace.TLSHandshakeDone != nil {
|
||||
trace.TLSHandshakeDone(tlsConn.ConnectionState(), err)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil)
|
||||
|
||||
if err := req.Write(netConn); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if trace != nil && trace.GotFirstResponseByte != nil {
|
||||
if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 {
|
||||
trace.GotFirstResponseByte()
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := http.ReadResponse(conn.br, req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if d.Jar != nil {
|
||||
if rc := resp.Cookies(); len(rc) > 0 {
|
||||
d.Jar.SetCookies(u, rc)
|
||||
}
|
||||
}
|
||||
|
||||
if resp.StatusCode != 101 ||
|
||||
!tokenListContainsValue(resp.Header, "Upgrade", "websocket") ||
|
||||
!tokenListContainsValue(resp.Header, "Connection", "upgrade") ||
|
||||
resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
|
||||
// Before closing the network connection on return from this
|
||||
// function, slurp up some of the response to aid application
|
||||
// debugging.
|
||||
buf := make([]byte, 1024)
|
||||
n, _ := io.ReadFull(resp.Body, buf)
|
||||
resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
|
||||
return nil, resp, ErrBadHandshake
|
||||
}
|
||||
|
||||
for _, ext := range parseExtensions(resp.Header) {
|
||||
if ext[""] != "permessage-deflate" {
|
||||
continue
|
||||
}
|
||||
_, snct := ext["server_no_context_takeover"]
|
||||
_, cnct := ext["client_no_context_takeover"]
|
||||
if !snct || !cnct {
|
||||
return nil, resp, errInvalidCompression
|
||||
}
|
||||
conn.newCompressionWriter = compressNoContextTakeover
|
||||
conn.newDecompressionReader = decompressNoContextTakeover
|
||||
break
|
||||
}
|
||||
|
||||
resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
|
||||
conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
|
||||
|
||||
netConn.SetDeadline(time.Time{})
|
||||
netConn = nil // to avoid close in defer.
|
||||
return conn, resp, nil
|
||||
}
|
||||
|
||||
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
|
||||
if cfg == nil {
|
||||
return &tls.Config{}
|
||||
}
|
||||
return cfg.Clone()
|
||||
}
|
vendor/github.com/gorilla/websocket/compression.go (new file, generated, vendored, 148 lines)
@@ -0,0 +1,148 @@
|
||||
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"compress/flate"
|
||||
"errors"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6
|
||||
maxCompressionLevel = flate.BestCompression
|
||||
defaultCompressionLevel = 1
|
||||
)
|
||||
|
||||
var (
|
||||
flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
|
||||
flateReaderPool = sync.Pool{New: func() interface{} {
|
||||
return flate.NewReader(nil)
|
||||
}}
|
||||
)
|
||||
|
||||
func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
|
||||
const tail =
|
||||
// Add four bytes as specified in RFC
|
||||
"\x00\x00\xff\xff" +
|
||||
// Add final block to squelch unexpected EOF error from flate reader.
|
||||
"\x01\x00\x00\xff\xff"
|
||||
|
||||
fr, _ := flateReaderPool.Get().(io.ReadCloser)
|
||||
fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
|
||||
return &flateReadWrapper{fr}
|
||||
}
|
||||
|
||||
func isValidCompressionLevel(level int) bool {
|
||||
return minCompressionLevel <= level && level <= maxCompressionLevel
|
||||
}
|
||||
|
||||
func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
|
||||
p := &flateWriterPools[level-minCompressionLevel]
|
||||
tw := &truncWriter{w: w}
|
||||
fw, _ := p.Get().(*flate.Writer)
|
||||
if fw == nil {
|
||||
fw, _ = flate.NewWriter(tw, level)
|
||||
} else {
|
||||
fw.Reset(tw)
|
||||
}
|
||||
return &flateWriteWrapper{fw: fw, tw: tw, p: p}
|
||||
}
|
||||
|
||||
// truncWriter is an io.Writer that writes all but the last four bytes of the
|
||||
// stream to another io.Writer.
|
||||
type truncWriter struct {
|
||||
w io.WriteCloser
|
||||
n int
|
||||
p [4]byte
|
||||
}
|
||||
|
||||
func (w *truncWriter) Write(p []byte) (int, error) {
|
||||
n := 0
|
||||
|
||||
// fill buffer first for simplicity.
|
||||
if w.n < len(w.p) {
|
||||
n = copy(w.p[w.n:], p)
|
||||
p = p[n:]
|
||||
w.n += n
|
||||
if len(p) == 0 {
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
|
||||
m := len(p)
|
||||
if m > len(w.p) {
|
||||
m = len(w.p)
|
||||
}
|
||||
|
||||
if nn, err := w.w.Write(w.p[:m]); err != nil {
|
||||
return n + nn, err
|
||||
}
|
||||
|
||||
copy(w.p[:], w.p[m:])
|
||||
copy(w.p[len(w.p)-m:], p[len(p)-m:])
|
||||
nn, err := w.w.Write(p[:len(p)-m])
|
||||
return n + nn, err
|
||||
}
|
||||
|
||||
type flateWriteWrapper struct {
|
||||
fw *flate.Writer
|
||||
tw *truncWriter
|
||||
p *sync.Pool
|
||||
}
|
||||
|
||||
func (w *flateWriteWrapper) Write(p []byte) (int, error) {
|
||||
if w.fw == nil {
|
||||
return 0, errWriteClosed
|
||||
}
|
||||
return w.fw.Write(p)
|
||||
}
|
||||
|
||||
func (w *flateWriteWrapper) Close() error {
|
||||
if w.fw == nil {
|
||||
return errWriteClosed
|
||||
}
|
||||
err1 := w.fw.Flush()
|
||||
w.p.Put(w.fw)
|
||||
w.fw = nil
|
||||
if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
|
||||
return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
|
||||
}
|
||||
err2 := w.tw.w.Close()
|
||||
if err1 != nil {
|
||||
return err1
|
||||
}
|
||||
return err2
|
||||
}
|
||||
|
||||
type flateReadWrapper struct {
|
||||
fr io.ReadCloser
|
||||
}
|
||||
|
||||
func (r *flateReadWrapper) Read(p []byte) (int, error) {
|
||||
if r.fr == nil {
|
||||
return 0, io.ErrClosedPipe
|
||||
}
|
||||
n, err := r.fr.Read(p)
|
||||
if err == io.EOF {
|
||||
// Preemptively place the reader back in the pool. This helps with
|
||||
// scenarios where the application does not call NextReader() soon after
|
||||
// this final read.
|
||||
r.Close()
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (r *flateReadWrapper) Close() error {
|
||||
if r.fr == nil {
|
||||
return io.ErrClosedPipe
|
||||
}
|
||||
err := r.fr.Close()
|
||||
flateReaderPool.Put(r.fr)
|
||||
r.fr = nil
|
||||
return err
|
||||
}
|
vendor/github.com/gorilla/websocket/conn.go (new file, generated, vendored, 1230 lines)
File diff suppressed because it is too large
vendor/github.com/gorilla/websocket/doc.go (new file, generated, vendored, 227 lines)
@@ -0,0 +1,227 @@
|
||||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package websocket implements the WebSocket protocol defined in RFC 6455.
|
||||
//
|
||||
// Overview
|
||||
//
|
||||
// The Conn type represents a WebSocket connection. A server application calls
|
||||
// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// ReadBufferSize: 1024,
|
||||
// WriteBufferSize: 1024,
|
||||
// }
|
||||
//
|
||||
// func handler(w http.ResponseWriter, r *http.Request) {
|
||||
// conn, err := upgrader.Upgrade(w, r, nil)
|
||||
// if err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// ... Use conn to send and receive messages.
|
||||
// }
|
||||
//
|
||||
// Call the connection's WriteMessage and ReadMessage methods to send and
|
||||
// receive messages as a slice of bytes. This snippet of code shows how to echo
|
||||
// messages using these methods:
|
||||
//
|
||||
// for {
|
||||
// messageType, p, err := conn.ReadMessage()
|
||||
// if err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// if err := conn.WriteMessage(messageType, p); err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// In above snippet of code, p is a []byte and messageType is an int with value
|
||||
// websocket.BinaryMessage or websocket.TextMessage.
|
||||
//
|
||||
// An application can also send and receive messages using the io.WriteCloser
|
||||
// and io.Reader interfaces. To send a message, call the connection NextWriter
|
||||
// method to get an io.WriteCloser, write the message to the writer and close
|
||||
// the writer when done. To receive a message, call the connection NextReader
|
||||
// method to get an io.Reader and read until io.EOF is returned. This snippet
|
||||
// shows how to echo messages using the NextWriter and NextReader methods:
|
||||
//
|
||||
// for {
|
||||
// messageType, r, err := conn.NextReader()
|
||||
// if err != nil {
|
||||
// return
|
||||
// }
|
||||
// w, err := conn.NextWriter(messageType)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if _, err := io.Copy(w, r); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if err := w.Close(); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Data Messages
|
||||
//
|
||||
// The WebSocket protocol distinguishes between text and binary data messages.
|
||||
// Text messages are interpreted as UTF-8 encoded text. The interpretation of
|
||||
// binary messages is left to the application.
|
||||
//
|
||||
// This package uses the TextMessage and BinaryMessage integer constants to
|
||||
// identify the two data message types. The ReadMessage and NextReader methods
|
||||
// return the type of the received message. The messageType argument to the
|
||||
// WriteMessage and NextWriter methods specifies the type of a sent message.
|
||||
//
|
||||
// It is the application's responsibility to ensure that text messages are
|
||||
// valid UTF-8 encoded text.
|
||||
//
|
||||
// Control Messages
|
||||
//
|
||||
// The WebSocket protocol defines three types of control messages: close, ping
|
||||
// and pong. Call the connection WriteControl, WriteMessage or NextWriter
|
||||
// methods to send a control message to the peer.
|
||||
//
|
||||
// Connections handle received close messages by calling the handler function
|
||||
// set with the SetCloseHandler method and by returning a *CloseError from the
|
||||
// NextReader, ReadMessage or the message Read method. The default close
|
||||
// handler sends a close message to the peer.
|
||||
//
|
||||
// Connections handle received ping messages by calling the handler function
|
||||
// set with the SetPingHandler method. The default ping handler sends a pong
|
||||
// message to the peer.
|
||||
//
|
||||
// Connections handle received pong messages by calling the handler function
|
||||
// set with the SetPongHandler method. The default pong handler does nothing.
|
||||
// If an application sends ping messages, then the application should set a
|
||||
// pong handler to receive the corresponding pong.
|
||||
//
|
||||
// The control message handler functions are called from the NextReader,
|
||||
// ReadMessage and message reader Read methods. The default close and ping
|
||||
// handlers can block these methods for a short time when the handler writes to
|
||||
// the connection.
|
||||
//
|
||||
// The application must read the connection to process close, ping and pong
|
||||
// messages sent from the peer. If the application is not otherwise interested
|
||||
// in messages from the peer, then the application should start a goroutine to
|
||||
// read and discard messages from the peer. A simple example is:
|
||||
//
|
||||
// func readLoop(c *websocket.Conn) {
|
||||
// for {
|
||||
// if _, _, err := c.NextReader(); err != nil {
|
||||
// c.Close()
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Concurrency
|
||||
//
|
||||
// Connections support one concurrent reader and one concurrent writer.
|
||||
//
|
||||
// Applications are responsible for ensuring that no more than one goroutine
|
||||
// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
|
||||
// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
|
||||
// that no more than one goroutine calls the read methods (NextReader,
|
||||
// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
|
||||
// concurrently.
|
||||
//
|
||||
// The Close and WriteControl methods can be called concurrently with all other
|
||||
// methods.
|
||||
//
|
||||
// Origin Considerations
|
||||
//
|
||||
// Web browsers allow Javascript applications to open a WebSocket connection to
|
||||
// any host. It's up to the server to enforce an origin policy using the Origin
|
||||
// request header sent by the browser.
|
||||
//
|
||||
// The Upgrader calls the function specified in the CheckOrigin field to check
|
||||
// the origin. If the CheckOrigin function returns false, then the Upgrade
|
||||
// method fails the WebSocket handshake with HTTP status 403.
|
||||
//
|
||||
// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
|
||||
// the handshake if the Origin request header is present and the Origin host is
|
||||
// not equal to the Host request header.
|
||||
//
|
||||
// The deprecated package-level Upgrade function does not perform origin
|
||||
// checking. The application is responsible for checking the Origin header
|
||||
// before calling the Upgrade function.
|
||||
//
|
||||
// Buffers
|
||||
//
|
||||
// Connections buffer network input and output to reduce the number
|
||||
// of system calls when reading or writing messages.
|
||||
//
|
||||
// Write buffers are also used for constructing WebSocket frames. See RFC 6455,
|
||||
// Section 5 for a discussion of message framing. A WebSocket frame header is
|
||||
// written to the network each time a write buffer is flushed to the network.
|
||||
// Decreasing the size of the write buffer can increase the amount of framing
|
||||
// overhead on the connection.
|
||||
//
|
||||
// The buffer sizes in bytes are specified by the ReadBufferSize and
|
||||
// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default
|
||||
// size of 4096 when a buffer size field is set to zero. The Upgrader reuses
|
||||
// buffers created by the HTTP server when a buffer size field is set to zero.
|
||||
// The HTTP server buffers have a size of 4096 at the time of this writing.
|
||||
//
|
||||
// The buffer sizes do not limit the size of a message that can be read or
|
||||
// written by a connection.
|
||||
//
|
||||
// Buffers are held for the lifetime of the connection by default. If the
|
||||
// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the
|
||||
// write buffer only when writing a message.
|
||||
//
|
||||
// Applications should tune the buffer sizes to balance memory use and
|
||||
// performance. Increasing the buffer size uses more memory, but can reduce the
|
||||
// number of system calls to read or write the network. In the case of writing,
|
||||
// increasing the buffer size can reduce the number of frame headers written to
|
||||
// the network.
|
||||
//
|
||||
// Some guidelines for setting buffer parameters are:
|
||||
//
|
||||
// Limit the buffer sizes to the maximum expected message size. Buffers larger
|
||||
// than the largest message do not provide any benefit.
|
||||
//
|
||||
// Depending on the distribution of message sizes, setting the buffer size to
|
||||
// a value less than the maximum expected message size can greatly reduce memory
|
||||
// use with a small impact on performance. Here's an example: If 99% of the
|
||||
// messages are smaller than 256 bytes and the maximum message size is 512
|
||||
// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls
|
||||
// than a buffer size of 512 bytes. The memory savings is 50%.
|
||||
//
|
||||
// A write buffer pool is useful when the application has a modest number
|
||||
// writes over a large number of connections. when buffers are pooled, a larger
|
||||
// buffer size has a reduced impact on total memory use and has the benefit of
|
||||
// reducing system calls and frame overhead.
|
||||
//
|
||||
// Compression EXPERIMENTAL
|
||||
//
|
||||
// Per message compression extensions (RFC 7692) are experimentally supported
|
||||
// by this package in a limited capacity. Setting the EnableCompression option
|
||||
// to true in Dialer or Upgrader will attempt to negotiate per message deflate
|
||||
// support.
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// EnableCompression: true,
|
||||
// }
|
||||
//
|
||||
// If compression was successfully negotiated with the connection's peer, any
|
||||
// message received in compressed form will be automatically decompressed.
|
||||
// All Read methods will return uncompressed bytes.
|
||||
//
|
||||
// Per message compression of messages written to a connection can be enabled
|
||||
// or disabled by calling the corresponding Conn method:
|
||||
//
|
||||
// conn.EnableWriteCompression(false)
|
||||
//
|
||||
// Currently this package does not support compression with "context takeover".
|
||||
// This means that messages must be compressed and decompressed in isolation,
|
||||
// without retaining sliding window or dictionary state across messages. For
|
||||
// more details refer to RFC 7692.
|
||||
//
|
||||
// Use of compression is experimental and may result in decreased performance.
|
||||
package websocket
|
vendor/github.com/gorilla/websocket/join.go (new file, generated, vendored, 42 lines)
@@ -0,0 +1,42 @@
// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package websocket

import (
	"io"
	"strings"
)

// JoinMessages concatenates received messages to create a single io.Reader.
// The string term is appended to each message. The returned reader does not
// support concurrent calls to the Read method.
func JoinMessages(c *Conn, term string) io.Reader {
	return &joinReader{c: c, term: term}
}

type joinReader struct {
	c    *Conn
	term string
	r    io.Reader
}

func (r *joinReader) Read(p []byte) (int, error) {
	if r.r == nil {
		var err error
		_, r.r, err = r.c.NextReader()
		if err != nil {
			return 0, err
		}
		if r.term != "" {
			r.r = io.MultiReader(r.r, strings.NewReader(r.term))
		}
	}
	n, err := r.r.Read(p)
	if err == io.EOF {
		err = nil
		r.r = nil
	}
	return n, err
}
vendor/github.com/gorilla/websocket/json.go (new file, generated, vendored, 60 lines)
@@ -0,0 +1,60 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package websocket

import (
	"encoding/json"
	"io"
)

// WriteJSON writes the JSON encoding of v as a message.
//
// Deprecated: Use c.WriteJSON instead.
func WriteJSON(c *Conn, v interface{}) error {
	return c.WriteJSON(v)
}

// WriteJSON writes the JSON encoding of v as a message.
//
// See the documentation for encoding/json Marshal for details about the
// conversion of Go values to JSON.
func (c *Conn) WriteJSON(v interface{}) error {
	w, err := c.NextWriter(TextMessage)
	if err != nil {
		return err
	}
	err1 := json.NewEncoder(w).Encode(v)
	err2 := w.Close()
	if err1 != nil {
		return err1
	}
	return err2
}

// ReadJSON reads the next JSON-encoded message from the connection and stores
// it in the value pointed to by v.
//
// Deprecated: Use c.ReadJSON instead.
func ReadJSON(c *Conn, v interface{}) error {
	return c.ReadJSON(v)
}

// ReadJSON reads the next JSON-encoded message from the connection and stores
// it in the value pointed to by v.
//
// See the documentation for the encoding/json Unmarshal function for details
// about the conversion of JSON to a Go value.
func (c *Conn) ReadJSON(v interface{}) error {
	_, r, err := c.NextReader()
	if err != nil {
		return err
	}
	err = json.NewDecoder(r).Decode(v)
	if err == io.EOF {
		// One value is expected in the message.
		err = io.ErrUnexpectedEOF
	}
	return err
}
vendor/github.com/gorilla/websocket/mask.go (new file, generated, vendored, 55 lines)
@@ -0,0 +1,55 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
// this source code is governed by a BSD-style license that can be found in the
// LICENSE file.

//go:build !appengine
// +build !appengine

package websocket

import "unsafe"

const wordSize = int(unsafe.Sizeof(uintptr(0)))

func maskBytes(key [4]byte, pos int, b []byte) int {
	// Mask one byte at a time for small buffers.
	if len(b) < 2*wordSize {
		for i := range b {
			b[i] ^= key[pos&3]
			pos++
		}
		return pos & 3
	}

	// Mask one byte at a time to word boundary.
	if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
		n = wordSize - n
		for i := range b[:n] {
			b[i] ^= key[pos&3]
			pos++
		}
		b = b[n:]
	}

	// Create aligned word size key.
	var k [wordSize]byte
	for i := range k {
		k[i] = key[(pos+i)&3]
	}
	kw := *(*uintptr)(unsafe.Pointer(&k))

	// Mask one word at a time.
	n := (len(b) / wordSize) * wordSize
	for i := 0; i < n; i += wordSize {
		*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
	}

	// Mask one byte at a time for remaining bytes.
	b = b[n:]
	for i := range b {
		b[i] ^= key[pos&3]
		pos++
	}

	return pos & 3
}
vendor/github.com/gorilla/websocket/mask_safe.go (new file, generated, vendored, 16 lines)
@@ -0,0 +1,16 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
// this source code is governed by a BSD-style license that can be found in the
// LICENSE file.

//go:build appengine
// +build appengine

package websocket

func maskBytes(key [4]byte, pos int, b []byte) int {
	for i := range b {
		b[i] ^= key[pos&3]
		pos++
	}
	return pos & 3
}
vendor/github.com/gorilla/websocket/prepared.go (new file, generated, vendored, 102 lines)
@@ -0,0 +1,102 @@
|
||||
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PreparedMessage caches on the wire representations of a message payload.
|
||||
// Use PreparedMessage to efficiently send a message payload to multiple
|
||||
// connections. PreparedMessage is especially useful when compression is used
|
||||
// because the CPU and memory expensive compression operation can be executed
|
||||
// once for a given set of compression options.
|
||||
type PreparedMessage struct {
|
||||
messageType int
|
||||
data []byte
|
||||
mu sync.Mutex
|
||||
frames map[prepareKey]*preparedFrame
|
||||
}
|
||||
|
||||
// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
|
||||
type prepareKey struct {
|
||||
isServer bool
|
||||
compress bool
|
||||
compressionLevel int
|
||||
}
|
||||
|
||||
// preparedFrame contains data in wire representation.
|
||||
type preparedFrame struct {
|
||||
once sync.Once
|
||||
data []byte
|
||||
}
|
||||
|
||||
// NewPreparedMessage returns an initialized PreparedMessage. You can then send
|
||||
// it to connection using WritePreparedMessage method. Valid wire
|
||||
// representation will be calculated lazily only once for a set of current
|
||||
// connection options.
|
||||
func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
|
||||
pm := &PreparedMessage{
|
||||
messageType: messageType,
|
||||
frames: make(map[prepareKey]*preparedFrame),
|
||||
data: data,
|
||||
}
|
||||
|
||||
// Prepare a plain server frame.
|
||||
_, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// To protect against caller modifying the data argument, remember the data
|
||||
// copied to the plain server frame.
|
||||
pm.data = frameData[len(frameData)-len(data):]
|
||||
return pm, nil
|
||||
}
|
||||
|
||||
func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
|
||||
pm.mu.Lock()
|
||||
frame, ok := pm.frames[key]
|
||||
if !ok {
|
||||
frame = &preparedFrame{}
|
||||
pm.frames[key] = frame
|
||||
}
|
||||
pm.mu.Unlock()
|
||||
|
||||
var err error
|
||||
frame.once.Do(func() {
|
||||
// Prepare a frame using a 'fake' connection.
|
||||
// TODO: Refactor code in conn.go to allow more direct construction of
|
||||
// the frame.
|
||||
mu := make(chan struct{}, 1)
|
||||
mu <- struct{}{}
|
||||
var nc prepareConn
|
||||
c := &Conn{
|
||||
conn: &nc,
|
||||
mu: mu,
|
||||
isServer: key.isServer,
|
||||
compressionLevel: key.compressionLevel,
|
||||
enableWriteCompression: true,
|
||||
writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
|
||||
}
|
||||
if key.compress {
|
||||
c.newCompressionWriter = compressNoContextTakeover
|
||||
}
|
||||
err = c.WriteMessage(pm.messageType, pm.data)
|
||||
frame.data = nc.buf.Bytes()
|
||||
})
|
||||
return pm.messageType, frame.data, err
|
||||
}
|
||||
|
||||
type prepareConn struct {
|
||||
buf bytes.Buffer
|
||||
net.Conn
|
||||
}
|
||||
|
||||
func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) }
|
||||
func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
|
vendor/github.com/gorilla/websocket/proxy.go (new file, generated, vendored, 77 lines)
@@ -0,0 +1,77 @@
|
||||
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type netDialerFunc func(network, addr string) (net.Conn, error)
|
||||
|
||||
func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
|
||||
return fn(network, addr)
|
||||
}
|
||||
|
||||
func init() {
|
||||
proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
|
||||
return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil
|
||||
})
|
||||
}
|
||||
|
||||
type httpProxyDialer struct {
|
||||
proxyURL *url.URL
|
||||
forwardDial func(network, addr string) (net.Conn, error)
|
||||
}
|
||||
|
||||
func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
|
||||
hostPort, _ := hostPortNoPort(hpd.proxyURL)
|
||||
conn, err := hpd.forwardDial(network, hostPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
connectHeader := make(http.Header)
|
||||
if user := hpd.proxyURL.User; user != nil {
|
||||
proxyUser := user.Username()
|
||||
if proxyPassword, passwordSet := user.Password(); passwordSet {
|
||||
credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
|
||||
connectHeader.Set("Proxy-Authorization", "Basic "+credential)
|
||||
}
|
||||
}
|
||||
|
||||
connectReq := &http.Request{
|
||||
Method: http.MethodConnect,
|
||||
URL: &url.URL{Opaque: addr},
|
||||
Host: addr,
|
||||
Header: connectHeader,
|
||||
}
|
||||
|
||||
if err := connectReq.Write(conn); err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Read response. It's OK to use and discard buffered reader here becaue
|
||||
// the remote server does not speak until spoken to.
|
||||
br := bufio.NewReader(conn)
|
||||
resp, err := http.ReadResponse(br, connectReq)
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
conn.Close()
|
||||
f := strings.SplitN(resp.Status, " ", 2)
|
||||
return nil, errors.New(f[1])
|
||||
}
|
||||
return conn, nil
|
||||
}
|
vendor/github.com/gorilla/websocket/server.go (new file, generated, vendored, 365 lines)
@@ -0,0 +1,365 @@
|
||||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HandshakeError describes an error with the handshake from the peer.
|
||||
type HandshakeError struct {
|
||||
message string
|
||||
}
|
||||
|
||||
func (e HandshakeError) Error() string { return e.message }
|
||||
|
||||
// Upgrader specifies parameters for upgrading an HTTP connection to a
|
||||
// WebSocket connection.
|
||||
//
|
||||
// It is safe to call Upgrader's methods concurrently.
|
||||
type Upgrader struct {
|
||||
// HandshakeTimeout specifies the duration for the handshake to complete.
|
||||
HandshakeTimeout time.Duration
|
||||
|
||||
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
|
||||
// size is zero, then buffers allocated by the HTTP server are used. The
|
||||
// I/O buffer sizes do not limit the size of the messages that can be sent
|
||||
// or received.
|
||||
ReadBufferSize, WriteBufferSize int
|
||||
|
||||
// WriteBufferPool is a pool of buffers for write operations. If the value
|
||||
// is not set, then write buffers are allocated to the connection for the
|
||||
// lifetime of the connection.
|
||||
//
|
||||
// A pool is most useful when the application has a modest volume of writes
|
||||
// across a large number of connections.
|
||||
//
|
||||
// Applications should use a single pool for each unique value of
|
||||
// WriteBufferSize.
|
||||
WriteBufferPool BufferPool
|
||||
|
||||
// Subprotocols specifies the server's supported protocols in order of
|
||||
// preference. If this field is not nil, then the Upgrade method negotiates a
|
||||
// subprotocol by selecting the first match in this list with a protocol
|
||||
// requested by the client. If there's no match, then no protocol is
|
||||
// negotiated (the Sec-Websocket-Protocol header is not included in the
|
||||
// handshake response).
|
||||
Subprotocols []string
|
||||
|
||||
// Error specifies the function for generating HTTP error responses. If Error
|
||||
// is nil, then http.Error is used to generate the HTTP response.
|
||||
Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
|
||||
|
||||
// CheckOrigin returns true if the request Origin header is acceptable. If
|
||||
// CheckOrigin is nil, then a safe default is used: return false if the
|
||||
// Origin request header is present and the origin host is not equal to
|
||||
// request Host header.
|
||||
//
|
||||
// A CheckOrigin function should carefully validate the request origin to
|
||||
// prevent cross-site request forgery.
|
||||
CheckOrigin func(r *http.Request) bool
|
||||
|
||||
// EnableCompression specify if the server should attempt to negotiate per
|
||||
// message compression (RFC 7692). Setting this value to true does not
|
||||
// guarantee that compression will be supported. Currently only "no context
|
||||
// takeover" modes are supported.
|
||||
EnableCompression bool
|
||||
}
|
||||
|
||||
func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
|
||||
err := HandshakeError{reason}
|
||||
if u.Error != nil {
|
||||
u.Error(w, r, status, err)
|
||||
} else {
|
||||
w.Header().Set("Sec-Websocket-Version", "13")
|
||||
http.Error(w, http.StatusText(status), status)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// checkSameOrigin returns true if the origin is not set or is equal to the request host.
|
||||
func checkSameOrigin(r *http.Request) bool {
|
||||
origin := r.Header["Origin"]
|
||||
if len(origin) == 0 {
|
||||
return true
|
||||
}
|
||||
u, err := url.Parse(origin[0])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return equalASCIIFold(u.Host, r.Host)
|
||||
}
|
||||
|
||||
func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
|
||||
if u.Subprotocols != nil {
|
||||
clientProtocols := Subprotocols(r)
|
||||
for _, serverProtocol := range u.Subprotocols {
|
||||
for _, clientProtocol := range clientProtocols {
|
||||
if clientProtocol == serverProtocol {
|
||||
return clientProtocol
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if responseHeader != nil {
|
||||
return responseHeader.Get("Sec-Websocket-Protocol")
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
|
||||
//
|
||||
// The responseHeader is included in the response to the client's upgrade
|
||||
// request. Use the responseHeader to specify cookies (Set-Cookie). To specify
|
||||
// subprotocols supported by the server, set Upgrader.Subprotocols directly.
|
||||
//
|
||||
// If the upgrade fails, then Upgrade replies to the client with an HTTP error
|
||||
// response.
|
||||
func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
|
||||
const badHandshake = "websocket: the client is not using the websocket protocol: "
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
|
||||
}
|
||||
|
||||
if r.Method != http.MethodGet {
|
||||
return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
|
||||
}
|
||||
|
||||
if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported")
|
||||
}
|
||||
|
||||
checkOrigin := u.CheckOrigin
|
||||
if checkOrigin == nil {
|
||||
checkOrigin = checkSameOrigin
|
||||
}
|
||||
if !checkOrigin(r) {
|
||||
return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
|
||||
}
|
||||
|
||||
challengeKey := r.Header.Get("Sec-Websocket-Key")
|
||||
if challengeKey == "" {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank")
|
||||
}
|
||||
|
||||
subprotocol := u.selectSubprotocol(r, responseHeader)
|
||||
|
||||
// Negotiate PMCE
|
||||
var compress bool
|
||||
if u.EnableCompression {
|
||||
for _, ext := range parseExtensions(r.Header) {
|
||||
if ext[""] != "permessage-deflate" {
|
||||
continue
|
||||
}
|
||||
compress = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
h, ok := w.(http.Hijacker)
|
||||
if !ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
|
||||
}
|
||||
var brw *bufio.ReadWriter
|
||||
netConn, brw, err := h.Hijack()
|
||||
if err != nil {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, err.Error())
|
||||
}
|
||||
|
||||
if brw.Reader.Buffered() > 0 {
|
||||
netConn.Close()
|
||||
return nil, errors.New("websocket: client sent data before handshake is complete")
|
||||
}
|
||||
|
||||
var br *bufio.Reader
|
||||
if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 {
|
||||
// Reuse hijacked buffered reader as connection reader.
|
||||
br = brw.Reader
|
||||
}
|
||||
|
||||
buf := bufioWriterBuffer(netConn, brw.Writer)
|
||||
|
||||
var writeBuf []byte
|
||||
if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 {
|
||||
// Reuse hijacked write buffer as connection buffer.
|
||||
writeBuf = buf
|
||||
}
|
||||
|
||||
c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf)
|
||||
c.subprotocol = subprotocol
|
||||
|
||||
if compress {
|
||||
c.newCompressionWriter = compressNoContextTakeover
|
||||
c.newDecompressionReader = decompressNoContextTakeover
|
||||
}
|
||||
|
||||
// Use larger of hijacked buffer and connection write buffer for header.
|
||||
p := buf
|
||||
if len(c.writeBuf) > len(p) {
|
||||
p = c.writeBuf
|
||||
}
|
||||
p = p[:0]
|
||||
|
||||
p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
|
||||
p = append(p, computeAcceptKey(challengeKey)...)
|
||||
p = append(p, "\r\n"...)
|
||||
if c.subprotocol != "" {
|
||||
p = append(p, "Sec-WebSocket-Protocol: "...)
|
||||
p = append(p, c.subprotocol...)
|
||||
p = append(p, "\r\n"...)
|
||||
}
|
||||
if compress {
|
||||
p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
|
||||
}
|
||||
for k, vs := range responseHeader {
|
||||
if k == "Sec-Websocket-Protocol" {
|
||||
continue
|
||||
}
|
||||
for _, v := range vs {
|
||||
p = append(p, k...)
|
||||
p = append(p, ": "...)
|
||||
for i := 0; i < len(v); i++ {
|
||||
b := v[i]
|
||||
if b <= 31 {
|
||||
// prevent response splitting.
|
||||
b = ' '
|
||||
}
|
||||
p = append(p, b)
|
||||
}
|
||||
p = append(p, "\r\n"...)
|
||||
}
|
||||
}
|
||||
p = append(p, "\r\n"...)
|
||||
|
||||
// Clear deadlines set by HTTP server.
|
||||
netConn.SetDeadline(time.Time{})
|
||||
|
||||
if u.HandshakeTimeout > 0 {
|
||||
netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
|
||||
}
|
||||
if _, err = netConn.Write(p); err != nil {
|
||||
netConn.Close()
|
||||
return nil, err
|
||||
}
|
||||
if u.HandshakeTimeout > 0 {
|
||||
netConn.SetWriteDeadline(time.Time{})
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
|
||||
//
|
||||
// Deprecated: Use websocket.Upgrader instead.
|
||||
//
|
||||
// Upgrade does not perform origin checking. The application is responsible for
|
||||
// checking the Origin header before calling Upgrade. An example implementation
|
||||
// of the same origin policy check is:
|
||||
//
|
||||
// if req.Header.Get("Origin") != "http://"+req.Host {
|
||||
// http.Error(w, "Origin not allowed", http.StatusForbidden)
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// If the endpoint supports subprotocols, then the application is responsible
|
||||
// for negotiating the protocol used on the connection. Use the Subprotocols()
|
||||
// function to get the subprotocols requested by the client. Use the
|
||||
// Sec-Websocket-Protocol response header to specify the subprotocol selected
|
||||
// by the application.
|
||||
//
|
||||
// The responseHeader is included in the response to the client's upgrade
|
||||
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
|
||||
// negotiated subprotocol (Sec-Websocket-Protocol).
|
||||
//
|
||||
// The connection buffers IO to the underlying network connection. The
|
||||
// readBufSize and writeBufSize parameters specify the size of the buffers to
|
||||
// use. Messages can be larger than the buffers.
|
||||
//
|
||||
// If the request is not a valid WebSocket handshake, then Upgrade returns an
|
||||
// error of type HandshakeError. Applications should handle this error by
|
||||
// replying to the client with an HTTP error response.
|
||||
func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
|
||||
u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
|
||||
u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
|
||||
// don't return errors to maintain backwards compatibility
|
||||
}
|
||||
u.CheckOrigin = func(r *http.Request) bool {
|
||||
// allow all connections by default
|
||||
return true
|
||||
}
|
||||
return u.Upgrade(w, r, responseHeader)
|
||||
}
|
||||
|
||||
// Subprotocols returns the subprotocols requested by the client in the
|
||||
// Sec-Websocket-Protocol header.
|
||||
func Subprotocols(r *http.Request) []string {
|
||||
h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
|
||||
if h == "" {
|
||||
return nil
|
||||
}
|
||||
protocols := strings.Split(h, ",")
|
||||
for i := range protocols {
|
||||
protocols[i] = strings.TrimSpace(protocols[i])
|
||||
}
|
||||
return protocols
|
||||
}
|
||||
|
||||
// IsWebSocketUpgrade returns true if the client requested upgrade to the
|
||||
// WebSocket protocol.
|
||||
func IsWebSocketUpgrade(r *http.Request) bool {
|
||||
return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
|
||||
tokenListContainsValue(r.Header, "Upgrade", "websocket")
|
||||
}
|
||||
|
||||
// bufioReaderSize returns the size of a bufio.Reader.
|
||||
func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
|
||||
// This code assumes that peek on a reset reader returns
|
||||
// bufio.Reader.buf[:0].
|
||||
// TODO: Use bufio.Reader.Size() after Go 1.10
|
||||
br.Reset(originalReader)
|
||||
if p, err := br.Peek(0); err == nil {
|
||||
return cap(p)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// writeHook is an io.Writer that records the last slice passed to it via
|
||||
// io.Writer.Write.
|
||||
type writeHook struct {
|
||||
p []byte
|
||||
}
|
||||
|
||||
func (wh *writeHook) Write(p []byte) (int, error) {
|
||||
wh.p = p
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// bufioWriterBuffer grabs the buffer from a bufio.Writer.
|
||||
func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
|
||||
// This code assumes that bufio.Writer.buf[:1] is passed to the
|
||||
// bufio.Writer's underlying writer.
|
||||
var wh writeHook
|
||||
bw.Reset(&wh)
|
||||
bw.WriteByte(0)
|
||||
bw.Flush()
|
||||
|
||||
bw.Reset(originalWriter)
|
||||
|
||||
return wh.p[:cap(wh.p)]
|
||||
}
|
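The file above defines the server-side handshake entry points (Upgrader.Upgrade, Subprotocols, IsWebSocketUpgrade). As a rough sketch of how a consumer of this vendored package would use them, the handler below upgrades a request and echoes frames back; it assumes Conn.ReadMessage, Conn.WriteMessage and Conn.Close from other files of the package that are not part of this hunk, so treat it as illustrative rather than as part of the change.

package main

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

// upgrader keeps the default CheckOrigin (same-origin) behavior.
var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
}

func echo(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		// Upgrade has already written an HTTP error response to the client.
		log.Println("upgrade:", err)
		return
	}
	defer conn.Close()
	for {
		mt, msg, err := conn.ReadMessage()
		if err != nil {
			return
		}
		if err := conn.WriteMessage(mt, msg); err != nil {
			return
		}
	}
}

func main() {
	http.HandleFunc("/echo", echo)
	log.Fatal(http.ListenAndServe(":8080", nil))
}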
21 vendor/github.com/gorilla/websocket/tls_handshake.go generated vendored Normal file
@@ -0,0 +1,21 @@
//go:build go1.17
// +build go1.17

package websocket

import (
	"context"
	"crypto/tls"
)

func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error {
	if err := tlsConn.HandshakeContext(ctx); err != nil {
		return err
	}
	if !cfg.InsecureSkipVerify {
		if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
			return err
		}
	}
	return nil
}
21 vendor/github.com/gorilla/websocket/tls_handshake_116.go generated vendored Normal file
@@ -0,0 +1,21 @@
//go:build !go1.17
// +build !go1.17

package websocket

import (
	"context"
	"crypto/tls"
)

func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error {
	if err := tlsConn.Handshake(); err != nil {
		return err
	}
	if !cfg.InsecureSkipVerify {
		if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
			return err
		}
	}
	return nil
}
283 vendor/github.com/gorilla/websocket/util.go generated vendored Normal file
@@ -0,0 +1,283 @@
|
||||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
|
||||
|
||||
func computeAcceptKey(challengeKey string) string {
|
||||
h := sha1.New()
|
||||
h.Write([]byte(challengeKey))
|
||||
h.Write(keyGUID)
|
||||
return base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
}
|
||||
|
||||
func generateChallengeKey() (string, error) {
|
||||
p := make([]byte, 16)
|
||||
if _, err := io.ReadFull(rand.Reader, p); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return base64.StdEncoding.EncodeToString(p), nil
|
||||
}
|
||||
|
||||
// Token octets per RFC 2616.
|
||||
var isTokenOctet = [256]bool{
|
||||
'!': true,
|
||||
'#': true,
|
||||
'$': true,
|
||||
'%': true,
|
||||
'&': true,
|
||||
'\'': true,
|
||||
'*': true,
|
||||
'+': true,
|
||||
'-': true,
|
||||
'.': true,
|
||||
'0': true,
|
||||
'1': true,
|
||||
'2': true,
|
||||
'3': true,
|
||||
'4': true,
|
||||
'5': true,
|
||||
'6': true,
|
||||
'7': true,
|
||||
'8': true,
|
||||
'9': true,
|
||||
'A': true,
|
||||
'B': true,
|
||||
'C': true,
|
||||
'D': true,
|
||||
'E': true,
|
||||
'F': true,
|
||||
'G': true,
|
||||
'H': true,
|
||||
'I': true,
|
||||
'J': true,
|
||||
'K': true,
|
||||
'L': true,
|
||||
'M': true,
|
||||
'N': true,
|
||||
'O': true,
|
||||
'P': true,
|
||||
'Q': true,
|
||||
'R': true,
|
||||
'S': true,
|
||||
'T': true,
|
||||
'U': true,
|
||||
'W': true,
|
||||
'V': true,
|
||||
'X': true,
|
||||
'Y': true,
|
||||
'Z': true,
|
||||
'^': true,
|
||||
'_': true,
|
||||
'`': true,
|
||||
'a': true,
|
||||
'b': true,
|
||||
'c': true,
|
||||
'd': true,
|
||||
'e': true,
|
||||
'f': true,
|
||||
'g': true,
|
||||
'h': true,
|
||||
'i': true,
|
||||
'j': true,
|
||||
'k': true,
|
||||
'l': true,
|
||||
'm': true,
|
||||
'n': true,
|
||||
'o': true,
|
||||
'p': true,
|
||||
'q': true,
|
||||
'r': true,
|
||||
's': true,
|
||||
't': true,
|
||||
'u': true,
|
||||
'v': true,
|
||||
'w': true,
|
||||
'x': true,
|
||||
'y': true,
|
||||
'z': true,
|
||||
'|': true,
|
||||
'~': true,
|
||||
}
|
||||
|
||||
// skipSpace returns a slice of the string s with all leading RFC 2616 linear
|
||||
// whitespace removed.
|
||||
func skipSpace(s string) (rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if b := s[i]; b != ' ' && b != '\t' {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[i:]
|
||||
}
|
||||
|
||||
// nextToken returns the leading RFC 2616 token of s and the string following
|
||||
// the token.
|
||||
func nextToken(s string) (token, rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if !isTokenOctet[s[i]] {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[:i], s[i:]
|
||||
}
|
||||
|
||||
// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616
|
||||
// and the string following the token or quoted string.
|
||||
func nextTokenOrQuoted(s string) (value string, rest string) {
|
||||
if !strings.HasPrefix(s, "\"") {
|
||||
return nextToken(s)
|
||||
}
|
||||
s = s[1:]
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch s[i] {
|
||||
case '"':
|
||||
return s[:i], s[i+1:]
|
||||
case '\\':
|
||||
p := make([]byte, len(s)-1)
|
||||
j := copy(p, s[:i])
|
||||
escape := true
|
||||
for i = i + 1; i < len(s); i++ {
|
||||
b := s[i]
|
||||
switch {
|
||||
case escape:
|
||||
escape = false
|
||||
p[j] = b
|
||||
j++
|
||||
case b == '\\':
|
||||
escape = true
|
||||
case b == '"':
|
||||
return string(p[:j]), s[i+1:]
|
||||
default:
|
||||
p[j] = b
|
||||
j++
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
|
||||
// equalASCIIFold returns true if s is equal to t with ASCII case folding as
|
||||
// defined in RFC 4790.
|
||||
func equalASCIIFold(s, t string) bool {
|
||||
for s != "" && t != "" {
|
||||
sr, size := utf8.DecodeRuneInString(s)
|
||||
s = s[size:]
|
||||
tr, size := utf8.DecodeRuneInString(t)
|
||||
t = t[size:]
|
||||
if sr == tr {
|
||||
continue
|
||||
}
|
||||
if 'A' <= sr && sr <= 'Z' {
|
||||
sr = sr + 'a' - 'A'
|
||||
}
|
||||
if 'A' <= tr && tr <= 'Z' {
|
||||
tr = tr + 'a' - 'A'
|
||||
}
|
||||
if sr != tr {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return s == t
|
||||
}
|
||||
|
||||
// tokenListContainsValue returns true if the 1#token header with the given
|
||||
// name contains a token equal to value with ASCII case folding.
|
||||
func tokenListContainsValue(header http.Header, name string, value string) bool {
|
||||
headers:
|
||||
for _, s := range header[name] {
|
||||
for {
|
||||
var t string
|
||||
t, s = nextToken(skipSpace(s))
|
||||
if t == "" {
|
||||
continue headers
|
||||
}
|
||||
s = skipSpace(s)
|
||||
if s != "" && s[0] != ',' {
|
||||
continue headers
|
||||
}
|
||||
if equalASCIIFold(t, value) {
|
||||
return true
|
||||
}
|
||||
if s == "" {
|
||||
continue headers
|
||||
}
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// parseExtensions parses WebSocket extensions from a header.
|
||||
func parseExtensions(header http.Header) []map[string]string {
|
||||
// From RFC 6455:
|
||||
//
|
||||
// Sec-WebSocket-Extensions = extension-list
|
||||
// extension-list = 1#extension
|
||||
// extension = extension-token *( ";" extension-param )
|
||||
// extension-token = registered-token
|
||||
// registered-token = token
|
||||
// extension-param = token [ "=" (token | quoted-string) ]
|
||||
// ;When using the quoted-string syntax variant, the value
|
||||
// ;after quoted-string unescaping MUST conform to the
|
||||
// ;'token' ABNF.
|
||||
|
||||
var result []map[string]string
|
||||
headers:
|
||||
for _, s := range header["Sec-Websocket-Extensions"] {
|
||||
for {
|
||||
var t string
|
||||
t, s = nextToken(skipSpace(s))
|
||||
if t == "" {
|
||||
continue headers
|
||||
}
|
||||
ext := map[string]string{"": t}
|
||||
for {
|
||||
s = skipSpace(s)
|
||||
if !strings.HasPrefix(s, ";") {
|
||||
break
|
||||
}
|
||||
var k string
|
||||
k, s = nextToken(skipSpace(s[1:]))
|
||||
if k == "" {
|
||||
continue headers
|
||||
}
|
||||
s = skipSpace(s)
|
||||
var v string
|
||||
if strings.HasPrefix(s, "=") {
|
||||
v, s = nextTokenOrQuoted(skipSpace(s[1:]))
|
||||
s = skipSpace(s)
|
||||
}
|
||||
if s != "" && s[0] != ',' && s[0] != ';' {
|
||||
continue headers
|
||||
}
|
||||
ext[k] = v
|
||||
}
|
||||
if s != "" && s[0] != ',' {
|
||||
continue headers
|
||||
}
|
||||
result = append(result, ext)
|
||||
if s == "" {
|
||||
continue headers
|
||||
}
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
473 vendor/github.com/gorilla/websocket/x_net_proxy.go generated vendored Normal file
@@ -0,0 +1,473 @@
|
||||
// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
|
||||
//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy
|
||||
|
||||
// Package proxy provides support for a variety of protocols to proxy network
|
||||
// data.
|
||||
//
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type proxy_direct struct{}
|
||||
|
||||
// Direct is a direct proxy: one that makes network connections directly.
|
||||
var proxy_Direct = proxy_direct{}
|
||||
|
||||
func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
|
||||
return net.Dial(network, addr)
|
||||
}
|
||||
|
||||
// A PerHost directs connections to a default Dialer unless the host name
|
||||
// requested matches one of a number of exceptions.
|
||||
type proxy_PerHost struct {
|
||||
def, bypass proxy_Dialer
|
||||
|
||||
bypassNetworks []*net.IPNet
|
||||
bypassIPs []net.IP
|
||||
bypassZones []string
|
||||
bypassHosts []string
|
||||
}
|
||||
|
||||
// NewPerHost returns a PerHost Dialer that directs connections to either
|
||||
// defaultDialer or bypass, depending on whether the connection matches one of
|
||||
// the configured rules.
|
||||
func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost {
|
||||
return &proxy_PerHost{
|
||||
def: defaultDialer,
|
||||
bypass: bypass,
|
||||
}
|
||||
}
|
||||
|
||||
// Dial connects to the address addr on the given network through either
|
||||
// defaultDialer or bypass.
|
||||
func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) {
|
||||
host, _, err := net.SplitHostPort(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return p.dialerForRequest(host).Dial(network, addr)
|
||||
}
|
||||
|
||||
func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer {
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
for _, net := range p.bypassNetworks {
|
||||
if net.Contains(ip) {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
for _, bypassIP := range p.bypassIPs {
|
||||
if bypassIP.Equal(ip) {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
return p.def
|
||||
}
|
||||
|
||||
for _, zone := range p.bypassZones {
|
||||
if strings.HasSuffix(host, zone) {
|
||||
return p.bypass
|
||||
}
|
||||
if host == zone[1:] {
|
||||
// For a zone ".example.com", we match "example.com"
|
||||
// too.
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
for _, bypassHost := range p.bypassHosts {
|
||||
if bypassHost == host {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
return p.def
|
||||
}
|
||||
|
||||
// AddFromString parses a string that contains comma-separated values
|
||||
// specifying hosts that should use the bypass proxy. Each value is either an
|
||||
// IP address, a CIDR range, a zone (*.example.com) or a host name
|
||||
// (localhost). A best effort is made to parse the string and errors are
|
||||
// ignored.
|
||||
func (p *proxy_PerHost) AddFromString(s string) {
|
||||
hosts := strings.Split(s, ",")
|
||||
for _, host := range hosts {
|
||||
host = strings.TrimSpace(host)
|
||||
if len(host) == 0 {
|
||||
continue
|
||||
}
|
||||
if strings.Contains(host, "/") {
|
||||
// We assume that it's a CIDR address like 127.0.0.0/8
|
||||
if _, net, err := net.ParseCIDR(host); err == nil {
|
||||
p.AddNetwork(net)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
p.AddIP(ip)
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(host, "*.") {
|
||||
p.AddZone(host[1:])
|
||||
continue
|
||||
}
|
||||
p.AddHost(host)
|
||||
}
|
||||
}
|
||||
|
||||
// AddIP specifies an IP address that will use the bypass proxy. Note that
|
||||
// this will only take effect if a literal IP address is dialed. A connection
|
||||
// to a named host will never match an IP.
|
||||
func (p *proxy_PerHost) AddIP(ip net.IP) {
|
||||
p.bypassIPs = append(p.bypassIPs, ip)
|
||||
}
|
||||
|
||||
// AddNetwork specifies an IP range that will use the bypass proxy. Note that
|
||||
// this will only take effect if a literal IP address is dialed. A connection
|
||||
// to a named host will never match.
|
||||
func (p *proxy_PerHost) AddNetwork(net *net.IPNet) {
|
||||
p.bypassNetworks = append(p.bypassNetworks, net)
|
||||
}
|
||||
|
||||
// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
|
||||
// "example.com" matches "example.com" and all of its subdomains.
|
||||
func (p *proxy_PerHost) AddZone(zone string) {
|
||||
if strings.HasSuffix(zone, ".") {
|
||||
zone = zone[:len(zone)-1]
|
||||
}
|
||||
if !strings.HasPrefix(zone, ".") {
|
||||
zone = "." + zone
|
||||
}
|
||||
p.bypassZones = append(p.bypassZones, zone)
|
||||
}
|
||||
|
||||
// AddHost specifies a host name that will use the bypass proxy.
|
||||
func (p *proxy_PerHost) AddHost(host string) {
|
||||
if strings.HasSuffix(host, ".") {
|
||||
host = host[:len(host)-1]
|
||||
}
|
||||
p.bypassHosts = append(p.bypassHosts, host)
|
||||
}
|
||||
|
||||
// A Dialer is a means to establish a connection.
|
||||
type proxy_Dialer interface {
|
||||
// Dial connects to the given address via the proxy.
|
||||
Dial(network, addr string) (c net.Conn, err error)
|
||||
}
|
||||
|
||||
// Auth contains authentication parameters that specific Dialers may require.
|
||||
type proxy_Auth struct {
|
||||
User, Password string
|
||||
}
|
||||
|
||||
// FromEnvironment returns the dialer specified by the proxy related variables in
|
||||
// the environment.
|
||||
func proxy_FromEnvironment() proxy_Dialer {
|
||||
allProxy := proxy_allProxyEnv.Get()
|
||||
if len(allProxy) == 0 {
|
||||
return proxy_Direct
|
||||
}
|
||||
|
||||
proxyURL, err := url.Parse(allProxy)
|
||||
if err != nil {
|
||||
return proxy_Direct
|
||||
}
|
||||
proxy, err := proxy_FromURL(proxyURL, proxy_Direct)
|
||||
if err != nil {
|
||||
return proxy_Direct
|
||||
}
|
||||
|
||||
noProxy := proxy_noProxyEnv.Get()
|
||||
if len(noProxy) == 0 {
|
||||
return proxy
|
||||
}
|
||||
|
||||
perHost := proxy_NewPerHost(proxy, proxy_Direct)
|
||||
perHost.AddFromString(noProxy)
|
||||
return perHost
|
||||
}
|
||||
|
||||
// proxySchemes is a map from URL schemes to a function that creates a Dialer
|
||||
// from a URL with such a scheme.
|
||||
var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)
|
||||
|
||||
// RegisterDialerType takes a URL scheme and a function to generate Dialers from
|
||||
// a URL with that scheme and a forwarding Dialer. Registered schemes are used
|
||||
// by FromURL.
|
||||
func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
|
||||
if proxy_proxySchemes == nil {
|
||||
proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
|
||||
}
|
||||
proxy_proxySchemes[scheme] = f
|
||||
}
|
||||
|
||||
// FromURL returns a Dialer given a URL specification and an underlying
|
||||
// Dialer for it to make network requests.
|
||||
func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
|
||||
var auth *proxy_Auth
|
||||
if u.User != nil {
|
||||
auth = new(proxy_Auth)
|
||||
auth.User = u.User.Username()
|
||||
if p, ok := u.User.Password(); ok {
|
||||
auth.Password = p
|
||||
}
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "socks5":
|
||||
return proxy_SOCKS5("tcp", u.Host, auth, forward)
|
||||
}
|
||||
|
||||
// If the scheme doesn't match any of the built-in schemes, see if it
|
||||
// was registered by another package.
|
||||
if proxy_proxySchemes != nil {
|
||||
if f, ok := proxy_proxySchemes[u.Scheme]; ok {
|
||||
return f(u, forward)
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
|
||||
}
|
||||
|
||||
var (
|
||||
proxy_allProxyEnv = &proxy_envOnce{
|
||||
names: []string{"ALL_PROXY", "all_proxy"},
|
||||
}
|
||||
proxy_noProxyEnv = &proxy_envOnce{
|
||||
names: []string{"NO_PROXY", "no_proxy"},
|
||||
}
|
||||
)
|
||||
|
||||
// envOnce looks up an environment variable (optionally by multiple
|
||||
// names) once. It mitigates expensive lookups on some platforms
|
||||
// (e.g. Windows).
|
||||
// (Borrowed from net/http/transport.go)
|
||||
type proxy_envOnce struct {
|
||||
names []string
|
||||
once sync.Once
|
||||
val string
|
||||
}
|
||||
|
||||
func (e *proxy_envOnce) Get() string {
|
||||
e.once.Do(e.init)
|
||||
return e.val
|
||||
}
|
||||
|
||||
func (e *proxy_envOnce) init() {
|
||||
for _, n := range e.names {
|
||||
e.val = os.Getenv(n)
|
||||
if e.val != "" {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
|
||||
// with an optional username and password. See RFC 1928 and RFC 1929.
|
||||
func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
|
||||
s := &proxy_socks5{
|
||||
network: network,
|
||||
addr: addr,
|
||||
forward: forward,
|
||||
}
|
||||
if auth != nil {
|
||||
s.user = auth.User
|
||||
s.password = auth.Password
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
type proxy_socks5 struct {
|
||||
user, password string
|
||||
network, addr string
|
||||
forward proxy_Dialer
|
||||
}
|
||||
|
||||
const proxy_socks5Version = 5
|
||||
|
||||
const (
|
||||
proxy_socks5AuthNone = 0
|
||||
proxy_socks5AuthPassword = 2
|
||||
)
|
||||
|
||||
const proxy_socks5Connect = 1
|
||||
|
||||
const (
|
||||
proxy_socks5IP4 = 1
|
||||
proxy_socks5Domain = 3
|
||||
proxy_socks5IP6 = 4
|
||||
)
|
||||
|
||||
var proxy_socks5Errors = []string{
|
||||
"",
|
||||
"general failure",
|
||||
"connection forbidden",
|
||||
"network unreachable",
|
||||
"host unreachable",
|
||||
"connection refused",
|
||||
"TTL expired",
|
||||
"command not supported",
|
||||
"address type not supported",
|
||||
}
|
||||
|
||||
// Dial connects to the address addr on the given network via the SOCKS5 proxy.
|
||||
func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
|
||||
switch network {
|
||||
case "tcp", "tcp6", "tcp4":
|
||||
default:
|
||||
return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
|
||||
}
|
||||
|
||||
conn, err := s.forward.Dial(s.network, s.addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := s.connect(conn, addr); err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// connect takes an existing connection to a socks5 proxy server,
|
||||
// and commands the server to extend that connection to target,
|
||||
// which must be a canonical address with a host and port.
|
||||
func (s *proxy_socks5) connect(conn net.Conn, target string) error {
|
||||
host, portStr, err := net.SplitHostPort(target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
port, err := strconv.Atoi(portStr)
|
||||
if err != nil {
|
||||
return errors.New("proxy: failed to parse port number: " + portStr)
|
||||
}
|
||||
if port < 1 || port > 0xffff {
|
||||
return errors.New("proxy: port number out of range: " + portStr)
|
||||
}
|
||||
|
||||
// the size here is just an estimate
|
||||
buf := make([]byte, 0, 6+len(host))
|
||||
|
||||
buf = append(buf, proxy_socks5Version)
|
||||
if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
|
||||
buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
|
||||
} else {
|
||||
buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
|
||||
}
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
if buf[0] != 5 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
|
||||
}
|
||||
if buf[1] == 0xff {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
|
||||
}
|
||||
|
||||
// See RFC 1929
|
||||
if buf[1] == proxy_socks5AuthPassword {
|
||||
buf = buf[:0]
|
||||
buf = append(buf, 1 /* password protocol version */)
|
||||
buf = append(buf, uint8(len(s.user)))
|
||||
buf = append(buf, s.user...)
|
||||
buf = append(buf, uint8(len(s.password)))
|
||||
buf = append(buf, s.password...)
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if buf[1] != 0 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
|
||||
}
|
||||
}
|
||||
|
||||
buf = buf[:0]
|
||||
buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)
|
||||
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
if ip4 := ip.To4(); ip4 != nil {
|
||||
buf = append(buf, proxy_socks5IP4)
|
||||
ip = ip4
|
||||
} else {
|
||||
buf = append(buf, proxy_socks5IP6)
|
||||
}
|
||||
buf = append(buf, ip...)
|
||||
} else {
|
||||
if len(host) > 255 {
|
||||
return errors.New("proxy: destination host name too long: " + host)
|
||||
}
|
||||
buf = append(buf, proxy_socks5Domain)
|
||||
buf = append(buf, byte(len(host)))
|
||||
buf = append(buf, host...)
|
||||
}
|
||||
buf = append(buf, byte(port>>8), byte(port))
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:4]); err != nil {
|
||||
return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
failure := "unknown error"
|
||||
if int(buf[1]) < len(proxy_socks5Errors) {
|
||||
failure = proxy_socks5Errors[buf[1]]
|
||||
}
|
||||
|
||||
if len(failure) > 0 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
|
||||
}
|
||||
|
||||
bytesToDiscard := 0
|
||||
switch buf[3] {
|
||||
case proxy_socks5IP4:
|
||||
bytesToDiscard = net.IPv4len
|
||||
case proxy_socks5IP6:
|
||||
bytesToDiscard = net.IPv6len
|
||||
case proxy_socks5Domain:
|
||||
_, err := io.ReadFull(conn, buf[:1])
|
||||
if err != nil {
|
||||
return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
bytesToDiscard = int(buf[0])
|
||||
default:
|
||||
return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
|
||||
}
|
||||
|
||||
if cap(buf) < bytesToDiscard {
|
||||
buf = make([]byte, bytesToDiscard)
|
||||
} else {
|
||||
buf = buf[:bytesToDiscard]
|
||||
}
|
||||
if _, err := io.ReadFull(conn, buf); err != nil {
|
||||
return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
// Also need to discard the port number
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
4 vendor/github.com/mattn/go-isatty/isatty_bsd.go generated vendored
@@ -1,5 +1,5 @@
-//go:build (darwin || freebsd || openbsd || netbsd || dragonfly) && !appengine
-// +build darwin freebsd openbsd netbsd dragonfly
+//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine
+// +build darwin freebsd openbsd netbsd dragonfly hurd
 // +build !appengine
 
 package isatty
29 vendor/github.com/mxk/go-flowrate/LICENSE generated vendored Normal file
@@ -0,0 +1,29 @@
Copyright (c) 2014 The Go-FlowRate Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.

   * Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.

   * Neither the name of the go-flowrate project nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
267 vendor/github.com/mxk/go-flowrate/flowrate/flowrate.go generated vendored Normal file
@@ -0,0 +1,267 @@
|
||||
//
|
||||
// Written by Maxim Khitrov (November 2012)
|
||||
//
|
||||
|
||||
// Package flowrate provides the tools for monitoring and limiting the flow rate
|
||||
// of an arbitrary data stream.
|
||||
package flowrate
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Monitor monitors and limits the transfer rate of a data stream.
|
||||
type Monitor struct {
|
||||
mu sync.Mutex // Mutex guarding access to all internal fields
|
||||
active bool // Flag indicating an active transfer
|
||||
start time.Duration // Transfer start time (clock() value)
|
||||
bytes int64 // Total number of bytes transferred
|
||||
samples int64 // Total number of samples taken
|
||||
|
||||
rSample float64 // Most recent transfer rate sample (bytes per second)
|
||||
rEMA float64 // Exponential moving average of rSample
|
||||
rPeak float64 // Peak transfer rate (max of all rSamples)
|
||||
rWindow float64 // rEMA window (seconds)
|
||||
|
||||
sBytes int64 // Number of bytes transferred since sLast
|
||||
sLast time.Duration // Most recent sample time (stop time when inactive)
|
||||
sRate time.Duration // Sampling rate
|
||||
|
||||
tBytes int64 // Number of bytes expected in the current transfer
|
||||
tLast time.Duration // Time of the most recent transfer of at least 1 byte
|
||||
}
|
||||
|
||||
// New creates a new flow control monitor. Instantaneous transfer rate is
|
||||
// measured and updated for each sampleRate interval. windowSize determines the
|
||||
// weight of each sample in the exponential moving average (EMA) calculation.
|
||||
// The exact formulas are:
|
||||
//
|
||||
// sampleTime = currentTime - prevSampleTime
|
||||
// sampleRate = byteCount / sampleTime
|
||||
// weight = 1 - exp(-sampleTime/windowSize)
|
||||
// newRate = weight*sampleRate + (1-weight)*oldRate
|
||||
//
|
||||
// The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s,
|
||||
// respectively.
|
||||
func New(sampleRate, windowSize time.Duration) *Monitor {
|
||||
if sampleRate = clockRound(sampleRate); sampleRate <= 0 {
|
||||
sampleRate = 5 * clockRate
|
||||
}
|
||||
if windowSize <= 0 {
|
||||
windowSize = 1 * time.Second
|
||||
}
|
||||
now := clock()
|
||||
return &Monitor{
|
||||
active: true,
|
||||
start: now,
|
||||
rWindow: windowSize.Seconds(),
|
||||
sLast: now,
|
||||
sRate: sampleRate,
|
||||
tLast: now,
|
||||
}
|
||||
}
|
||||
|
||||
// Update records the transfer of n bytes and returns n. It should be called
|
||||
// after each Read/Write operation, even if n is 0.
|
||||
func (m *Monitor) Update(n int) int {
|
||||
m.mu.Lock()
|
||||
m.update(n)
|
||||
m.mu.Unlock()
|
||||
return n
|
||||
}
|
||||
|
||||
// IO is a convenience method intended to wrap io.Reader and io.Writer method
|
||||
// execution. It calls m.Update(n) and then returns (n, err) unmodified.
|
||||
func (m *Monitor) IO(n int, err error) (int, error) {
|
||||
return m.Update(n), err
|
||||
}
|
||||
|
||||
// Done marks the transfer as finished and prevents any further updates or
|
||||
// limiting. Instantaneous and current transfer rates drop to 0. Update, IO, and
|
||||
// Limit methods become NOOPs. It returns the total number of bytes transferred.
|
||||
func (m *Monitor) Done() int64 {
|
||||
m.mu.Lock()
|
||||
if now := m.update(0); m.sBytes > 0 {
|
||||
m.reset(now)
|
||||
}
|
||||
m.active = false
|
||||
m.tLast = 0
|
||||
n := m.bytes
|
||||
m.mu.Unlock()
|
||||
return n
|
||||
}
|
||||
|
||||
// timeRemLimit is the maximum Status.TimeRem value.
|
||||
const timeRemLimit = 999*time.Hour + 59*time.Minute + 59*time.Second
|
||||
|
||||
// Status represents the current Monitor status. All transfer rates are in bytes
|
||||
// per second rounded to the nearest byte.
|
||||
type Status struct {
|
||||
Active bool // Flag indicating an active transfer
|
||||
Start time.Time // Transfer start time
|
||||
Duration time.Duration // Time period covered by the statistics
|
||||
Idle time.Duration // Time since the last transfer of at least 1 byte
|
||||
Bytes int64 // Total number of bytes transferred
|
||||
Samples int64 // Total number of samples taken
|
||||
InstRate int64 // Instantaneous transfer rate
|
||||
CurRate int64 // Current transfer rate (EMA of InstRate)
|
||||
AvgRate int64 // Average transfer rate (Bytes / Duration)
|
||||
PeakRate int64 // Maximum instantaneous transfer rate
|
||||
BytesRem int64 // Number of bytes remaining in the transfer
|
||||
TimeRem time.Duration // Estimated time to completion
|
||||
Progress Percent // Overall transfer progress
|
||||
}
|
||||
|
||||
// Status returns current transfer status information. The returned value
|
||||
// becomes static after a call to Done.
|
||||
func (m *Monitor) Status() Status {
|
||||
m.mu.Lock()
|
||||
now := m.update(0)
|
||||
s := Status{
|
||||
Active: m.active,
|
||||
Start: clockToTime(m.start),
|
||||
Duration: m.sLast - m.start,
|
||||
Idle: now - m.tLast,
|
||||
Bytes: m.bytes,
|
||||
Samples: m.samples,
|
||||
PeakRate: round(m.rPeak),
|
||||
BytesRem: m.tBytes - m.bytes,
|
||||
Progress: percentOf(float64(m.bytes), float64(m.tBytes)),
|
||||
}
|
||||
if s.BytesRem < 0 {
|
||||
s.BytesRem = 0
|
||||
}
|
||||
if s.Duration > 0 {
|
||||
rAvg := float64(s.Bytes) / s.Duration.Seconds()
|
||||
s.AvgRate = round(rAvg)
|
||||
if s.Active {
|
||||
s.InstRate = round(m.rSample)
|
||||
s.CurRate = round(m.rEMA)
|
||||
if s.BytesRem > 0 {
|
||||
if tRate := 0.8*m.rEMA + 0.2*rAvg; tRate > 0 {
|
||||
ns := float64(s.BytesRem) / tRate * 1e9
|
||||
if ns > float64(timeRemLimit) {
|
||||
ns = float64(timeRemLimit)
|
||||
}
|
||||
s.TimeRem = clockRound(time.Duration(ns))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
m.mu.Unlock()
|
||||
return s
|
||||
}
|
||||
|
||||
// Limit restricts the instantaneous (per-sample) data flow to rate bytes per
|
||||
// second. It returns the maximum number of bytes (0 <= n <= want) that may be
|
||||
// transferred immediately without exceeding the limit. If block == true, the
|
||||
// call blocks until n > 0. want is returned unmodified if want < 1, rate < 1,
|
||||
// or the transfer is inactive (after a call to Done).
|
||||
//
|
||||
// At least one byte is always allowed to be transferred in any given sampling
|
||||
// period. Thus, if the sampling rate is 100ms, the lowest achievable flow rate
|
||||
// is 10 bytes per second.
|
||||
//
|
||||
// For usage examples, see the implementation of Reader and Writer in io.go.
|
||||
func (m *Monitor) Limit(want int, rate int64, block bool) (n int) {
|
||||
if want < 1 || rate < 1 {
|
||||
return want
|
||||
}
|
||||
m.mu.Lock()
|
||||
|
||||
// Determine the maximum number of bytes that can be sent in one sample
|
||||
limit := round(float64(rate) * m.sRate.Seconds())
|
||||
if limit <= 0 {
|
||||
limit = 1
|
||||
}
|
||||
|
||||
// If block == true, wait until m.sBytes < limit
|
||||
if now := m.update(0); block {
|
||||
for m.sBytes >= limit && m.active {
|
||||
now = m.waitNextSample(now)
|
||||
}
|
||||
}
|
||||
|
||||
// Make limit <= want (unlimited if the transfer is no longer active)
|
||||
if limit -= m.sBytes; limit > int64(want) || !m.active {
|
||||
limit = int64(want)
|
||||
}
|
||||
m.mu.Unlock()
|
||||
|
||||
if limit < 0 {
|
||||
limit = 0
|
||||
}
|
||||
return int(limit)
|
||||
}
|
||||
|
||||
// SetTransferSize specifies the total size of the data transfer, which allows
|
||||
// the Monitor to calculate the overall progress and time to completion.
|
||||
func (m *Monitor) SetTransferSize(bytes int64) {
|
||||
if bytes < 0 {
|
||||
bytes = 0
|
||||
}
|
||||
m.mu.Lock()
|
||||
m.tBytes = bytes
|
||||
m.mu.Unlock()
|
||||
}
|
||||
|
||||
// update accumulates the transferred byte count for the current sample until
|
||||
// clock() - m.sLast >= m.sRate. The monitor status is updated once the current
|
||||
// sample is done.
|
||||
func (m *Monitor) update(n int) (now time.Duration) {
|
||||
if !m.active {
|
||||
return
|
||||
}
|
||||
if now = clock(); n > 0 {
|
||||
m.tLast = now
|
||||
}
|
||||
m.sBytes += int64(n)
|
||||
if sTime := now - m.sLast; sTime >= m.sRate {
|
||||
t := sTime.Seconds()
|
||||
if m.rSample = float64(m.sBytes) / t; m.rSample > m.rPeak {
|
||||
m.rPeak = m.rSample
|
||||
}
|
||||
|
||||
// Exponential moving average using a method similar to *nix load
|
||||
// average calculation. Longer sampling periods carry greater weight.
|
||||
if m.samples > 0 {
|
||||
w := math.Exp(-t / m.rWindow)
|
||||
m.rEMA = m.rSample + w*(m.rEMA-m.rSample)
|
||||
} else {
|
||||
m.rEMA = m.rSample
|
||||
}
|
||||
m.reset(now)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// reset clears the current sample state in preparation for the next sample.
|
||||
func (m *Monitor) reset(sampleTime time.Duration) {
|
||||
m.bytes += m.sBytes
|
||||
m.samples++
|
||||
m.sBytes = 0
|
||||
m.sLast = sampleTime
|
||||
}
|
||||
|
||||
// waitNextSample sleeps for the remainder of the current sample. The lock is
|
||||
// released and reacquired during the actual sleep period, so it's possible for
|
||||
// the transfer to be inactive when this method returns.
|
||||
func (m *Monitor) waitNextSample(now time.Duration) time.Duration {
|
||||
const minWait = 5 * time.Millisecond
|
||||
current := m.sLast
|
||||
|
||||
// sleep until the last sample time changes (ideally, just one iteration)
|
||||
for m.sLast == current && m.active {
|
||||
d := current + m.sRate - now
|
||||
m.mu.Unlock()
|
||||
if d < minWait {
|
||||
d = minWait
|
||||
}
|
||||
time.Sleep(d)
|
||||
m.mu.Lock()
|
||||
now = m.update(0)
|
||||
}
|
||||
return now
|
||||
}
|
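flowrate.go above exposes the Monitor type (New, Update, SetTransferSize, Status, Done, Limit). The following is a small, hypothetical sketch, not part of the vendored change, of feeding a manual copy loop through a Monitor and then reading back its Status; all calls and Status fields used here appear in the file above.

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
	"time"

	"github.com/mxk/go-flowrate/flowrate"
)

func main() {
	src := strings.NewReader(strings.Repeat("x", 1<<20))
	dst := &bytes.Buffer{}

	// 100ms sampling, 1s EMA window; tell the monitor how much we expect.
	m := flowrate.New(100*time.Millisecond, time.Second)
	m.SetTransferSize(1 << 20)

	buf := make([]byte, 32*1024)
	for {
		n, err := src.Read(buf)
		if n > 0 {
			dst.Write(buf[:n])
			m.Update(n) // record the bytes moved in this iteration
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
	}
	m.Done()

	s := m.Status()
	fmt.Printf("copied %d bytes, avg %d B/s, progress %s\n", s.Bytes, s.AvgRate, s.Progress)
}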
133 vendor/github.com/mxk/go-flowrate/flowrate/io.go generated vendored Normal file
@@ -0,0 +1,133 @@
//
// Written by Maxim Khitrov (November 2012)
//

package flowrate

import (
	"errors"
	"io"
)

// ErrLimit is returned by the Writer when a non-blocking write is short due to
// the transfer rate limit.
var ErrLimit = errors.New("flowrate: flow rate limit exceeded")

// Limiter is implemented by the Reader and Writer to provide a consistent
// interface for monitoring and controlling data transfer.
type Limiter interface {
	Done() int64
	Status() Status
	SetTransferSize(bytes int64)
	SetLimit(new int64) (old int64)
	SetBlocking(new bool) (old bool)
}

// Reader implements io.ReadCloser with a restriction on the rate of data
// transfer.
type Reader struct {
	io.Reader // Data source
	*Monitor  // Flow control monitor

	limit int64 // Rate limit in bytes per second (unlimited when <= 0)
	block bool  // What to do when no new bytes can be read due to the limit
}

// NewReader restricts all Read operations on r to limit bytes per second.
func NewReader(r io.Reader, limit int64) *Reader {
	return &Reader{r, New(0, 0), limit, true}
}

// Read reads up to len(p) bytes into p without exceeding the current transfer
// rate limit. It returns (0, nil) immediately if r is non-blocking and no new
// bytes can be read at this time.
func (r *Reader) Read(p []byte) (n int, err error) {
	p = p[:r.Limit(len(p), r.limit, r.block)]
	if len(p) > 0 {
		n, err = r.IO(r.Reader.Read(p))
	}
	return
}

// SetLimit changes the transfer rate limit to new bytes per second and returns
// the previous setting.
func (r *Reader) SetLimit(new int64) (old int64) {
	old, r.limit = r.limit, new
	return
}

// SetBlocking changes the blocking behavior and returns the previous setting. A
// Read call on a non-blocking reader returns immediately if no additional bytes
// may be read at this time due to the rate limit.
func (r *Reader) SetBlocking(new bool) (old bool) {
	old, r.block = r.block, new
	return
}

// Close closes the underlying reader if it implements the io.Closer interface.
func (r *Reader) Close() error {
	defer r.Done()
	if c, ok := r.Reader.(io.Closer); ok {
		return c.Close()
	}
	return nil
}

// Writer implements io.WriteCloser with a restriction on the rate of data
// transfer.
type Writer struct {
	io.Writer // Data destination
	*Monitor  // Flow control monitor

	limit int64 // Rate limit in bytes per second (unlimited when <= 0)
	block bool  // What to do when no new bytes can be written due to the limit
}

// NewWriter restricts all Write operations on w to limit bytes per second. The
// transfer rate and the default blocking behavior (true) can be changed
// directly on the returned *Writer.
func NewWriter(w io.Writer, limit int64) *Writer {
	return &Writer{w, New(0, 0), limit, true}
}

// Write writes len(p) bytes from p to the underlying data stream without
// exceeding the current transfer rate limit. It returns (n, ErrLimit) if w is
// non-blocking and no additional bytes can be written at this time.
func (w *Writer) Write(p []byte) (n int, err error) {
	var c int
	for len(p) > 0 && err == nil {
		s := p[:w.Limit(len(p), w.limit, w.block)]
		if len(s) > 0 {
			c, err = w.IO(w.Writer.Write(s))
		} else {
			return n, ErrLimit
		}
		p = p[c:]
		n += c
	}
	return
}

// SetLimit changes the transfer rate limit to new bytes per second and returns
// the previous setting.
func (w *Writer) SetLimit(new int64) (old int64) {
	old, w.limit = w.limit, new
	return
}

// SetBlocking changes the blocking behavior and returns the previous setting. A
// Write call on a non-blocking writer returns as soon as no additional bytes
// may be written at this time due to the rate limit.
func (w *Writer) SetBlocking(new bool) (old bool) {
	old, w.block = w.block, new
	return
}

// Close closes the underlying writer if it implements the io.Closer interface.
func (w *Writer) Close() error {
	defer w.Done()
	if c, ok := w.Writer.(io.Closer); ok {
		return c.Close()
	}
	return nil
}
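io.go wires the Monitor into io.Reader/io.Writer wrappers. A brief illustrative sketch (again not part of the vendored change) of limiting an io.Reader with NewReader; NewReader, Close and the embedded Status come straight from the file above.

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/mxk/go-flowrate/flowrate"
)

func main() {
	src := strings.NewReader(strings.Repeat("y", 256*1024))

	// Limit reads to 128 KiB/s; the default blocking mode makes Read wait
	// instead of returning (0, nil) when the per-sample budget is spent,
	// so this copy takes roughly two seconds.
	r := flowrate.NewReader(src, 128*1024)
	defer r.Close()

	n, err := io.Copy(io.Discard, r)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes at ~%d B/s\n", n, r.Status().AvgRate)
}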
67 vendor/github.com/mxk/go-flowrate/flowrate/util.go generated vendored Normal file
@@ -0,0 +1,67 @@
//
// Written by Maxim Khitrov (November 2012)
//

package flowrate

import (
	"math"
	"strconv"
	"time"
)

// clockRate is the resolution and precision of clock().
const clockRate = 20 * time.Millisecond

// czero is the process start time rounded down to the nearest clockRate
// increment.
var czero = time.Duration(time.Now().UnixNano()) / clockRate * clockRate

// clock returns a low resolution timestamp relative to the process start time.
func clock() time.Duration {
	return time.Duration(time.Now().UnixNano())/clockRate*clockRate - czero
}

// clockToTime converts a clock() timestamp to an absolute time.Time value.
func clockToTime(c time.Duration) time.Time {
	return time.Unix(0, int64(czero+c))
}

// clockRound returns d rounded to the nearest clockRate increment.
func clockRound(d time.Duration) time.Duration {
	return (d + clockRate>>1) / clockRate * clockRate
}

// round returns x rounded to the nearest int64 (non-negative values only).
func round(x float64) int64 {
	if _, frac := math.Modf(x); frac >= 0.5 {
		return int64(math.Ceil(x))
	}
	return int64(math.Floor(x))
}

// Percent represents a percentage in increments of 1/1000th of a percent.
type Percent uint32

// percentOf calculates what percent of the total is x.
func percentOf(x, total float64) Percent {
	if x < 0 || total <= 0 {
		return 0
	} else if p := round(x / total * 1e5); p <= math.MaxUint32 {
		return Percent(p)
	}
	return Percent(math.MaxUint32)
}

func (p Percent) Float() float64 {
	return float64(p) * 1e-3
}

func (p Percent) String() string {
	var buf [12]byte
	b := strconv.AppendUint(buf[:0], uint64(p)/1000, 10)
	n := len(b)
	b = strconv.AppendUint(b, 1000+uint64(p)%1000, 10)
	b[n] = '.'
	return string(append(b, '%'))
}
2 vendor/k8s.io/api/admissionregistration/v1alpha1/types.go generated vendored
@@ -226,7 +226,7 @@ type ValidatingAdmissionPolicySpec struct {
 	// +listType=map
 	// +listMapKey=name
 	// +optional
-	Variables []Variable `json:"variables" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"`
+	Variables []Variable `json:"variables,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"`
 }
 
 type MatchCondition v1.MatchCondition
2 vendor/k8s.io/api/admissionregistration/v1beta1/types.go generated vendored
@@ -242,7 +242,7 @@ type ValidatingAdmissionPolicySpec struct {
 	// +listType=map
 	// +listMapKey=name
 	// +optional
-	Variables []Variable `json:"variables" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"`
+	Variables []Variable `json:"variables,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"`
 }
 
 // ParamKind is a tuple of Group Kind and Version.
26 vendor/k8s.io/api/batch/v1/generated.proto generated vendored
@@ -229,8 +229,8 @@ message JobSpec {
|
||||
// batch.kubernetes.io/job-index-failure-count annotation. It can only
|
||||
// be set when Job's completionMode=Indexed, and the Pod's restart
|
||||
// policy is Never. The field is immutable.
|
||||
// This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex`
|
||||
// feature gate is enabled (disabled by default).
|
||||
// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
|
||||
// feature gate is enabled (enabled by default).
|
||||
// +optional
|
||||
optional int32 backoffLimitPerIndex = 12;
|
||||
|
||||
@ -242,8 +242,8 @@ message JobSpec {
|
||||
// It can only be specified when backoffLimitPerIndex is set.
|
||||
// It can be null or up to completions. It is required and must be
|
||||
// less than or equal to 10^4 when is completions greater than 10^5.
|
||||
// This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex`
|
||||
// feature gate is enabled (disabled by default).
|
||||
// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
|
||||
// feature gate is enabled (enabled by default).
|
||||
// +optional
|
||||
optional int32 maxFailedIndexes = 13;
|
||||
|
||||
@ -326,7 +326,8 @@ message JobSpec {
|
||||
//
|
||||
// When using podFailurePolicy, Failed is the the only allowed value.
|
||||
// TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.
|
||||
// This is an alpha field. Enable JobPodReplacementPolicy to be able to use this field.
|
||||
// This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle.
|
||||
// This is on by default.
|
||||
// +optional
|
||||
optional string podReplacementPolicy = 14;
|
||||
}
|
||||
@ -375,8 +376,8 @@ message JobStatus {
|
||||
// The number of pods which are terminating (in phase Pending or Running
|
||||
// and have a deletionTimestamp).
|
||||
//
|
||||
// This field is alpha-level. The job controller populates the field when
|
||||
// the feature gate JobPodReplacementPolicy is enabled (disabled by default).
|
||||
// This field is beta-level. The job controller populates the field when
|
||||
// the feature gate JobPodReplacementPolicy is enabled (enabled by default).
|
||||
// +optional
|
||||
optional int32 terminating = 11;
|
||||
|
||||
@ -398,8 +399,8 @@ message JobStatus {
|
||||
// last element of the series, separated by a hyphen.
|
||||
// For example, if the failed indexes are 1, 3, 4, 5 and 7, they are
|
||||
// represented as "1,3-5,7".
|
||||
// This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex`
|
||||
// feature gate is enabled (disabled by default).
|
||||
// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
|
||||
// feature gate is enabled (enabled by default).
|
||||
// +optional
|
||||
optional string failedIndexes = 10;
|
||||
|
||||
@ -421,9 +422,6 @@ message JobStatus {
|
||||
optional UncountedTerminatedPods uncountedTerminatedPods = 8;
|
||||
|
||||
// The number of pods which have a Ready condition.
|
||||
//
|
||||
// This field is beta-level. The job controller populates the field when
|
||||
// the feature gate JobReadyPods is enabled (enabled by default).
|
||||
// +optional
|
||||
optional int32 ready = 9;
|
||||
}
|
||||
@ -512,8 +510,8 @@ message PodFailurePolicyRule {
|
||||
// running pods are terminated.
|
||||
// - FailIndex: indicates that the pod's index is marked as Failed and will
|
||||
// not be restarted.
|
||||
// This value is alpha-level. It can be used when the
|
||||
// `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).
|
||||
// This value is beta-level. It can be used when the
|
||||
// `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).
|
||||
// - Ignore: indicates that the counter towards the .backoffLimit is not
|
||||
// incremented and a replacement pod is created.
|
||||
// - Count: indicates that the pod is handled in the default way - the
|
||||
|
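The Go mirror of these JobStatus fields appears in the types.go hunks below; terminating, failedIndexes and ready are pointers that stay nil when the corresponding feature gate is off, so consumers should nil-check them. A hedged sketch (the summarize helper and the values are illustrative only, not ceph-csi code):

package main

import (
    "fmt"

    batchv1 "k8s.io/api/batch/v1"
)

// summarize prints the beta-level counters without dereferencing nil pointers.
func summarize(status batchv1.JobStatus) string {
    terminating, ready := int32(0), int32(0)
    if status.Terminating != nil {
        terminating = *status.Terminating
    }
    if status.Ready != nil {
        ready = *status.Ready
    }
    failed := "<none>"
    if status.FailedIndexes != nil {
        failed = *status.FailedIndexes
    }
    return fmt.Sprintf("active=%d ready=%d terminating=%d failedIndexes=%s",
        status.Active, ready, terminating, failed)
}

func main() {
    fmt.Println(summarize(batchv1.JobStatus{Active: 2}))
}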
46
vendor/k8s.io/api/batch/v1/types.go
generated vendored
@@ -124,6 +124,7 @@ const (
// This is an action which might be taken on a pod failure - mark the
// Job's index as failed to avoid restarts within this index. This action
// can only be used when backoffLimitPerIndex is set.
// This value is beta-level.
PodFailurePolicyActionFailIndex PodFailurePolicyAction = "FailIndex"

// This is an action which might be taken on a pod failure - the counter towards

@@ -218,8 +219,8 @@ type PodFailurePolicyRule struct {
// running pods are terminated.
// - FailIndex: indicates that the pod's index is marked as Failed and will
// not be restarted.
// This value is alpha-level. It can be used when the
// `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).
// This value is beta-level. It can be used when the
// `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).
// - Ignore: indicates that the counter towards the .backoffLimit is not
// incremented and a replacement pod is created.
// - Count: indicates that the pod is handled in the default way - the

@@ -303,8 +304,8 @@ type JobSpec struct {
// batch.kubernetes.io/job-index-failure-count annotation. It can only
// be set when Job's completionMode=Indexed, and the Pod's restart
// policy is Never. The field is immutable.
// This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex`
// feature gate is enabled (disabled by default).
// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
// feature gate is enabled (enabled by default).
// +optional
BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty" protobuf:"varint,12,opt,name=backoffLimitPerIndex"`

@@ -316,8 +317,8 @@ type JobSpec struct {
// It can only be specified when backoffLimitPerIndex is set.
// It can be null or up to completions. It is required and must be
// less than or equal to 10^4 when is completions greater than 10^5.
// This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex`
// feature gate is enabled (disabled by default).
// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
// feature gate is enabled (enabled by default).
// +optional
MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty" protobuf:"varint,13,opt,name=maxFailedIndexes"`

@@ -405,7 +406,8 @@ type JobSpec struct {
//
// When using podFailurePolicy, Failed is the the only allowed value.
// TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.
// This is an alpha field. Enable JobPodReplacementPolicy to be able to use this field.
// This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle.
// This is on by default.
// +optional
PodReplacementPolicy *PodReplacementPolicy `json:"podReplacementPolicy,omitempty" protobuf:"bytes,14,opt,name=podReplacementPolicy,casttype=podReplacementPolicy"`
}

@@ -454,8 +456,8 @@ type JobStatus struct {
// The number of pods which are terminating (in phase Pending or Running
// and have a deletionTimestamp).
//
// This field is alpha-level. The job controller populates the field when
// the feature gate JobPodReplacementPolicy is enabled (disabled by default).
// This field is beta-level. The job controller populates the field when
// the feature gate JobPodReplacementPolicy is enabled (enabled by default).
// +optional
Terminating *int32 `json:"terminating,omitempty" protobuf:"varint,11,opt,name=terminating"`

@@ -477,8 +479,8 @@ type JobStatus struct {
// last element of the series, separated by a hyphen.
// For example, if the failed indexes are 1, 3, 4, 5 and 7, they are
// represented as "1,3-5,7".
// This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex`
// feature gate is enabled (disabled by default).
// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
// feature gate is enabled (enabled by default).
// +optional
FailedIndexes *string `json:"failedIndexes,omitempty" protobuf:"bytes,10,opt,name=failedIndexes"`

@@ -500,9 +502,6 @@ type JobStatus struct {
UncountedTerminatedPods *UncountedTerminatedPods `json:"uncountedTerminatedPods,omitempty" protobuf:"bytes,8,opt,name=uncountedTerminatedPods"`

// The number of pods which have a Ready condition.
//
// This field is beta-level. The job controller populates the field when
// the feature gate JobReadyPods is enabled (enabled by default).
// +optional
Ready *int32 `json:"ready,omitempty" protobuf:"varint,9,opt,name=ready"`
}

@@ -535,6 +534,25 @@ const (
JobFailureTarget JobConditionType = "FailureTarget"
)

const (
// JobReasonPodFailurePolicy reason indicates a job failure condition is added due to
// a failed pod matching a pod failure policy rule
// https://kep.k8s.io/3329
// This is currently a beta field.
JobReasonPodFailurePolicy string = "PodFailurePolicy"
// JobReasonBackOffLimitExceeded reason indicates that pods within a job have failed a number of
// times higher than backOffLimit times.
JobReasonBackoffLimitExceeded string = "BackoffLimitExceeded"
// JobReasponDeadlineExceeded means job duration is past ActiveDeadline
JobReasonDeadlineExceeded string = "DeadlineExceeded"
// JobReasonMaxFailedIndexesExceeded indicates that an indexed of a job failed
// This const is used in beta-level feature: https://kep.k8s.io/3850.
JobReasonMaxFailedIndexesExceeded string = "MaxFailedIndexesExceeded"
// JobReasonFailedIndexes means Job has failed indexes.
// This const is used in beta-level feature: https://kep.k8s.io/3850.
JobReasonFailedIndexes string = "FailedIndexes"
)

// JobCondition describes current state of a job.
type JobCondition struct {
// Type of job condition, Complete or Failed.
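A sketch of populating the JobSpec fields promoted in this diff; it assumes a 1.29 cluster with the JobBackoffLimitPerIndex and JobPodReplacementPolicy gates at their default (enabled), and uses string casts for the enum values named in the field comments rather than relying on constant names not shown here:

package main

import (
    "fmt"

    batchv1 "k8s.io/api/batch/v1"
)

func main() {
    completionMode := batchv1.CompletionMode("Indexed") // backoffLimitPerIndex requires Indexed mode
    backoffPerIndex := int32(2)
    maxFailedIndexes := int32(5)
    replacement := batchv1.PodReplacementPolicy("Failed") // wait for full termination before replacing

    spec := batchv1.JobSpec{
        CompletionMode:       &completionMode,
        BackoffLimitPerIndex: &backoffPerIndex, // also requires pod restartPolicy=Never
        MaxFailedIndexes:     &maxFailedIndexes,
        PodReplacementPolicy: &replacement,
    }
    fmt.Printf("%+v\n", spec)
}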
14
vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
generated vendored
@@ -117,15 +117,15 @@ var map_JobSpec = map[string]string{
"activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.",
"podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default).",
"backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6",
"backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).",
"maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).",
"backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).",
"maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).",
"selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
"manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector",
"template": "Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are \"Never\" or \"OnFailure\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
"ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.",
"completionMode": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.",
"suspend": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.",
"podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an alpha field. Enable JobPodReplacementPolicy to be able to use this field.",
"podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default.",
}

func (JobSpec) SwaggerDoc() map[string]string {

@@ -140,11 +140,11 @@ var map_JobStatus = map[string]string{
"active": "The number of pending and running pods.",
"succeeded": "The number of pods which reached phase Succeeded.",
"failed": "The number of pods which reached phase Failed.",
"terminating": "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\n\nThis field is alpha-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (disabled by default).",
"terminating": "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\n\nThis field is beta-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (enabled by default).",
"completedIndexes": "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".",
"failedIndexes": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).",
"failedIndexes": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).",
"uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null.",
"ready": "The number of pods which have a Ready condition.\n\nThis field is beta-level. The job controller populates the field when the feature gate JobReadyPods is enabled (enabled by default).",
"ready": "The number of pods which have a Ready condition.",
}

func (JobStatus) SwaggerDoc() map[string]string {

@@ -193,7 +193,7 @@ func (PodFailurePolicyOnPodConditionsPattern) SwaggerDoc() map[string]string {

var map_PodFailurePolicyRule = map[string]string{
"": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule.",
"action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is alpha-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.",
"action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is beta-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.",
"onExitCodes": "Represents the requirement on the container exit codes.",
"onPodConditions": "Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. At most 20 elements are allowed.",
}
3895
vendor/k8s.io/api/core/v1/generated.pb.go
generated vendored
File diff suppressed because it is too large
181
vendor/k8s.io/api/core/v1/generated.proto
generated vendored
@@ -228,10 +228,8 @@ message CSIPersistentVolumeSource {
// nodeExpandSecretRef is a reference to the secret object containing
// sensitive information to pass to the CSI driver to complete the CSI
// NodeExpandVolume call.
// This is a beta field which is enabled default by CSINodeExpandSecret feature gate.
// This field is optional, may be omitted if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +featureGate=CSINodeExpandSecret
// +optional
optional SecretReference nodeExpandSecretRef = 10;
}

@@ -433,6 +431,40 @@ message ClientIPConfig {
optional int32 timeoutSeconds = 1;
}

// ClusterTrustBundleProjection describes how to select a set of
// ClusterTrustBundle objects and project their contents into the pod
// filesystem.
message ClusterTrustBundleProjection {
// Select a single ClusterTrustBundle by object name. Mutually-exclusive
// with signerName and labelSelector.
// +optional
optional string name = 1;

// Select all ClusterTrustBundles that match this signer name.
// Mutually-exclusive with name. The contents of all selected
// ClusterTrustBundles will be unified and deduplicated.
// +optional
optional string signerName = 2;

// Select all ClusterTrustBundles that match this label selector. Only has
// effect if signerName is set. Mutually-exclusive with name. If unset,
// interpreted as "match nothing". If set but empty, interpreted as "match
// everything".
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 3;

// If true, don't block pod startup if the referenced ClusterTrustBundle(s)
// aren't available. If using name, then the named ClusterTrustBundle is
// allowed not to exist. If using signerName, then the combination of
// signerName and labelSelector is allowed to match zero
// ClusterTrustBundles.
// +optional
optional bool optional = 5;

// Relative path from the volume root to write the bundle.
optional string path = 4;
}

// Information about the condition of a component.
message ComponentCondition {
// Type of condition for a component.

@@ -1159,7 +1191,7 @@ message EndpointPort {
// RFC-6335 and https://www.iana.org/assignments/service-names).
//
// * Kubernetes-defined prefixed names:
// * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540
// * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
// * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
// * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
//

@@ -2088,6 +2120,11 @@ message LifecycleHandler {
// lifecycle hooks will fail in runtime when tcp handler is specified.
// +optional
optional TCPSocketAction tcpSocket = 3;

// Sleep represents the duration that the container should sleep before being terminated.
// +featureGate=PodLifecycleSleepAction
// +optional
optional SleepAction sleep = 4;
}

// LimitRange sets resource usage limits for each kind of resource in a Namespace.

@@ -2171,6 +2208,15 @@ message LoadBalancerIngress {
// +optional
optional string hostname = 2;

// IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified.
// Setting this to "VIP" indicates that traffic is delivered to the node with
// the destination set to the load-balancer's IP and port.
// Setting this to "Proxy" indicates that traffic is delivered to the node or pod with
// the destination set to the node's IP and node port or the pod's IP and port.
// Service implementations may use this information to adjust traffic routing.
// +optional
optional string ipMode = 3;

// Ports is a list of records of service ports
// If used, every port defined in the service should have an entry in it
// +listType=atomic

@@ -2211,6 +2257,24 @@ message LocalVolumeSource {
optional string fsType = 2;
}

// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation
message ModifyVolumeStatus {
// targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled
optional string targetVolumeAttributesClassName = 1;

// status is the status of the ControllerModifyVolume operation. It can be in any of following states:
// - Pending
// Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as
// the specified VolumeAttributesClass not existing.
// - InProgress
// InProgress indicates that the volume is being modified.
// - Infeasible
// Infeasible indicates that the request has been rejected as invalid by the CSI driver. To
// resolve the error, a valid VolumeAttributesClass needs to be specified.
// Note: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately.
optional string status = 2;
}

// Represents an NFS mount that lasts the lifetime of a pod.
// NFS volumes do not support ownership management or SELinux relabeling.
message NFSVolumeSource {

@@ -2816,7 +2880,7 @@ message PersistentVolumeClaimSpec {
// status field of the claim.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
// +optional
optional ResourceRequirements resources = 2;
optional VolumeResourceRequirements resources = 2;

// volumeName is the binding reference to the PersistentVolume backing this claim.
// +optional

@@ -2868,6 +2932,22 @@ message PersistentVolumeClaimSpec {
// (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
// +optional
optional TypedObjectReference dataSourceRef = 8;

// volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
// If specified, the CSI driver will create or update the volume with the attributes defined
// in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
// it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
// will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
// If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
// will be set by the persistentvolume controller if it exists.
// If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
// set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
// exists.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass
// (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
// +featureGate=VolumeAttributesClass
// +optional
optional string volumeAttributesClassName = 9;
}

// PersistentVolumeClaimStatus is the current status of a persistent volume claim.

@@ -2957,6 +3037,20 @@ message PersistentVolumeClaimStatus {
// +mapType=granular
// +optional
map<string, string> allocatedResourceStatuses = 7;

// currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
// When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim
// This is an alpha field and requires enabling VolumeAttributesClass feature.
// +featureGate=VolumeAttributesClass
// +optional
optional string currentVolumeAttributesClassName = 8;

// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
// When this is unset, there is no ModifyVolume operation being attempted.
// This is an alpha field and requires enabling VolumeAttributesClass feature.
// +featureGate=VolumeAttributesClass
// +optional
optional ModifyVolumeStatus modifyVolumeStatus = 9;
}

// PersistentVolumeClaimTemplate is used to produce

@@ -3161,6 +3255,17 @@ message PersistentVolumeSpec {
// This field influences the scheduling of pods that use this volume.
// +optional
optional VolumeNodeAffinity nodeAffinity = 9;

// Name of VolumeAttributesClass to which this persistent volume belongs. Empty value
// is not allowed. When this field is not set, it indicates that this volume does not belong to any
// VolumeAttributesClass. This field is mutable and can be changed by the CSI driver
// after a volume has been updated successfully to a new class.
// For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound
// PersistentVolumeClaims during the binding process.
// This is an alpha field and requires enabling VolumeAttributesClass feature.
// +featureGate=VolumeAttributesClass
// +optional
optional string volumeAttributesClassName = 10;
}

// PersistentVolumeStatus is the current status of a persistent volume.

@@ -3253,6 +3358,7 @@ message PodAffinity {
// a pod of the set of pods is running
message PodAffinityTerm {
// A label query over a set of resources, in this case pods.
// If it's null, this PodAffinityTerm matches with no Pods.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1;

@@ -3277,6 +3383,32 @@ message PodAffinityTerm {
// An empty selector ({}) matches all namespaces.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 4;

// MatchLabelKeys is a set of pod label keys to select which pods will
// be taken into consideration. The keys are used to lookup values from the
// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`
// to select the group of existing pods which pods will be taken into consideration
// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
// pod labels will be ignored. The default value is empty.
// The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
// Also, MatchLabelKeys cannot be set when LabelSelector isn't set.
// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
// +listType=atomic
// +optional
repeated string matchLabelKeys = 5;

// MismatchLabelKeys is a set of pod label keys to select which pods will
// be taken into consideration. The keys are used to lookup values from the
// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)`
// to select the group of existing pods which pods will be taken into consideration
// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
// pod labels will be ignored. The default value is empty.
// The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector.
// Also, MismatchLabelKeys cannot be set when LabelSelector isn't set.
// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
// +listType=atomic
// +optional
repeated string mismatchLabelKeys = 6;
}

// Pod anti affinity is a group of inter pod anti affinity scheduling rules.

@@ -5250,7 +5382,7 @@ message ServicePort {
// RFC-6335 and https://www.iana.org/assignments/service-names).
//
// * Kubernetes-defined prefixed names:
// * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540
// * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
// * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
// * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
//

@@ -5553,6 +5685,12 @@ message SessionAffinityConfig {
optional ClientIPConfig clientIP = 1;
}

// SleepAction describes a "sleep" action.
message SleepAction {
// Seconds is the number of seconds to sleep.
optional int64 seconds = 1;
}

// Represents a StorageOS persistent volume resource.
message StorageOSPersistentVolumeSource {
// volumeName is the human-readable name of the StorageOS volume. Volume

@@ -5960,6 +6098,39 @@ message VolumeProjection {
// serviceAccountToken is information about the serviceAccountToken data to project
// +optional
optional ServiceAccountTokenProjection serviceAccountToken = 4;

// ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
// of ClusterTrustBundle objects in an auto-updating file.
//
// Alpha, gated by the ClusterTrustBundleProjection feature gate.
//
// ClusterTrustBundle objects can either be selected by name, or by the
// combination of signer name and a label selector.
//
// Kubelet performs aggressive normalization of the PEM contents written
// into the pod filesystem. Esoteric PEM features such as inter-block
// comments and block headers are stripped. Certificates are deduplicated.
// The ordering of certificates within the file is arbitrary, and Kubelet
// may change the order over time.
//
// +featureGate=ClusterTrustBundleProjection
// +optional
optional ClusterTrustBundleProjection clusterTrustBundle = 5;
}

// VolumeResourceRequirements describes the storage resource requirements for a volume.
message VolumeResourceRequirements {
// Limits describes the maximum amount of compute resources allowed.
// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
// +optional
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> limits = 1;

// Requests describes the minimum amount of compute resources required.
// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
// otherwise to an implementation-defined value. Requests cannot exceed Limits.
// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
// +optional
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> requests = 2;
}

// Represents the source of a volume to mount.
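The ClusterTrustBundleProjection message above gets a matching Go type later in this commit. A sketch of wiring it into a projected volume source; the signer name and path are made up, and it assumes the alpha ClusterTrustBundleProjection feature gate is enabled:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
)

func main() {
    signer := "example.com/my-signer" // hypothetical signer name
    src := corev1.VolumeSource{
        Projected: &corev1.ProjectedVolumeSource{
            Sources: []corev1.VolumeProjection{{
                ClusterTrustBundle: &corev1.ClusterTrustBundleProjection{
                    SignerName: &signer,
                    Path:       "trust-bundle.pem", // relative path inside the projected volume
                },
            }},
        },
    }
    fmt.Printf("%+v\n", src)
}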
216
vendor/k8s.io/api/core/v1/types.go
generated
vendored
216
vendor/k8s.io/api/core/v1/types.go
generated
vendored
@ -363,6 +363,16 @@ type PersistentVolumeSpec struct {
|
||||
// This field influences the scheduling of pods that use this volume.
|
||||
// +optional
|
||||
NodeAffinity *VolumeNodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,9,opt,name=nodeAffinity"`
|
||||
// Name of VolumeAttributesClass to which this persistent volume belongs. Empty value
|
||||
// is not allowed. When this field is not set, it indicates that this volume does not belong to any
|
||||
// VolumeAttributesClass. This field is mutable and can be changed by the CSI driver
|
||||
// after a volume has been updated successfully to a new class.
|
||||
// For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound
|
||||
// PersistentVolumeClaims during the binding process.
|
||||
// This is an alpha field and requires enabling VolumeAttributesClass feature.
|
||||
// +featureGate=VolumeAttributesClass
|
||||
// +optional
|
||||
VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,10,opt,name=volumeAttributesClassName"`
|
||||
}
|
||||
|
||||
// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.
|
||||
@ -486,7 +496,7 @@ type PersistentVolumeClaimSpec struct {
|
||||
// status field of the claim.
|
||||
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
|
||||
// +optional
|
||||
Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"`
|
||||
Resources VolumeResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"`
|
||||
// volumeName is the binding reference to the PersistentVolume backing this claim.
|
||||
// +optional
|
||||
VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"`
|
||||
@ -533,6 +543,21 @@ type PersistentVolumeClaimSpec struct {
|
||||
// (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
|
||||
// +optional
|
||||
DataSourceRef *TypedObjectReference `json:"dataSourceRef,omitempty" protobuf:"bytes,8,opt,name=dataSourceRef"`
|
||||
// volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
|
||||
// If specified, the CSI driver will create or update the volume with the attributes defined
|
||||
// in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
|
||||
// it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
|
||||
// will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
|
||||
// If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
|
||||
// will be set by the persistentvolume controller if it exists.
|
||||
// If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
|
||||
// set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource
|
||||
// exists.
|
||||
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass
|
||||
// (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
|
||||
// +featureGate=VolumeAttributesClass
|
||||
// +optional
|
||||
VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"`
|
||||
}
|
||||
|
||||
type TypedObjectReference struct {
|
||||
@ -561,6 +586,11 @@ const (
|
||||
PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
|
||||
// PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node
|
||||
PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
|
||||
|
||||
// Applying the target VolumeAttributesClass encountered an error
|
||||
PersistentVolumeClaimVolumeModifyVolumeError PersistentVolumeClaimConditionType = "ModifyVolumeError"
|
||||
// Volume is being modified
|
||||
PersistentVolumeClaimVolumeModifyingVolume PersistentVolumeClaimConditionType = "ModifyingVolume"
|
||||
)
|
||||
|
||||
// +enum
|
||||
@ -587,6 +617,38 @@ const (
|
||||
PersistentVolumeClaimNodeResizeFailed ClaimResourceStatus = "NodeResizeFailed"
|
||||
)
|
||||
|
||||
// +enum
|
||||
// New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately
|
||||
type PersistentVolumeClaimModifyVolumeStatus string
|
||||
|
||||
const (
|
||||
// Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as
|
||||
// the specified VolumeAttributesClass not existing
|
||||
PersistentVolumeClaimModifyVolumePending PersistentVolumeClaimModifyVolumeStatus = "Pending"
|
||||
// InProgress indicates that the volume is being modified
|
||||
PersistentVolumeClaimModifyVolumeInProgress PersistentVolumeClaimModifyVolumeStatus = "InProgress"
|
||||
// Infeasible indicates that the request has been rejected as invalid by the CSI driver. To
|
||||
// resolve the error, a valid VolumeAttributesClass needs to be specified
|
||||
PersistentVolumeClaimModifyVolumeInfeasible PersistentVolumeClaimModifyVolumeStatus = "Infeasible"
|
||||
)
|
||||
|
||||
// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation
|
||||
type ModifyVolumeStatus struct {
|
||||
// targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled
|
||||
TargetVolumeAttributesClassName string `json:"targetVolumeAttributesClassName,omitempty" protobuf:"bytes,1,opt,name=targetVolumeAttributesClassName"`
|
||||
// status is the status of the ControllerModifyVolume operation. It can be in any of following states:
|
||||
// - Pending
|
||||
// Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as
|
||||
// the specified VolumeAttributesClass not existing.
|
||||
// - InProgress
|
||||
// InProgress indicates that the volume is being modified.
|
||||
// - Infeasible
|
||||
// Infeasible indicates that the request has been rejected as invalid by the CSI driver. To
|
||||
// resolve the error, a valid VolumeAttributesClass needs to be specified.
|
||||
// Note: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately.
|
||||
Status PersistentVolumeClaimModifyVolumeStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=PersistentVolumeClaimModifyVolumeStatus"`
|
||||
}
|
||||
|
||||
// PersistentVolumeClaimCondition contains details about state of pvc
|
||||
type PersistentVolumeClaimCondition struct {
|
||||
Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"`
|
||||
@ -693,6 +755,18 @@ type PersistentVolumeClaimStatus struct {
|
||||
// +mapType=granular
|
||||
// +optional
|
||||
AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"`
|
||||
// currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
|
||||
// When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim
|
||||
// This is an alpha field and requires enabling VolumeAttributesClass feature.
|
||||
// +featureGate=VolumeAttributesClass
|
||||
// +optional
|
||||
CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty" protobuf:"bytes,8,opt,name=currentVolumeAttributesClassName"`
|
||||
// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
|
||||
// When this is unset, there is no ModifyVolume operation being attempted.
|
||||
// This is an alpha field and requires enabling VolumeAttributesClass feature.
|
||||
// +featureGate=VolumeAttributesClass
|
||||
// +optional
|
||||
ModifyVolumeStatus *ModifyVolumeStatus `json:"modifyVolumeStatus,omitempty" protobuf:"bytes,9,opt,name=modifyVolumeStatus"`
|
||||
}
|
||||
|
||||
// +enum
|
||||
@ -1763,6 +1837,40 @@ type ServiceAccountTokenProjection struct {
|
||||
Path string `json:"path" protobuf:"bytes,3,opt,name=path"`
|
||||
}
|
||||
|
||||
// ClusterTrustBundleProjection describes how to select a set of
|
||||
// ClusterTrustBundle objects and project their contents into the pod
|
||||
// filesystem.
|
||||
type ClusterTrustBundleProjection struct {
|
||||
// Select a single ClusterTrustBundle by object name. Mutually-exclusive
|
||||
// with signerName and labelSelector.
|
||||
// +optional
|
||||
Name *string `json:"name,omitempty" protobuf:"bytes,1,rep,name=name"`
|
||||
|
||||
// Select all ClusterTrustBundles that match this signer name.
|
||||
// Mutually-exclusive with name. The contents of all selected
|
||||
// ClusterTrustBundles will be unified and deduplicated.
|
||||
// +optional
|
||||
SignerName *string `json:"signerName,omitempty" protobuf:"bytes,2,rep,name=signerName"`
|
||||
|
||||
// Select all ClusterTrustBundles that match this label selector. Only has
|
||||
// effect if signerName is set. Mutually-exclusive with name. If unset,
|
||||
// interpreted as "match nothing". If set but empty, interpreted as "match
|
||||
// everything".
|
||||
// +optional
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,3,rep,name=labelSelector"`
|
||||
|
||||
// If true, don't block pod startup if the referenced ClusterTrustBundle(s)
|
||||
// aren't available. If using name, then the named ClusterTrustBundle is
|
||||
// allowed not to exist. If using signerName, then the combination of
|
||||
// signerName and labelSelector is allowed to match zero
|
||||
// ClusterTrustBundles.
|
||||
// +optional
|
||||
Optional *bool `json:"optional,omitempty" protobuf:"varint,5,opt,name=optional"`
|
||||
|
||||
// Relative path from the volume root to write the bundle.
|
||||
Path string `json:"path" protobuf:"bytes,4,rep,name=path"`
|
||||
}
|
||||
|
||||
// Represents a projected volume source
|
||||
type ProjectedVolumeSource struct {
|
||||
// sources is the list of volume projections
|
||||
@ -1794,6 +1902,24 @@ type VolumeProjection struct {
|
||||
// serviceAccountToken is information about the serviceAccountToken data to project
|
||||
// +optional
|
||||
ServiceAccountToken *ServiceAccountTokenProjection `json:"serviceAccountToken,omitempty" protobuf:"bytes,4,opt,name=serviceAccountToken"`
|
||||
|
||||
// ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
|
||||
// of ClusterTrustBundle objects in an auto-updating file.
|
||||
//
|
||||
// Alpha, gated by the ClusterTrustBundleProjection feature gate.
|
||||
//
|
||||
// ClusterTrustBundle objects can either be selected by name, or by the
|
||||
// combination of signer name and a label selector.
|
||||
//
|
||||
// Kubelet performs aggressive normalization of the PEM contents written
|
||||
// into the pod filesystem. Esoteric PEM features such as inter-block
|
||||
// comments and block headers are stripped. Certificates are deduplicated.
|
||||
// The ordering of certificates within the file is arbitrary, and Kubelet
|
||||
// may change the order over time.
|
||||
//
|
||||
// +featureGate=ClusterTrustBundleProjection
|
||||
// +optional
|
||||
ClusterTrustBundle *ClusterTrustBundleProjection `json:"clusterTrustBundle,omitempty" protobuf:"bytes,5,opt,name=clusterTrustBundle"`
|
||||
}
|
||||
|
||||
const (
|
||||
@ -1894,10 +2020,8 @@ type CSIPersistentVolumeSource struct {
|
||||
// nodeExpandSecretRef is a reference to the secret object containing
|
||||
// sensitive information to pass to the CSI driver to complete the CSI
|
||||
// NodeExpandVolume call.
|
||||
// This is a beta field which is enabled default by CSINodeExpandSecret feature gate.
|
||||
// This field is optional, may be omitted if no secret is required. If the
|
||||
// secret object contains more than one secret, all secrets are passed.
|
||||
// +featureGate=CSINodeExpandSecret
|
||||
// +optional
|
||||
NodeExpandSecretRef *SecretReference `json:"nodeExpandSecretRef,omitempty" protobuf:"bytes,10,opt,name=nodeExpandSecretRef"`
|
||||
}
|
||||
@ -2272,6 +2396,12 @@ type ExecAction struct {
|
||||
Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"`
|
||||
}
|
||||
|
||||
// SleepAction describes a "sleep" action.
|
||||
type SleepAction struct {
|
||||
// Seconds is the number of seconds to sleep.
|
||||
Seconds int64 `json:"seconds" protobuf:"bytes,1,opt,name=seconds"`
|
||||
}
|
||||
|
||||
// Probe describes a health check to be performed against a container to determine whether it is
|
||||
// alive or ready to receive traffic.
|
||||
type Probe struct {
|
||||
@ -2417,6 +2547,27 @@ type ResourceRequirements struct {
|
||||
Claims []ResourceClaim `json:"claims,omitempty" protobuf:"bytes,3,opt,name=claims"`
|
||||
}
|
||||
|
||||
// VolumeResourceRequirements describes the storage resource requirements for a volume.
|
||||
type VolumeResourceRequirements struct {
|
||||
// Limits describes the maximum amount of compute resources allowed.
|
||||
// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
|
||||
// +optional
|
||||
Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"`
|
||||
// Requests describes the minimum amount of compute resources required.
|
||||
// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
|
||||
// otherwise to an implementation-defined value. Requests cannot exceed Limits.
|
||||
// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
|
||||
// +optional
|
||||
Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"`
|
||||
|
||||
// Claims got added by accident when volumes shared the ResourceRequirements struct
|
||||
// with containers. Stripping the field got added in 1.27 and was backported to 1.26.
|
||||
// Starting with Kubernetes 1.28, this field is not part of the volume API anymore.
|
||||
//
|
||||
// Future extensions must not use "claims" or field number 3.
|
||||
// Claims []ResourceClaim `json:"claims,omitempty" protobuf:"bytes,3,opt,name=claims"`
|
||||
}
|
||||
|
||||
// ResourceClaim references one entry in PodSpec.ResourceClaims.
|
||||
type ResourceClaim struct {
|
||||
// Name must match the name of one entry in pod.spec.resourceClaims of
|
||||
@ -2646,6 +2797,10 @@ type LifecycleHandler struct {
|
||||
// lifecycle hooks will fail in runtime when tcp handler is specified.
|
||||
// +optional
|
||||
TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"`
|
||||
// Sleep represents the duration that the container should sleep before being terminated.
|
||||
// +featureGate=PodLifecycleSleepAction
|
||||
// +optional
|
||||
Sleep *SleepAction `json:"sleep,omitempty" protobuf:"bytes,4,opt,name=sleep"`
|
||||
}
|
||||
|
||||
// Lifecycle describes actions that the management system should take in response to container lifecycle
|
||||
@ -2845,6 +3000,9 @@ const (
// DisruptionTarget indicates the pod is about to be terminated due to a
// disruption (such as preemption, eviction API or garbage-collection).
DisruptionTarget PodConditionType = "DisruptionTarget"
// PodReadyToStartContainers pod sandbox is successfully configured and
// the pod is ready to launch containers.
PodReadyToStartContainers PodConditionType = "PodReadyToStartContainers"
)

// These are reasons for a pod's transition to a condition.
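The new PodReadyToStartContainers condition is looked up on pod status like any other condition. The helper below is only an illustrative sketch (the sandboxReady name and the fabricated pod are not from the diff), compiled against the vendored core/v1 package.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// sandboxReady reports whether the PodReadyToStartContainers condition is
// present and true on the given pod's status.
func sandboxReady(pod *corev1.Pod) bool {
	for _, cond := range pod.Status.Conditions {
		if cond.Type == corev1.PodReadyToStartContainers {
			return cond.Status == corev1.ConditionTrue
		}
	}
	return false
}

func main() {
	pod := &corev1.Pod{
		Status: corev1.PodStatus{
			Conditions: []corev1.PodCondition{
				{Type: corev1.PodReadyToStartContainers, Status: corev1.ConditionTrue},
			},
		},
	}
	fmt.Println(sandboxReady(pod))
}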
@ -3136,6 +3294,7 @@ type WeightedPodAffinityTerm struct {
// a pod of the set of pods is running
type PodAffinityTerm struct {
// A label query over a set of resources, in this case pods.
// If it's null, this PodAffinityTerm matches with no Pods.
// +optional
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
// namespaces specifies a static list of namespace names that the term applies to.
@ -3157,6 +3316,30 @@ type PodAffinityTerm struct {
// An empty selector ({}) matches all namespaces.
// +optional
NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,4,opt,name=namespaceSelector"`
// MatchLabelKeys is a set of pod label keys to select which pods will
// be taken into consideration. The keys are used to lookup values from the
// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`
// to select the group of existing pods which pods will be taken into consideration
// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
// pod labels will be ignored. The default value is empty.
// The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
// Also, MatchLabelKeys cannot be set when LabelSelector isn't set.
// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
// +listType=atomic
// +optional
MatchLabelKeys []string `json:"matchLabelKeys,omitempty" protobuf:"bytes,5,opt,name=matchLabelKeys"`
// MismatchLabelKeys is a set of pod label keys to select which pods will
// be taken into consideration. The keys are used to lookup values from the
// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)`
// to select the group of existing pods which pods will be taken into consideration
// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
// pod labels will be ignored. The default value is empty.
// The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector.
// Also, MismatchLabelKeys cannot be set when LabelSelector isn't set.
// This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
// +listType=atomic
// +optional
MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty" protobuf:"bytes,6,opt,name=mismatchLabelKeys"`
}

// Node affinity is a group of node affinity scheduling rules.
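A sketch of how the new alpha fields might be used: restricting anti-affinity to pods from the same rollout by keying on pod-template-hash. The label names and topology key are examples, and the MatchLabelKeysInPodAffinity feature gate must be enabled for the scheduler to evaluate them.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	term := corev1.PodAffinityTerm{
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "example-app"}, // example label
		},
		TopologyKey: "kubernetes.io/hostname",
		// The incoming pod's value of pod-template-hash is merged into the
		// selector as `pod-template-hash in (<value>)`, so only peers from
		// the same rollout are considered.
		MatchLabelKeys: []string{"pod-template-hash"},
	}
	fmt.Println(term.MatchLabelKeys, term.TopologyKey)
}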
@ -4692,6 +4875,15 @@ type LoadBalancerIngress struct {
// +optional
Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"`

// IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified.
// Setting this to "VIP" indicates that traffic is delivered to the node with
// the destination set to the load-balancer's IP and port.
// Setting this to "Proxy" indicates that traffic is delivered to the node or pod with
// the destination set to the node's IP and node port or the pod's IP and port.
// Service implementations may use this information to adjust traffic routing.
// +optional
IPMode *LoadBalancerIPMode `json:"ipMode,omitempty" protobuf:"bytes,3,opt,name=ipMode"`

// Ports is a list of records of service ports
// If used, every port defined in the service should have an entry in it
// +listType=atomic
@ -4709,6 +4901,8 @@ const (
IPv4Protocol IPFamily = "IPv4"
// IPv6Protocol indicates that this IP is IPv6 protocol
IPv6Protocol IPFamily = "IPv6"
// IPFamilyUnknown indicates that this IP is unknown protocol
IPFamilyUnknown IPFamily = ""
)

// IPFamilyPolicy represents the dual-stack-ness requested or required by a Service
@ -5003,7 +5197,7 @@ type ServicePort struct {
// RFC-6335 and https://www.iana.org/assignments/service-names).
//
// * Kubernetes-defined prefixed names:
// * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540
// * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
// * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
// * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
//
@ -5247,7 +5441,7 @@ type EndpointPort struct {
// RFC-6335 and https://www.iana.org/assignments/service-names).
//
// * Kubernetes-defined prefixed names:
// * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540
// * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
// * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
// * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
//
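The appProtocol wording change only clarifies that kubernetes.io/h2c means HTTP/2 with prior knowledge over cleartext (RFC 9113), not upgrade-based h2c. For reference, a sketch of declaring it on a ServicePort; the port name and number are placeholders.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	h2c := "kubernetes.io/h2c" // HTTP/2 with prior knowledge over cleartext
	port := corev1.ServicePort{
		Name:        "grpc", // placeholder port name
		Protocol:    corev1.ProtocolTCP,
		Port:        8080,
		AppProtocol: &h2c,
	}
	fmt.Println(port.Name, *port.AppProtocol)
}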
@ -7054,3 +7248,15 @@ type PortStatus struct {
// +kubebuilder:validation:MaxLength=316
Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"`
}

// LoadBalancerIPMode represents the mode of the LoadBalancer ingress IP
type LoadBalancerIPMode string

const (
// LoadBalancerIPModeVIP indicates that traffic is delivered to the node with
// the destination set to the load-balancer's IP and port.
LoadBalancerIPModeVIP LoadBalancerIPMode = "VIP"
// LoadBalancerIPModeProxy indicates that traffic is delivered to the node or pod with
// the destination set to the node's IP and port or the pod's IP and port.
LoadBalancerIPModeProxy LoadBalancerIPMode = "Proxy"
)
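The new IPMode field on LoadBalancerIngress lets a load-balancer implementation declare whether ingress traffic keeps the VIP as destination or is re-addressed to node/pod IPs. The snippet below is a sketch of a consumer inspecting it; the status object is fabricated, the address is from the documentation range, and as far as I can tell the field sits behind the LoadBalancerIPMode feature gate in v1.29.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	mode := corev1.LoadBalancerIPModeProxy
	status := corev1.LoadBalancerStatus{
		Ingress: []corev1.LoadBalancerIngress{{
			IP:     "192.0.2.10", // documentation-range address, placeholder
			IPMode: &mode,
		}},
	}
	for _, ing := range status.Ingress {
		if ing.IPMode != nil && *ing.IPMode == corev1.LoadBalancerIPModeProxy {
			fmt.Printf("traffic to %s is re-addressed to the node or pod\n", ing.IP)
		}
	}
}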
91
vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
generated
vendored
@ -127,7 +127,7 @@ var map_CSIPersistentVolumeSource = map[string]string{
|
||||
"nodeStageSecretRef": "nodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
|
||||
"nodePublishSecretRef": "nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
|
||||
"controllerExpandSecretRef": "controllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
|
||||
"nodeExpandSecretRef": "nodeExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeExpandVolume call. This is a beta field which is enabled default by CSINodeExpandSecret feature gate. This field is optional, may be omitted if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
|
||||
"nodeExpandSecretRef": "nodeExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeExpandVolume call. This field is optional, may be omitted if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
|
||||
}
|
||||
|
||||
func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string {
|
||||
@ -228,6 +228,19 @@ func (ClientIPConfig) SwaggerDoc() map[string]string {
|
||||
return map_ClientIPConfig
|
||||
}
|
||||
|
||||
var map_ClusterTrustBundleProjection = map[string]string{
|
||||
"": "ClusterTrustBundleProjection describes how to select a set of ClusterTrustBundle objects and project their contents into the pod filesystem.",
|
||||
"name": "Select a single ClusterTrustBundle by object name. Mutually-exclusive with signerName and labelSelector.",
|
||||
"signerName": "Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name. The contents of all selected ClusterTrustBundles will be unified and deduplicated.",
|
||||
"labelSelector": "Select all ClusterTrustBundles that match this label selector. Only has effect if signerName is set. Mutually-exclusive with name. If unset, interpreted as \"match nothing\". If set but empty, interpreted as \"match everything\".",
|
||||
"optional": "If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available. If using name, then the named ClusterTrustBundle is allowed not to exist. If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles.",
|
||||
"path": "Relative path from the volume root to write the bundle.",
|
||||
}
|
||||
|
||||
func (ClusterTrustBundleProjection) SwaggerDoc() map[string]string {
|
||||
return map_ClusterTrustBundleProjection
|
||||
}
|
||||
|
||||
var map_ComponentCondition = map[string]string{
|
||||
"": "Information about the condition of a component.",
|
||||
"type": "Type of condition for a component. Valid value: \"Healthy\"",
|
||||
@ -531,7 +544,7 @@ var map_EndpointPort = map[string]string{
|
||||
"name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.",
|
||||
"port": "The port number of the endpoint.",
|
||||
"protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
|
||||
"appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.",
|
||||
"appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.",
|
||||
}
|
||||
|
||||
func (EndpointPort) SwaggerDoc() map[string]string {
|
||||
@ -935,6 +948,7 @@ var map_LifecycleHandler = map[string]string{
|
||||
"exec": "Exec specifies the action to take.",
|
||||
"httpGet": "HTTPGet specifies the http request to perform.",
|
||||
"tcpSocket": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.",
|
||||
"sleep": "Sleep represents the duration that the container should sleep before being terminated.",
|
||||
}
|
||||
|
||||
func (LifecycleHandler) SwaggerDoc() map[string]string {
|
||||
@ -988,6 +1002,7 @@ var map_LoadBalancerIngress = map[string]string{
|
||||
"": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.",
|
||||
"ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)",
|
||||
"hostname": "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)",
|
||||
"ipMode": "IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. Setting this to \"VIP\" indicates that traffic is delivered to the node with the destination set to the load-balancer's IP and port. Setting this to \"Proxy\" indicates that traffic is delivered to the node or pod with the destination set to the node's IP and node port or the pod's IP and port. Service implementations may use this information to adjust traffic routing.",
|
||||
"ports": "Ports is a list of records of service ports If used, every port defined in the service should have an entry in it",
|
||||
}
|
||||
|
||||
@ -1023,6 +1038,16 @@ func (LocalVolumeSource) SwaggerDoc() map[string]string {
|
||||
return map_LocalVolumeSource
|
||||
}
|
||||
|
||||
var map_ModifyVolumeStatus = map[string]string{
|
||||
"": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation",
|
||||
"targetVolumeAttributesClassName": "targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled",
|
||||
"status": "status is the status of the ControllerModifyVolume operation. It can be in any of following states:\n - Pending\n Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as\n the specified VolumeAttributesClass not existing.\n - InProgress\n InProgress indicates that the volume is being modified.\n - Infeasible\n Infeasible indicates that the request has been rejected as invalid by the CSI driver. To\n\t resolve the error, a valid VolumeAttributesClass needs to be specified.\nNote: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately.",
|
||||
}
|
||||
|
||||
func (ModifyVolumeStatus) SwaggerDoc() map[string]string {
|
||||
return map_ModifyVolumeStatus
|
||||
}
|
||||
|
||||
var map_NFSVolumeSource = map[string]string{
|
||||
"": "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.",
|
||||
"server": "server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
|
||||
@ -1339,15 +1364,16 @@ func (PersistentVolumeClaimList) SwaggerDoc() map[string]string {
|
||||
}
|
||||
|
||||
var map_PersistentVolumeClaimSpec = map[string]string{
|
||||
"": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes",
|
||||
"accessModes": "accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1",
|
||||
"selector": "selector is a label query over volumes to consider for binding.",
|
||||
"resources": "resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources",
|
||||
"volumeName": "volumeName is the binding reference to the PersistentVolume backing this claim.",
|
||||
"storageClassName": "storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1",
|
||||
"volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.",
|
||||
"dataSource": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.",
|
||||
"dataSourceRef": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.",
|
||||
"": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes",
|
||||
"accessModes": "accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1",
|
||||
"selector": "selector is a label query over volumes to consider for binding.",
|
||||
"resources": "resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources",
|
||||
"volumeName": "volumeName is the binding reference to the PersistentVolume backing this claim.",
|
||||
"storageClassName": "storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1",
|
||||
"volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.",
|
||||
"dataSource": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.",
|
||||
"dataSourceRef": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.",
|
||||
"volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.",
|
||||
}
|
||||
|
||||
func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
|
||||
@ -1355,13 +1381,15 @@ func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
|
||||
}
|
||||
|
||||
var map_PersistentVolumeClaimStatus = map[string]string{
|
||||
"": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.",
|
||||
"phase": "phase represents the current phase of PersistentVolumeClaim.",
|
||||
"accessModes": "accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1",
|
||||
"capacity": "capacity represents the actual resources of the underlying volume.",
|
||||
"conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.",
|
||||
"allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
|
||||
"allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
|
||||
"": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.",
|
||||
"phase": "phase represents the current phase of PersistentVolumeClaim.",
|
||||
"accessModes": "accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1",
|
||||
"capacity": "capacity represents the actual resources of the underlying volume.",
|
||||
"conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.",
|
||||
"allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
|
||||
"allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
|
||||
"currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is an alpha field and requires enabling VolumeAttributesClass feature.",
|
||||
"modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is an alpha field and requires enabling VolumeAttributesClass feature.",
|
||||
}
|
||||
|
||||
func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string {
|
||||
@ -1438,6 +1466,7 @@ var map_PersistentVolumeSpec = map[string]string{
|
||||
"mountOptions": "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options",
|
||||
"volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.",
|
||||
"nodeAffinity": "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.",
|
||||
"volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature.",
|
||||
}
|
||||
|
||||
func (PersistentVolumeSpec) SwaggerDoc() map[string]string {
|
||||
@ -1489,10 +1518,12 @@ func (PodAffinity) SwaggerDoc() map[string]string {
|
||||
|
||||
var map_PodAffinityTerm = map[string]string{
|
||||
"": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running",
|
||||
"labelSelector": "A label query over a set of resources, in this case pods.",
|
||||
"labelSelector": "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods.",
|
||||
"namespaces": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".",
|
||||
"topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.",
|
||||
"namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces.",
|
||||
"matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.",
|
||||
"mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.",
|
||||
}
|
||||
|
||||
func (PodAffinityTerm) SwaggerDoc() map[string]string {
|
||||
@ -2325,7 +2356,7 @@ var map_ServicePort = map[string]string{
|
||||
"": "ServicePort contains information on service's port.",
|
||||
"name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.",
|
||||
"protocol": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.",
|
||||
"appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.",
|
||||
"appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.",
|
||||
"port": "The port that will be exposed by this service.",
|
||||
"targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service",
|
||||
"nodePort": "The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport",
|
||||
@ -2390,6 +2421,15 @@ func (SessionAffinityConfig) SwaggerDoc() map[string]string {
|
||||
return map_SessionAffinityConfig
|
||||
}
|
||||
|
||||
var map_SleepAction = map[string]string{
|
||||
"": "SleepAction describes a \"sleep\" action.",
|
||||
"seconds": "Seconds is the number of seconds to sleep.",
|
||||
}
|
||||
|
||||
func (SleepAction) SwaggerDoc() map[string]string {
|
||||
return map_SleepAction
|
||||
}
|
||||
|
||||
var map_StorageOSPersistentVolumeSource = map[string]string{
|
||||
"": "Represents a StorageOS persistent volume resource.",
|
||||
"volumeName": "volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.",
|
||||
@ -2566,12 +2606,23 @@ var map_VolumeProjection = map[string]string{
|
||||
"downwardAPI": "downwardAPI information about the downwardAPI data to project",
|
||||
"configMap": "configMap information about the configMap data to project",
|
||||
"serviceAccountToken": "serviceAccountToken is information about the serviceAccountToken data to project",
|
||||
"clusterTrustBundle": "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file.\n\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\n\nClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector.\n\nKubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time.",
|
||||
}
|
||||
|
||||
func (VolumeProjection) SwaggerDoc() map[string]string {
|
||||
return map_VolumeProjection
|
||||
}
|
||||
|
||||
var map_VolumeResourceRequirements = map[string]string{
|
||||
"": "VolumeResourceRequirements describes the storage resource requirements for a volume.",
|
||||
"limits": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
|
||||
"requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
|
||||
}
|
||||
|
||||
func (VolumeResourceRequirements) SwaggerDoc() map[string]string {
|
||||
return map_VolumeResourceRequirements
|
||||
}
|
||||
|
||||
var map_VolumeSource = map[string]string{
|
||||
"": "Represents the source of a volume to mount. Only one of its members may be specified.",
|
||||
"hostPath": "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
|
||||
|
143
vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
generated
vendored
@ -466,6 +466,42 @@ func (in *ClientIPConfig) DeepCopy() *ClientIPConfig {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterTrustBundleProjection) DeepCopyInto(out *ClusterTrustBundleProjection) {
*out = *in
if in.Name != nil {
in, out := &in.Name, &out.Name
*out = new(string)
**out = **in
}
if in.SignerName != nil {
in, out := &in.SignerName, &out.SignerName
*out = new(string)
**out = **in
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(metav1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.Optional != nil {
in, out := &in.Optional, &out.Optional
*out = new(bool)
**out = **in
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleProjection.
func (in *ClusterTrustBundleProjection) DeepCopy() *ClusterTrustBundleProjection {
if in == nil {
return nil
}
out := new(ClusterTrustBundleProjection)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ComponentCondition) DeepCopyInto(out *ComponentCondition) {
*out = *in
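ClusterTrustBundleProjection, whose deep-copy support is added above, is consumed through a projected volume source. A minimal sketch follows, assuming the alpha ClusterTrustBundleProjection feature gate is enabled; the signer name, label and output path are placeholders.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	optional := true
	signer := "example.com/ca" // placeholder signer name
	vol := corev1.Volume{
		Name: "trust-bundle",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{{
					// Project all ClusterTrustBundles for the signer that
					// match the label selector into one deduplicated file.
					ClusterTrustBundle: &corev1.ClusterTrustBundleProjection{
						SignerName: &signer,
						LabelSelector: &metav1.LabelSelector{
							MatchLabels: map[string]string{"trust": "cluster"}, // example label
						},
						Optional: &optional,
						Path:     "ca-bundle.pem",
					},
				}},
			},
		},
	}
	fmt.Println(vol.VolumeSource.Projected.Sources[0].ClusterTrustBundle.Path)
}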
@ -2045,6 +2081,11 @@ func (in *LifecycleHandler) DeepCopyInto(out *LifecycleHandler) {
*out = new(TCPSocketAction)
**out = **in
}
if in.Sleep != nil {
in, out := &in.Sleep, &out.Sleep
*out = new(SleepAction)
**out = **in
}
return
}
@ -2228,6 +2269,11 @@ func (in *List) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) {
*out = *in
if in.IPMode != nil {
in, out := &in.IPMode, &out.IPMode
*out = new(LoadBalancerIPMode)
**out = **in
}
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]PortStatus, len(*in))
@ -2308,6 +2354,22 @@ func (in *LocalVolumeSource) DeepCopy() *LocalVolumeSource {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ModifyVolumeStatus) DeepCopyInto(out *ModifyVolumeStatus) {
*out = *in
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyVolumeStatus.
func (in *ModifyVolumeStatus) DeepCopy() *ModifyVolumeStatus {
if in == nil {
return nil
}
out := new(ModifyVolumeStatus)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NFSVolumeSource) DeepCopyInto(out *NFSVolumeSource) {
*out = *in
@ -3056,6 +3118,11 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec
*out = new(TypedObjectReference)
(*in).DeepCopyInto(*out)
}
if in.VolumeAttributesClassName != nil {
in, out := &in.VolumeAttributesClassName, &out.VolumeAttributesClassName
*out = new(string)
**out = **in
}
return
}
@ -3105,6 +3172,16 @@ func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimSt
(*out)[key] = val
}
}
if in.CurrentVolumeAttributesClassName != nil {
in, out := &in.CurrentVolumeAttributesClassName, &out.CurrentVolumeAttributesClassName
*out = new(string)
**out = **in
}
if in.ModifyVolumeStatus != nil {
in, out := &in.ModifyVolumeStatus, &out.ModifyVolumeStatus
*out = new(ModifyVolumeStatus)
**out = **in
}
return
}
@ -3347,6 +3424,11 @@ func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) {
*out = new(VolumeNodeAffinity)
(*in).DeepCopyInto(*out)
}
if in.VolumeAttributesClassName != nil {
in, out := &in.VolumeAttributesClassName, &out.VolumeAttributesClassName
*out = new(string)
**out = **in
}
return
}
@ -3472,6 +3554,16 @@ func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) {
*out = new(metav1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.MatchLabelKeys != nil {
in, out := &in.MatchLabelKeys, &out.MatchLabelKeys
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.MismatchLabelKeys != nil {
in, out := &in.MismatchLabelKeys, &out.MismatchLabelKeys
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
@ -5681,6 +5773,22 @@ func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SleepAction) DeepCopyInto(out *SleepAction) {
*out = *in
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SleepAction.
func (in *SleepAction) DeepCopy() *SleepAction {
if in == nil {
return nil
}
out := new(SleepAction)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) {
*out = *in
@ -6027,6 +6135,11 @@ func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) {
*out = new(ServiceAccountTokenProjection)
(*in).DeepCopyInto(*out)
}
if in.ClusterTrustBundle != nil {
in, out := &in.ClusterTrustBundle, &out.ClusterTrustBundle
*out = new(ClusterTrustBundleProjection)
(*in).DeepCopyInto(*out)
}
return
}
@ -6040,6 +6153,36 @@ func (in *VolumeProjection) DeepCopy() *VolumeProjection {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeResourceRequirements) DeepCopyInto(out *VolumeResourceRequirements) {
*out = *in
if in.Limits != nil {
in, out := &in.Limits, &out.Limits
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.Requests != nil {
in, out := &in.Requests, &out.Requests
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeResourceRequirements.
func (in *VolumeResourceRequirements) DeepCopy() *VolumeResourceRequirements {
if in == nil {
return nil
}
out := new(VolumeResourceRequirements)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeSource) DeepCopyInto(out *VolumeSource) {
*out = *in
4
vendor/k8s.io/api/discovery/v1/generated.proto
generated
vendored
@ -118,7 +118,7 @@ message EndpointHints {
// +structType=atomic
message EndpointPort {
// name represents the name of this port. All ports in an EndpointSlice must have a unique name.
// If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name.
// If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name.
// Name must either be an empty string or pass DNS_LABEL validation:
// * must be no more than 63 characters long.
// * must consist of lower case alphanumeric characters or '-'.
@ -145,7 +145,7 @@ message EndpointPort {
// RFC-6335 and https://www.iana.org/assignments/service-names).
//
// * Kubernetes-defined prefixed names:
// * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540
// * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
// * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
// * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
//
4
vendor/k8s.io/api/discovery/v1/types.go
generated
vendored
@ -168,7 +168,7 @@ type ForZone struct {
// +structType=atomic
type EndpointPort struct {
// name represents the name of this port. All ports in an EndpointSlice must have a unique name.
// If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name.
// If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name.
// Name must either be an empty string or pass DNS_LABEL validation:
// * must be no more than 63 characters long.
// * must consist of lower case alphanumeric characters or '-'.
@ -195,7 +195,7 @@ type EndpointPort struct {
// RFC-6335 and https://www.iana.org/assignments/service-names).
//
// * Kubernetes-defined prefixed names:
// * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540
// * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
// * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
// * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
//
4
vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go
generated
vendored
@ -65,10 +65,10 @@ func (EndpointHints) SwaggerDoc() map[string]string {
|
||||
|
||||
var map_EndpointPort = map[string]string{
|
||||
"": "EndpointPort represents a Port used by an EndpointSlice",
|
||||
"name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.",
|
||||
"name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.",
|
||||
"protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
|
||||
"port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.",
|
||||
"appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.",
|
||||
"appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.",
|
||||
}
|
||||
|
||||
func (EndpointPort) SwaggerDoc() map[string]string {
|
||||
|
2 vendor/k8s.io/api/discovery/v1beta1/generated.proto generated vendored
@ -119,7 +119,7 @@ message EndpointHints {
|
||||
// EndpointPort represents a Port used by an EndpointSlice
|
||||
message EndpointPort {
|
||||
// name represents the name of this port. All ports in an EndpointSlice must have a unique name.
|
||||
// If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name.
|
||||
// If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name.
|
||||
// Name must either be an empty string or pass DNS_LABEL validation:
|
||||
// * must be no more than 63 characters long.
|
||||
// * must consist of lower case alphanumeric characters or '-'.
|
||||
|
2 vendor/k8s.io/api/discovery/v1beta1/types.go generated vendored
@ -172,7 +172,7 @@ type ForZone struct {
|
||||
// EndpointPort represents a Port used by an EndpointSlice
|
||||
type EndpointPort struct {
|
||||
// name represents the name of this port. All ports in an EndpointSlice must have a unique name.
|
||||
// If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name.
|
||||
// If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name.
|
||||
// Name must either be an empty string or pass DNS_LABEL validation:
|
||||
// * must be no more than 63 characters long.
|
||||
// * must consist of lower case alphanumeric characters or '-'.
|
||||
|
2 vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go generated vendored
@ -64,7 +64,7 @@ func (EndpointHints) SwaggerDoc() map[string]string {
|
||||
|
||||
var map_EndpointPort = map[string]string{
|
||||
"": "EndpointPort represents a Port used by an EndpointSlice",
|
||||
"name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.",
|
||||
"name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.",
|
||||
"protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
|
||||
"port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.",
|
||||
"appProtocol": "appProtocol represents the application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.",
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@ -17,9 +17,8 @@ limitations under the License.
|
||||
// +k8s:deepcopy-gen=package
|
||||
// +k8s:protobuf-gen=package
|
||||
// +k8s:openapi-gen=true
|
||||
// +k8s:prerelease-lifecycle-gen=true
|
||||
|
||||
// +groupName=flowcontrol.apiserver.k8s.io
|
||||
|
||||
// Package v1alpha1 holds api types of version v1alpha1 for group "flowcontrol.apiserver.k8s.io".
|
||||
package v1alpha1 // import "k8s.io/api/flowcontrol/v1alpha1"
|
||||
// Package v1 holds api types of version v1 for group "flowcontrol.apiserver.k8s.io".
|
||||
package v1 // import "k8s.io/api/flowcontrol/v1"
|
@ -15,9 +15,9 @@ limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto
|
||||
// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1/generated.proto
|
||||
|
||||
package v1alpha1
|
||||
package v1
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
@ -46,7 +46,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} }
|
||||
func (*ExemptPriorityLevelConfiguration) ProtoMessage() {}
|
||||
func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{0}
|
||||
return fileDescriptor_f8a25df358697d27, []int{0}
|
||||
}
|
||||
func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -74,7 +74,7 @@ var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo
|
||||
func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} }
|
||||
func (*FlowDistinguisherMethod) ProtoMessage() {}
|
||||
func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{1}
|
||||
return fileDescriptor_f8a25df358697d27, []int{1}
|
||||
}
|
||||
func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -102,7 +102,7 @@ var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo
|
||||
func (m *FlowSchema) Reset() { *m = FlowSchema{} }
|
||||
func (*FlowSchema) ProtoMessage() {}
|
||||
func (*FlowSchema) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{2}
|
||||
return fileDescriptor_f8a25df358697d27, []int{2}
|
||||
}
|
||||
func (m *FlowSchema) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -130,7 +130,7 @@ var xxx_messageInfo_FlowSchema proto.InternalMessageInfo
|
||||
func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} }
|
||||
func (*FlowSchemaCondition) ProtoMessage() {}
|
||||
func (*FlowSchemaCondition) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{3}
|
||||
return fileDescriptor_f8a25df358697d27, []int{3}
|
||||
}
|
||||
func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -158,7 +158,7 @@ var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo
|
||||
func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} }
|
||||
func (*FlowSchemaList) ProtoMessage() {}
|
||||
func (*FlowSchemaList) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{4}
|
||||
return fileDescriptor_f8a25df358697d27, []int{4}
|
||||
}
|
||||
func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -186,7 +186,7 @@ var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo
|
||||
func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} }
|
||||
func (*FlowSchemaSpec) ProtoMessage() {}
|
||||
func (*FlowSchemaSpec) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{5}
|
||||
return fileDescriptor_f8a25df358697d27, []int{5}
|
||||
}
|
||||
func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -214,7 +214,7 @@ var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo
|
||||
func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} }
|
||||
func (*FlowSchemaStatus) ProtoMessage() {}
|
||||
func (*FlowSchemaStatus) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{6}
|
||||
return fileDescriptor_f8a25df358697d27, []int{6}
|
||||
}
|
||||
func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -242,7 +242,7 @@ var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo
|
||||
func (m *GroupSubject) Reset() { *m = GroupSubject{} }
|
||||
func (*GroupSubject) ProtoMessage() {}
|
||||
func (*GroupSubject) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{7}
|
||||
return fileDescriptor_f8a25df358697d27, []int{7}
|
||||
}
|
||||
func (m *GroupSubject) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -270,7 +270,7 @@ var xxx_messageInfo_GroupSubject proto.InternalMessageInfo
|
||||
func (m *LimitResponse) Reset() { *m = LimitResponse{} }
|
||||
func (*LimitResponse) ProtoMessage() {}
|
||||
func (*LimitResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{8}
|
||||
return fileDescriptor_f8a25df358697d27, []int{8}
|
||||
}
|
||||
func (m *LimitResponse) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -298,7 +298,7 @@ var xxx_messageInfo_LimitResponse proto.InternalMessageInfo
|
||||
func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} }
|
||||
func (*LimitedPriorityLevelConfiguration) ProtoMessage() {}
|
||||
func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{9}
|
||||
return fileDescriptor_f8a25df358697d27, []int{9}
|
||||
}
|
||||
func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -326,7 +326,7 @@ var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo
|
||||
func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} }
|
||||
func (*NonResourcePolicyRule) ProtoMessage() {}
|
||||
func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{10}
|
||||
return fileDescriptor_f8a25df358697d27, []int{10}
|
||||
}
|
||||
func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -354,7 +354,7 @@ var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo
|
||||
func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} }
|
||||
func (*PolicyRulesWithSubjects) ProtoMessage() {}
|
||||
func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{11}
|
||||
return fileDescriptor_f8a25df358697d27, []int{11}
|
||||
}
|
||||
func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -382,7 +382,7 @@ var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo
|
||||
func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} }
|
||||
func (*PriorityLevelConfiguration) ProtoMessage() {}
|
||||
func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{12}
|
||||
return fileDescriptor_f8a25df358697d27, []int{12}
|
||||
}
|
||||
func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -410,7 +410,7 @@ var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo
|
||||
func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} }
|
||||
func (*PriorityLevelConfigurationCondition) ProtoMessage() {}
|
||||
func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{13}
|
||||
return fileDescriptor_f8a25df358697d27, []int{13}
|
||||
}
|
||||
func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -438,7 +438,7 @@ var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInf
|
||||
func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} }
|
||||
func (*PriorityLevelConfigurationList) ProtoMessage() {}
|
||||
func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{14}
|
||||
return fileDescriptor_f8a25df358697d27, []int{14}
|
||||
}
|
||||
func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -466,7 +466,7 @@ var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo
|
||||
func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} }
|
||||
func (*PriorityLevelConfigurationReference) ProtoMessage() {}
|
||||
func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{15}
|
||||
return fileDescriptor_f8a25df358697d27, []int{15}
|
||||
}
|
||||
func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -494,7 +494,7 @@ var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInf
|
||||
func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} }
|
||||
func (*PriorityLevelConfigurationSpec) ProtoMessage() {}
|
||||
func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{16}
|
||||
return fileDescriptor_f8a25df358697d27, []int{16}
|
||||
}
|
||||
func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -522,7 +522,7 @@ var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo
|
||||
func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} }
|
||||
func (*PriorityLevelConfigurationStatus) ProtoMessage() {}
|
||||
func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{17}
|
||||
return fileDescriptor_f8a25df358697d27, []int{17}
|
||||
}
|
||||
func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -550,7 +550,7 @@ var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo
|
||||
func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} }
|
||||
func (*QueuingConfiguration) ProtoMessage() {}
|
||||
func (*QueuingConfiguration) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{18}
|
||||
return fileDescriptor_f8a25df358697d27, []int{18}
|
||||
}
|
||||
func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -578,7 +578,7 @@ var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo
|
||||
func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} }
|
||||
func (*ResourcePolicyRule) ProtoMessage() {}
|
||||
func (*ResourcePolicyRule) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{19}
|
||||
return fileDescriptor_f8a25df358697d27, []int{19}
|
||||
}
|
||||
func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -606,7 +606,7 @@ var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo
|
||||
func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} }
|
||||
func (*ServiceAccountSubject) ProtoMessage() {}
|
||||
func (*ServiceAccountSubject) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{20}
|
||||
return fileDescriptor_f8a25df358697d27, []int{20}
|
||||
}
|
||||
func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -634,7 +634,7 @@ var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo
|
||||
func (m *Subject) Reset() { *m = Subject{} }
|
||||
func (*Subject) ProtoMessage() {}
|
||||
func (*Subject) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{21}
|
||||
return fileDescriptor_f8a25df358697d27, []int{21}
|
||||
}
|
||||
func (m *Subject) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -662,7 +662,7 @@ var xxx_messageInfo_Subject proto.InternalMessageInfo
|
||||
func (m *UserSubject) Reset() { *m = UserSubject{} }
|
||||
func (*UserSubject) ProtoMessage() {}
|
||||
func (*UserSubject) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_45ba024d525b289b, []int{22}
|
||||
return fileDescriptor_f8a25df358697d27, []int{22}
|
||||
}
|
||||
func (m *UserSubject) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -688,139 +688,137 @@ func (m *UserSubject) XXX_DiscardUnknown() {
|
||||
var xxx_messageInfo_UserSubject proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.ExemptPriorityLevelConfiguration")
|
||||
proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowDistinguisherMethod")
|
||||
proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchema")
|
||||
proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaCondition")
|
||||
proto.RegisterType((*FlowSchemaList)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaList")
|
||||
proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaSpec")
|
||||
proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaStatus")
|
||||
proto.RegisterType((*GroupSubject)(nil), "k8s.io.api.flowcontrol.v1alpha1.GroupSubject")
|
||||
proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1alpha1.LimitResponse")
|
||||
proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration")
|
||||
proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1alpha1.NonResourcePolicyRule")
|
||||
proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects")
|
||||
proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfiguration")
|
||||
proto.RegisterType((*PriorityLevelConfigurationCondition)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition")
|
||||
proto.RegisterType((*PriorityLevelConfigurationList)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationList")
|
||||
proto.RegisterType((*PriorityLevelConfigurationReference)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference")
|
||||
proto.RegisterType((*PriorityLevelConfigurationSpec)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec")
|
||||
proto.RegisterType((*PriorityLevelConfigurationStatus)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus")
|
||||
proto.RegisterType((*QueuingConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.QueuingConfiguration")
|
||||
proto.RegisterType((*ResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1alpha1.ResourcePolicyRule")
|
||||
proto.RegisterType((*ServiceAccountSubject)(nil), "k8s.io.api.flowcontrol.v1alpha1.ServiceAccountSubject")
|
||||
proto.RegisterType((*Subject)(nil), "k8s.io.api.flowcontrol.v1alpha1.Subject")
|
||||
proto.RegisterType((*UserSubject)(nil), "k8s.io.api.flowcontrol.v1alpha1.UserSubject")
|
||||
proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1.ExemptPriorityLevelConfiguration")
|
||||
proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1.FlowDistinguisherMethod")
|
||||
proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1.FlowSchema")
|
||||
proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaCondition")
|
||||
proto.RegisterType((*FlowSchemaList)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaList")
|
||||
proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaSpec")
|
||||
proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaStatus")
|
||||
proto.RegisterType((*GroupSubject)(nil), "k8s.io.api.flowcontrol.v1.GroupSubject")
|
||||
proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1.LimitResponse")
|
||||
proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1.LimitedPriorityLevelConfiguration")
|
||||
proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1.NonResourcePolicyRule")
|
||||
proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1.PolicyRulesWithSubjects")
|
||||
proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfiguration")
|
||||
proto.RegisterType((*PriorityLevelConfigurationCondition)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationCondition")
|
||||
proto.RegisterType((*PriorityLevelConfigurationList)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationList")
|
||||
proto.RegisterType((*PriorityLevelConfigurationReference)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationReference")
|
||||
proto.RegisterType((*PriorityLevelConfigurationSpec)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationSpec")
|
||||
proto.RegisterType((*PriorityLevelConfigurationStatus)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationStatus")
|
||||
proto.RegisterType((*QueuingConfiguration)(nil), "k8s.io.api.flowcontrol.v1.QueuingConfiguration")
|
||||
proto.RegisterType((*ResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1.ResourcePolicyRule")
|
||||
proto.RegisterType((*ServiceAccountSubject)(nil), "k8s.io.api.flowcontrol.v1.ServiceAccountSubject")
|
||||
proto.RegisterType((*Subject)(nil), "k8s.io.api.flowcontrol.v1.Subject")
|
||||
proto.RegisterType((*UserSubject)(nil), "k8s.io.api.flowcontrol.v1.UserSubject")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto", fileDescriptor_45ba024d525b289b)
|
||||
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1/generated.proto", fileDescriptor_f8a25df358697d27)
|
||||
}
|
||||
|
||||
var fileDescriptor_45ba024d525b289b = []byte{
|
||||
// 1621 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4d, 0x6f, 0xdb, 0x46,
|
||||
0x1a, 0x36, 0x65, 0xc9, 0xb6, 0xc6, 0x9f, 0x19, 0xc7, 0xb0, 0xd6, 0x59, 0x48, 0x0e, 0x17, 0xd8,
|
||||
0x64, 0x37, 0x09, 0x15, 0x67, 0x93, 0x6c, 0x16, 0xc1, 0x22, 0x30, 0x93, 0x6c, 0xbe, 0x6c, 0xc7,
|
||||
0x1e, 0x27, 0xd9, 0x36, 0x48, 0x81, 0xd0, 0xd4, 0x58, 0x9a, 0x58, 0x22, 0xd9, 0x19, 0x52, 0x8e,
|
||||
0x8b, 0x1c, 0x0a, 0xf4, 0x0f, 0xf4, 0x07, 0xe4, 0xd8, 0x43, 0x6f, 0x05, 0x7a, 0xed, 0xa5, 0xc7,
|
||||
0xa0, 0xe8, 0x21, 0xc7, 0x9c, 0x84, 0x58, 0xbd, 0xf6, 0x07, 0xb4, 0x39, 0x14, 0xc5, 0x0c, 0x87,
|
||||
0xa4, 0x28, 0x89, 0xa2, 0x52, 0x03, 0x39, 0xf5, 0x66, 0xbe, 0x1f, 0xcf, 0x3b, 0xf3, 0xce, 0xfb,
|
||||
0xf1, 0xc8, 0xe0, 0xf6, 0xde, 0x15, 0xa6, 0x11, 0xbb, 0xbc, 0xe7, 0xed, 0x60, 0x6a, 0x61, 0x17,
|
||||
0xb3, 0x72, 0x13, 0x5b, 0x15, 0x9b, 0x96, 0xa5, 0xc2, 0x70, 0x48, 0x79, 0xb7, 0x6e, 0xef, 0x9b,
|
||||
0xb6, 0xe5, 0x52, 0xbb, 0x5e, 0x6e, 0xae, 0x18, 0x75, 0xa7, 0x66, 0xac, 0x94, 0xab, 0xd8, 0xc2,
|
||||
0xd4, 0x70, 0x71, 0x45, 0x73, 0xa8, 0xed, 0xda, 0xb0, 0xe4, 0x3b, 0x68, 0x86, 0x43, 0xb4, 0x0e,
|
||||
0x07, 0x2d, 0x70, 0x58, 0x3a, 0x57, 0x25, 0x6e, 0xcd, 0xdb, 0xd1, 0x4c, 0xbb, 0x51, 0xae, 0xda,
|
||||
0x55, 0xbb, 0x2c, 0xfc, 0x76, 0xbc, 0x5d, 0xf1, 0x25, 0x3e, 0xc4, 0x5f, 0x3e, 0xde, 0xd2, 0xc5,
|
||||
0xe8, 0x00, 0x0d, 0xc3, 0xac, 0x11, 0x0b, 0xd3, 0x83, 0xb2, 0xb3, 0x57, 0xe5, 0x02, 0x56, 0x6e,
|
||||
0x60, 0xd7, 0x28, 0x37, 0x7b, 0x4e, 0xb1, 0x54, 0x4e, 0xf2, 0xa2, 0x9e, 0xe5, 0x92, 0x06, 0xee,
|
||||
0x71, 0xb8, 0x9c, 0xe6, 0xc0, 0xcc, 0x1a, 0x6e, 0x18, 0xdd, 0x7e, 0xea, 0x77, 0x0a, 0x58, 0xbe,
|
||||
0xf9, 0x1c, 0x37, 0x1c, 0x77, 0x93, 0x12, 0x9b, 0x12, 0xf7, 0x60, 0x0d, 0x37, 0x71, 0xfd, 0xba,
|
||||
0x6d, 0xed, 0x92, 0xaa, 0x47, 0x0d, 0x97, 0xd8, 0x16, 0xfc, 0x08, 0x14, 0x2c, 0xbb, 0x41, 0x2c,
|
||||
0x83, 0xcb, 0x4d, 0x8f, 0x52, 0x6c, 0x99, 0x07, 0xdb, 0x35, 0x83, 0x62, 0x56, 0x50, 0x96, 0x95,
|
||||
0xd3, 0x39, 0xfd, 0xaf, 0xed, 0x56, 0xa9, 0xb0, 0x91, 0x60, 0x83, 0x12, 0xbd, 0xe1, 0x7f, 0xc1,
|
||||
0x6c, 0x1d, 0x5b, 0x15, 0x63, 0xa7, 0x8e, 0x37, 0x31, 0x35, 0xb1, 0xe5, 0x16, 0x32, 0x02, 0x70,
|
||||
0xbe, 0xdd, 0x2a, 0xcd, 0xae, 0xc5, 0x55, 0xa8, 0xdb, 0x56, 0x7d, 0x0c, 0x16, 0xff, 0x57, 0xb7,
|
||||
0xf7, 0x6f, 0x10, 0xe6, 0x12, 0xab, 0xea, 0x11, 0x56, 0xc3, 0x74, 0x1d, 0xbb, 0x35, 0xbb, 0x02,
|
||||
0xaf, 0x81, 0xac, 0x7b, 0xe0, 0x60, 0x71, 0xbe, 0xbc, 0x7e, 0xe6, 0x55, 0xab, 0x34, 0xd2, 0x6e,
|
||||
0x95, 0xb2, 0x0f, 0x0e, 0x1c, 0xfc, 0xae, 0x55, 0x3a, 0x91, 0xe0, 0xc6, 0xd5, 0x48, 0x38, 0xaa,
|
||||
0x2f, 0x33, 0x00, 0x70, 0xab, 0x6d, 0x91, 0x38, 0xf8, 0x14, 0x4c, 0xf0, 0xc7, 0xaa, 0x18, 0xae,
|
||||
0x21, 0x30, 0x27, 0x2f, 0x9c, 0xd7, 0xa2, 0x52, 0x09, 0x73, 0xae, 0x39, 0x7b, 0x55, 0x2e, 0x60,
|
||||
0x1a, 0xb7, 0xd6, 0x9a, 0x2b, 0xda, 0xfd, 0x9d, 0x67, 0xd8, 0x74, 0xd7, 0xb1, 0x6b, 0xe8, 0x50,
|
||||
0x9e, 0x02, 0x44, 0x32, 0x14, 0xa2, 0xc2, 0x2d, 0x90, 0x65, 0x0e, 0x36, 0x45, 0x02, 0x26, 0x2f,
|
||||
0x94, 0xb5, 0x94, 0x42, 0xd4, 0xa2, 0xc3, 0x6d, 0x3b, 0xd8, 0xd4, 0xa7, 0x82, 0x2b, 0xf2, 0x2f,
|
||||
0x24, 0xa0, 0xe0, 0xc7, 0x60, 0x8c, 0xb9, 0x86, 0xeb, 0xb1, 0xc2, 0xa8, 0x00, 0x5d, 0x79, 0x1f,
|
||||
0x50, 0xe1, 0xa8, 0xcf, 0x48, 0xd8, 0x31, 0xff, 0x1b, 0x49, 0x40, 0xf5, 0x4d, 0x06, 0xcc, 0x47,
|
||||
0xc6, 0xd7, 0x6d, 0xab, 0x42, 0x44, 0xad, 0x5c, 0x8d, 0xe5, 0xfd, 0x54, 0x57, 0xde, 0x17, 0xfb,
|
||||
0xb8, 0x44, 0x39, 0x87, 0xff, 0x09, 0xcf, 0x9b, 0x11, 0xee, 0x27, 0xe3, 0xc1, 0xdf, 0xb5, 0x4a,
|
||||
0xb3, 0xa1, 0x5b, 0xfc, 0x3c, 0xb0, 0x09, 0x60, 0xdd, 0x60, 0xee, 0x03, 0x6a, 0x58, 0xcc, 0x87,
|
||||
0x25, 0x0d, 0x2c, 0xaf, 0xfd, 0xcf, 0xe1, 0x5e, 0x8a, 0x7b, 0xe8, 0x4b, 0x32, 0x24, 0x5c, 0xeb,
|
||||
0x41, 0x43, 0x7d, 0x22, 0xc0, 0xbf, 0x83, 0x31, 0x8a, 0x0d, 0x66, 0x5b, 0x85, 0xac, 0x38, 0x72,
|
||||
0x98, 0x2f, 0x24, 0xa4, 0x48, 0x6a, 0xe1, 0x3f, 0xc0, 0x78, 0x03, 0x33, 0x66, 0x54, 0x71, 0x21,
|
||||
0x27, 0x0c, 0x67, 0xa5, 0xe1, 0xf8, 0xba, 0x2f, 0x46, 0x81, 0x5e, 0xfd, 0x5e, 0x01, 0x33, 0x51,
|
||||
0x9e, 0xd6, 0x08, 0x73, 0xe1, 0x93, 0x9e, 0xea, 0xd3, 0x86, 0xbb, 0x13, 0xf7, 0x16, 0xb5, 0x37,
|
||||
0x27, 0xc3, 0x4d, 0x04, 0x92, 0x8e, 0xca, 0xdb, 0x04, 0x39, 0xe2, 0xe2, 0x06, 0xcf, 0xfa, 0xe8,
|
||||
0xe9, 0xc9, 0x0b, 0x67, 0xde, 0xa3, 0x4a, 0xf4, 0x69, 0x89, 0x9b, 0xbb, 0xc3, 0x11, 0x90, 0x0f,
|
||||
0xa4, 0xfe, 0x3c, 0xda, 0x79, 0x05, 0x5e, 0x91, 0xf0, 0x6b, 0x05, 0x2c, 0x39, 0x89, 0x33, 0x46,
|
||||
0xde, 0xea, 0x46, 0x6a, 0xe8, 0xe4, 0x31, 0x85, 0xf0, 0x2e, 0xe6, 0xb3, 0x05, 0xeb, 0xaa, 0x3c,
|
||||
0xd3, 0xd2, 0x00, 0xe3, 0x01, 0x67, 0x81, 0x77, 0x01, 0x6c, 0x18, 0x2e, 0xcf, 0x69, 0x75, 0x93,
|
||||
0x62, 0x13, 0x57, 0x38, 0xaa, 0x1c, 0x4c, 0x61, 0x7d, 0xac, 0xf7, 0x58, 0xa0, 0x3e, 0x5e, 0xf0,
|
||||
0x0b, 0x05, 0xcc, 0x57, 0x7a, 0x07, 0x8d, 0xac, 0xcc, 0x2b, 0x43, 0xa5, 0xba, 0xcf, 0xa0, 0xd2,
|
||||
0x17, 0xdb, 0xad, 0xd2, 0x7c, 0x1f, 0x05, 0xea, 0x17, 0x0d, 0x7e, 0x02, 0x72, 0xd4, 0xab, 0x63,
|
||||
0x56, 0xc8, 0x8a, 0x17, 0x4e, 0x0f, 0xbb, 0x69, 0xd7, 0x89, 0x79, 0x80, 0xb8, 0xcf, 0xff, 0x89,
|
||||
0x5b, 0xdb, 0xf6, 0xc4, 0xc4, 0x62, 0xd1, 0x73, 0x0b, 0x15, 0xf2, 0x51, 0xd5, 0x17, 0x60, 0xae,
|
||||
0x7b, 0x70, 0xc0, 0x1a, 0x00, 0x66, 0xd0, 0xab, 0x7c, 0x4d, 0xf0, 0xb8, 0x17, 0xdf, 0xa3, 0xb2,
|
||||
0xc2, 0x46, 0x8f, 0xc6, 0x66, 0x28, 0x62, 0xa8, 0x03, 0x5b, 0x3d, 0x0f, 0xa6, 0x6e, 0x51, 0xdb,
|
||||
0x73, 0xe4, 0x21, 0xe1, 0x32, 0xc8, 0x5a, 0x46, 0x23, 0x18, 0x41, 0xe1, 0x5c, 0xdc, 0x30, 0x1a,
|
||||
0x18, 0x09, 0x8d, 0xfa, 0x95, 0x02, 0xa6, 0xd7, 0x48, 0x83, 0xb8, 0x08, 0x33, 0xc7, 0xb6, 0x18,
|
||||
0x86, 0x97, 0x62, 0x63, 0xeb, 0x64, 0xd7, 0xd8, 0x3a, 0x16, 0x33, 0xee, 0x18, 0x58, 0x4f, 0xc0,
|
||||
0xf8, 0xa7, 0x1e, 0xf6, 0x88, 0x55, 0x95, 0x63, 0xfb, 0x52, 0xea, 0x0d, 0xb7, 0x7c, 0xfb, 0x58,
|
||||
0xc5, 0xe9, 0x93, 0x7c, 0x10, 0x48, 0x0d, 0x0a, 0x20, 0xd5, 0xdf, 0x32, 0xe0, 0xa4, 0x88, 0x8c,
|
||||
0x2b, 0x03, 0xb6, 0xf3, 0x13, 0x50, 0x30, 0x18, 0xf3, 0x28, 0xae, 0x24, 0x6d, 0xe7, 0x65, 0x79,
|
||||
0x9d, 0xc2, 0x6a, 0x82, 0x1d, 0x4a, 0x44, 0x80, 0x7b, 0x60, 0xba, 0xde, 0x79, 0x79, 0x79, 0x4f,
|
||||
0x2d, 0xf5, 0x9e, 0xb1, 0x94, 0xe9, 0x0b, 0xf2, 0x08, 0xf1, 0xb4, 0xa3, 0x38, 0x76, 0x3f, 0x3a,
|
||||
0x30, 0x3a, 0x3c, 0x1d, 0x80, 0xf7, 0xc1, 0xc2, 0x8e, 0x4d, 0xa9, 0xbd, 0x4f, 0xac, 0xaa, 0x88,
|
||||
0x13, 0x80, 0x64, 0x05, 0xc8, 0x5f, 0xda, 0xad, 0xd2, 0x82, 0xde, 0xcf, 0x00, 0xf5, 0xf7, 0x53,
|
||||
0xf7, 0xc1, 0xc2, 0x06, 0x1f, 0x2c, 0xcc, 0xf6, 0xa8, 0x89, 0xa3, 0x9e, 0x80, 0x25, 0x90, 0x6b,
|
||||
0x62, 0xba, 0xe3, 0xd7, 0x75, 0x5e, 0xcf, 0xf3, 0x8e, 0x78, 0xc4, 0x05, 0xc8, 0x97, 0xf3, 0x9b,
|
||||
0x58, 0x91, 0xe7, 0x43, 0xb4, 0xc6, 0x0a, 0x63, 0xc2, 0x54, 0xdc, 0x64, 0x23, 0xae, 0x42, 0xdd,
|
||||
0xb6, 0xea, 0x61, 0x06, 0x2c, 0x26, 0xb4, 0x20, 0x7c, 0x04, 0x26, 0x98, 0xfc, 0x5b, 0xb6, 0xd5,
|
||||
0xe9, 0xd4, 0xc7, 0x90, 0xce, 0xd1, 0x16, 0x08, 0xd0, 0x50, 0x88, 0x05, 0x1d, 0x30, 0x4d, 0xe5,
|
||||
0x19, 0x44, 0x50, 0xb9, 0x0d, 0xfe, 0x95, 0x0a, 0xde, 0x9b, 0x9f, 0xe8, 0xb9, 0x51, 0x27, 0x22,
|
||||
0x8a, 0x07, 0x80, 0x2f, 0xc0, 0x5c, 0xc7, 0xc5, 0xfd, 0xa0, 0xa3, 0x22, 0xe8, 0xe5, 0xd4, 0xa0,
|
||||
0x7d, 0xdf, 0x45, 0x2f, 0xc8, 0xb8, 0x73, 0x1b, 0x5d, 0xb8, 0xa8, 0x27, 0x92, 0xfa, 0x63, 0x06,
|
||||
0x0c, 0x58, 0x10, 0x1f, 0x80, 0xf0, 0x19, 0x31, 0xc2, 0x77, 0xed, 0x08, 0xab, 0x2f, 0x91, 0x00,
|
||||
0x92, 0x2e, 0x02, 0xb8, 0x7a, 0x94, 0x20, 0x83, 0x09, 0xe1, 0x2f, 0x19, 0xf0, 0xb7, 0x64, 0xe7,
|
||||
0x88, 0x20, 0xde, 0x8b, 0x4d, 0xda, 0x7f, 0x77, 0x4d, 0xda, 0x53, 0x43, 0x40, 0xfc, 0x49, 0x18,
|
||||
0xbb, 0x08, 0xe3, 0x5b, 0x05, 0x14, 0x93, 0xf3, 0xf6, 0x01, 0x08, 0xe4, 0xd3, 0x38, 0x81, 0xbc,
|
||||
0x7a, 0x84, 0x2a, 0x4b, 0x20, 0x94, 0xb7, 0x06, 0x15, 0x57, 0xc8, 0xfc, 0x86, 0x58, 0xfd, 0xdf,
|
||||
0x64, 0x06, 0xe5, 0x4a, 0x30, 0xd5, 0x94, 0x9f, 0x30, 0x31, 0xef, 0x9b, 0x16, 0x5f, 0x40, 0x0d,
|
||||
0xbe, 0x43, 0xfc, 0x8a, 0x24, 0x60, 0xbc, 0xee, 0xaf, 0x6c, 0xd9, 0xd7, 0xfa, 0x70, 0x9b, 0x72,
|
||||
0xd0, 0x8a, 0xf7, 0xe9, 0x81, 0x34, 0x43, 0x01, 0x3e, 0xc4, 0x60, 0x0c, 0x8b, 0x9f, 0xee, 0x43,
|
||||
0x37, 0x77, 0xda, 0x2f, 0x7d, 0x1d, 0xf0, 0x42, 0xf4, 0xad, 0x90, 0x04, 0x57, 0x5f, 0x2a, 0x60,
|
||||
0x39, 0x6d, 0x2a, 0xc0, 0xe7, 0x7d, 0xd8, 0xde, 0x51, 0xc8, 0xfc, 0xf0, 0xec, 0xef, 0x5b, 0x05,
|
||||
0x1c, 0xef, 0xc7, 0xa9, 0x78, 0xa3, 0x71, 0x22, 0x15, 0xb2, 0xa0, 0xb0, 0xd1, 0xb6, 0x84, 0x14,
|
||||
0x49, 0x2d, 0x3c, 0x0b, 0x26, 0x6a, 0x86, 0x55, 0xd9, 0x26, 0x9f, 0x05, 0x1c, 0x3f, 0x2c, 0xf5,
|
||||
0xdb, 0x52, 0x8e, 0x42, 0x0b, 0x78, 0x03, 0xcc, 0x09, 0xbf, 0x35, 0x6c, 0x55, 0xdd, 0x9a, 0x78,
|
||||
0x13, 0xc9, 0x51, 0xc2, 0xdd, 0xb3, 0xd5, 0xa5, 0x47, 0x3d, 0x1e, 0xea, 0xaf, 0x0a, 0x80, 0x7f,
|
||||
0x84, 0x56, 0x9c, 0x01, 0x79, 0xc3, 0x21, 0x82, 0xed, 0xfa, 0xcd, 0x96, 0xd7, 0xa7, 0xdb, 0xad,
|
||||
0x52, 0x7e, 0x75, 0xf3, 0x8e, 0x2f, 0x44, 0x91, 0x9e, 0x1b, 0x07, 0xfb, 0xd6, 0xdf, 0xab, 0xd2,
|
||||
0x38, 0x08, 0xcc, 0x50, 0xa4, 0x87, 0x57, 0xc0, 0x94, 0x59, 0xf7, 0x98, 0x8b, 0xe9, 0xb6, 0x69,
|
||||
0x3b, 0x58, 0x0c, 0xa7, 0x09, 0xfd, 0xb8, 0xbc, 0xd3, 0xd4, 0xf5, 0x0e, 0x1d, 0x8a, 0x59, 0x42,
|
||||
0x0d, 0x00, 0xde, 0x59, 0xcc, 0x31, 0x78, 0x9c, 0x9c, 0x88, 0x33, 0xc3, 0x1f, 0x6c, 0x23, 0x94,
|
||||
0xa2, 0x0e, 0x0b, 0xf5, 0x19, 0x58, 0xd8, 0xc6, 0xb4, 0x49, 0x4c, 0xbc, 0x6a, 0x9a, 0xb6, 0x67,
|
||||
0xb9, 0x01, 0x6f, 0x2f, 0x83, 0x7c, 0x68, 0x26, 0x9b, 0xef, 0x98, 0x8c, 0x9f, 0x0f, 0xb1, 0x50,
|
||||
0x64, 0x13, 0x76, 0x7b, 0x26, 0xb1, 0xdb, 0x7f, 0xc8, 0x80, 0xf1, 0x08, 0x3e, 0xbb, 0x47, 0xac,
|
||||
0x8a, 0x44, 0x3e, 0x11, 0x58, 0xdf, 0x23, 0x56, 0xe5, 0x5d, 0xab, 0x34, 0x29, 0xcd, 0xf8, 0x27,
|
||||
0x12, 0x86, 0xf0, 0x2e, 0xc8, 0x7a, 0x0c, 0x53, 0xd9, 0xc7, 0x67, 0x53, 0xab, 0xf9, 0x21, 0xc3,
|
||||
0x34, 0x20, 0x5a, 0x13, 0x1c, 0x9a, 0x0b, 0x90, 0xc0, 0x80, 0x1b, 0x20, 0x57, 0xe5, 0xaf, 0x22,
|
||||
0x5b, 0xf5, 0x5c, 0x2a, 0x58, 0xe7, 0x2f, 0x1a, 0xbf, 0x10, 0x84, 0x04, 0xf9, 0x30, 0x90, 0x82,
|
||||
0x19, 0x16, 0x4b, 0xa2, 0x78, 0xb0, 0x61, 0x88, 0x53, 0xdf, 0xdc, 0xeb, 0xb0, 0xdd, 0x2a, 0xcd,
|
||||
0xc4, 0x55, 0xa8, 0x2b, 0x82, 0x5a, 0x06, 0x93, 0x1d, 0x57, 0x4c, 0x9f, 0xb5, 0xfa, 0xcd, 0x57,
|
||||
0x87, 0xc5, 0x91, 0xd7, 0x87, 0xc5, 0x91, 0x37, 0x87, 0xc5, 0x91, 0xcf, 0xdb, 0x45, 0xe5, 0x55,
|
||||
0xbb, 0xa8, 0xbc, 0x6e, 0x17, 0x95, 0x37, 0xed, 0xa2, 0xf2, 0xb6, 0x5d, 0x54, 0xbe, 0xfc, 0xa9,
|
||||
0x38, 0xf2, 0xb8, 0x94, 0xf2, 0x2f, 0xda, 0xdf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc1, 0x6c, 0x4e,
|
||||
0x4e, 0xdd, 0x15, 0x00, 0x00,
|
||||
var fileDescriptor_f8a25df358697d27 = []byte{
|
||||
// 1588 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4b, 0x73, 0x1b, 0xc5,
|
||||
0x16, 0xb6, 0x64, 0xc9, 0xb6, 0x8e, 0x9f, 0x69, 0xc7, 0x65, 0xc5, 0xb9, 0x25, 0x39, 0x73, 0xeb,
|
||||
0xe6, 0x71, 0x43, 0xa4, 0xc4, 0x45, 0x20, 0xa9, 0x00, 0xa9, 0x4c, 0x12, 0xf2, 0xb2, 0x1d, 0xa7,
|
||||
0x95, 0x07, 0x15, 0xa8, 0x82, 0xd1, 0xa8, 0x2d, 0x4d, 0x2c, 0xcd, 0x0c, 0xdd, 0x33, 0x32, 0xa6,
|
||||
0x8a, 0x2a, 0x7e, 0x42, 0x56, 0x2c, 0x59, 0xc0, 0x3f, 0x60, 0x45, 0xc1, 0x86, 0x65, 0x76, 0x64,
|
||||
0x19, 0x58, 0xa8, 0x88, 0xf8, 0x0b, 0x2c, 0x20, 0x2b, 0xaa, 0x7b, 0x7a, 0x66, 0x34, 0x92, 0x66,
|
||||
0xac, 0xf2, 0x22, 0x6c, 0xd8, 0x79, 0xce, 0xf9, 0xce, 0x77, 0xba, 0x4f, 0x9f, 0x97, 0x0c, 0xea,
|
||||
0xce, 0x05, 0x56, 0x32, 0xac, 0xf2, 0x8e, 0x5b, 0x25, 0xd4, 0x24, 0x0e, 0x61, 0xe5, 0x36, 0x31,
|
||||
0x6b, 0x16, 0x2d, 0x4b, 0x85, 0x66, 0x1b, 0xe5, 0xed, 0xa6, 0xb5, 0xab, 0x5b, 0xa6, 0x43, 0xad,
|
||||
0x66, 0xb9, 0x7d, 0xae, 0x5c, 0x27, 0x26, 0xa1, 0x9a, 0x43, 0x6a, 0x25, 0x9b, 0x5a, 0x8e, 0x85,
|
||||
0x8e, 0x78, 0xd0, 0x92, 0x66, 0x1b, 0xa5, 0x1e, 0x68, 0xa9, 0x7d, 0x6e, 0xe5, 0x4c, 0xdd, 0x70,
|
||||
0x1a, 0x6e, 0xb5, 0xa4, 0x5b, 0xad, 0x72, 0xdd, 0xaa, 0x5b, 0x65, 0x61, 0x51, 0x75, 0xb7, 0xc5,
|
||||
0x97, 0xf8, 0x10, 0x7f, 0x79, 0x4c, 0x2b, 0x6f, 0x86, 0x4e, 0x5b, 0x9a, 0xde, 0x30, 0x4c, 0x42,
|
||||
0xf7, 0xca, 0xf6, 0x4e, 0x9d, 0x0b, 0x58, 0xb9, 0x45, 0x1c, 0x6d, 0x88, 0xff, 0x95, 0x72, 0x9c,
|
||||
0x15, 0x75, 0x4d, 0xc7, 0x68, 0x91, 0x01, 0x83, 0xb7, 0xf6, 0x33, 0x60, 0x7a, 0x83, 0xb4, 0xb4,
|
||||
0x7e, 0x3b, 0xe5, 0xc7, 0x14, 0xac, 0x5e, 0xff, 0x8c, 0xb4, 0x6c, 0x67, 0x8b, 0x1a, 0x16, 0x35,
|
||||
0x9c, 0xbd, 0x75, 0xd2, 0x26, 0xcd, 0xab, 0x96, 0xb9, 0x6d, 0xd4, 0x5d, 0xaa, 0x39, 0x86, 0x65,
|
||||
0xa2, 0x0f, 0x20, 0x6f, 0x5a, 0x2d, 0xc3, 0xd4, 0xb8, 0x5c, 0x77, 0x29, 0x25, 0xa6, 0xbe, 0x57,
|
||||
0x69, 0x68, 0x94, 0xb0, 0x7c, 0x6a, 0x35, 0x75, 0x32, 0xab, 0xfe, 0xa7, 0xdb, 0x29, 0xe6, 0x37,
|
||||
0x63, 0x30, 0x38, 0xd6, 0x1a, 0xbd, 0x0b, 0xf3, 0x4d, 0x62, 0xd6, 0xb4, 0x6a, 0x93, 0x6c, 0x11,
|
||||
0xaa, 0x13, 0xd3, 0xc9, 0xa7, 0x05, 0xe1, 0x62, 0xb7, 0x53, 0x9c, 0x5f, 0x8f, 0xaa, 0x70, 0x3f,
|
||||
0x56, 0x79, 0x0c, 0xcb, 0xef, 0x37, 0xad, 0xdd, 0x6b, 0x06, 0x73, 0x0c, 0xb3, 0xee, 0x1a, 0xac,
|
||||
0x41, 0xe8, 0x06, 0x71, 0x1a, 0x56, 0x0d, 0x5d, 0x86, 0x8c, 0xb3, 0x67, 0x13, 0x71, 0xbe, 0x9c,
|
||||
0x7a, 0xfa, 0x59, 0xa7, 0x38, 0xd6, 0xed, 0x14, 0x33, 0xf7, 0xf7, 0x6c, 0xf2, 0xaa, 0x53, 0x3c,
|
||||
0x1a, 0x63, 0xc6, 0xd5, 0x58, 0x18, 0x2a, 0x4f, 0xd3, 0x00, 0x1c, 0x55, 0x11, 0x81, 0x43, 0x9f,
|
||||
0xc0, 0x14, 0x7f, 0xac, 0x9a, 0xe6, 0x68, 0x82, 0x73, 0x7a, 0xed, 0x6c, 0x29, 0x4c, 0x92, 0x20,
|
||||
0xe6, 0x25, 0x7b, 0xa7, 0xce, 0x05, 0xac, 0xc4, 0xd1, 0xa5, 0xf6, 0xb9, 0xd2, 0xdd, 0xea, 0x13,
|
||||
0xa2, 0x3b, 0x1b, 0xc4, 0xd1, 0x54, 0x24, 0x4f, 0x01, 0xa1, 0x0c, 0x07, 0xac, 0xe8, 0x0e, 0x64,
|
||||
0x98, 0x4d, 0x74, 0x11, 0x80, 0xe9, 0xb5, 0x53, 0xa5, 0xd8, 0x14, 0x2c, 0x85, 0xc7, 0xaa, 0xd8,
|
||||
0x44, 0x57, 0x67, 0xfc, 0xcb, 0xf1, 0x2f, 0x2c, 0x48, 0x50, 0x05, 0x26, 0x98, 0xa3, 0x39, 0x2e,
|
||||
0xcb, 0x8f, 0x0b, 0xba, 0xd3, 0xa3, 0xd1, 0x09, 0x13, 0x75, 0x4e, 0x12, 0x4e, 0x78, 0xdf, 0x58,
|
||||
0x52, 0x29, 0x2f, 0xd2, 0xb0, 0x18, 0x82, 0xaf, 0x5a, 0x66, 0xcd, 0x10, 0xf9, 0x71, 0x29, 0x12,
|
||||
0xeb, 0x13, 0x7d, 0xb1, 0x5e, 0x1e, 0x62, 0x12, 0xc6, 0x19, 0x5d, 0x0c, 0x4e, 0x9a, 0x16, 0xe6,
|
||||
0xc7, 0xa2, 0xce, 0x5f, 0x75, 0x8a, 0xf3, 0x81, 0x59, 0xf4, 0x3c, 0xa8, 0x0d, 0xa8, 0xa9, 0x31,
|
||||
0xe7, 0x3e, 0xd5, 0x4c, 0xe6, 0xd1, 0x1a, 0x2d, 0x22, 0x2f, 0xfc, 0xff, 0xd1, 0x5e, 0x87, 0x5b,
|
||||
0xa8, 0x2b, 0xd2, 0x25, 0x5a, 0x1f, 0x60, 0xc3, 0x43, 0x3c, 0xa0, 0xe3, 0x30, 0x41, 0x89, 0xc6,
|
||||
0x2c, 0x33, 0x9f, 0x11, 0x47, 0x0e, 0xe2, 0x85, 0x85, 0x14, 0x4b, 0x2d, 0x3a, 0x05, 0x93, 0x2d,
|
||||
0xc2, 0x98, 0x56, 0x27, 0xf9, 0xac, 0x00, 0xce, 0x4b, 0xe0, 0xe4, 0x86, 0x27, 0xc6, 0xbe, 0x5e,
|
||||
0xf9, 0x21, 0x05, 0x73, 0x61, 0x9c, 0xd6, 0x0d, 0xe6, 0xa0, 0x8f, 0x06, 0x32, 0xae, 0x34, 0xda,
|
||||
0x9d, 0xb8, 0xb5, 0xc8, 0xb7, 0x05, 0xe9, 0x6e, 0xca, 0x97, 0xf4, 0x64, 0xdb, 0x6d, 0xc8, 0x1a,
|
||||
0x0e, 0x69, 0xf1, 0xa8, 0x8f, 0x9f, 0x9c, 0x5e, 0xfb, 0xdf, 0x48, 0xf9, 0xa1, 0xce, 0x4a, 0xc6,
|
||||
0xec, 0x2d, 0x6e, 0x8b, 0x3d, 0x0a, 0xe5, 0x97, 0xf1, 0xde, 0xc3, 0xf3, 0x2c, 0x44, 0xdf, 0xa4,
|
||||
0x60, 0xc5, 0x8e, 0xed, 0x28, 0xf2, 0x3e, 0xef, 0x25, 0x38, 0x8d, 0x6f, 0x47, 0x98, 0x6c, 0x13,
|
||||
0xde, 0x43, 0x88, 0xaa, 0xc8, 0xd3, 0xac, 0x24, 0x80, 0x13, 0x4e, 0x81, 0x6e, 0x03, 0x6a, 0x69,
|
||||
0x0e, 0x8f, 0x63, 0x7d, 0x8b, 0x12, 0x9d, 0xd4, 0x38, 0xab, 0x6c, 0x40, 0x41, 0x4e, 0x6c, 0x0c,
|
||||
0x20, 0xf0, 0x10, 0x2b, 0xf4, 0x05, 0x2c, 0xd6, 0x06, 0xfb, 0x89, 0x4c, 0xc6, 0xb5, 0x7d, 0xa2,
|
||||
0x3b, 0xa4, 0x13, 0xa9, 0xcb, 0xdd, 0x4e, 0x71, 0x71, 0x88, 0x02, 0x0f, 0xf3, 0x83, 0x1e, 0x41,
|
||||
0x96, 0xba, 0x4d, 0xc2, 0xf2, 0x19, 0xf1, 0x9c, 0x49, 0x0e, 0xb7, 0xac, 0xa6, 0xa1, 0xef, 0x61,
|
||||
0x8e, 0x7e, 0x64, 0x38, 0x8d, 0x8a, 0x2b, 0x9a, 0x11, 0x0b, 0xdf, 0x56, 0xa8, 0xb0, 0xc7, 0xa7,
|
||||
0xb4, 0x61, 0xa1, 0xbf, 0x3f, 0xa0, 0x2a, 0x80, 0xee, 0x97, 0x24, 0x9f, 0x00, 0xe3, 0x7d, 0xb9,
|
||||
0x19, 0x9f, 0x40, 0x41, 0x25, 0x87, 0xbd, 0x30, 0x10, 0x31, 0xdc, 0xc3, 0xaa, 0x9c, 0x85, 0x99,
|
||||
0x1b, 0xd4, 0x72, 0x6d, 0x79, 0x3c, 0xb4, 0x0a, 0x19, 0x53, 0x6b, 0xf9, 0x3d, 0x26, 0x68, 0x79,
|
||||
0x9b, 0x5a, 0x8b, 0x60, 0xa1, 0x51, 0xbe, 0x4e, 0xc1, 0xec, 0xba, 0xd1, 0x32, 0x1c, 0x4c, 0x98,
|
||||
0x6d, 0x99, 0x8c, 0xa0, 0xf3, 0x91, 0xbe, 0x74, 0xac, 0xaf, 0x2f, 0x1d, 0x8a, 0x80, 0x7b, 0x3a,
|
||||
0xd2, 0x43, 0x98, 0xfc, 0xd4, 0x25, 0xae, 0x61, 0xd6, 0x65, 0x2f, 0x2e, 0x27, 0xdc, 0xed, 0x9e,
|
||||
0x87, 0x8c, 0x24, 0x96, 0x3a, 0xcd, 0x6b, 0x5c, 0x6a, 0xb0, 0x4f, 0xa6, 0xfc, 0x91, 0x86, 0x63,
|
||||
0xc2, 0x27, 0xa9, 0xfd, 0x23, 0xc3, 0x96, 0xc0, 0x6c, 0xb3, 0xf7, 0xca, 0xf2, 0x76, 0x27, 0x13,
|
||||
0x6e, 0x17, 0x09, 0x91, 0xba, 0x24, 0x23, 0x18, 0x0d, 0x33, 0x8e, 0xb2, 0x0e, 0x9b, 0xe9, 0xe3,
|
||||
0xa3, 0xcf, 0x74, 0x74, 0x17, 0x96, 0xaa, 0x16, 0xa5, 0xd6, 0xae, 0x61, 0xd6, 0x85, 0x1f, 0x9f,
|
||||
0x24, 0x23, 0x48, 0x8e, 0x74, 0x3b, 0xc5, 0x25, 0x75, 0x18, 0x00, 0x0f, 0xb7, 0x53, 0x76, 0x61,
|
||||
0x69, 0x93, 0x77, 0x0d, 0x66, 0xb9, 0x54, 0x27, 0x61, 0xf6, 0xa3, 0x22, 0x64, 0xdb, 0x84, 0x56,
|
||||
0xbd, 0x0c, 0xce, 0xa9, 0x39, 0x9e, 0xfb, 0x0f, 0xb9, 0x00, 0x7b, 0x72, 0x7e, 0x13, 0x33, 0xb4,
|
||||
0x7c, 0x80, 0xd7, 0x59, 0x7e, 0x42, 0x40, 0xc5, 0x4d, 0x36, 0xa3, 0x2a, 0xdc, 0x8f, 0x55, 0x7e,
|
||||
0x4e, 0xc3, 0x72, 0x4c, 0xb1, 0xa1, 0x2d, 0x98, 0x62, 0xf2, 0x6f, 0x59, 0x40, 0x4a, 0xc2, 0x33,
|
||||
0x48, 0xb3, 0xb0, 0xa1, 0xfb, 0x3c, 0x38, 0x60, 0x41, 0x4f, 0x60, 0x96, 0x4a, 0xef, 0xc2, 0x9d,
|
||||
0x6c, 0xec, 0x67, 0x12, 0x68, 0x07, 0x63, 0x12, 0x3e, 0x31, 0xee, 0xe5, 0xc2, 0x51, 0x6a, 0xd4,
|
||||
0x86, 0x85, 0x9e, 0xcb, 0x7a, 0xee, 0xc6, 0x85, 0xbb, 0xb3, 0x09, 0xee, 0x86, 0xbe, 0x82, 0x9a,
|
||||
0x97, 0x1e, 0x17, 0x36, 0xfb, 0x18, 0xf1, 0x80, 0x0f, 0xe5, 0xa7, 0x34, 0x24, 0xf4, 0xfa, 0xd7,
|
||||
0xb0, 0xa3, 0x7d, 0x18, 0xd9, 0xd1, 0x2e, 0x1e, 0x68, 0x7e, 0xc5, 0xee, 0x6c, 0x7a, 0xdf, 0xce,
|
||||
0x76, 0xe9, 0x60, 0xf4, 0xc9, 0x3b, 0xdc, 0x9f, 0x69, 0xf8, 0x6f, 0xbc, 0x71, 0xb8, 0xd3, 0xdd,
|
||||
0x89, 0xf4, 0xce, 0xb7, 0xfb, 0x7a, 0xe7, 0x89, 0x11, 0x28, 0xfe, 0xdd, 0xf1, 0xfa, 0x76, 0xbc,
|
||||
0x5f, 0x53, 0x50, 0x88, 0x8f, 0xdb, 0x6b, 0xd8, 0xf9, 0x1e, 0x47, 0x77, 0xbe, 0xf3, 0x07, 0xca,
|
||||
0xaf, 0x98, 0x1d, 0xf0, 0x46, 0x52, 0x5a, 0x05, 0x2b, 0xdb, 0x08, 0x63, 0xfc, 0xdb, 0x74, 0x52,
|
||||
0x94, 0xc4, 0x72, 0xb9, 0xcf, 0xef, 0x8d, 0x88, 0xf5, 0x75, 0x93, 0x0f, 0x97, 0x16, 0x9f, 0x0f,
|
||||
0x5e, 0x2e, 0xea, 0x30, 0xd9, 0xf4, 0x86, 0xb0, 0xac, 0xe2, 0x77, 0xf6, 0x9b, 0x7f, 0x49, 0xe3,
|
||||
0xda, 0x1b, 0xf5, 0x12, 0x86, 0x7d, 0x66, 0xf4, 0x31, 0x4c, 0x10, 0xf1, 0xab, 0x7a, 0x84, 0x52,
|
||||
0xde, 0xef, 0xe7, 0xb7, 0x0a, 0x3c, 0xed, 0x3c, 0x14, 0x96, 0xb4, 0xca, 0x57, 0x29, 0x58, 0xdd,
|
||||
0xaf, 0x07, 0x20, 0x3a, 0x64, 0x4f, 0x3b, 0xd8, 0xce, 0x3d, 0xfa, 0xde, 0xf6, 0x5d, 0x0a, 0x0e,
|
||||
0x0f, 0xdb, 0x89, 0x78, 0x41, 0xf1, 0x45, 0x28, 0xd8, 0x62, 0x82, 0x82, 0xba, 0x27, 0xa4, 0x58,
|
||||
0x6a, 0xd1, 0x1b, 0x30, 0xd5, 0xd0, 0xcc, 0x5a, 0xc5, 0xf8, 0xdc, 0x5f, 0xc5, 0x83, 0x94, 0xbe,
|
||||
0x29, 0xe5, 0x38, 0x40, 0xa0, 0x6b, 0xb0, 0x20, 0xec, 0xd6, 0x89, 0x59, 0x77, 0x1a, 0xe2, 0x1d,
|
||||
0xe4, 0xb6, 0x11, 0xcc, 0x95, 0x7b, 0x7d, 0x7a, 0x3c, 0x60, 0xa1, 0xfc, 0x95, 0x02, 0x74, 0x90,
|
||||
0x05, 0xe1, 0x34, 0xe4, 0x34, 0xdb, 0x10, 0x7b, 0xaa, 0x57, 0x54, 0x39, 0x75, 0xb6, 0xdb, 0x29,
|
||||
0xe6, 0xae, 0x6c, 0xdd, 0xf2, 0x84, 0x38, 0xd4, 0x73, 0xb0, 0x3f, 0x45, 0xbd, 0x69, 0x29, 0xc1,
|
||||
0xbe, 0x63, 0x86, 0x43, 0x3d, 0xba, 0x00, 0x33, 0x7a, 0xd3, 0x65, 0x0e, 0xa1, 0x15, 0xdd, 0xb2,
|
||||
0x89, 0x68, 0x42, 0x53, 0xea, 0x61, 0x79, 0xa7, 0x99, 0xab, 0x3d, 0x3a, 0x1c, 0x41, 0xa2, 0x12,
|
||||
0x00, 0xaf, 0x23, 0x66, 0x6b, 0xdc, 0x4f, 0x56, 0xf8, 0x99, 0xe3, 0x0f, 0xb6, 0x19, 0x48, 0x71,
|
||||
0x0f, 0x42, 0x79, 0x02, 0x4b, 0x15, 0x42, 0xdb, 0x86, 0x4e, 0xae, 0xe8, 0xba, 0xe5, 0x9a, 0x8e,
|
||||
0xbf, 0x71, 0x97, 0x21, 0x17, 0xc0, 0x64, 0xa9, 0x1d, 0x92, 0xfe, 0x73, 0x01, 0x17, 0x0e, 0x31,
|
||||
0x41, 0x6d, 0xa7, 0x63, 0x6b, 0xfb, 0xfb, 0x34, 0x4c, 0x86, 0xf4, 0x99, 0x1d, 0xc3, 0xac, 0x49,
|
||||
0xe6, 0xa3, 0x3e, 0xfa, 0x8e, 0x61, 0xd6, 0x5e, 0x75, 0x8a, 0xd3, 0x12, 0xc6, 0x3f, 0xb1, 0x00,
|
||||
0xa2, 0x6b, 0x90, 0x71, 0x19, 0xa1, 0xb2, 0x6a, 0x8f, 0x27, 0xe4, 0xf1, 0x03, 0x46, 0xa8, 0xbf,
|
||||
0x32, 0x4d, 0x71, 0x52, 0x2e, 0xc0, 0xc2, 0x1a, 0xdd, 0x84, 0x6c, 0x9d, 0xbf, 0x87, 0x2c, 0xcc,
|
||||
0x13, 0x09, 0x34, 0xbd, 0xbf, 0x3f, 0xbc, 0xc7, 0x17, 0x12, 0xec, 0x11, 0xa0, 0x26, 0xcc, 0xb1,
|
||||
0x48, 0xe0, 0xc4, 0x23, 0x25, 0xaf, 0x40, 0x43, 0x23, 0xad, 0xa2, 0x6e, 0xa7, 0x38, 0x17, 0x55,
|
||||
0xe1, 0x3e, 0x6e, 0xa5, 0x0c, 0xd3, 0x3d, 0xd7, 0xda, 0xbf, 0x8f, 0xaa, 0x97, 0x9f, 0xbd, 0x2c,
|
||||
0x8c, 0x3d, 0x7f, 0x59, 0x18, 0x7b, 0xf1, 0xb2, 0x30, 0xf6, 0x65, 0xb7, 0x90, 0x7a, 0xd6, 0x2d,
|
||||
0xa4, 0x9e, 0x77, 0x0b, 0xa9, 0x17, 0xdd, 0x42, 0xea, 0xb7, 0x6e, 0x21, 0xf5, 0xf4, 0xf7, 0xc2,
|
||||
0xd8, 0xe3, 0x23, 0xb1, 0xff, 0x13, 0xfd, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xd1, 0x0a, 0x3e, 0x83,
|
||||
0x48, 0x15, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) {
|
||||
@ -1244,9 +1242,11 @@ func (m *LimitedPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (i
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(m.AssuredConcurrencyShares))
|
||||
i--
|
||||
dAtA[i] = 0x8
|
||||
if m.NominalConcurrencyShares != nil {
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(*m.NominalConcurrencyShares))
|
||||
i--
|
||||
dAtA[i] = 0x8
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
@ -2007,7 +2007,9 @@ func (m *LimitedPriorityLevelConfiguration) Size() (n int) {
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
n += 1 + sovGenerated(uint64(m.AssuredConcurrencyShares))
|
||||
if m.NominalConcurrencyShares != nil {
|
||||
n += 1 + sovGenerated(uint64(*m.NominalConcurrencyShares))
|
||||
}
|
||||
l = m.LimitResponse.Size()
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
if m.LendablePercent != nil {
|
||||
@ -2384,7 +2386,7 @@ func (this *LimitedPriorityLevelConfiguration) String() string {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&LimitedPriorityLevelConfiguration{`,
|
||||
`AssuredConcurrencyShares:` + fmt.Sprintf("%v", this.AssuredConcurrencyShares) + `,`,
|
||||
`NominalConcurrencyShares:` + valueToStringGenerated(this.NominalConcurrencyShares) + `,`,
|
||||
`LimitResponse:` + strings.Replace(strings.Replace(this.LimitResponse.String(), "LimitResponse", "LimitResponse", 1), `&`, ``, 1) + `,`,
|
||||
`LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`,
|
||||
`BorrowingLimitPercent:` + valueToStringGenerated(this.BorrowingLimitPercent) + `,`,
|
||||
@ -3713,9 +3715,9 @@ func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error {
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field AssuredConcurrencyShares", wireType)
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field NominalConcurrencyShares", wireType)
|
||||
}
|
||||
m.AssuredConcurrencyShares = 0
|
||||
var v int32
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
@ -3725,11 +3727,12 @@ func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.AssuredConcurrencyShares |= int32(b&0x7F) << shift
|
||||
v |= int32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
m.NominalConcurrencyShares = &v
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field LimitResponse", wireType)
|
@ -19,14 +19,14 @@ limitations under the License.
|
||||
|
||||
syntax = "proto2";
|
||||
|
||||
package k8s.io.api.flowcontrol.v1alpha1;
|
||||
package k8s.io.api.flowcontrol.v1;
|
||||
|
||||
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
|
||||
|
||||
// Package-wide variables from generator "generated".
|
||||
option go_package = "k8s.io/api/flowcontrol/v1alpha1";
|
||||
option go_package = "k8s.io/api/flowcontrol/v1";
|
||||
|
||||
// ExemptPriorityLevelConfiguration describes the configurable aspects
|
||||
// of the handling of exempt requests.
|
||||
@ -153,6 +153,8 @@ message FlowSchemaStatus {
|
||||
// `conditions` is a list of the current states of FlowSchema.
|
||||
// +listType=map
|
||||
// +listMapKey=type
|
||||
// +patchMergeKey=type
|
||||
// +patchStrategy=merge
|
||||
// +optional
|
||||
repeated FlowSchemaCondition conditions = 1;
|
||||
}
|
||||
@@ -190,23 +192,28 @@ message LimitResponse {
// - How are requests for this priority level limited?
// - What should be done with requests that exceed the limit?
message LimitedPriorityLevelConfiguration {
// `assuredConcurrencyShares` (ACS) configures the execution
// limit, which is a limit on the number of requests of this
// priority level that may be exeucting at a given time. ACS must
// be a positive number. The server's concurrency limit (SCL) is
// divided among the concurrency-controlled priority levels in
// proportion to their assured concurrency shares. This produces
// the assured concurrency value (ACV) --- the number of requests
// that may be executing at a time --- for each such priority
// level:
// `nominalConcurrencyShares` (NCS) contributes to the computation of the
// NominalConcurrencyLimit (NominalCL) of this level.
// This is the number of execution seats available at this priority level.
// This is used both for requests dispatched from this priority level
// as well as requests dispatched from other priority levels
// borrowing seats from this level.
// The server's concurrency limit (ServerCL) is divided among the
// Limited priority levels in proportion to their NCS values:
//
// ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )
// NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
// sum_ncs = sum[priority level k] NCS(k)
//
// Bigger numbers mean a larger nominal concurrency limit,
// at the expense of every other priority level.
//
// If not specified, this field defaults to a value of 30.
//
// Setting this field to zero supports the construction of a
// "jail" for this priority level that is used to hold some request(s)
//
// bigger numbers of ACS mean more reserved concurrent requests (at the
// expense of every other PL).
// This field has a default value of 30.
// +optional
optional int32 assuredConcurrencyShares = 1;
optional int32 nominalConcurrencyShares = 1;

// `limitResponse` indicates what to do with requests that can not be executed right now
optional LimitResponse limitResponse = 2;
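A worked sketch of the NominalCL formula from the comment above (illustrative only, not part of this vendored change): the server concurrency limit is divided among Limited priority levels in proportion to their nominalConcurrencyShares, with each level's share rounded up. The level names and numbers used below are hypothetical.

package main

import (
	"fmt"
	"math"
)

// nominalCLs computes NominalCL(i) = ceil(ServerCL * NCS(i) / sum_ncs) for each level.
func nominalCLs(serverCL int, ncs map[string]int) map[string]int {
	sumNCS := 0
	for _, shares := range ncs {
		sumNCS += shares
	}
	out := make(map[string]int, len(ncs))
	for name, shares := range ncs {
		out[name] = int(math.Ceil(float64(serverCL) * float64(shares) / float64(sumNCS)))
	}
	return out
}

func main() {
	// 30 is the documented default for nominalConcurrencyShares; the other values are made up.
	shares := map[string]int{"global-default": 30, "workload-low": 100, "workload-high": 40}
	fmt.Println(nominalCLs(600, shares)) // each level gets a proportional slice of the 600 seats
}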
@@ -381,6 +388,8 @@ message PriorityLevelConfigurationStatus {
// `conditions` is the current state of "request-priority".
// +listType=map
// +listMapKey=type
// +patchMergeKey=type
// +patchStrategy=merge
// +optional
repeated PriorityLevelConfigurationCondition conditions = 1;
}
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
package v1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@ -26,7 +26,7 @@ import (
|
||||
const GroupName = "flowcontrol.apiserver.k8s.io"
|
||||
|
||||
// SchemeGroupVersion is group version used to register these objects
|
||||
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
|
||||
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
|
||||
|
||||
// Kind takes an unqualified kind and returns a Group qualified GroupKind
|
||||
func Kind(kind string) schema.GroupKind {
|
107 vendor/k8s.io/api/flowcontrol/v1alpha1/types.go → vendor/k8s.io/api/flowcontrol/v1/types.go generated vendored
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
package v1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@ -57,13 +57,55 @@ const (
|
||||
ResponseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID"
|
||||
)
|
||||
|
||||
const (
|
||||
// AutoUpdateAnnotationKey is the name of an annotation that enables
|
||||
// automatic update of the spec of the bootstrap configuration
|
||||
// object(s), if set to 'true'.
|
||||
//
|
||||
// On a fresh install, all bootstrap configuration objects will have auto
|
||||
// update enabled with the following annotation key:
|
||||
// apf.kubernetes.io/autoupdate-spec: 'true'
|
||||
//
|
||||
// The kube-apiserver periodically checks the bootstrap configuration
|
||||
// objects on the cluster and applies updates if necessary.
|
||||
//
|
||||
// kube-apiserver enforces an 'always auto-update' policy for the
|
||||
// mandatory configuration object(s). This implies:
|
||||
// - the auto-update annotation key is added with a value of 'true'
|
||||
// if it is missing.
|
||||
// - the auto-update annotation key is set to 'true' if its current value
|
||||
// is a boolean false or has an invalid boolean representation
|
||||
// (if the cluster operator sets it to 'false' it will be stomped)
|
||||
// - any changes to the spec made by the cluster operator will be
|
||||
// stomped, except for changes to the `nominalConcurrencyShares`
|
||||
// and `lendablePercent` fields of the PriorityLevelConfiguration
|
||||
// named "exempt".
|
||||
//
|
||||
// The kube-apiserver will apply updates on the suggested configuration if:
|
||||
// - the cluster operator has enabled auto-update by setting the annotation
|
||||
// (apf.kubernetes.io/autoupdate-spec: 'true') or
|
||||
// - the annotation key is missing but the generation is 1
|
||||
//
|
||||
// If the suggested configuration object is missing the annotation key,
|
||||
// kube-apiserver will update the annotation appropriately:
|
||||
// - it is set to 'true' if generation of the object is '1' which usually
|
||||
// indicates that the spec of the object has not been changed.
|
||||
// - it is set to 'false' if generation of the object is greater than 1.
|
||||
//
|
||||
// The goal is to enable the kube-apiserver to apply update on suggested
|
||||
// configuration objects installed by previous releases but not overwrite
|
||||
// changes made by the cluster operators.
|
||||
// Note that this distinction is imperfectly detected: in the case where an
|
||||
// operator deletes a suggested configuration object and later creates it
|
||||
// but with a variant spec and then does no updates of the object
|
||||
// (generation is 1), the technique outlined above will incorrectly
|
||||
// determine that the object should be auto-updated.
|
||||
AutoUpdateAnnotationKey = "apf.kubernetes.io/autoupdate-spec"
|
||||
)
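A minimal sketch (illustrative only, not part of this vendored change) of how a client might read the auto-update annotation described above from a flowcontrol/v1 object; treating a missing or unparsable value as "not enabled" mirrors the "invalid boolean representation" case in the comment. Only AutoUpdateAnnotationKey and the FlowSchema type come from the vendored package; the rest is a hypothetical helper.

package main

import (
	"fmt"
	"strconv"

	flowcontrolv1 "k8s.io/api/flowcontrol/v1"
)

// autoUpdateEnabled reports whether the bootstrap object opts in to spec auto-update.
func autoUpdateEnabled(fs *flowcontrolv1.FlowSchema) bool {
	val, ok := fs.Annotations[flowcontrolv1.AutoUpdateAnnotationKey]
	if !ok {
		return false
	}
	enabled, err := strconv.ParseBool(val)
	return err == nil && enabled
}

func main() {
	fs := &flowcontrolv1.FlowSchema{}
	fs.Annotations = map[string]string{flowcontrolv1.AutoUpdateAnnotationKey: "true"}
	fmt.Println(autoUpdateEnabled(fs)) // prints: true
}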

// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.18
// +k8s:prerelease-lifecycle-gen:deprecated=1.20
// +k8s:prerelease-lifecycle-gen:removed=1.21
// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,FlowSchema

// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with
// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher".
@ -84,10 +126,6 @@ type FlowSchema struct {
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.18
// +k8s:prerelease-lifecycle-gen:deprecated=1.20
// +k8s:prerelease-lifecycle-gen:removed=1.21
// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,FlowSchemaList

// FlowSchemaList is a list of FlowSchema objects.
type FlowSchemaList struct {
@ -314,8 +352,10 @@ type FlowSchemaStatus struct {
// `conditions` is a list of the current states of FlowSchema.
// +listType=map
// +listMapKey=type
// +patchMergeKey=type
// +patchStrategy=merge
// +optional
Conditions []FlowSchemaCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"`
Conditions []FlowSchemaCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
}

// FlowSchemaCondition describes conditions for a FlowSchema.
@ -341,10 +381,6 @@ type FlowSchemaConditionType string
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.18
// +k8s:prerelease-lifecycle-gen:deprecated=1.20
// +k8s:prerelease-lifecycle-gen:removed=1.21
// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,PriorityLevelConfiguration

// PriorityLevelConfiguration represents the configuration of a priority level.
type PriorityLevelConfiguration struct {
@ -364,10 +400,6 @@ type PriorityLevelConfiguration struct {
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.18
// +k8s:prerelease-lifecycle-gen:deprecated=1.20
// +k8s:prerelease-lifecycle-gen:removed=1.21
// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,PriorityLevelConfigurationList

// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.
type PriorityLevelConfigurationList struct {
@ -426,23 +458,28 @@ const (
// - How are requests for this priority level limited?
// - What should be done with requests that exceed the limit?
type LimitedPriorityLevelConfiguration struct {
// `assuredConcurrencyShares` (ACS) configures the execution
// limit, which is a limit on the number of requests of this
// priority level that may be exeucting at a given time. ACS must
// be a positive number. The server's concurrency limit (SCL) is
// divided among the concurrency-controlled priority levels in
// proportion to their assured concurrency shares. This produces
// the assured concurrency value (ACV) --- the number of requests
// that may be executing at a time --- for each such priority
// level:
// `nominalConcurrencyShares` (NCS) contributes to the computation of the
// NominalConcurrencyLimit (NominalCL) of this level.
// This is the number of execution seats available at this priority level.
// This is used both for requests dispatched from this priority level
// as well as requests dispatched from other priority levels
// borrowing seats from this level.
// The server's concurrency limit (ServerCL) is divided among the
// Limited priority levels in proportion to their NCS values:
//
// ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )
// NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
// sum_ncs = sum[priority level k] NCS(k)
//
// Bigger numbers mean a larger nominal concurrency limit,
// at the expense of every other priority level.
//
// If not specified, this field defaults to a value of 30.
//
// Setting this field to zero supports the construction of a
// "jail" for this priority level that is used to hold some request(s)
//
// bigger numbers of ACS mean more reserved concurrent requests (at the
// expense of every other PL).
// This field has a default value of 30.
// +optional
AssuredConcurrencyShares int32 `json:"assuredConcurrencyShares" protobuf:"varint,1,opt,name=assuredConcurrencyShares"`
NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares" protobuf:"varint,1,opt,name=nominalConcurrencyShares"`

// `limitResponse` indicates what to do with requests that can not be executed right now
LimitResponse LimitResponse `json:"limitResponse,omitempty" protobuf:"bytes,2,opt,name=limitResponse"`
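
As a worked illustration of the NominalCL formula quoted above (not part of this vendored change): with ServerCL = 600 and three Limited levels that all keep the default NCS of 30, each level gets NominalCL = ceil(600*30/90) = 200. A self-contained sketch:

package main

import (
	"fmt"
	"math"
)

// nominalCLs applies NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) to a
// set of Limited priority levels. Hypothetical helper, not a k8s API.
func nominalCLs(serverCL int, ncs map[string]int32) map[string]int {
	var sum int32
	for _, n := range ncs {
		sum += n
	}
	out := make(map[string]int, len(ncs))
	if sum == 0 {
		return out
	}
	for level, n := range ncs {
		out[level] = int(math.Ceil(float64(serverCL) * float64(n) / float64(sum)))
	}
	return out
}

func main() {
	// Assumed example: ServerCL = 600 and three levels at the default NCS of 30
	// each yield NominalCL = ceil(600*30/90) = 200 per level.
	fmt.Println(nominalCLs(600, map[string]int32{
		"workload-low":   30,
		"workload-high":  30,
		"global-default": 30,
	}))
}
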
@ -586,8 +623,10 @@ type PriorityLevelConfigurationStatus struct {
// `conditions` is the current state of "request-priority".
// +listType=map
// +listMapKey=type
// +patchMergeKey=type
// +patchStrategy=merge
// +optional
Conditions []PriorityLevelConfigurationCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"`
Conditions []PriorityLevelConfigurationCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
}

// PriorityLevelConfigurationCondition defines the condition of priority level.
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
package v1
|
||||
|
||||
// This file contains a collection of methods that can be used from go-restful to
|
||||
// generate Swagger API documentation for its models. Please read this PR for more
|
||||
@ -122,7 +122,7 @@ func (LimitResponse) SwaggerDoc() map[string]string {
|
||||
|
||||
var map_LimitedPriorityLevelConfiguration = map[string]string{
|
||||
"": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?",
|
||||
"assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ",
|
||||
"nominalConcurrencyShares": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level.\n\nIf not specified, this field defaults to a value of 30.\n\nSetting this field to zero supports the construction of a \"jail\" for this priority level that is used to hold some request(s)",
|
||||
"limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now",
|
||||
"lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )",
|
||||
"borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.",
|
@ -19,7 +19,7 @@ limitations under the License.
|
||||
|
||||
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha1
|
||||
package v1
|
||||
|
||||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
@ -237,6 +237,11 @@ func (in *LimitResponse) DeepCopy() *LimitResponse {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) {
|
||||
*out = *in
|
||||
if in.NominalConcurrencyShares != nil {
|
||||
in, out := &in.NominalConcurrencyShares, &out.NominalConcurrencyShares
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
in.LimitResponse.DeepCopyInto(&out.LimitResponse)
|
||||
if in.LendablePercent != nil {
|
||||
in, out := &in.LendablePercent, &out.LendablePercent
|
122
vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go
generated
vendored
@ -1,122 +0,0 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
schema "k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||
func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) {
|
||||
return 1, 18
|
||||
}
|
||||
|
||||
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
|
||||
func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) {
|
||||
return 1, 20
|
||||
}
|
||||
|
||||
// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=<group>,<version>,<kind>" tags in types.go.
|
||||
func (in *FlowSchema) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "FlowSchema"}
|
||||
}
|
||||
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *FlowSchema) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 21
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||
func (in *FlowSchemaList) APILifecycleIntroduced() (major, minor int) {
|
||||
return 1, 18
|
||||
}
|
||||
|
||||
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
|
||||
func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) {
|
||||
return 1, 20
|
||||
}
|
||||
|
||||
// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=<group>,<version>,<kind>" tags in types.go.
|
||||
func (in *FlowSchemaList) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "FlowSchemaList"}
|
||||
}
|
||||
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *FlowSchemaList) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 21
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||
func (in *PriorityLevelConfiguration) APILifecycleIntroduced() (major, minor int) {
|
||||
return 1, 18
|
||||
}
|
||||
|
||||
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
|
||||
func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int) {
|
||||
return 1, 20
|
||||
}
|
||||
|
||||
// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=<group>,<version>,<kind>" tags in types.go.
|
||||
func (in *PriorityLevelConfiguration) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "PriorityLevelConfiguration"}
|
||||
}
|
||||
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *PriorityLevelConfiguration) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 21
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||
func (in *PriorityLevelConfigurationList) APILifecycleIntroduced() (major, minor int) {
|
||||
return 1, 18
|
||||
}
|
||||
|
||||
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
|
||||
func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor int) {
|
||||
return 1, 20
|
||||
}
|
||||
|
||||
// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=<group>,<version>,<kind>" tags in types.go.
|
||||
func (in *PriorityLevelConfigurationList) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "PriorityLevelConfigurationList"}
|
||||
}
|
||||
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *PriorityLevelConfigurationList) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 21
|
||||
}
|
2
vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto
generated
vendored
@ -192,7 +192,7 @@ message LimitResponse {
|
||||
message LimitedPriorityLevelConfiguration {
|
||||
// `assuredConcurrencyShares` (ACS) configures the execution
|
||||
// limit, which is a limit on the number of requests of this
|
||||
// priority level that may be exeucting at a given time. ACS must
|
||||
// priority level that may be executing at a given time. ACS must
|
||||
// be a positive number. The server's concurrency limit (SCL) is
|
||||
// divided among the concurrency-controlled priority levels in
|
||||
// proportion to their assured concurrency shares. This produces
|
||||
|
2
vendor/k8s.io/api/flowcontrol/v1beta1/types.go
generated
vendored
@ -466,7 +466,7 @@ const (
|
||||
type LimitedPriorityLevelConfiguration struct {
|
||||
// `assuredConcurrencyShares` (ACS) configures the execution
|
||||
// limit, which is a limit on the number of requests of this
|
||||
// priority level that may be exeucting at a given time. ACS must
|
||||
// priority level that may be executing at a given time. ACS must
|
||||
// be a positive number. The server's concurrency limit (SCL) is
|
||||
// divided among the concurrency-controlled priority levels in
|
||||
// proportion to their assured concurrency shares. This produces
|
||||
|
2
vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go
generated
vendored
@ -122,7 +122,7 @@ func (LimitResponse) SwaggerDoc() map[string]string {
|
||||
|
||||
var map_LimitedPriorityLevelConfiguration = map[string]string{
|
||||
"": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?",
|
||||
"assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ",
|
||||
"assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ",
|
||||
"limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now",
|
||||
"lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )",
|
||||
"borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.",
|
||||
|
2
vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto
generated
vendored
@ -192,7 +192,7 @@ message LimitResponse {
|
||||
message LimitedPriorityLevelConfiguration {
|
||||
// `assuredConcurrencyShares` (ACS) configures the execution
|
||||
// limit, which is a limit on the number of requests of this
|
||||
// priority level that may be exeucting at a given time. ACS must
|
||||
// priority level that may be executing at a given time. ACS must
|
||||
// be a positive number. The server's concurrency limit (SCL) is
|
||||
// divided among the concurrency-controlled priority levels in
|
||||
// proportion to their assured concurrency shares. This produces
|
||||
|
2
vendor/k8s.io/api/flowcontrol/v1beta2/types.go
generated
vendored
@ -466,7 +466,7 @@ const (
|
||||
type LimitedPriorityLevelConfiguration struct {
|
||||
// `assuredConcurrencyShares` (ACS) configures the execution
|
||||
// limit, which is a limit on the number of requests of this
|
||||
// priority level that may be exeucting at a given time. ACS must
|
||||
// priority level that may be executing at a given time. ACS must
|
||||
// be a positive number. The server's concurrency limit (SCL) is
|
||||
// divided among the concurrency-controlled priority levels in
|
||||
// proportion to their assured concurrency shares. This produces
|
||||
|
2
vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go
generated
vendored
@ -122,7 +122,7 @@ func (LimitResponse) SwaggerDoc() map[string]string {
|
||||
|
||||
var map_LimitedPriorityLevelConfiguration = map[string]string{
|
||||
"": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?",
|
||||
"assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ",
|
||||
"assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ",
|
||||
"limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now",
|
||||
"lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )",
|
||||
"borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.",
|
||||
|
18
vendor/k8s.io/api/flowcontrol/v1beta3/types.go
generated
vendored
@ -103,10 +103,25 @@ const (
AutoUpdateAnnotationKey = "apf.kubernetes.io/autoupdate-spec"
)

const (
// This annotation is only for use in v1beta3.
//
// The presence of this annotation in a v1beta3 object means that
// a zero value in the 'NominalConcurrencyShares' field means zero
// rather than the old default of 30.
//
// To set a zero value for the 'NominalConcurrencyShares' field in v1beta3,
// set the annotation to an empty string:
// "flowcontrol.k8s.io/v1beta3-preserve-zero-concurrency-shares": ""
//
PriorityLevelPreserveZeroConcurrencySharesKey = "flowcontrol.k8s.io/v1beta3-preserve-zero-concurrency-shares"
)
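
A minimal sketch (not part of this vendored change) of preserving a zero NominalConcurrencyShares when writing a v1beta3 object, per the comment above; the object name is only an example, and a real object would also need a valid limitResponse, omitted here.

package main

import (
	"fmt"

	flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	plc := flowcontrolv1beta3.PriorityLevelConfiguration{
		ObjectMeta: metav1.ObjectMeta{
			Name: "example-jail", // hypothetical priority level used as a "jail"
			Annotations: map[string]string{
				// Empty value: keep the zero below instead of defaulting it to 30.
				flowcontrolv1beta3.PriorityLevelPreserveZeroConcurrencySharesKey: "",
			},
		},
		Spec: flowcontrolv1beta3.PriorityLevelConfigurationSpec{
			Type: flowcontrolv1beta3.PriorityLevelEnablementLimited,
			Limited: &flowcontrolv1beta3.LimitedPriorityLevelConfiguration{
				NominalConcurrencyShares: 0,
			},
		},
	}
	fmt.Println(plc.Annotations)
}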
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.26
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1,FlowSchema
|
||||
|
||||
// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with
|
||||
// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher".
|
||||
@ -128,6 +143,7 @@ type FlowSchema struct {
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.26
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1,FlowSchemaList
|
||||
|
||||
// FlowSchemaList is a list of FlowSchema objects.
|
||||
type FlowSchemaList struct {
|
||||
@ -384,6 +400,7 @@ type FlowSchemaConditionType string
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.26
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1,PriorityLevelConfiguration
|
||||
|
||||
// PriorityLevelConfiguration represents the configuration of a priority level.
|
||||
type PriorityLevelConfiguration struct {
|
||||
@ -404,6 +421,7 @@ type PriorityLevelConfiguration struct {
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.26
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1,PriorityLevelConfigurationList
|
||||
|
||||
// PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.
|
||||
type PriorityLevelConfigurationList struct {
|
||||
|
28
vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.prerelease-lifecycle.go
generated
vendored
@ -21,6 +21,10 @@ limitations under the License.
|
||||
|
||||
package v1beta3
|
||||
|
||||
import (
|
||||
schema "k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||
func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) {
|
||||
@ -33,6 +37,12 @@ func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) {
|
||||
return 1, 29
|
||||
}
|
||||
|
||||
// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=<group>,<version>,<kind>" tags in types.go.
|
||||
func (in *FlowSchema) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "FlowSchema"}
|
||||
}
|
||||
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *FlowSchema) APILifecycleRemoved() (major, minor int) {
|
||||
@ -51,6 +61,12 @@ func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) {
|
||||
return 1, 29
|
||||
}
|
||||
|
||||
// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=<group>,<version>,<kind>" tags in types.go.
|
||||
func (in *FlowSchemaList) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "FlowSchemaList"}
|
||||
}
|
||||
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *FlowSchemaList) APILifecycleRemoved() (major, minor int) {
|
||||
@ -69,6 +85,12 @@ func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int
|
||||
return 1, 29
|
||||
}
|
||||
|
||||
// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=<group>,<version>,<kind>" tags in types.go.
|
||||
func (in *PriorityLevelConfiguration) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "PriorityLevelConfiguration"}
|
||||
}
|
||||
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *PriorityLevelConfiguration) APILifecycleRemoved() (major, minor int) {
|
||||
@ -87,6 +109,12 @@ func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor
|
||||
return 1, 29
|
||||
}
|
||||
|
||||
// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=<group>,<version>,<kind>" tags in types.go.
|
||||
func (in *PriorityLevelConfigurationList) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "PriorityLevelConfigurationList"}
|
||||
}
|
||||
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *PriorityLevelConfigurationList) APILifecycleRemoved() (major, minor int) {
|
||||
|
1626
vendor/k8s.io/api/networking/v1alpha1/generated.pb.go
generated
vendored
File diff suppressed because it is too large
119
vendor/k8s.io/api/networking/v1alpha1/generated.proto
generated
vendored
@ -21,7 +21,6 @@ syntax = "proto2";
|
||||
|
||||
package k8s.io.api.networking.v1alpha1;
|
||||
|
||||
import "k8s.io/api/core/v1/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
|
||||
@ -29,69 +28,6 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
|
||||
// Package-wide variables from generator "generated".
|
||||
option go_package = "k8s.io/api/networking/v1alpha1";
|
||||
|
||||
// ClusterCIDR represents a single configuration for per-Node Pod CIDR
|
||||
// allocations when the MultiCIDRRangeAllocator is enabled (see the config for
|
||||
// kube-controller-manager). A cluster may have any number of ClusterCIDR
|
||||
// resources, all of which will be considered when allocating a CIDR for a
|
||||
// Node. A ClusterCIDR is eligible to be used for a given Node when the node
|
||||
// selector matches the node in question and has free CIDRs to allocate. In
|
||||
// case of multiple matching ClusterCIDR resources, the allocator will attempt
|
||||
// to break ties using internal heuristics, but any ClusterCIDR whose node
|
||||
// selector matches the Node may be used.
|
||||
message ClusterCIDR {
|
||||
// Standard object's metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||
|
||||
// spec is the desired state of the ClusterCIDR.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
|
||||
// +optional
|
||||
optional ClusterCIDRSpec spec = 2;
|
||||
}
|
||||
|
||||
// ClusterCIDRList contains a list of ClusterCIDR.
|
||||
message ClusterCIDRList {
|
||||
// Standard object's metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||
|
||||
// items is the list of ClusterCIDRs.
|
||||
repeated ClusterCIDR items = 2;
|
||||
}
|
||||
|
||||
// ClusterCIDRSpec defines the desired state of ClusterCIDR.
|
||||
message ClusterCIDRSpec {
|
||||
// nodeSelector defines which nodes the config is applicable to.
|
||||
// An empty or nil nodeSelector selects all nodes.
|
||||
// This field is immutable.
|
||||
// +optional
|
||||
optional k8s.io.api.core.v1.NodeSelector nodeSelector = 1;
|
||||
|
||||
// perNodeHostBits defines the number of host bits to be configured per node.
|
||||
// A subnet mask determines how much of the address is used for network bits
|
||||
// and host bits. For example an IPv4 address of 192.168.0.0/24, splits the
|
||||
// address into 24 bits for the network portion and 8 bits for the host portion.
|
||||
// To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6).
|
||||
// Minimum value is 4 (16 IPs).
|
||||
// This field is immutable.
|
||||
// +required
|
||||
optional int32 perNodeHostBits = 2;
|
||||
|
||||
// ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8").
|
||||
// At least one of ipv4 and ipv6 must be specified.
|
||||
// This field is immutable.
|
||||
// +optional
|
||||
optional string ipv4 = 3;
|
||||
|
||||
// ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64").
|
||||
// At least one of ipv4 and ipv6 must be specified.
|
||||
// This field is immutable.
|
||||
// +optional
|
||||
optional string ipv6 = 4;
|
||||
}
|
||||
|
||||
// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs
|
||||
// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses.
|
||||
// An IP address can be represented in different formats, to guarantee the uniqueness of the IP,
|
||||
@ -147,9 +83,56 @@ message ParentReference {
|
||||
// Name is the name of the object being referenced.
|
||||
// +required
|
||||
optional string name = 4;
|
||||
|
||||
// UID is the uid of the object being referenced.
|
||||
// +optional
|
||||
optional string uid = 5;
|
||||
}
|
||||
|
||||
// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64).
|
||||
// This range is used to allocate ClusterIPs to Service objects.
|
||||
message ServiceCIDR {
|
||||
// Standard object's metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||
|
||||
// spec is the desired state of the ServiceCIDR.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
|
||||
// +optional
|
||||
optional ServiceCIDRSpec spec = 2;
|
||||
|
||||
// status represents the current state of the ServiceCIDR.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
|
||||
// +optional
|
||||
optional ServiceCIDRStatus status = 3;
|
||||
}
|
||||
|
||||
// ServiceCIDRList contains a list of ServiceCIDR objects.
|
||||
message ServiceCIDRList {
|
||||
// Standard object's metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||
|
||||
// items is the list of ServiceCIDRs.
|
||||
repeated ServiceCIDR items = 2;
|
||||
}
|
||||
|
||||
// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.
|
||||
message ServiceCIDRSpec {
|
||||
// CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
|
||||
// from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family.
|
||||
// This field is immutable.
|
||||
// +optional
|
||||
repeated string cidrs = 1;
|
||||
}
|
||||
|
||||
// ServiceCIDRStatus describes the current state of the ServiceCIDR.
|
||||
message ServiceCIDRStatus {
|
||||
// conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR.
|
||||
// Current service state
|
||||
// +optional
|
||||
// +patchMergeKey=type
|
||||
// +patchStrategy=merge
|
||||
// +listType=map
|
||||
// +listMapKey=type
|
||||
repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
|
||||
}
|
||||
|
||||
|
4
vendor/k8s.io/api/networking/v1alpha1/register.go
generated
vendored
@ -52,10 +52,10 @@ var (
|
||||
// Adds the list of known types to the given scheme.
|
||||
func addKnownTypes(scheme *runtime.Scheme) error {
|
||||
scheme.AddKnownTypes(SchemeGroupVersion,
|
||||
&ClusterCIDR{},
|
||||
&ClusterCIDRList{},
|
||||
&IPAddress{},
|
||||
&IPAddressList{},
|
||||
&ServiceCIDR{},
|
||||
&ServiceCIDRList{},
|
||||
)
|
||||
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
|
||||
return nil
|
||||
|
147
vendor/k8s.io/api/networking/v1alpha1/types.go
generated
vendored
@ -17,86 +17,9 @@ limitations under the License.
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.25
|
||||
|
||||
// ClusterCIDR represents a single configuration for per-Node Pod CIDR
|
||||
// allocations when the MultiCIDRRangeAllocator is enabled (see the config for
|
||||
// kube-controller-manager). A cluster may have any number of ClusterCIDR
|
||||
// resources, all of which will be considered when allocating a CIDR for a
|
||||
// Node. A ClusterCIDR is eligible to be used for a given Node when the node
|
||||
// selector matches the node in question and has free CIDRs to allocate. In
|
||||
// case of multiple matching ClusterCIDR resources, the allocator will attempt
|
||||
// to break ties using internal heuristics, but any ClusterCIDR whose node
|
||||
// selector matches the Node may be used.
|
||||
type ClusterCIDR struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// Standard object's metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// spec is the desired state of the ClusterCIDR.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
|
||||
// +optional
|
||||
Spec ClusterCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
}
|
||||
|
||||
// ClusterCIDRSpec defines the desired state of ClusterCIDR.
|
||||
type ClusterCIDRSpec struct {
|
||||
// nodeSelector defines which nodes the config is applicable to.
|
||||
// An empty or nil nodeSelector selects all nodes.
|
||||
// This field is immutable.
|
||||
// +optional
|
||||
NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"`
|
||||
|
||||
// perNodeHostBits defines the number of host bits to be configured per node.
|
||||
// A subnet mask determines how much of the address is used for network bits
|
||||
// and host bits. For example an IPv4 address of 192.168.0.0/24, splits the
|
||||
// address into 24 bits for the network portion and 8 bits for the host portion.
|
||||
// To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6).
|
||||
// Minimum value is 4 (16 IPs).
|
||||
// This field is immutable.
|
||||
// +required
|
||||
PerNodeHostBits int32 `json:"perNodeHostBits" protobuf:"varint,2,opt,name=perNodeHostBits"`
|
||||
|
||||
// ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8").
|
||||
// At least one of ipv4 and ipv6 must be specified.
|
||||
// This field is immutable.
|
||||
// +optional
|
||||
IPv4 string `json:"ipv4" protobuf:"bytes,3,opt,name=ipv4"`
|
||||
|
||||
// ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64").
|
||||
// At least one of ipv4 and ipv6 must be specified.
|
||||
// This field is immutable.
|
||||
// +optional
|
||||
IPv6 string `json:"ipv6" protobuf:"bytes,4,opt,name=ipv6"`
|
||||
}
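
The perNodeHostBits comment above is plain subnet arithmetic: the prefix length is the address width minus the host bits, so 8 host bits give a /24 for IPv4, a /120 for IPv6, and 2^8 = 256 addresses per node. The sketch below only illustrates that arithmetic; it is not part of this vendored change (which removes the ClusterCIDR type).

package main

import "fmt"

// prefixLen returns the CIDR prefix length implied by a number of host bits
// for an address family with the given total bit width (32 for IPv4, 128 for IPv6).
func prefixLen(totalBits, perNodeHostBits int) int {
	return totalBits - perNodeHostBits
}

func main() {
	hostBits := 8
	fmt.Printf("IPv4: /%d, IPv6: /%d, addresses per node: %d\n",
		prefixLen(32, hostBits), prefixLen(128, hostBits), 1<<hostBits)
}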
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.25
|
||||
|
||||
// ClusterCIDRList contains a list of ClusterCIDR.
|
||||
type ClusterCIDRList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// Standard object's metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// items is the list of ClusterCIDRs.
|
||||
Items []ClusterCIDR `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
@ -143,9 +66,6 @@ type ParentReference struct {
|
||||
// Name is the name of the object being referenced.
|
||||
// +required
|
||||
Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"`
|
||||
// UID is the uid of the object being referenced.
|
||||
// +optional
|
||||
UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
@ -161,3 +81,70 @@ type IPAddressList struct {
|
||||
// items is the list of IPAddresses.
|
||||
Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.27
|
||||
|
||||
// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64).
|
||||
// This range is used to allocate ClusterIPs to Service objects.
|
||||
type ServiceCIDR struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
// spec is the desired state of the ServiceCIDR.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
|
||||
// +optional
|
||||
Spec ServiceCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
// status represents the current state of the ServiceCIDR.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
|
||||
// +optional
|
||||
Status ServiceCIDRStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
|
||||
}
|
||||
|
||||
// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.
|
||||
type ServiceCIDRSpec struct {
|
||||
// CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
|
||||
// from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family.
|
||||
// This field is immutable.
|
||||
// +optional
|
||||
CIDRs []string `json:"cidrs,omitempty" protobuf:"bytes,1,opt,name=cidrs"`
|
||||
}
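
For orientation only (not part of this vendored change): a minimal sketch constructing the new ServiceCIDR type with a dual-stack pair of CIDRs, since the spec comment allows at most two, one per IP family. The object name and ranges are illustrative assumptions.

package main

import (
	"fmt"

	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cidr := networkingv1alpha1.ServiceCIDR{
		ObjectMeta: metav1.ObjectMeta{Name: "example-extra-service-cidr"}, // hypothetical name
		Spec: networkingv1alpha1.ServiceCIDRSpec{
			// At most two entries, one per IP family.
			CIDRs: []string{"10.96.0.0/16", "2001:db8::/108"},
		},
	}
	fmt.Println(cidr.Spec.CIDRs)
}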
|
||||
|
||||
const (
|
||||
// ServiceCIDRConditionReady represents status of a ServiceCIDR that is ready to be used by the
|
||||
// apiserver to allocate ClusterIPs for Services.
|
||||
ServiceCIDRConditionReady = "Ready"
|
||||
// ServiceCIDRReasonTerminating represents a reason where a ServiceCIDR is not ready because it is
|
||||
// being deleted.
|
||||
ServiceCIDRReasonTerminating = "Terminating"
|
||||
)
|
||||
|
||||
// ServiceCIDRStatus describes the current state of the ServiceCIDR.
|
||||
type ServiceCIDRStatus struct {
|
||||
// conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR.
|
||||
// Current service state
|
||||
// +optional
|
||||
// +patchMergeKey=type
|
||||
// +patchStrategy=merge
|
||||
// +listType=map
|
||||
// +listMapKey=type
|
||||
Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.27
|
||||
|
||||
// ServiceCIDRList contains a list of ServiceCIDR objects.
|
||||
type ServiceCIDRList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
// items is the list of ServiceCIDRs.
|
||||
Items []ServiceCIDR `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
72
vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go
generated
vendored
@ -27,38 +27,6 @@ package v1alpha1
|
||||
// Those methods can be generated by using hack/update-codegen.sh
|
||||
|
||||
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
|
||||
var map_ClusterCIDR = map[string]string{
|
||||
"": "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.",
|
||||
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
|
||||
"spec": "spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
|
||||
}
|
||||
|
||||
func (ClusterCIDR) SwaggerDoc() map[string]string {
|
||||
return map_ClusterCIDR
|
||||
}
|
||||
|
||||
var map_ClusterCIDRList = map[string]string{
|
||||
"": "ClusterCIDRList contains a list of ClusterCIDR.",
|
||||
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
|
||||
"items": "items is the list of ClusterCIDRs.",
|
||||
}
|
||||
|
||||
func (ClusterCIDRList) SwaggerDoc() map[string]string {
|
||||
return map_ClusterCIDRList
|
||||
}
|
||||
|
||||
var map_ClusterCIDRSpec = map[string]string{
|
||||
"": "ClusterCIDRSpec defines the desired state of ClusterCIDR.",
|
||||
"nodeSelector": "nodeSelector defines which nodes the config is applicable to. An empty or nil nodeSelector selects all nodes. This field is immutable.",
|
||||
"perNodeHostBits": "perNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.",
|
||||
"ipv4": "ipv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.",
|
||||
"ipv6": "ipv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.",
|
||||
}
|
||||
|
||||
func (ClusterCIDRSpec) SwaggerDoc() map[string]string {
|
||||
return map_ClusterCIDRSpec
|
||||
}
|
||||
|
||||
var map_IPAddress = map[string]string{
|
||||
"": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1",
|
||||
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
|
||||
@ -94,11 +62,49 @@ var map_ParentReference = map[string]string{
|
||||
"resource": "Resource is the resource of the object being referenced.",
|
||||
"namespace": "Namespace is the namespace of the object being referenced.",
|
||||
"name": "Name is the name of the object being referenced.",
|
||||
"uid": "UID is the uid of the object being referenced.",
|
||||
}
|
||||
|
||||
func (ParentReference) SwaggerDoc() map[string]string {
|
||||
return map_ParentReference
|
||||
}
|
||||
|
||||
var map_ServiceCIDR = map[string]string{
|
||||
"": "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.",
|
||||
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
|
||||
"spec": "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
|
||||
"status": "status represents the current state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
|
||||
}
|
||||
|
||||
func (ServiceCIDR) SwaggerDoc() map[string]string {
|
||||
return map_ServiceCIDR
|
||||
}
|
||||
|
||||
var map_ServiceCIDRList = map[string]string{
|
||||
"": "ServiceCIDRList contains a list of ServiceCIDR objects.",
|
||||
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
|
||||
"items": "items is the list of ServiceCIDRs.",
|
||||
}
|
||||
|
||||
func (ServiceCIDRList) SwaggerDoc() map[string]string {
|
||||
return map_ServiceCIDRList
|
||||
}

var map_ServiceCIDRSpec = map[string]string{
"": "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.",
"cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable.",
}

func (ServiceCIDRSpec) SwaggerDoc() map[string]string {
return map_ServiceCIDRSpec
}
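A minimal sketch of the "cidrs" rule quoted above (one or two blocks, at most one per IP family), using net/netip; validateServiceCIDRs is an illustrative helper, not the upstream validator:

package main

import (
    "fmt"
    "net/netip"
)

// validateServiceCIDRs checks the documented constraint: 1 or 2 CIDR blocks,
// with no more than one block per IP family.
func validateServiceCIDRs(cidrs []string) error {
    if len(cidrs) == 0 || len(cidrs) > 2 {
        return fmt.Errorf("expected 1 or 2 CIDRs, got %d", len(cidrs))
    }
    seen := map[bool]bool{} // keyed by "is IPv4"
    for _, c := range cidrs {
        p, err := netip.ParsePrefix(c)
        if err != nil {
            return fmt.Errorf("invalid CIDR %q: %w", c, err)
        }
        if seen[p.Addr().Is4()] {
            return fmt.Errorf("more than one CIDR of the same IP family")
        }
        seen[p.Addr().Is4()] = true
    }
    return nil
}

func main() {
    fmt.Println(validateServiceCIDRs([]string{"192.168.0.0/24", "2001:db8::/64"})) // <nil>
    fmt.Println(validateServiceCIDRs([]string{"10.0.0.0/16", "10.1.0.0/16"}))      // error: same family twice
}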
|
||||
|
||||
var map_ServiceCIDRStatus = map[string]string{
|
||||
"": "ServiceCIDRStatus describes the current state of the ServiceCIDR.",
|
||||
"conditions": "conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state",
|
||||
}
|
||||
|
||||
func (ServiceCIDRStatus) SwaggerDoc() map[string]string {
|
||||
return map_ServiceCIDRStatus
|
||||
}
|
||||
|
||||
// AUTO-GENERATED FUNCTIONS END HERE
|
||||
|
188 vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go generated vendored
@@ -22,91 +22,10 @@ limitations under the License.
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ClusterCIDR) DeepCopyInto(out *ClusterCIDR) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDR.
|
||||
func (in *ClusterCIDR) DeepCopy() *ClusterCIDR {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ClusterCIDR)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterCIDR) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ClusterCIDRList) DeepCopyInto(out *ClusterCIDRList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]ClusterCIDR, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRList.
|
||||
func (in *ClusterCIDRList) DeepCopy() *ClusterCIDRList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ClusterCIDRList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ClusterCIDRList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ClusterCIDRSpec) DeepCopyInto(out *ClusterCIDRSpec) {
|
||||
*out = *in
|
||||
if in.NodeSelector != nil {
|
||||
in, out := &in.NodeSelector, &out.NodeSelector
|
||||
*out = new(v1.NodeSelector)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRSpec.
|
||||
func (in *ClusterCIDRSpec) DeepCopy() *ClusterCIDRSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ClusterCIDRSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IPAddress) DeepCopyInto(out *IPAddress) {
|
||||
*out = *in
|
||||
@@ -203,3 +122,108 @@ func (in *ParentReference) DeepCopy() *ParentReference {
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR.
|
||||
func (in *ServiceCIDR) DeepCopy() *ServiceCIDR {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ServiceCIDR)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ServiceCIDR) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
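Typical caller-side usage of these generated DeepCopy methods is to copy before mutating an object that may be shared, for example one read from an informer cache. The sketch below assumes the vendored k8s.io/api module (v0.29.x) is on the module path and is illustrative only:

package main

import (
    "fmt"

    v1alpha1 "k8s.io/api/networking/v1alpha1"
)

func main() {
    // Pretend this object came from a shared informer cache and must not be mutated.
    shared := &v1alpha1.ServiceCIDR{}
    shared.Name = "kubernetes"
    shared.Spec.CIDRs = []string{"10.96.0.0/16"}

    // Work on a deep copy instead of the shared object.
    mine := shared.DeepCopy()
    mine.Spec.CIDRs = append(mine.Spec.CIDRs, "2001:db8::/112")

    fmt.Println(len(shared.Spec.CIDRs), len(mine.Spec.CIDRs)) // 1 2
}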
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]ServiceCIDR, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList.
|
||||
func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ServiceCIDRList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ServiceCIDRList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) {
|
||||
*out = *in
|
||||
if in.CIDRs != nil {
|
||||
in, out := &in.CIDRs, &out.CIDRs
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec.
|
||||
func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ServiceCIDRSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) {
|
||||
*out = *in
|
||||
if in.Conditions != nil {
|
||||
in, out := &in.Conditions, &out.Conditions
|
||||
*out = make([]v1.Condition, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus.
|
||||
func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ServiceCIDRStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
72 vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go generated vendored
@@ -21,42 +21,6 @@ limitations under the License.
|
||||
|
||||
package v1alpha1
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||
func (in *ClusterCIDR) APILifecycleIntroduced() (major, minor int) {
|
||||
return 1, 25
|
||||
}
|
||||
|
||||
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
|
||||
func (in *ClusterCIDR) APILifecycleDeprecated() (major, minor int) {
|
||||
return 1, 28
|
||||
}

// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *ClusterCIDR) APILifecycleRemoved() (major, minor int) {
return 1, 31
}
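These generated lifecycle hooks are normally consumed by comparing a cluster version against the returned (major, minor) pair; servedAt below is an illustrative helper (not part of the vendored code) using the 1.31 removal release reported for ClusterCIDR just above:

package main

import "fmt"

// servedAt reports whether an API type whose APILifecycleRemoved release is
// (removedMajor, removedMinor) is still served by a cluster at major.minor.
func servedAt(removedMajor, removedMinor, major, minor int) bool {
    return major < removedMajor || (major == removedMajor && minor < removedMinor)
}

func main() {
    // ClusterCIDR reports a removal release of 1.31 in the function above.
    fmt.Println(servedAt(1, 31, 1, 29)) // true: still served on 1.29
    fmt.Println(servedAt(1, 31, 1, 31)) // false: no longer served on 1.31
}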
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||
func (in *ClusterCIDRList) APILifecycleIntroduced() (major, minor int) {
|
||||
return 1, 25
|
||||
}
|
||||
|
||||
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
|
||||
func (in *ClusterCIDRList) APILifecycleDeprecated() (major, minor int) {
|
||||
return 1, 28
|
||||
}
|
||||
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *ClusterCIDRList) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 31
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||
func (in *IPAddress) APILifecycleIntroduced() (major, minor int) {
|
||||
@@ -92,3 +56,39 @@ func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) {
|
||||
func (in *IPAddressList) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 33
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||
func (in *ServiceCIDR) APILifecycleIntroduced() (major, minor int) {
|
||||
return 1, 27
|
||||
}
|
||||
|
||||
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
|
||||
func (in *ServiceCIDR) APILifecycleDeprecated() (major, minor int) {
|
||||
return 1, 30
|
||||
}
|
||||
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *ServiceCIDR) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 33
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||
func (in *ServiceCIDRList) APILifecycleIntroduced() (major, minor int) {
|
||||
return 1, 27
|
||||
}
|
||||
|
||||
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
|
||||
func (in *ServiceCIDRList) APILifecycleDeprecated() (major, minor int) {
|
||||
return 1, 30
|
||||
}
|
||||
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *ServiceCIDRList) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 33
|
||||
}
|
||||
|
2 vendor/k8s.io/api/policy/v1/doc.go generated vendored
@@ -19,6 +19,6 @@ limitations under the License.
|
||||
// +k8s:openapi-gen=true
|
||||
|
||||
// Package policy is for any kind of policy object. Suitable examples, even if
|
||||
// they aren't all here, are PodDisruptionBudget, PodSecurityPolicy,
|
||||
// they aren't all here, are PodDisruptionBudget,
|
||||
// NetworkPolicy, etc.
|
||||
package v1 // import "k8s.io/api/policy/v1"
|
||||
|
2 vendor/k8s.io/api/policy/v1beta1/doc.go generated vendored
@@ -20,6 +20,6 @@ limitations under the License.
|
||||
// +k8s:prerelease-lifecycle-gen=true
|
||||
|
||||
// Package policy is for any kind of policy object. Suitable examples, even if
|
||||
// they aren't all here, are PodDisruptionBudget, PodSecurityPolicy,
|
||||
// they aren't all here, are PodDisruptionBudget,
|
||||
// NetworkPolicy, etc.
|
||||
package v1beta1 // import "k8s.io/api/policy/v1beta1"
|
||||
|
4004 vendor/k8s.io/api/policy/v1beta1/generated.pb.go generated vendored (diff suppressed because it is too large)
277 vendor/k8s.io/api/policy/v1beta1/generated.proto generated vendored
@@ -21,7 +21,6 @@ syntax = "proto2";
|
||||
|
||||
package k8s.io.api.policy.v1beta1;
|
||||
|
||||
import "k8s.io/api/core/v1/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
|
||||
@@ -30,35 +29,6 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
|
||||
// Package-wide variables from generator "generated".
|
||||
option go_package = "k8s.io/api/policy/v1beta1";
|
||||
|
||||
// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.
|
||||
message AllowedCSIDriver {
|
||||
// Name is the registered name of the CSI driver
|
||||
optional string name = 1;
|
||||
}
|
||||
|
||||
// AllowedFlexVolume represents a single Flexvolume that is allowed to be used.
|
||||
message AllowedFlexVolume {
|
||||
// driver is the name of the Flexvolume driver.
|
||||
optional string driver = 1;
|
||||
}

// AllowedHostPath defines the host volume conditions that will be enabled by a policy
// for pods to use. It requires the path prefix to be defined.
message AllowedHostPath {
// pathPrefix is the path prefix that the host volume must match.
// It does not support `*`.
// Trailing slashes are trimmed when validating the path prefix with a host path.
//
// Examples:
// `/foo` would allow `/foo`, `/foo/` and `/foo/bar`
// `/foo` would not allow `/food` or `/etc/foo`
optional string pathPrefix = 1;

// when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.
// +optional
optional bool readOnly = 2;
}
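A hedged Go sketch of the pathPrefix rule described in this message (trailing slashes trimmed, match on a path-segment boundary); hostPathAllowed mirrors the documented examples and is not the upstream implementation:

package main

import (
    "fmt"
    "strings"
)

// hostPathAllowed mirrors the documented pathPrefix rule: trailing slashes are
// trimmed and the prefix must match on a path-segment boundary.
func hostPathAllowed(pathPrefix, volumePath string) bool {
    prefix := strings.TrimSuffix(pathPrefix, "/")
    path := strings.TrimSuffix(volumePath, "/")
    return path == prefix || strings.HasPrefix(path, prefix+"/")
}

func main() {
    fmt.Println(hostPathAllowed("/foo", "/foo"))     // true
    fmt.Println(hostPathAllowed("/foo", "/foo/"))    // true
    fmt.Println(hostPathAllowed("/foo", "/foo/bar")) // true
    fmt.Println(hostPathAllowed("/foo", "/food"))    // false
    fmt.Println(hostPathAllowed("/foo", "/etc/foo")) // false
}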
|
||||
|
||||
// Eviction evicts a pod from its node subject to certain policies and safety constraints.
|
||||
// This is a subresource of Pod. A request to cause such an eviction is
|
||||
// created by POSTing to .../pods/<pod name>/evictions.
|
||||
@@ -72,37 +42,6 @@ message Eviction {
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2;
|
||||
}
|
||||
|
||||
// FSGroupStrategyOptions defines the strategy type and options used to create the strategy.
|
||||
message FSGroupStrategyOptions {
|
||||
// rule is the strategy that will dictate what FSGroup is used in the SecurityContext.
|
||||
// +optional
|
||||
optional string rule = 1;
|
||||
|
||||
// ranges are the allowed ranges of fs groups. If you would like to force a single
|
||||
// fs group then supply a single range with the same start and end. Required for MustRunAs.
|
||||
// +optional
|
||||
repeated IDRange ranges = 2;
|
||||
}
|
||||
|
||||
// HostPortRange defines a range of host ports that will be enabled by a policy
|
||||
// for pods to use. It requires both the start and end to be defined.
|
||||
message HostPortRange {
|
||||
// min is the start of the range, inclusive.
|
||||
optional int32 min = 1;
|
||||
|
||||
// max is the end of the range, inclusive.
|
||||
optional int32 max = 2;
|
||||
}
|
||||
|
||||
// IDRange provides a min/max of an allowed range of IDs.
|
||||
message IDRange {
|
||||
// min is the start of the range, inclusive.
|
||||
optional int64 min = 1;
|
||||
|
||||
// max is the end of the range, inclusive.
|
||||
optional int64 max = 2;
|
||||
}
|
||||
|
||||
// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods
|
||||
message PodDisruptionBudget {
|
||||
// Standard object's metadata.
|
||||
@@ -238,219 +177,3 @@ message PodDisruptionBudgetStatus {
|
||||
repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7;
|
||||
}
|
||||
|
||||
// PodSecurityPolicy governs the ability to make requests that affect the Security Context
|
||||
// that will be applied to a pod and container.
|
||||
// Deprecated in 1.21.
|
||||
message PodSecurityPolicy {
|
||||
// Standard object's metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||
|
||||
// spec defines the policy enforced.
|
||||
// +optional
|
||||
optional PodSecurityPolicySpec spec = 2;
|
||||
}
|
||||
|
||||
// PodSecurityPolicyList is a list of PodSecurityPolicy objects.
|
||||
message PodSecurityPolicyList {
|
||||
// Standard list metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||
|
||||
// items is a list of schema objects.
|
||||
repeated PodSecurityPolicy items = 2;
|
||||
}
|
||||
|
||||
// PodSecurityPolicySpec defines the policy enforced.
|
||||
message PodSecurityPolicySpec {
|
||||
// privileged determines if a pod can request to be run as privileged.
|
||||
// +optional
|
||||
optional bool privileged = 1;
|
||||
|
||||
// defaultAddCapabilities is the default set of capabilities that will be added to the container
|
||||
// unless the pod spec specifically drops the capability. You may not list a capability in both
|
||||
// defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly
|
||||
// allowed, and need not be included in the allowedCapabilities list.
|
||||
// +optional
|
||||
repeated string defaultAddCapabilities = 2;
|
||||
|
||||
// requiredDropCapabilities are the capabilities that will be dropped from the container. These
|
||||
// are required to be dropped and cannot be added.
|
||||
// +optional
|
||||
repeated string requiredDropCapabilities = 3;
|
||||
|
||||
// allowedCapabilities is a list of capabilities that can be requested to add to the container.
|
||||
// Capabilities in this field may be added at the pod author's discretion.
|
||||
// You must not list a capability in both allowedCapabilities and requiredDropCapabilities.
|
||||
// +optional
|
||||
repeated string allowedCapabilities = 4;
|
||||
|
||||
// volumes is an allowlist of volume plugins. Empty indicates that
|
||||
// no volumes may be used. To allow all volumes you may use '*'.
|
||||
// +optional
|
||||
repeated string volumes = 5;
|
||||
|
||||
// hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.
|
||||
// +optional
|
||||
optional bool hostNetwork = 6;
|
||||
|
||||
// hostPorts determines which host port ranges are allowed to be exposed.
|
||||
// +optional
|
||||
repeated HostPortRange hostPorts = 7;
|
||||
|
||||
// hostPID determines if the policy allows the use of HostPID in the pod spec.
|
||||
// +optional
|
||||
optional bool hostPID = 8;
|
||||
|
||||
// hostIPC determines if the policy allows the use of HostIPC in the pod spec.
|
||||
// +optional
|
||||
optional bool hostIPC = 9;
|
||||
|
||||
// seLinux is the strategy that will dictate the allowable labels that may be set.
|
||||
optional SELinuxStrategyOptions seLinux = 10;
|
||||
|
||||
// runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.
|
||||
optional RunAsUserStrategyOptions runAsUser = 11;
|
||||
|
||||
// RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set.
|
||||
// If this field is omitted, the pod's RunAsGroup can take any value. This field requires the
|
||||
// RunAsGroup feature gate to be enabled.
|
||||
// +optional
|
||||
optional RunAsGroupStrategyOptions runAsGroup = 22;
|
||||
|
||||
// supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
|
||||
optional SupplementalGroupsStrategyOptions supplementalGroups = 12;
|
||||
|
||||
// fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.
|
||||
optional FSGroupStrategyOptions fsGroup = 13;
|
||||
|
||||
// readOnlyRootFilesystem when set to true will force containers to run with a read only root file
|
||||
// system. If the container specifically requests to run with a non-read only root file system
|
||||
// the PSP should deny the pod.
|
||||
// If set to false the container may run with a read only root file system if it wishes but it
|
||||
// will not be forced to.
|
||||
// +optional
|
||||
optional bool readOnlyRootFilesystem = 14;
|
||||
|
||||
// defaultAllowPrivilegeEscalation controls the default setting for whether a
|
||||
// process can gain more privileges than its parent process.
|
||||
// +optional
|
||||
optional bool defaultAllowPrivilegeEscalation = 15;
|
||||
|
||||
// allowPrivilegeEscalation determines if a pod can request to allow
|
||||
// privilege escalation. If unspecified, defaults to true.
|
||||
// +optional
|
||||
optional bool allowPrivilegeEscalation = 16;
|
||||
|
||||
// allowedHostPaths is an allowlist of host paths. Empty indicates
|
||||
// that all host paths may be used.
|
||||
// +optional
|
||||
repeated AllowedHostPath allowedHostPaths = 17;
|
||||
|
||||
// allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all
|
||||
// Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes
|
||||
// is allowed in the "volumes" field.
|
||||
// +optional
|
||||
repeated AllowedFlexVolume allowedFlexVolumes = 18;
|
||||
|
||||
// AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec.
|
||||
// An empty value indicates that any CSI driver can be used for inline ephemeral volumes.
|
||||
// +optional
|
||||
repeated AllowedCSIDriver allowedCSIDrivers = 23;
|
||||
|
||||
// allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none.
|
||||
// Each entry is either a plain sysctl name or ends in "*" in which case it is considered
|
||||
// as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed.
|
||||
// Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection.
|
||||
//
|
||||
// Examples:
|
||||
// e.g. "foo/*" allows "foo/bar", "foo/baz", etc.
|
||||
// e.g. "foo.*" allows "foo.bar", "foo.baz", etc.
|
||||
// +optional
|
||||
repeated string allowedUnsafeSysctls = 19;
|
||||
|
||||
// forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none.
|
||||
// Each entry is either a plain sysctl name or ends in "*" in which case it is considered
|
||||
// as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.
|
||||
//
|
||||
// Examples:
|
||||
// e.g. "foo/*" forbids "foo/bar", "foo/baz", etc.
|
||||
// e.g. "foo.*" forbids "foo.bar", "foo.baz", etc.
|
||||
// +optional
|
||||
repeated string forbiddenSysctls = 20;
|
||||
|
||||
// AllowedProcMountTypes is an allowlist of allowed ProcMountTypes.
|
||||
// Empty or nil indicates that only the DefaultProcMountType may be used.
|
||||
// This requires the ProcMountType feature flag to be enabled.
|
||||
// +optional
|
||||
repeated string allowedProcMountTypes = 21;
|
||||
|
||||
// runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod.
|
||||
// If this field is omitted, the pod's runtimeClassName field is unrestricted.
|
||||
// Enforcement of this field depends on the RuntimeClass feature gate being enabled.
|
||||
// +optional
|
||||
optional RuntimeClassStrategyOptions runtimeClass = 24;
|
||||
}
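The allowedUnsafeSysctls/forbiddenSysctls entries documented above match either an exact sysctl name or a "*"-terminated prefix, with a lone "*" matching everything. The sketch below shows that matching rule; sysctlMatches is an illustrative helper, not the kubelet code:

package main

import (
    "fmt"
    "strings"
)

// sysctlMatches reports whether a sysctl name matches an allowlist/denylist
// entry: "*" matches everything, a "*"-terminated entry is a prefix match,
// anything else is an exact match.
func sysctlMatches(pattern, name string) bool {
    if pattern == "*" {
        return true
    }
    if strings.HasSuffix(pattern, "*") {
        return strings.HasPrefix(name, strings.TrimSuffix(pattern, "*"))
    }
    return pattern == name
}

func main() {
    fmt.Println(sysctlMatches("foo/*", "foo/bar"))               // true
    fmt.Println(sysctlMatches("foo.*", "foo.baz"))               // true
    fmt.Println(sysctlMatches("kernel.msgmax", "kernel.shmmax")) // false
}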
|
||||
|
||||
// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.
|
||||
message RunAsGroupStrategyOptions {
|
||||
// rule is the strategy that will dictate the allowable RunAsGroup values that may be set.
|
||||
optional string rule = 1;
|
||||
|
||||
// ranges are the allowed ranges of gids that may be used. If you would like to force a single gid
|
||||
// then supply a single range with the same start and end. Required for MustRunAs.
|
||||
// +optional
|
||||
repeated IDRange ranges = 2;
|
||||
}
|
||||
|
||||
// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.
|
||||
message RunAsUserStrategyOptions {
|
||||
// rule is the strategy that will dictate the allowable RunAsUser values that may be set.
|
||||
optional string rule = 1;
|
||||
|
||||
// ranges are the allowed ranges of uids that may be used. If you would like to force a single uid
|
||||
// then supply a single range with the same start and end. Required for MustRunAs.
|
||||
// +optional
|
||||
repeated IDRange ranges = 2;
|
||||
}
|
||||
|
||||
// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses
|
||||
// for a pod.
|
||||
message RuntimeClassStrategyOptions {
|
||||
// allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod.
|
||||
// A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the
|
||||
// list. An empty list requires the RuntimeClassName field to be unset.
|
||||
repeated string allowedRuntimeClassNames = 1;
|
||||
|
||||
// defaultRuntimeClassName is the default RuntimeClassName to set on the pod.
|
||||
// The default MUST be allowed by the allowedRuntimeClassNames list.
|
||||
// A value of nil does not mutate the Pod.
|
||||
// +optional
|
||||
optional string defaultRuntimeClassName = 2;
|
||||
}
|
||||
|
||||
// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.
|
||||
message SELinuxStrategyOptions {
|
||||
// rule is the strategy that will dictate the allowable labels that may be set.
|
||||
optional string rule = 1;
|
||||
|
||||
// seLinuxOptions required to run as; required for MustRunAs
|
||||
// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
|
||||
// +optional
|
||||
optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2;
|
||||
}
|
||||
|
||||
// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.
|
||||
message SupplementalGroupsStrategyOptions {
|
||||
// rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.
|
||||
// +optional
|
||||
optional string rule = 1;
|
||||
|
||||
// ranges are the allowed ranges of supplemental groups. If you would like to force a single
|
||||
// supplemental group then supply a single range with the same start and end. Required for MustRunAs.
|
||||
// +optional
|
||||
repeated IDRange ranges = 2;
|
||||
}
|
||||
|
||||
|
2 vendor/k8s.io/api/policy/v1beta1/register.go generated vendored
@@ -46,8 +46,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
|
||||
scheme.AddKnownTypes(SchemeGroupVersion,
|
||||
&PodDisruptionBudget{},
|
||||
&PodDisruptionBudgetList{},
|
||||
&PodSecurityPolicy{},
|
||||
&PodSecurityPolicyList{},
|
||||
&Eviction{},
|
||||
)
|
||||
// Add the watch version that applies
|
||||
|
371 vendor/k8s.io/api/policy/v1beta1/types.go generated vendored
@@ -17,7 +17,6 @@ limitations under the License.
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
)
|
||||
@@ -228,373 +227,3 @@ type Eviction struct {
|
||||
// +optional
|
||||
DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.10
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.21
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.25
|
||||
|
||||
// PodSecurityPolicy governs the ability to make requests that affect the Security Context
|
||||
// that will be applied to a pod and container.
|
||||
// Deprecated in 1.21.
|
||||
type PodSecurityPolicy struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// spec defines the policy enforced.
|
||||
// +optional
|
||||
Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
}
|
||||
|
||||
// PodSecurityPolicySpec defines the policy enforced.
|
||||
type PodSecurityPolicySpec struct {
|
||||
// privileged determines if a pod can request to be run as privileged.
|
||||
// +optional
|
||||
Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"`
|
||||
// defaultAddCapabilities is the default set of capabilities that will be added to the container
|
||||
// unless the pod spec specifically drops the capability. You may not list a capability in both
|
||||
// defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly
|
||||
// allowed, and need not be included in the allowedCapabilities list.
|
||||
// +optional
|
||||
DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"`
|
||||
// requiredDropCapabilities are the capabilities that will be dropped from the container. These
|
||||
// are required to be dropped and cannot be added.
|
||||
// +optional
|
||||
RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/api/core/v1.Capability"`
|
||||
// allowedCapabilities is a list of capabilities that can be requested to add to the container.
|
||||
// Capabilities in this field may be added at the pod author's discretion.
|
||||
// You must not list a capability in both allowedCapabilities and requiredDropCapabilities.
|
||||
// +optional
|
||||
AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/api/core/v1.Capability"`
|
||||
// volumes is an allowlist of volume plugins. Empty indicates that
|
||||
// no volumes may be used. To allow all volumes you may use '*'.
|
||||
// +optional
|
||||
Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"`
|
||||
// hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.
|
||||
// +optional
|
||||
HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"`
|
||||
// hostPorts determines which host port ranges are allowed to be exposed.
|
||||
// +optional
|
||||
HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"`
|
||||
// hostPID determines if the policy allows the use of HostPID in the pod spec.
|
||||
// +optional
|
||||
HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"`
|
||||
// hostIPC determines if the policy allows the use of HostIPC in the pod spec.
|
||||
// +optional
|
||||
HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"`
|
||||
// seLinux is the strategy that will dictate the allowable labels that may be set.
|
||||
SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"`
|
||||
// runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.
|
||||
RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"`
|
||||
// RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set.
|
||||
// If this field is omitted, the pod's RunAsGroup can take any value. This field requires the
|
||||
// RunAsGroup feature gate to be enabled.
|
||||
// +optional
|
||||
RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"`
|
||||
// supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
|
||||
SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"`
|
||||
// fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.
|
||||
FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"`
|
||||
// readOnlyRootFilesystem when set to true will force containers to run with a read only root file
|
||||
// system. If the container specifically requests to run with a non-read only root file system
|
||||
// the PSP should deny the pod.
|
||||
// If set to false the container may run with a read only root file system if it wishes but it
|
||||
// will not be forced to.
|
||||
// +optional
|
||||
ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"`
|
||||
// defaultAllowPrivilegeEscalation controls the default setting for whether a
|
||||
// process can gain more privileges than its parent process.
|
||||
// +optional
|
||||
DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"`
|
||||
// allowPrivilegeEscalation determines if a pod can request to allow
|
||||
// privilege escalation. If unspecified, defaults to true.
|
||||
// +optional
|
||||
AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"`
|
||||
// allowedHostPaths is an allowlist of host paths. Empty indicates
|
||||
// that all host paths may be used.
|
||||
// +optional
|
||||
AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"`
|
||||
// allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all
|
||||
// Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes
|
||||
// is allowed in the "volumes" field.
|
||||
// +optional
|
||||
AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"`
|
||||
// AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec.
|
||||
// An empty value indicates that any CSI driver can be used for inline ephemeral volumes.
|
||||
// +optional
|
||||
AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"`
|
||||
// allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none.
|
||||
// Each entry is either a plain sysctl name or ends in "*" in which case it is considered
|
||||
// as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed.
|
||||
// Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection.
|
||||
//
|
||||
// Examples:
|
||||
// e.g. "foo/*" allows "foo/bar", "foo/baz", etc.
|
||||
// e.g. "foo.*" allows "foo.bar", "foo.baz", etc.
|
||||
// +optional
|
||||
AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,19,rep,name=allowedUnsafeSysctls"`
|
||||
// forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none.
|
||||
// Each entry is either a plain sysctl name or ends in "*" in which case it is considered
|
||||
// as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.
|
||||
//
|
||||
// Examples:
|
||||
// e.g. "foo/*" forbids "foo/bar", "foo/baz", etc.
|
||||
// e.g. "foo.*" forbids "foo.bar", "foo.baz", etc.
|
||||
// +optional
|
||||
ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,20,rep,name=forbiddenSysctls"`
|
||||
// AllowedProcMountTypes is an allowlist of allowed ProcMountTypes.
|
||||
// Empty or nil indicates that only the DefaultProcMountType may be used.
|
||||
// This requires the ProcMountType feature flag to be enabled.
|
||||
// +optional
|
||||
AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"`
|
||||
// runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod.
|
||||
// If this field is omitted, the pod's runtimeClassName field is unrestricted.
|
||||
// Enforcement of this field depends on the RuntimeClass feature gate being enabled.
|
||||
// +optional
|
||||
RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"`
|
||||
}
|
||||
|
||||
// AllowedHostPath defines the host volume conditions that will be enabled by a policy
|
||||
// for pods to use. It requires the path prefix to be defined.
|
||||
type AllowedHostPath struct {
|
||||
// pathPrefix is the path prefix that the host volume must match.
|
||||
// It does not support `*`.
|
||||
// Trailing slashes are trimmed when validating the path prefix with a host path.
|
||||
//
|
||||
// Examples:
|
||||
// `/foo` would allow `/foo`, `/foo/` and `/foo/bar`
|
||||
// `/foo` would not allow `/food` or `/etc/foo`
|
||||
PathPrefix string `json:"pathPrefix,omitempty" protobuf:"bytes,1,rep,name=pathPrefix"`
|
||||
|
||||
// when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.
|
||||
// +optional
|
||||
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
|
||||
}
|
||||
|
||||
// AllowAllCapabilities can be used as a value for the PodSecurityPolicy.AllowAllCapabilities
|
||||
// field and means that any capabilities are allowed to be requested.
|
||||
var AllowAllCapabilities v1.Capability = "*"
|
||||
|
||||
// FSType gives strong typing to different file systems that are used by volumes.
|
||||
type FSType string
|
||||
|
||||
const (
|
||||
AzureFile FSType = "azureFile"
|
||||
Flocker FSType = "flocker"
|
||||
FlexVolume FSType = "flexVolume"
|
||||
HostPath FSType = "hostPath"
|
||||
EmptyDir FSType = "emptyDir"
|
||||
GCEPersistentDisk FSType = "gcePersistentDisk"
|
||||
AWSElasticBlockStore FSType = "awsElasticBlockStore"
|
||||
GitRepo FSType = "gitRepo"
|
||||
Secret FSType = "secret"
|
||||
NFS FSType = "nfs"
|
||||
ISCSI FSType = "iscsi"
|
||||
Glusterfs FSType = "glusterfs"
|
||||
PersistentVolumeClaim FSType = "persistentVolumeClaim"
|
||||
RBD FSType = "rbd"
|
||||
Cinder FSType = "cinder"
|
||||
CephFS FSType = "cephFS"
|
||||
DownwardAPI FSType = "downwardAPI"
|
||||
FC FSType = "fc"
|
||||
ConfigMap FSType = "configMap"
|
||||
VsphereVolume FSType = "vsphereVolume"
|
||||
Quobyte FSType = "quobyte"
|
||||
AzureDisk FSType = "azureDisk"
|
||||
PhotonPersistentDisk FSType = "photonPersistentDisk"
|
||||
StorageOS FSType = "storageos"
|
||||
Projected FSType = "projected"
|
||||
PortworxVolume FSType = "portworxVolume"
|
||||
ScaleIO FSType = "scaleIO"
|
||||
CSI FSType = "csi"
|
||||
Ephemeral FSType = "ephemeral"
|
||||
All FSType = "*"
|
||||
)
|
||||
|
||||
// AllowedFlexVolume represents a single Flexvolume that is allowed to be used.
|
||||
type AllowedFlexVolume struct {
|
||||
// driver is the name of the Flexvolume driver.
|
||||
Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
|
||||
}
|
||||
|
||||
// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.
|
||||
type AllowedCSIDriver struct {
|
||||
// Name is the registered name of the CSI driver
|
||||
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
|
||||
}
|
||||
|
||||
// HostPortRange defines a range of host ports that will be enabled by a policy
|
||||
// for pods to use. It requires both the start and end to be defined.
|
||||
type HostPortRange struct {
|
||||
// min is the start of the range, inclusive.
|
||||
Min int32 `json:"min" protobuf:"varint,1,opt,name=min"`
|
||||
// max is the end of the range, inclusive.
|
||||
Max int32 `json:"max" protobuf:"varint,2,opt,name=max"`
|
||||
}
|
||||
|
||||
// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.
|
||||
type SELinuxStrategyOptions struct {
|
||||
// rule is the strategy that will dictate the allowable labels that may be set.
|
||||
Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"`
|
||||
// seLinuxOptions required to run as; required for MustRunAs
|
||||
// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
|
||||
// +optional
|
||||
SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"`
|
||||
}
|
||||
|
||||
// SELinuxStrategy denotes strategy types for generating SELinux options for a
|
||||
// Security Context.
|
||||
type SELinuxStrategy string
|
||||
|
||||
const (
|
||||
// SELinuxStrategyMustRunAs means that container must have SELinux labels of X applied.
|
||||
SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs"
|
||||
// SELinuxStrategyRunAsAny means that container may make requests for any SELinux context labels.
|
||||
SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny"
|
||||
)
|
||||
|
||||
// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.
|
||||
type RunAsUserStrategyOptions struct {
|
||||
// rule is the strategy that will dictate the allowable RunAsUser values that may be set.
|
||||
Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"`
|
||||
// ranges are the allowed ranges of uids that may be used. If you would like to force a single uid
|
||||
// then supply a single range with the same start and end. Required for MustRunAs.
|
||||
// +optional
|
||||
Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
|
||||
}
|
||||
|
||||
// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.
|
||||
type RunAsGroupStrategyOptions struct {
|
||||
// rule is the strategy that will dictate the allowable RunAsGroup values that may be set.
|
||||
Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"`
|
||||
// ranges are the allowed ranges of gids that may be used. If you would like to force a single gid
|
||||
// then supply a single range with the same start and end. Required for MustRunAs.
|
||||
// +optional
|
||||
Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
|
||||
}
|
||||
|
||||
// IDRange provides a min/max of an allowed range of IDs.
|
||||
type IDRange struct {
|
||||
// min is the start of the range, inclusive.
|
||||
Min int64 `json:"min" protobuf:"varint,1,opt,name=min"`
|
||||
// max is the end of the range, inclusive.
|
||||
Max int64 `json:"max" protobuf:"varint,2,opt,name=max"`
|
||||
}
|
||||
|
||||
// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a
|
||||
// Security Context.
|
||||
type RunAsUserStrategy string
|
||||
|
||||
const (
|
||||
// RunAsUserStrategyMustRunAs means that container must run as a particular uid.
|
||||
RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs"
|
||||
// RunAsUserStrategyMustRunAsNonRoot means that container must run as a non-root uid.
|
||||
RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot"
|
||||
// RunAsUserStrategyRunAsAny means that container may make requests for any uid.
|
||||
RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny"
|
||||
)
|
||||
|
||||
// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a
|
||||
// Security Context.
|
||||
type RunAsGroupStrategy string
|
||||
|
||||
const (
|
||||
// RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid.
|
||||
// However, when RunAsGroup are specified, they have to fall in the defined range.
|
||||
RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs"
|
||||
// RunAsGroupStrategyMustRunAs means that container must run as a particular gid.
|
||||
RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs"
|
||||
// RunAsUserStrategyRunAsAny means that container may make requests for any gid.
|
||||
RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny"
|
||||
)
|
||||
|
||||
// FSGroupStrategyOptions defines the strategy type and options used to create the strategy.
|
||||
type FSGroupStrategyOptions struct {
|
||||
// rule is the strategy that will dictate what FSGroup is used in the SecurityContext.
|
||||
// +optional
|
||||
Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"`
|
||||
// ranges are the allowed ranges of fs groups. If you would like to force a single
|
||||
// fs group then supply a single range with the same start and end. Required for MustRunAs.
|
||||
// +optional
|
||||
Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
|
||||
}
|
||||
|
||||
// FSGroupStrategyType denotes strategy types for generating FSGroup values for a
|
||||
// SecurityContext
|
||||
type FSGroupStrategyType string
|
||||
|
||||
const (
|
||||
// FSGroupStrategyMayRunAs means that container does not need to have FSGroup of X applied.
|
||||
// However, when FSGroups are specified, they have to fall in the defined range.
|
||||
FSGroupStrategyMayRunAs FSGroupStrategyType = "MayRunAs"
|
||||
// FSGroupStrategyMustRunAs meant that container must have FSGroup of X applied.
|
||||
FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs"
|
||||
// FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels.
|
||||
FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny"
|
||||
)
|
||||
|
||||
// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.
|
||||
type SupplementalGroupsStrategyOptions struct {
|
||||
// rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.
|
||||
// +optional
|
||||
Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"`
|
||||
// ranges are the allowed ranges of supplemental groups. If you would like to force a single
|
||||
// supplemental group then supply a single range with the same start and end. Required for MustRunAs.
|
||||
// +optional
|
||||
Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
|
||||
}
|
||||
|
||||
// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental
|
||||
// groups for a SecurityContext.
|
||||
type SupplementalGroupsStrategyType string
|
||||
|
||||
const (
|
||||
// SupplementalGroupsStrategyMayRunAs means that container does not need to run with a particular gid.
|
||||
// However, when gids are specified, they have to fall in the defined range.
|
||||
SupplementalGroupsStrategyMayRunAs SupplementalGroupsStrategyType = "MayRunAs"
|
||||
// SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid.
|
||||
SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs"
|
||||
// SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid.
|
||||
SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny"
|
||||
)
|
||||
|
||||
// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses
|
||||
// for a pod.
|
||||
type RuntimeClassStrategyOptions struct {
|
||||
// allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod.
|
||||
// A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the
|
||||
// list. An empty list requires the RuntimeClassName field to be unset.
|
||||
AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"`
|
||||
// defaultRuntimeClassName is the default RuntimeClassName to set on the pod.
|
||||
// The default MUST be allowed by the allowedRuntimeClassNames list.
|
||||
// A value of nil does not mutate the Pod.
|
||||
// +optional
|
||||
DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"`
|
||||
}
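A small sketch of the allowedRuntimeClassNames semantics documented above ("*" allows any name and must be the sole entry, an empty list only admits pods with runtimeClassName unset); runtimeClassAllowed is an illustrative helper, not the upstream admission code:

package main

import "fmt"

// runtimeClassAllowed reflects the documented allowlist semantics: an unset
// runtimeClassName is always acceptable, "*" allows any name, and otherwise
// the name must appear in the list (so an empty list rejects any set name).
func runtimeClassAllowed(allowed []string, runtimeClassName *string) bool {
    if runtimeClassName == nil {
        return true
    }
    for _, name := range allowed {
        if name == "*" || name == *runtimeClassName {
            return true
        }
    }
    return false
}

func main() {
    gvisor := "gvisor"
    fmt.Println(runtimeClassAllowed([]string{"*"}, &gvisor))    // true
    fmt.Println(runtimeClassAllowed([]string{"kata"}, &gvisor)) // false
    fmt.Println(runtimeClassAllowed(nil, nil))                  // true: empty list, unset name
}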
|
||||
|
||||
// AllowAllRuntimeClassNames can be used as a value for the
|
||||
// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is
|
||||
// allowed.
|
||||
const AllowAllRuntimeClassNames = "*"
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.10
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.21
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.25
|
||||
|
||||
// PodSecurityPolicyList is a list of PodSecurityPolicy objects.
|
||||
type PodSecurityPolicyList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard list metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// items is a list of schema objects.
|
||||
Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
160 vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go generated vendored
@@ -27,34 +27,6 @@ package v1beta1
|
||||
// Those methods can be generated by using hack/update-codegen.sh
|
||||
|
||||
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
|
||||
var map_AllowedCSIDriver = map[string]string{
|
||||
"": "AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.",
|
||||
"name": "Name is the registered name of the CSI driver",
|
||||
}
|
||||
|
||||
func (AllowedCSIDriver) SwaggerDoc() map[string]string {
|
||||
return map_AllowedCSIDriver
|
||||
}
|
||||
|
||||
var map_AllowedFlexVolume = map[string]string{
|
||||
"": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.",
|
||||
"driver": "driver is the name of the Flexvolume driver.",
|
||||
}
|
||||
|
||||
func (AllowedFlexVolume) SwaggerDoc() map[string]string {
|
||||
return map_AllowedFlexVolume
|
||||
}
|
||||
|
||||
var map_AllowedHostPath = map[string]string{
|
||||
"": "AllowedHostPath defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined.",
|
||||
"pathPrefix": "pathPrefix is the path prefix that the host volume must match. It does not support `*`. Trailing slashes are trimmed when validating the path prefix with a host path.\n\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`",
|
||||
"readOnly": "when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.",
|
||||
}
|
||||
|
||||
func (AllowedHostPath) SwaggerDoc() map[string]string {
|
||||
return map_AllowedHostPath
|
||||
}
|
||||
|
||||
var map_Eviction = map[string]string{
|
||||
"": "Eviction evicts a pod from its node subject to certain policies and safety constraints. This is a subresource of Pod. A request to cause such an eviction is created by POSTing to .../pods/<pod name>/evictions.",
|
||||
"metadata": "ObjectMeta describes the pod that is being evicted.",
|
||||
@@ -65,36 +37,6 @@ func (Eviction) SwaggerDoc() map[string]string {
|
||||
return map_Eviction
|
||||
}
|
||||
|
||||
var map_FSGroupStrategyOptions = map[string]string{
|
||||
"": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.",
|
||||
"rule": "rule is the strategy that will dictate what FSGroup is used in the SecurityContext.",
|
||||
"ranges": "ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs.",
|
||||
}
|
||||
|
||||
func (FSGroupStrategyOptions) SwaggerDoc() map[string]string {
|
||||
return map_FSGroupStrategyOptions
|
||||
}
|
||||
|
||||
var map_HostPortRange = map[string]string{
|
||||
"": "HostPortRange defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined.",
|
||||
"min": "min is the start of the range, inclusive.",
|
||||
"max": "max is the end of the range, inclusive.",
|
||||
}
|
||||
|
||||
func (HostPortRange) SwaggerDoc() map[string]string {
|
||||
return map_HostPortRange
|
||||
}
|
||||
|
||||
var map_IDRange = map[string]string{
|
||||
"": "IDRange provides a min/max of an allowed range of IDs.",
|
||||
"min": "min is the start of the range, inclusive.",
|
||||
"max": "max is the end of the range, inclusive.",
|
||||
}
|
||||
|
||||
func (IDRange) SwaggerDoc() map[string]string {
|
||||
return map_IDRange
|
||||
}
|
||||
|
||||
var map_PodDisruptionBudget = map[string]string{
|
||||
"": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods",
|
||||
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
|
||||
@@ -143,106 +85,4 @@ func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string {
|
||||
return map_PodDisruptionBudgetStatus
|
||||
}
|
||||
|
||||
var map_PodSecurityPolicy = map[string]string{
|
||||
"": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. Deprecated in 1.21.",
|
||||
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
|
||||
"spec": "spec defines the policy enforced.",
|
||||
}
|
||||
|
||||
func (PodSecurityPolicy) SwaggerDoc() map[string]string {
|
||||
return map_PodSecurityPolicy
|
||||
}
|
||||
|
||||
var map_PodSecurityPolicyList = map[string]string{
|
||||
"": "PodSecurityPolicyList is a list of PodSecurityPolicy objects.",
|
||||
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
|
||||
"items": "items is a list of schema objects.",
|
||||
}
|
||||
|
||||
func (PodSecurityPolicyList) SwaggerDoc() map[string]string {
|
||||
return map_PodSecurityPolicyList
|
||||
}
|
||||
|
||||
var map_PodSecurityPolicySpec = map[string]string{
"": "PodSecurityPolicySpec defines the policy enforced.",
"privileged": "privileged determines if a pod can request to be run as privileged.",
"defaultAddCapabilities": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list.",
"requiredDropCapabilities": "requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.",
"allowedCapabilities": "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.",
"volumes": "volumes is an allowlist of volume plugins. Empty indicates that no volumes may be used. To allow all volumes you may use '*'.",
"hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.",
"hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.",
"hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.",
"hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.",
"seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.",
"runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.",
"runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled.",
"supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.",
"fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.",
"readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.",
"defaultAllowPrivilegeEscalation": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.",
"allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.",
"allowedHostPaths": "allowedHostPaths is an allowlist of host paths. Empty indicates that all host paths may be used.",
"allowedFlexVolumes": "allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.",
"allowedCSIDrivers": "AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes.",
"allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.",
"forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.",
"allowedProcMountTypes": "AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled.",
"runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.",
}

func (PodSecurityPolicySpec) SwaggerDoc() map[string]string {
return map_PodSecurityPolicySpec
}
var map_RunAsGroupStrategyOptions = map[string]string{
"": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.",
"rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.",
"ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.",
}

func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string {
return map_RunAsGroupStrategyOptions
}

var map_RunAsUserStrategyOptions = map[string]string{
"": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.",
"rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.",
"ranges": "ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs.",
}

func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string {
return map_RunAsUserStrategyOptions
}

var map_RuntimeClassStrategyOptions = map[string]string{
"": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.",
"allowedRuntimeClassNames": "allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.",
"defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod.",
}

func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string {
return map_RuntimeClassStrategyOptions
}

var map_SELinuxStrategyOptions = map[string]string{
"": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.",
"rule": "rule is the strategy that will dictate the allowable labels that may be set.",
"seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
}

func (SELinuxStrategyOptions) SwaggerDoc() map[string]string {
return map_SELinuxStrategyOptions
}

var map_SupplementalGroupsStrategyOptions = map[string]string{
"": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.",
"rule": "rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.",
"ranges": "ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs.",
}

func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string {
return map_SupplementalGroupsStrategyOptions
}

// AUTO-GENERATED FUNCTIONS END HERE
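For readers skimming this vendored diff, a minimal sketch of how the generated SwaggerDoc() maps above are typically consumed; the lookup is illustrative only and not part of the change, using the Eviction type that remains in policy/v1beta1:

package main

import (
	"fmt"

	policyv1beta1 "k8s.io/api/policy/v1beta1"
)

func main() {
	// SwaggerDoc has a value receiver, so it can be called on a zero value.
	docs := policyv1beta1.Eviction{}.SwaggerDoc()
	// The empty key documents the type itself; other keys use JSON field names.
	fmt.Println(docs[""])
	fmt.Println(docs["metadata"])
}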
367
vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go
generated
vendored
@ -22,60 +22,11 @@ limitations under the License.
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
intstr "k8s.io/apimachinery/pkg/util/intstr"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver.
|
||||
func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AllowedCSIDriver)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume.
|
||||
func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AllowedFlexVolume)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath.
|
||||
func (in *AllowedHostPath) DeepCopy() *AllowedHostPath {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AllowedHostPath)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Eviction) DeepCopyInto(out *Eviction) {
|
||||
*out = *in
|
||||
@ -107,59 +58,6 @@ func (in *Eviction) DeepCopyObject() runtime.Object {
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) {
|
||||
*out = *in
|
||||
if in.Ranges != nil {
|
||||
in, out := &in.Ranges, &out.Ranges
|
||||
*out = make([]IDRange, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions.
|
||||
func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FSGroupStrategyOptions)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *HostPortRange) DeepCopyInto(out *HostPortRange) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange.
|
||||
func (in *HostPortRange) DeepCopy() *HostPortRange {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(HostPortRange)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IDRange) DeepCopyInto(out *IDRange) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange.
|
||||
func (in *IDRange) DeepCopy() *IDRange {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IDRange)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodDisruptionBudget) DeepCopyInto(out *PodDisruptionBudget) {
|
||||
*out = *in
|
||||
@ -286,268 +184,3 @@ func (in *PodDisruptionBudgetStatus) DeepCopy() *PodDisruptionBudgetStatus {
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy.
|
||||
func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodSecurityPolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]PodSecurityPolicy, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList.
|
||||
func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodSecurityPolicyList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) {
|
||||
*out = *in
|
||||
if in.DefaultAddCapabilities != nil {
|
||||
in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities
|
||||
*out = make([]corev1.Capability, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.RequiredDropCapabilities != nil {
|
||||
in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities
|
||||
*out = make([]corev1.Capability, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.AllowedCapabilities != nil {
|
||||
in, out := &in.AllowedCapabilities, &out.AllowedCapabilities
|
||||
*out = make([]corev1.Capability, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Volumes != nil {
|
||||
in, out := &in.Volumes, &out.Volumes
|
||||
*out = make([]FSType, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.HostPorts != nil {
|
||||
in, out := &in.HostPorts, &out.HostPorts
|
||||
*out = make([]HostPortRange, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
in.SELinux.DeepCopyInto(&out.SELinux)
|
||||
in.RunAsUser.DeepCopyInto(&out.RunAsUser)
|
||||
if in.RunAsGroup != nil {
|
||||
in, out := &in.RunAsGroup, &out.RunAsGroup
|
||||
*out = new(RunAsGroupStrategyOptions)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups)
|
||||
in.FSGroup.DeepCopyInto(&out.FSGroup)
|
||||
if in.DefaultAllowPrivilegeEscalation != nil {
|
||||
in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.AllowPrivilegeEscalation != nil {
|
||||
in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.AllowedHostPaths != nil {
|
||||
in, out := &in.AllowedHostPaths, &out.AllowedHostPaths
|
||||
*out = make([]AllowedHostPath, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.AllowedFlexVolumes != nil {
|
||||
in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes
|
||||
*out = make([]AllowedFlexVolume, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.AllowedCSIDrivers != nil {
|
||||
in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers
|
||||
*out = make([]AllowedCSIDriver, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.AllowedUnsafeSysctls != nil {
|
||||
in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.ForbiddenSysctls != nil {
|
||||
in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.AllowedProcMountTypes != nil {
|
||||
in, out := &in.AllowedProcMountTypes, &out.AllowedProcMountTypes
|
||||
*out = make([]corev1.ProcMountType, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.RuntimeClass != nil {
|
||||
in, out := &in.RuntimeClass, &out.RuntimeClass
|
||||
*out = new(RuntimeClassStrategyOptions)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec.
|
||||
func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodSecurityPolicySpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) {
|
||||
*out = *in
|
||||
if in.Ranges != nil {
|
||||
in, out := &in.Ranges, &out.Ranges
|
||||
*out = make([]IDRange, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions.
|
||||
func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(RunAsGroupStrategyOptions)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) {
|
||||
*out = *in
|
||||
if in.Ranges != nil {
|
||||
in, out := &in.Ranges, &out.Ranges
|
||||
*out = make([]IDRange, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions.
|
||||
func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(RunAsUserStrategyOptions)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) {
|
||||
*out = *in
|
||||
if in.AllowedRuntimeClassNames != nil {
|
||||
in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.DefaultRuntimeClassName != nil {
|
||||
in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions.
|
||||
func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(RuntimeClassStrategyOptions)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) {
|
||||
*out = *in
|
||||
if in.SELinuxOptions != nil {
|
||||
in, out := &in.SELinuxOptions, &out.SELinuxOptions
|
||||
*out = new(corev1.SELinuxOptions)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions.
|
||||
func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(SELinuxStrategyOptions)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) {
|
||||
*out = *in
|
||||
if in.Ranges != nil {
|
||||
in, out := &in.Ranges, &out.Ranges
|
||||
*out = make([]IDRange, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions.
|
||||
func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(SupplementalGroupsStrategyOptions)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
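The DeepCopy helpers in the file above follow the usual controller pattern of mutating a copy rather than the cached object; a minimal sketch, using the PodDisruptionBudget type that remains in policy/v1beta1 (field values are illustrative only):

package main

import (
	"fmt"

	policyv1beta1 "k8s.io/api/policy/v1beta1"
)

func main() {
	orig := &policyv1beta1.PodDisruptionBudget{}
	orig.Name = "demo"
	orig.Labels = map[string]string{"team": "storage"}

	// DeepCopy duplicates nested maps and slices, so edits to the copy do not
	// leak back into the original object.
	cp := orig.DeepCopy()
	cp.Labels["team"] = "compute"

	fmt.Println(orig.Labels["team"]) // storage
	fmt.Println(cp.Labels["team"])   // compute
}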
36
vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go
generated
vendored
@ -90,39 +90,3 @@ func (in *PodDisruptionBudgetList) APILifecycleReplacement() schema.GroupVersion
func (in *PodDisruptionBudgetList) APILifecycleRemoved() (major, minor int) {
return 1, 25
}

// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *PodSecurityPolicy) APILifecycleIntroduced() (major, minor int) {
return 1, 10
}

// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *PodSecurityPolicy) APILifecycleDeprecated() (major, minor int) {
return 1, 21
}

// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *PodSecurityPolicy) APILifecycleRemoved() (major, minor int) {
return 1, 25
}

// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *PodSecurityPolicyList) APILifecycleIntroduced() (major, minor int) {
return 1, 10
}

// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *PodSecurityPolicyList) APILifecycleDeprecated() (major, minor int) {
return 1, 21
}

// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *PodSecurityPolicyList) APILifecycleRemoved() (major, minor int) {
return 1, 25
}
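The (major, minor) pairs returned by these generated lifecycle helpers are intended for simple version comparisons; a minimal sketch with a hypothetical comparison helper and an assumed cluster version:

package main

import (
	"fmt"

	policyv1beta1 "k8s.io/api/policy/v1beta1"
)

// atOrAfter reports whether version a.b is at or after version x.y.
func atOrAfter(a, b, x, y int) bool {
	return a > x || (a == x && b >= y)
}

func main() {
	var pdbList policyv1beta1.PodDisruptionBudgetList
	remMajor, remMinor := pdbList.APILifecycleRemoved() // 1, 25

	// Cluster version chosen for illustration only.
	clusterMajor, clusterMinor := 1, 29
	if atOrAfter(clusterMajor, clusterMinor, remMajor, remMinor) {
		fmt.Println("policy/v1beta1 PodDisruptionBudgetList is no longer served; use policy/v1")
	}
}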
4
vendor/k8s.io/api/resource/v1alpha2/generated.proto
generated
vendored
@ -107,7 +107,7 @@ message PodSchedulingContextSpec {
// that suits all pending resources. This may get increased in the
// future, but not reduced.
//
// +listType=set
// +listType=atomic
// +optional
repeated string potentialNodes = 2;
}
@ -208,7 +208,7 @@ message ResourceClaimSchedulingStatus {
// PodSchedulingSpec.PotentialNodes. This may get increased in the
// future, but not reduced.
//
// +listType=set
// +listType=atomic
// +optional
repeated string unsuitableNodes = 2;
}

4
vendor/k8s.io/api/resource/v1alpha2/types.go
generated
vendored
@ -248,7 +248,7 @@ type PodSchedulingContextSpec struct {
// that suits all pending resources. This may get increased in the
// future, but not reduced.
//
// +listType=set
// +listType=atomic
// +optional
PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"`
}
@ -283,7 +283,7 @@ type ResourceClaimSchedulingStatus struct {
// PodSchedulingSpec.PotentialNodes. This may get increased in the
// future, but not reduced.
//
// +listType=set
// +listType=atomic
// +optional
UnsuitableNodes []string `json:"unsuitableNodes,omitempty" protobuf:"bytes,2,opt,name=unsuitableNodes"`
}
2
vendor/k8s.io/api/storage/v1/generated.proto
generated
vendored
@ -88,7 +88,7 @@ message CSIDriverSpec {
// If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls.
// The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.
//
// The following VolumeConext will be passed if podInfoOnMount is set to true.
// The following VolumeContext will be passed if podInfoOnMount is set to true.
// This list might grow, but the prefix will be used.
// "csi.storage.k8s.io/pod.name": pod.Name
// "csi.storage.k8s.io/pod.namespace": pod.Namespace

2
vendor/k8s.io/api/storage/v1/types.go
generated
vendored
@ -291,7 +291,7 @@ type CSIDriverSpec struct {
// If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls.
// The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.
//
// The following VolumeConext will be passed if podInfoOnMount is set to true.
// The following VolumeContext will be passed if podInfoOnMount is set to true.
// This list might grow, but the prefix will be used.
// "csi.storage.k8s.io/pod.name": pod.Name
// "csi.storage.k8s.io/pod.namespace": pod.Namespace
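The "csi.storage.k8s.io/*" keys listed in the comment above are what a CSI driver sees in VolumeContext when podInfoOnMount is enabled; a minimal sketch of reading them, with the request value constructed by hand purely for illustration:

package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// podInfo pulls the documented pod-info keys out of a NodePublishVolume request.
func podInfo(req *csi.NodePublishVolumeRequest) (name, namespace string) {
	ctx := req.GetVolumeContext()
	return ctx["csi.storage.k8s.io/pod.name"], ctx["csi.storage.k8s.io/pod.namespace"]
}

func main() {
	// In a real driver this request arrives via the gRPC server.
	req := &csi.NodePublishVolumeRequest{
		VolumeContext: map[string]string{
			"csi.storage.k8s.io/pod.name":      "demo",
			"csi.storage.k8s.io/pod.namespace": "default",
		},
	}
	name, ns := podInfo(req)
	fmt.Println(name, ns)
}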
2
vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go
generated
vendored
@ -50,7 +50,7 @@ func (CSIDriverList) SwaggerDoc() map[string]string {
|
||||
var map_CSIDriverSpec = map[string]string{
|
||||
"": "CSIDriverSpec is the specification of a CSIDriver.",
|
||||
"attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.",
|
||||
"podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.",
|
||||
"podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.",
|
||||
"volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is beta. This field is immutable.",
|
||||
"storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.",
|
||||
"fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.",
|
||||
|
729
vendor/k8s.io/api/storage/v1alpha1/generated.pb.go
generated
vendored
@ -243,10 +243,66 @@ func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() {
|
||||
|
||||
var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo
|
||||
|
||||
func (m *VolumeAttributesClass) Reset() { *m = VolumeAttributesClass{} }
|
||||
func (*VolumeAttributesClass) ProtoMessage() {}
|
||||
func (*VolumeAttributesClass) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_10f856db1e670dc4, []int{7}
|
||||
}
|
||||
func (m *VolumeAttributesClass) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *VolumeAttributesClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
func (m *VolumeAttributesClass) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_VolumeAttributesClass.Merge(m, src)
|
||||
}
|
||||
func (m *VolumeAttributesClass) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *VolumeAttributesClass) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_VolumeAttributesClass.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_VolumeAttributesClass proto.InternalMessageInfo
|
||||
|
||||
func (m *VolumeAttributesClassList) Reset() { *m = VolumeAttributesClassList{} }
|
||||
func (*VolumeAttributesClassList) ProtoMessage() {}
|
||||
func (*VolumeAttributesClassList) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_10f856db1e670dc4, []int{8}
|
||||
}
|
||||
func (m *VolumeAttributesClassList) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *VolumeAttributesClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
func (m *VolumeAttributesClassList) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_VolumeAttributesClassList.Merge(m, src)
|
||||
}
|
||||
func (m *VolumeAttributesClassList) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *VolumeAttributesClassList) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_VolumeAttributesClassList.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_VolumeAttributesClassList proto.InternalMessageInfo
|
||||
|
||||
func (m *VolumeError) Reset() { *m = VolumeError{} }
|
||||
func (*VolumeError) ProtoMessage() {}
|
||||
func (*VolumeError) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_10f856db1e670dc4, []int{7}
|
||||
return fileDescriptor_10f856db1e670dc4, []int{9}
|
||||
}
|
||||
func (m *VolumeError) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@ -280,6 +336,9 @@ func init() {
|
||||
proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSpec")
|
||||
proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus")
|
||||
proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus.AttachmentMetadataEntry")
|
||||
proto.RegisterType((*VolumeAttributesClass)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttributesClass")
|
||||
proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttributesClass.ParametersEntry")
|
||||
proto.RegisterType((*VolumeAttributesClassList)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttributesClassList")
|
||||
proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1alpha1.VolumeError")
|
||||
}
|
||||
|
||||
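VolumeAttributesClass and VolumeAttributesClassList, registered above, are new storage/v1alpha1 types in this update; a minimal sketch of what such an object looks like in Go (the class name, driver name, and parameters below are illustrative only):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
)

func main() {
	vac := storagev1alpha1.VolumeAttributesClass{
		ObjectMeta: metav1.ObjectMeta{Name: "gold"},
		// Driver name and parameters are assumptions for illustration.
		DriverName: "rbd.csi.ceph.com",
		Parameters: map[string]string{"iops": "1000"},
	}
	fmt.Printf("%s -> %s %v\n", vac.Name, vac.DriverName, vac.Parameters)
}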
@ -288,65 +347,71 @@ func init() {
|
||||
}
|
||||
|
||||
var fileDescriptor_10f856db1e670dc4 = []byte{
|
||||
// 925 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x3f, 0x6f, 0x23, 0x45,
|
||||
0x14, 0xf7, 0xc6, 0xce, 0x9d, 0x6f, 0x1c, 0xc0, 0x37, 0x32, 0x87, 0xe5, 0x93, 0xd6, 0x91, 0x2b,
|
||||
0x83, 0xb8, 0x59, 0x72, 0x20, 0x74, 0xa2, 0xf3, 0x26, 0x29, 0x22, 0x92, 0x00, 0xe3, 0x08, 0x21,
|
||||
0xa0, 0x60, 0xbc, 0x7e, 0xd8, 0x13, 0x7b, 0xff, 0x68, 0x67, 0x36, 0xc2, 0x54, 0x54, 0xd4, 0x74,
|
||||
0x7c, 0x03, 0x3e, 0x4b, 0x0a, 0x24, 0x4e, 0x54, 0x57, 0x59, 0x64, 0xf9, 0x0e, 0x14, 0x34, 0xa0,
|
||||
0x9d, 0x1d, 0xaf, 0x37, 0x5e, 0x27, 0xe7, 0x4b, 0x71, 0x9d, 0xdf, 0x9b, 0xf7, 0x7e, 0xbf, 0xf7,
|
||||
0xdf, 0x8b, 0x0e, 0x26, 0xcf, 0x04, 0xe1, 0xbe, 0x35, 0x89, 0x06, 0x10, 0x7a, 0x20, 0x41, 0x58,
|
||||
0x17, 0xe0, 0x0d, 0xfd, 0xd0, 0xd2, 0x0f, 0x2c, 0xe0, 0x96, 0x90, 0x7e, 0xc8, 0x46, 0x60, 0x5d,
|
||||
0xec, 0xb1, 0x69, 0x30, 0x66, 0x7b, 0xd6, 0x08, 0x3c, 0x08, 0x99, 0x84, 0x21, 0x09, 0x42, 0x5f,
|
||||
0xfa, 0xf8, 0x71, 0x6a, 0x4c, 0x58, 0xc0, 0x89, 0x36, 0x26, 0x0b, 0xe3, 0xd6, 0x93, 0x11, 0x97,
|
||||
0xe3, 0x68, 0x40, 0x1c, 0xdf, 0xb5, 0x46, 0xfe, 0xc8, 0xb7, 0x94, 0xcf, 0x20, 0xfa, 0x5e, 0x49,
|
||||
0x4a, 0x50, 0xbf, 0x52, 0xac, 0x56, 0x27, 0x47, 0xec, 0xf8, 0x61, 0xc2, 0xba, 0xca, 0xd7, 0xfa,
|
||||
0x68, 0x69, 0xe3, 0x32, 0x67, 0xcc, 0x3d, 0x08, 0x67, 0x56, 0x30, 0x19, 0x29, 0xa7, 0x10, 0x84,
|
||||
0x1f, 0x85, 0x0e, 0xbc, 0x92, 0x97, 0xb0, 0x5c, 0x90, 0x6c, 0x1d, 0x97, 0x75, 0x93, 0x57, 0x18,
|
||||
0x79, 0x92, 0xbb, 0x45, 0x9a, 0x8f, 0x5f, 0xe6, 0x20, 0x9c, 0x31, 0xb8, 0x6c, 0xd5, 0xaf, 0xf3,
|
||||
0x4f, 0x19, 0xe1, 0xfd, 0xfe, 0x51, 0x3f, 0xad, 0xdf, 0x3e, 0x0b, 0x98, 0xc3, 0xe5, 0x0c, 0x7f,
|
||||
0x87, 0xaa, 0x49, 0x68, 0x43, 0x26, 0x59, 0xd3, 0xd8, 0x35, 0xba, 0xb5, 0xa7, 0x1f, 0x90, 0x65,
|
||||
0xb9, 0x33, 0x06, 0x12, 0x4c, 0x46, 0x89, 0x42, 0x90, 0xc4, 0x9a, 0x5c, 0xec, 0x91, 0xcf, 0x06,
|
||||
0xe7, 0xe0, 0xc8, 0x13, 0x90, 0xcc, 0xc6, 0x97, 0xf3, 0x76, 0x29, 0x9e, 0xb7, 0xd1, 0x52, 0x47,
|
||||
0x33, 0x54, 0xcc, 0xd1, 0x8e, 0xe7, 0x0f, 0xe1, 0xcc, 0x0f, 0xfc, 0xa9, 0x3f, 0x9a, 0x35, 0xb7,
|
||||
0x14, 0xcb, 0x87, 0x9b, 0xb1, 0x1c, 0xb3, 0x01, 0x4c, 0xfb, 0x30, 0x05, 0x47, 0xfa, 0xa1, 0x5d,
|
||||
0x8f, 0xe7, 0xed, 0x9d, 0xd3, 0x1c, 0x18, 0xbd, 0x06, 0x8d, 0x0f, 0x50, 0x5d, 0xcf, 0xc7, 0xfe,
|
||||
0x94, 0x09, 0x71, 0xca, 0x5c, 0x68, 0x96, 0x77, 0x8d, 0xee, 0x03, 0xbb, 0xa9, 0x43, 0xac, 0xf7,
|
||||
0x57, 0xde, 0x69, 0xc1, 0x03, 0x7f, 0x85, 0xaa, 0x8e, 0x2e, 0x4f, 0xb3, 0xa2, 0x82, 0x25, 0xb7,
|
||||
0x05, 0x4b, 0x16, 0x13, 0x41, 0xbe, 0x88, 0x98, 0x27, 0xb9, 0x9c, 0xd9, 0x3b, 0xf1, 0xbc, 0x5d,
|
||||
0x5d, 0x94, 0x98, 0x66, 0x68, 0x58, 0xa0, 0x87, 0x2e, 0xfb, 0x81, 0xbb, 0x91, 0xfb, 0xa5, 0x3f,
|
||||
0x8d, 0x5c, 0xe8, 0xf3, 0x1f, 0xa1, 0xb9, 0x7d, 0x27, 0x8a, 0xb7, 0xe3, 0x79, 0xfb, 0xe1, 0xc9,
|
||||
0x2a, 0x18, 0x2d, 0xe2, 0x77, 0x7e, 0x37, 0xd0, 0xa3, 0x62, 0xe3, 0x8f, 0xb9, 0x90, 0xf8, 0xdb,
|
||||
0x42, 0xf3, 0xc9, 0x86, 0x6d, 0xe1, 0x22, 0x6d, 0x7d, 0x5d, 0xd7, 0xb5, 0xba, 0xd0, 0xe4, 0x1a,
|
||||
0x7f, 0x86, 0xb6, 0xb9, 0x04, 0x57, 0x34, 0xb7, 0x76, 0xcb, 0xdd, 0xda, 0x53, 0x8b, 0xdc, 0xb2,
|
||||
0xc6, 0xa4, 0x18, 0xa1, 0xfd, 0x86, 0xc6, 0xde, 0x3e, 0x4a, 0x50, 0x68, 0x0a, 0xd6, 0xf9, 0x6d,
|
||||
0x0b, 0xd5, 0xd3, 0xec, 0x7a, 0x52, 0x32, 0x67, 0xec, 0x82, 0x27, 0x5f, 0xc3, 0x14, 0xf7, 0x51,
|
||||
0x45, 0x04, 0xe0, 0xe8, 0xe9, 0xdd, 0xbb, 0x35, 0x97, 0xd5, 0xf0, 0xfa, 0x01, 0x38, 0xf6, 0x8e,
|
||||
0x86, 0xaf, 0x24, 0x12, 0x55, 0x60, 0xf8, 0x1b, 0x74, 0x4f, 0x48, 0x26, 0x23, 0xa1, 0xa6, 0xf4,
|
||||
0xfa, 0x52, 0x6c, 0x00, 0xab, 0x5c, 0xed, 0x37, 0x35, 0xf0, 0xbd, 0x54, 0xa6, 0x1a, 0xb2, 0x73,
|
||||
0x69, 0xa0, 0xc6, 0xaa, 0xcb, 0x6b, 0xe8, 0x3a, 0xbd, 0xde, 0xf5, 0x27, 0xaf, 0x94, 0xd2, 0x0d,
|
||||
0x3d, 0xff, 0xd3, 0x40, 0x8f, 0x0a, 0xd9, 0xab, 0x85, 0xc0, 0xc7, 0xa8, 0x11, 0x40, 0x28, 0xb8,
|
||||
0x90, 0xe0, 0xc9, 0xd4, 0x46, 0xad, 0xbd, 0x91, 0xae, 0x7d, 0x3c, 0x6f, 0x37, 0x3e, 0x5f, 0xf3,
|
||||
0x4e, 0xd7, 0x7a, 0xe1, 0x73, 0x54, 0xe7, 0xde, 0x94, 0x7b, 0xa0, 0xf7, 0x67, 0xd9, 0xf1, 0x6e,
|
||||
0x3e, 0x8f, 0xe4, 0x8f, 0x23, 0x29, 0xc8, 0x2a, 0xb2, 0x6a, 0x74, 0x23, 0x39, 0x33, 0x47, 0x2b,
|
||||
0x28, 0xb4, 0x80, 0xdb, 0xf9, 0x63, 0x4d, 0x7f, 0x92, 0x07, 0xfc, 0x3e, 0xaa, 0x32, 0xa5, 0x81,
|
||||
0x50, 0xa7, 0x91, 0xd5, 0xbb, 0xa7, 0xf5, 0x34, 0xb3, 0x50, 0x33, 0xa4, 0x4a, 0xb1, 0xe6, 0xb0,
|
||||
0x6e, 0x30, 0x43, 0xca, 0x35, 0x37, 0x43, 0x4a, 0xa6, 0x1a, 0x32, 0x09, 0x25, 0x39, 0xb0, 0xb9,
|
||||
0x43, 0x9a, 0x85, 0x72, 0xaa, 0xf5, 0x34, 0xb3, 0xe8, 0xfc, 0x57, 0x5e, 0xd3, 0x26, 0x35, 0x8c,
|
||||
0xb9, 0x9c, 0x86, 0x2a, 0xa7, 0x6a, 0x21, 0xa7, 0x61, 0x96, 0xd3, 0x10, 0xff, 0x6a, 0x20, 0xcc,
|
||||
0x32, 0x88, 0x93, 0xc5, 0xb0, 0xa6, 0x13, 0xf5, 0xe9, 0x1d, 0x96, 0x84, 0xf4, 0x0a, 0x68, 0x87,
|
||||
0x9e, 0x0c, 0x67, 0x76, 0x4b, 0x47, 0x81, 0x8b, 0x06, 0x74, 0x4d, 0x08, 0xf8, 0x1c, 0xd5, 0x52,
|
||||
0xed, 0x61, 0x18, 0xfa, 0xa1, 0x5e, 0xdb, 0xee, 0x06, 0x11, 0x29, 0x7b, 0xdb, 0x8c, 0xe7, 0xed,
|
||||
0x5a, 0x6f, 0x09, 0xf0, 0xef, 0xbc, 0x5d, 0xcb, 0xbd, 0xd3, 0x3c, 0x78, 0xc2, 0x35, 0x84, 0x25,
|
||||
0x57, 0xe5, 0x2e, 0x5c, 0x07, 0x70, 0x33, 0x57, 0x0e, 0xbc, 0x75, 0x88, 0xde, 0xb9, 0xa1, 0x44,
|
||||
0xb8, 0x8e, 0xca, 0x13, 0x98, 0xa5, 0x93, 0x48, 0x93, 0x9f, 0xb8, 0x81, 0xb6, 0x2f, 0xd8, 0x34,
|
||||
0x4a, 0x27, 0xee, 0x01, 0x4d, 0x85, 0x4f, 0xb6, 0x9e, 0x19, 0x9d, 0x9f, 0x0d, 0x94, 0xe7, 0xc0,
|
||||
0xc7, 0xa8, 0x92, 0x7c, 0x93, 0xe8, 0x33, 0xf3, 0xde, 0x66, 0x67, 0xe6, 0x8c, 0xbb, 0xb0, 0x3c,
|
||||
0x97, 0x89, 0x44, 0x15, 0x0a, 0x7e, 0x17, 0xdd, 0x77, 0x41, 0x08, 0x36, 0xd2, 0xcc, 0xf6, 0x5b,
|
||||
0xda, 0xe8, 0xfe, 0x49, 0xaa, 0xa6, 0x8b, 0x77, 0xbb, 0x77, 0x79, 0x65, 0x96, 0x9e, 0x5f, 0x99,
|
||||
0xa5, 0x17, 0x57, 0x66, 0xe9, 0xa7, 0xd8, 0x34, 0x2e, 0x63, 0xd3, 0x78, 0x1e, 0x9b, 0xc6, 0x8b,
|
||||
0xd8, 0x34, 0xfe, 0x8a, 0x4d, 0xe3, 0x97, 0xbf, 0xcd, 0xd2, 0xd7, 0x8f, 0x6f, 0xf9, 0x0a, 0xfd,
|
||||
0x3f, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x8d, 0x17, 0x01, 0xbc, 0x0a, 0x00, 0x00,
|
||||
// 1023 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcb, 0x6e, 0x23, 0x45,
|
||||
0x17, 0x4e, 0xe7, 0x32, 0xe3, 0xa9, 0xe4, 0xff, 0xc7, 0x53, 0xca, 0x0c, 0xc6, 0x23, 0xb5, 0x23,
|
||||
0xaf, 0x0c, 0x62, 0xba, 0x49, 0x40, 0x68, 0x84, 0xc4, 0xc2, 0x9d, 0x64, 0x11, 0x91, 0x84, 0xa1,
|
||||
0x1c, 0x01, 0x02, 0x16, 0x94, 0xdb, 0x07, 0xbb, 0x62, 0xf7, 0x45, 0x55, 0xd5, 0x16, 0x66, 0xc5,
|
||||
0x8a, 0x35, 0x3b, 0xde, 0x80, 0x67, 0xc9, 0x02, 0x89, 0xd1, 0xac, 0x66, 0x65, 0x91, 0x86, 0x67,
|
||||
0x60, 0xc1, 0x06, 0xd4, 0xd5, 0xe5, 0x76, 0xc7, 0x6d, 0x07, 0x27, 0x8b, 0xec, 0x5c, 0xe7, 0xf2,
|
||||
0x9d, 0xdb, 0x77, 0x4e, 0x27, 0xe8, 0xa0, 0xff, 0x5c, 0x58, 0x2c, 0xb0, 0xfb, 0x51, 0x1b, 0xb8,
|
||||
0x0f, 0x12, 0x84, 0x3d, 0x04, 0xbf, 0x13, 0x70, 0x5b, 0x2b, 0x68, 0xc8, 0x6c, 0x21, 0x03, 0x4e,
|
||||
0xbb, 0x60, 0x0f, 0x77, 0xe9, 0x20, 0xec, 0xd1, 0x5d, 0xbb, 0x0b, 0x3e, 0x70, 0x2a, 0xa1, 0x63,
|
||||
0x85, 0x3c, 0x90, 0x01, 0x7e, 0x9a, 0x1a, 0x5b, 0x34, 0x64, 0x96, 0x36, 0xb6, 0x26, 0xc6, 0xd5,
|
||||
0x67, 0x5d, 0x26, 0x7b, 0x51, 0xdb, 0x72, 0x03, 0xcf, 0xee, 0x06, 0xdd, 0xc0, 0x56, 0x3e, 0xed,
|
||||
0xe8, 0x5b, 0xf5, 0x52, 0x0f, 0xf5, 0x2b, 0xc5, 0xaa, 0xd6, 0x73, 0x81, 0xdd, 0x80, 0x27, 0x51,
|
||||
0x67, 0xe3, 0x55, 0xdf, 0x9f, 0xda, 0x78, 0xd4, 0xed, 0x31, 0x1f, 0xf8, 0xc8, 0x0e, 0xfb, 0x5d,
|
||||
0xe5, 0xc4, 0x41, 0x04, 0x11, 0x77, 0xe1, 0x46, 0x5e, 0xc2, 0xf6, 0x40, 0xd2, 0x79, 0xb1, 0xec,
|
||||
0x45, 0x5e, 0x3c, 0xf2, 0x25, 0xf3, 0x8a, 0x61, 0x3e, 0xf8, 0x2f, 0x07, 0xe1, 0xf6, 0xc0, 0xa3,
|
||||
0xb3, 0x7e, 0xf5, 0xbf, 0xd6, 0x10, 0xde, 0x6f, 0x1d, 0xb5, 0xd2, 0xfe, 0xed, 0xd3, 0x90, 0xba,
|
||||
0x4c, 0x8e, 0xf0, 0x37, 0xa8, 0x94, 0xa4, 0xd6, 0xa1, 0x92, 0x56, 0x8c, 0x1d, 0xa3, 0xb1, 0xb9,
|
||||
0xf7, 0xae, 0x35, 0x6d, 0x77, 0x16, 0xc1, 0x0a, 0xfb, 0xdd, 0x44, 0x20, 0xac, 0xc4, 0xda, 0x1a,
|
||||
0xee, 0x5a, 0x9f, 0xb4, 0xcf, 0xc1, 0x95, 0x27, 0x20, 0xa9, 0x83, 0x2f, 0xc6, 0xb5, 0x95, 0x78,
|
||||
0x5c, 0x43, 0x53, 0x19, 0xc9, 0x50, 0x31, 0x43, 0x5b, 0x7e, 0xd0, 0x81, 0xb3, 0x20, 0x0c, 0x06,
|
||||
0x41, 0x77, 0x54, 0x59, 0x55, 0x51, 0xde, 0x5b, 0x2e, 0xca, 0x31, 0x6d, 0xc3, 0xa0, 0x05, 0x03,
|
||||
0x70, 0x65, 0xc0, 0x9d, 0x72, 0x3c, 0xae, 0x6d, 0x9d, 0xe6, 0xc0, 0xc8, 0x15, 0x68, 0x7c, 0x80,
|
||||
0xca, 0x9a, 0x1f, 0xfb, 0x03, 0x2a, 0xc4, 0x29, 0xf5, 0xa0, 0xb2, 0xb6, 0x63, 0x34, 0x1e, 0x38,
|
||||
0x15, 0x9d, 0x62, 0xb9, 0x35, 0xa3, 0x27, 0x05, 0x0f, 0xfc, 0x05, 0x2a, 0xb9, 0xba, 0x3d, 0x95,
|
||||
0x75, 0x95, 0xac, 0x75, 0x5d, 0xb2, 0xd6, 0x84, 0x11, 0xd6, 0xa7, 0x11, 0xf5, 0x25, 0x93, 0x23,
|
||||
0x67, 0x2b, 0x1e, 0xd7, 0x4a, 0x93, 0x16, 0x93, 0x0c, 0x0d, 0x0b, 0xf4, 0xc8, 0xa3, 0xdf, 0x31,
|
||||
0x2f, 0xf2, 0x3e, 0x0b, 0x06, 0x91, 0x07, 0x2d, 0xf6, 0x3d, 0x54, 0x36, 0x6e, 0x15, 0xe2, 0x71,
|
||||
0x3c, 0xae, 0x3d, 0x3a, 0x99, 0x05, 0x23, 0x45, 0xfc, 0xfa, 0xaf, 0x06, 0x7a, 0x52, 0x1c, 0xfc,
|
||||
0x31, 0x13, 0x12, 0x7f, 0x5d, 0x18, 0xbe, 0xb5, 0xe4, 0x58, 0x98, 0x48, 0x47, 0x5f, 0xd6, 0x7d,
|
||||
0x2d, 0x4d, 0x24, 0xb9, 0xc1, 0x9f, 0xa1, 0x0d, 0x26, 0xc1, 0x13, 0x95, 0xd5, 0x9d, 0xb5, 0xc6,
|
||||
0xe6, 0x9e, 0x6d, 0x5d, 0xb3, 0xc6, 0x56, 0x31, 0x43, 0xe7, 0x7f, 0x1a, 0x7b, 0xe3, 0x28, 0x41,
|
||||
0x21, 0x29, 0x58, 0xfd, 0x97, 0x55, 0x54, 0x4e, 0xab, 0x6b, 0x4a, 0x49, 0xdd, 0x9e, 0x07, 0xbe,
|
||||
0xbc, 0x03, 0x16, 0xb7, 0xd0, 0xba, 0x08, 0xc1, 0xd5, 0xec, 0xdd, 0xbd, 0xb6, 0x96, 0xd9, 0xf4,
|
||||
0x5a, 0x21, 0xb8, 0xce, 0x96, 0x86, 0x5f, 0x4f, 0x5e, 0x44, 0x81, 0xe1, 0xaf, 0xd0, 0x3d, 0x21,
|
||||
0xa9, 0x8c, 0x84, 0x62, 0xe9, 0xd5, 0xa5, 0x58, 0x02, 0x56, 0xb9, 0x3a, 0xff, 0xd7, 0xc0, 0xf7,
|
||||
0xd2, 0x37, 0xd1, 0x90, 0xf5, 0x0b, 0x03, 0x6d, 0xcf, 0xba, 0xdc, 0xc1, 0xd4, 0xc9, 0xd5, 0xa9,
|
||||
0x3f, 0xbb, 0x51, 0x49, 0x0b, 0x66, 0xfe, 0xca, 0x40, 0x4f, 0x0a, 0xd5, 0xab, 0x85, 0xc0, 0xc7,
|
||||
0x68, 0x3b, 0x04, 0x2e, 0x98, 0x90, 0xe0, 0xcb, 0xd4, 0x46, 0xad, 0xbd, 0x91, 0xae, 0x7d, 0x3c,
|
||||
0xae, 0x6d, 0xbf, 0x98, 0xa3, 0x27, 0x73, 0xbd, 0xf0, 0x39, 0x2a, 0x33, 0x7f, 0xc0, 0x7c, 0xd0,
|
||||
0xfb, 0x33, 0x9d, 0x78, 0x23, 0x5f, 0x47, 0xf2, 0xe1, 0x48, 0x1a, 0x32, 0x8b, 0xac, 0x06, 0xbd,
|
||||
0x9d, 0x9c, 0x99, 0xa3, 0x19, 0x14, 0x52, 0xc0, 0xad, 0xff, 0x36, 0x67, 0x3e, 0x89, 0x02, 0xbf,
|
||||
0x83, 0x4a, 0x54, 0x49, 0x80, 0xeb, 0x32, 0xb2, 0x7e, 0x37, 0xb5, 0x9c, 0x64, 0x16, 0x8a, 0x43,
|
||||
0xaa, 0x15, 0x73, 0x0e, 0xeb, 0x12, 0x1c, 0x52, 0xae, 0x39, 0x0e, 0xa9, 0x37, 0xd1, 0x90, 0x49,
|
||||
0x2a, 0xc9, 0x81, 0xcd, 0x1d, 0xd2, 0x2c, 0x95, 0x53, 0x2d, 0x27, 0x99, 0x45, 0xfd, 0x9f, 0xb5,
|
||||
0x39, 0x63, 0x52, 0x64, 0xcc, 0xd5, 0xd4, 0x51, 0x35, 0x95, 0x0a, 0x35, 0x75, 0xb2, 0x9a, 0x3a,
|
||||
0xf8, 0x67, 0x03, 0x61, 0x9a, 0x41, 0x9c, 0x4c, 0xc8, 0x9a, 0x32, 0xea, 0xe3, 0x5b, 0x2c, 0x89,
|
||||
0xd5, 0x2c, 0xa0, 0x1d, 0xfa, 0x92, 0x8f, 0x9c, 0xaa, 0xce, 0x02, 0x17, 0x0d, 0xc8, 0x9c, 0x14,
|
||||
0xf0, 0x39, 0xda, 0x4c, 0xa5, 0x87, 0x9c, 0x07, 0x5c, 0xaf, 0x6d, 0x63, 0x89, 0x8c, 0x94, 0xbd,
|
||||
0x63, 0xc6, 0xe3, 0xda, 0x66, 0x73, 0x0a, 0xf0, 0xf7, 0xb8, 0xb6, 0x99, 0xd3, 0x93, 0x3c, 0x78,
|
||||
0x12, 0xab, 0x03, 0xd3, 0x58, 0xeb, 0xb7, 0x89, 0x75, 0x00, 0x8b, 0x63, 0xe5, 0xc0, 0xab, 0x87,
|
||||
0xe8, 0x8d, 0x05, 0x2d, 0xc2, 0x65, 0xb4, 0xd6, 0x87, 0x51, 0xca, 0x44, 0x92, 0xfc, 0xc4, 0xdb,
|
||||
0x68, 0x63, 0x48, 0x07, 0x51, 0xca, 0xb8, 0x07, 0x24, 0x7d, 0x7c, 0xb8, 0xfa, 0xdc, 0xa8, 0xff,
|
||||
0xb9, 0x8a, 0x1e, 0x67, 0x13, 0xe0, 0xac, 0x1d, 0x49, 0x10, 0xea, 0xc3, 0x7a, 0x07, 0x17, 0x7a,
|
||||
0x0f, 0xa1, 0x0e, 0x67, 0x43, 0xe0, 0x8a, 0xad, 0x2a, 0xb5, 0xa9, 0xc7, 0x41, 0xa6, 0x21, 0x39,
|
||||
0x2b, 0x3c, 0x44, 0x28, 0xa4, 0x9c, 0x7a, 0x20, 0x81, 0x27, 0x47, 0x38, 0xe1, 0x97, 0xb3, 0x1c,
|
||||
0xbf, 0xf2, 0xd5, 0x59, 0x2f, 0x32, 0x90, 0x94, 0x56, 0x59, 0xdc, 0xa9, 0x82, 0xe4, 0x22, 0x55,
|
||||
0x3f, 0x42, 0x0f, 0x67, 0x5c, 0x6e, 0xd4, 0xe6, 0x57, 0x06, 0x7a, 0x73, 0x6e, 0x22, 0x77, 0x70,
|
||||
0xdf, 0x3f, 0xbf, 0x7a, 0xdf, 0xf7, 0x6e, 0xde, 0xad, 0x05, 0x47, 0xfe, 0x47, 0x03, 0xe5, 0xf9,
|
||||
0x89, 0x8f, 0xd1, 0x7a, 0xf2, 0xf7, 0xac, 0x2e, 0xe1, 0xed, 0xe5, 0x4a, 0x38, 0x63, 0x1e, 0x4c,
|
||||
0x3f, 0xb5, 0xc9, 0x8b, 0x28, 0x14, 0xfc, 0x16, 0xba, 0xef, 0x81, 0x10, 0xb4, 0x3b, 0xa1, 0xc6,
|
||||
0x43, 0x6d, 0x74, 0xff, 0x24, 0x15, 0x93, 0x89, 0xde, 0x69, 0x5e, 0x5c, 0x9a, 0x2b, 0x2f, 0x2f,
|
||||
0xcd, 0x95, 0xd7, 0x97, 0xe6, 0xca, 0x0f, 0xb1, 0x69, 0x5c, 0xc4, 0xa6, 0xf1, 0x32, 0x36, 0x8d,
|
||||
0xd7, 0xb1, 0x69, 0xfc, 0x1e, 0x9b, 0xc6, 0x4f, 0x7f, 0x98, 0x2b, 0x5f, 0x3e, 0xbd, 0xe6, 0x3f,
|
||||
0x98, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xb9, 0x2f, 0x75, 0xee, 0xf8, 0x0c, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *CSIStorageCapacity) Marshal() (dAtA []byte, err error) {
|
||||
@ -734,6 +799,115 @@ func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error)
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *VolumeAttributesClass) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *VolumeAttributesClass) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *VolumeAttributesClass) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Parameters) > 0 {
|
||||
keysForParameters := make([]string, 0, len(m.Parameters))
|
||||
for k := range m.Parameters {
|
||||
keysForParameters = append(keysForParameters, string(k))
|
||||
}
|
||||
github_com_gogo_protobuf_sortkeys.Strings(keysForParameters)
|
||||
for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- {
|
||||
v := m.Parameters[string(keysForParameters[iNdEx])]
|
||||
baseI := i
|
||||
i -= len(v)
|
||||
copy(dAtA[i:], v)
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
i -= len(keysForParameters[iNdEx])
|
||||
copy(dAtA[i:], keysForParameters[iNdEx])
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(len(keysForParameters[iNdEx])))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
|
||||
i--
|
||||
dAtA[i] = 0x1a
|
||||
}
|
||||
}
|
||||
i -= len(m.DriverName)
|
||||
copy(dAtA[i:], m.DriverName)
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName)))
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
{
|
||||
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *VolumeAttributesClassList) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *VolumeAttributesClassList) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *VolumeAttributesClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Items) > 0 {
|
||||
for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
}
|
||||
{
|
||||
size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *VolumeError) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
@ -915,6 +1089,44 @@ func (m *VolumeAttachmentStatus) Size() (n int) {
	return n
}

func (m *VolumeAttributesClass) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = m.ObjectMeta.Size()
	n += 1 + l + sovGenerated(uint64(l))
	l = len(m.DriverName)
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.Parameters) > 0 {
		for k, v := range m.Parameters {
			_ = k
			_ = v
			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
		}
	}
	return n
}

func (m *VolumeAttributesClassList) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = m.ListMeta.Size()
	n += 1 + l + sovGenerated(uint64(l))
	if len(m.Items) > 0 {
		for _, e := range m.Items {
			l = e.Size()
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	return n
}

func (m *VolumeError) Size() (n int) {
	if m == nil {
		return 0
@ -1038,6 +1250,44 @@ func (this *VolumeAttachmentStatus) String() string {
	}, "")
	return s
}
func (this *VolumeAttributesClass) String() string {
	if this == nil {
		return "nil"
	}
	keysForParameters := make([]string, 0, len(this.Parameters))
	for k := range this.Parameters {
		keysForParameters = append(keysForParameters, k)
	}
	github_com_gogo_protobuf_sortkeys.Strings(keysForParameters)
	mapStringForParameters := "map[string]string{"
	for _, k := range keysForParameters {
		mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k])
	}
	mapStringForParameters += "}"
	s := strings.Join([]string{`&VolumeAttributesClass{`,
		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
		`DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`,
		`Parameters:` + mapStringForParameters + `,`,
		`}`,
	}, "")
	return s
}
func (this *VolumeAttributesClassList) String() string {
	if this == nil {
		return "nil"
	}
	repeatedStringForItems := "[]VolumeAttributesClass{"
	for _, f := range this.Items {
		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttributesClass", "VolumeAttributesClass", 1), `&`, ``, 1) + ","
	}
	repeatedStringForItems += "}"
	s := strings.Join([]string{`&VolumeAttributesClassList{`,
		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
		`Items:` + repeatedStringForItems + `,`,
		`}`,
	}, "")
	return s
}
func (this *VolumeError) String() string {
	if this == nil {
		return "nil"
	}
@ -2198,6 +2448,365 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error {
	}
	return nil
}
func (m *VolumeAttributesClass) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VolumeAttributesClass: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VolumeAttributesClass: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.DriverName = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Parameters == nil {
				m.Parameters = make(map[string]string)
			}
			var mapkey string
			var mapvalue string
			for iNdEx < postIndex {
				entryPreIndex := iNdEx
				var wire uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowGenerated
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					wire |= uint64(b&0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				fieldNum := int32(wire >> 3)
				if fieldNum == 1 {
					var stringLenmapkey uint64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowGenerated
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						stringLenmapkey |= uint64(b&0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					intStringLenmapkey := int(stringLenmapkey)
					if intStringLenmapkey < 0 {
						return ErrInvalidLengthGenerated
					}
					postStringIndexmapkey := iNdEx + intStringLenmapkey
					if postStringIndexmapkey < 0 {
						return ErrInvalidLengthGenerated
					}
					if postStringIndexmapkey > l {
						return io.ErrUnexpectedEOF
					}
					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
					iNdEx = postStringIndexmapkey
				} else if fieldNum == 2 {
					var stringLenmapvalue uint64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowGenerated
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						stringLenmapvalue |= uint64(b&0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					intStringLenmapvalue := int(stringLenmapvalue)
					if intStringLenmapvalue < 0 {
						return ErrInvalidLengthGenerated
					}
					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
					if postStringIndexmapvalue < 0 {
						return ErrInvalidLengthGenerated
					}
					if postStringIndexmapvalue > l {
						return io.ErrUnexpectedEOF
					}
					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
					iNdEx = postStringIndexmapvalue
				} else {
					iNdEx = entryPreIndex
					skippy, err := skipGenerated(dAtA[iNdEx:])
					if err != nil {
						return err
					}
					if (skippy < 0) || (iNdEx+skippy) < 0 {
						return ErrInvalidLengthGenerated
					}
					if (iNdEx + skippy) > postIndex {
						return io.ErrUnexpectedEOF
					}
					iNdEx += skippy
				}
			}
			m.Parameters[mapkey] = mapvalue
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func (m *VolumeAttributesClassList) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VolumeAttributesClassList: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VolumeAttributesClassList: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthGenerated
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Items = append(m.Items, VolumeAttributesClass{})
			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func (m *VolumeError) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
40
vendor/k8s.io/api/storage/v1alpha1/generated.proto
generated
vendored
@ -216,6 +216,46 @@ message VolumeAttachmentStatus {
  optional VolumeError detachError = 4;
}

// VolumeAttributesClass represents a specification of mutable volume attributes
// defined by the CSI driver. The class can be specified during dynamic provisioning
// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.
message VolumeAttributesClass {
  // Standard object's metadata.
  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

  // Name of the CSI driver
  // This field is immutable.
  optional string driverName = 2;

  // parameters hold volume attributes defined by the CSI driver. These values
  // are opaque to the Kubernetes and are passed directly to the CSI driver.
  // The underlying storage provider supports changing these attributes on an
  // existing volume, however the parameters field itself is immutable. To
  // invoke a volume update, a new VolumeAttributesClass should be created with
  // new parameters, and the PersistentVolumeClaim should be updated to reference
  // the new VolumeAttributesClass.
  //
  // This field is required and must contain at least one key/value pair.
  // The keys cannot be empty, and the maximum number of parameters is 512, with
  // a cumulative max size of 256K. If the CSI driver rejects invalid parameters,
  // the target PersistentVolumeClaim will be set to an "Infeasible" state in the
  // modifyVolumeStatus field.
  map<string, string> parameters = 3;
}

// VolumeAttributesClassList is a collection of VolumeAttributesClass objects.
message VolumeAttributesClassList {
  // Standard list metadata
  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

  // items is the list of VolumeAttributesClass objects.
  repeated VolumeAttributesClass items = 2;
}

// VolumeError captures an error encountered during a volume operation.
message VolumeError {
  // time represents the time the error was encountered.
2
vendor/k8s.io/api/storage/v1alpha1/register.go
generated
vendored
@ -45,6 +45,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
		&VolumeAttachmentList{},
		&CSIStorageCapacity{},
		&CSIStorageCapacityList{},
		&VolumeAttributesClass{},
		&VolumeAttributesClassList{},
	)

	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
52
vendor/k8s.io/api/storage/v1alpha1/types.go
generated
vendored
@ -251,3 +251,55 @@ type CSIStorageCapacityList struct {
	// +listMapKey=name
	Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// +genclient
// +genclient:nonNamespaced
// +k8s:prerelease-lifecycle-gen:introduced=1.29
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// VolumeAttributesClass represents a specification of mutable volume attributes
// defined by the CSI driver. The class can be specified during dynamic provisioning
// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.
type VolumeAttributesClass struct {
	metav1.TypeMeta `json:",inline"`

	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Name of the CSI driver
	// This field is immutable.
	DriverName string `json:"driverName" protobuf:"bytes,2,opt,name=driverName"`

	// parameters hold volume attributes defined by the CSI driver. These values
	// are opaque to the Kubernetes and are passed directly to the CSI driver.
	// The underlying storage provider supports changing these attributes on an
	// existing volume, however the parameters field itself is immutable. To
	// invoke a volume update, a new VolumeAttributesClass should be created with
	// new parameters, and the PersistentVolumeClaim should be updated to reference
	// the new VolumeAttributesClass.
	//
	// This field is required and must contain at least one key/value pair.
	// The keys cannot be empty, and the maximum number of parameters is 512, with
	// a cumulative max size of 256K. If the CSI driver rejects invalid parameters,
	// the target PersistentVolumeClaim will be set to an "Infeasible" state in the
	// modifyVolumeStatus field.
	Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"`
}

// +k8s:prerelease-lifecycle-gen:introduced=1.29
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// VolumeAttributesClassList is a collection of VolumeAttributesClass objects.
type VolumeAttributesClassList struct {
	metav1.TypeMeta `json:",inline"`

	// Standard list metadata
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// items is the list of VolumeAttributesClass objects.
	Items []VolumeAttributesClass `json:"items" protobuf:"bytes,2,rep,name=items"`
}
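Not part of the vendored diff above, just orientation: a minimal Go sketch of how this new v1alpha1 type could be constructed by a consumer of the package. The driver name and parameter keys are hypothetical placeholders (real keys are defined by the CSI driver), and in a cluster where the feature is enabled a PersistentVolumeClaim would reference the class by name from its spec.

package main

import (
	"fmt"

	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A VolumeAttributesClass names a CSI driver and a set of opaque,
	// driver-specific mutable attributes (QoS settings are a typical use).
	vac := storagev1alpha1.VolumeAttributesClass{
		ObjectMeta: metav1.ObjectMeta{Name: "gold"},
		DriverName: "example.csi.vendor.com", // hypothetical driver name
		Parameters: map[string]string{
			"iops":       "4000", // hypothetical, driver-defined key
			"throughput": "200",  // hypothetical, driver-defined key
		},
	}
	fmt.Printf("class %q for driver %s: %v\n", vac.Name, vac.DriverName, vac.Parameters)
}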
21
vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go
generated
vendored
@ -103,6 +103,27 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string {
	return map_VolumeAttachmentStatus
}

var map_VolumeAttributesClass = map[string]string{
	"":           "VolumeAttributesClass represents a specification of mutable volume attributes defined by the CSI driver. The class can be specified during dynamic provisioning of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.",
	"metadata":   "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
	"driverName": "Name of the CSI driver This field is immutable.",
	"parameters": "parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass.\n\nThis field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \"Infeasible\" state in the modifyVolumeStatus field.",
}

func (VolumeAttributesClass) SwaggerDoc() map[string]string {
	return map_VolumeAttributesClass
}

var map_VolumeAttributesClassList = map[string]string{
	"":         "VolumeAttributesClassList is a collection of VolumeAttributesClass objects.",
	"metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
	"items":    "items is the list of VolumeAttributesClass objects.",
}

func (VolumeAttributesClassList) SwaggerDoc() map[string]string {
	return map_VolumeAttributesClassList
}

var map_VolumeError = map[string]string{
	"":     "VolumeError captures an error encountered during a volume operation.",
	"time": "time represents the time the error was encountered.",
66
vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go
generated
vendored
@ -238,6 +238,72 @@ func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeAttributesClass) DeepCopyInto(out *VolumeAttributesClass) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	if in.Parameters != nil {
		in, out := &in.Parameters, &out.Parameters
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttributesClass.
func (in *VolumeAttributesClass) DeepCopy() *VolumeAttributesClass {
	if in == nil {
		return nil
	}
	out := new(VolumeAttributesClass)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VolumeAttributesClass) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeAttributesClassList) DeepCopyInto(out *VolumeAttributesClassList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]VolumeAttributesClass, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttributesClassList.
func (in *VolumeAttributesClassList) DeepCopy() *VolumeAttributesClassList {
	if in == nil {
		return nil
	}
	out := new(VolumeAttributesClassList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VolumeAttributesClassList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeError) DeepCopyInto(out *VolumeError) {
	*out = *in
36
vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go
generated
vendored
@ -120,3 +120,39 @@ func (in *VolumeAttachmentList) APILifecycleReplacement() schema.GroupVersionKin
func (in *VolumeAttachmentList) APILifecycleRemoved() (major, minor int) {
	return 1, 24
}

// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *VolumeAttributesClass) APILifecycleIntroduced() (major, minor int) {
	return 1, 29
}

// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *VolumeAttributesClass) APILifecycleDeprecated() (major, minor int) {
	return 1, 32
}

// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *VolumeAttributesClass) APILifecycleRemoved() (major, minor int) {
	return 1, 35
}

// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *VolumeAttributesClassList) APILifecycleIntroduced() (major, minor int) {
	return 1, 29
}

// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *VolumeAttributesClassList) APILifecycleDeprecated() (major, minor int) {
	return 1, 32
}

// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *VolumeAttributesClassList) APILifecycleRemoved() (major, minor int) {
	return 1, 35
}
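For illustration only (not from this commit): the generated lifecycle methods above can be queried directly, and the returned releases follow the introduced-plus-three-minor convention spelled out in the generated comments.

package main

import (
	"fmt"

	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
)

func main() {
	var vac storagev1alpha1.VolumeAttributesClass
	imaj, imin := vac.APILifecycleIntroduced() // 1, 29
	dmaj, dmin := vac.APILifecycleDeprecated() // 1, 32 (introduced + 3 minor)
	rmaj, rmin := vac.APILifecycleRemoved()    // 1, 35 (deprecated + 3 minor)
	fmt.Printf("introduced %d.%d, deprecated %d.%d, removed %d.%d\n",
		imaj, imin, dmaj, dmin, rmaj, rmin)
}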
2
vendor/k8s.io/api/storage/v1beta1/generated.proto
generated
vendored
@ -91,7 +91,7 @@ message CSIDriverSpec {
  // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls.
  // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.
  //
  // The following VolumeConext will be passed if podInfoOnMount is set to true.
  // The following VolumeContext will be passed if podInfoOnMount is set to true.
  // This list might grow, but the prefix will be used.
  // "csi.storage.k8s.io/pod.name": pod.Name
  // "csi.storage.k8s.io/pod.namespace": pod.Namespace
2
vendor/k8s.io/api/storage/v1beta1/types.go
generated
vendored
@ -311,7 +311,7 @@ type CSIDriverSpec struct {
	// If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls.
	// The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.
	//
	// The following VolumeConext will be passed if podInfoOnMount is set to true.
	// The following VolumeContext will be passed if podInfoOnMount is set to true.
	// This list might grow, but the prefix will be used.
	// "csi.storage.k8s.io/pod.name": pod.Name
	// "csi.storage.k8s.io/pod.namespace": pod.Namespace
2
vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go
generated
vendored
2
vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go
generated
vendored
@ -50,7 +50,7 @@ func (CSIDriverList) SwaggerDoc() map[string]string {
|
||||
var map_CSIDriverSpec = map[string]string{
|
||||
"": "CSIDriverSpec is the specification of a CSIDriver.",
|
||||
"attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.",
|
||||
"podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.",
|
||||
"podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.",
|
||||
"volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is immutable.",
|
||||
"storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.",
|
||||
"fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.",
|
||||
|
31
vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go
generated
vendored
@ -210,6 +210,19 @@ type ValidationRule struct {
	// - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values
	// are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with
	// non-intersecting keys are appended, retaining their partial order.
	//
	// If `rule` makes use of the `oldSelf` variable it is implicitly a
	// `transition rule`.
	//
	// By default, the `oldSelf` variable is the same type as `self`.
	// When `optionalOldSelf` is true, the `oldSelf` variable is a CEL optional
	// variable whose value() is the same type as `self`.
	// See the documentation for the `optionalOldSelf` field for details.
	//
	// Transition rules by default are applied only on UPDATE requests and are
	// skipped if an old value could not be found. You can opt a transition
	// rule into unconditional evaluation by setting `optionalOldSelf` to true.
	//
	Rule string
	// Message represents the message displayed when validation fails. The message is required if the Rule contains
	// line breaks. The message must not contain line breaks.
@ -246,6 +259,24 @@ type ValidationRule struct {
	// e.g. for attribute `foo.34$` appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']`
	// +optional
	FieldPath string

	// optionalOldSelf is used to opt a transition rule into evaluation
	// even when the object is first created, or if the old object is
	// missing the value.
	//
	// When enabled `oldSelf` will be a CEL optional whose value will be
	// `None` if there is no old value, or when the object is initially created.
	//
	// You may check for presence of oldSelf using `oldSelf.hasValue()` and
	// unwrap it after checking using `oldSelf.value()`. Check the CEL
	// documentation for Optional types for more information:
	// https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes
	//
	// May not be set unless `oldSelf` is used in `rule`.
	//
	// +featureGate=CRDValidationRatcheting
	// +optional
	OptionalOldSelf *bool
}

// JSON represents any valid JSON value.
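A hedged sketch, not taken from this commit, of what the new optionalOldSelf field enables for CRD authors: a CEL transition rule that also runs on CREATE, where oldSelf becomes a CEL optional guarded with hasValue()/value(). The rule text and message below are illustrative only.

package main

import (
	"fmt"

	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
)

func main() {
	enabled := true
	// With OptionalOldSelf set, the rule is evaluated even on object creation;
	// oldSelf is then a CEL optional and must be unwrapped after hasValue().
	rule := apiextensions.ValidationRule{
		Rule:            "!oldSelf.hasValue() || self >= oldSelf.value()",
		Message:         "field may not decrease",
		OptionalOldSelf: &enabled,
	}
	fmt.Println(rule.Rule, *rule.OptionalOldSelf)
}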
429
vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go
generated
vendored
@ -814,202 +814,204 @@ func init() {
}

var fileDescriptor_f5a35c9667703937 = []byte{
	// 3111 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0xdd, 0x6f, 0x5c, 0x47,
|
||||
0x15, 0xcf, 0x5d, 0x7b, 0xed, 0xf5, 0xd8, 0x89, 0xed, 0x49, 0x6c, 0x6e, 0xdc, 0xc4, 0xeb, 0x6c,
|
||||
0x68, 0x70, 0xdb, 0x74, 0xdd, 0x9a, 0x96, 0x86, 0x82, 0x40, 0x5e, 0xdb, 0x69, 0xdd, 0xd8, 0xb1,
|
||||
0x35, 0x9b, 0xa4, 0x6e, 0x8b, 0x68, 0xaf, 0xf7, 0x8e, 0xd7, 0xb7, 0xbe, 0x5f, 0x99, 0xb9, 0xd7,
|
||||
0x1f, 0x12, 0x48, 0x15, 0xa8, 0x02, 0x2a, 0x41, 0x79, 0xa8, 0xca, 0x13, 0x42, 0x08, 0xf5, 0x01,
|
||||
0x1e, 0xe0, 0x0d, 0xfe, 0x85, 0xbe, 0x20, 0xf5, 0x09, 0x55, 0x42, 0x5a, 0xd1, 0xe5, 0x1f, 0x40,
|
||||
0x02, 0x84, 0xf0, 0x03, 0x42, 0xf3, 0x71, 0xe7, 0xce, 0xde, 0xdd, 0x4d, 0x22, 0x7b, 0xdd, 0xbe,
|
||||
0xed, 0x9e, 0x73, 0xe6, 0xfc, 0xce, 0x9c, 0x39, 0x73, 0xe6, 0xcc, 0xb9, 0x03, 0xac, 0xdd, 0x1b,
|
||||
0xb4, 0xec, 0x04, 0x73, 0xbb, 0xf1, 0x16, 0x26, 0x3e, 0x8e, 0x30, 0x9d, 0xdb, 0xc3, 0xbe, 0x1d,
|
||||
0x90, 0x39, 0xc9, 0xb0, 0x42, 0x07, 0x1f, 0x44, 0xd8, 0xa7, 0x4e, 0xe0, 0xd3, 0xa7, 0xad, 0xd0,
|
||||
0xa1, 0x98, 0xec, 0x61, 0x32, 0x17, 0xee, 0xd6, 0x19, 0x8f, 0xb6, 0x0a, 0xcc, 0xed, 0x3d, 0x3b,
|
||||
0x57, 0xc7, 0x3e, 0x26, 0x56, 0x84, 0xed, 0x72, 0x48, 0x82, 0x28, 0x80, 0x37, 0x84, 0xa6, 0x72,
|
||||
0x8b, 0xe0, 0x9b, 0x4a, 0x53, 0x39, 0xdc, 0xad, 0x33, 0x1e, 0x6d, 0x15, 0x28, 0xef, 0x3d, 0x3b,
|
||||
0xf5, 0x74, 0xdd, 0x89, 0x76, 0xe2, 0xad, 0x72, 0x2d, 0xf0, 0xe6, 0xea, 0x41, 0x3d, 0x98, 0xe3,
|
||||
0x0a, 0xb7, 0xe2, 0x6d, 0xfe, 0x8f, 0xff, 0xe1, 0xbf, 0x04, 0xd0, 0xd4, 0x73, 0xa9, 0xc9, 0x9e,
|
||||
0x55, 0xdb, 0x71, 0x7c, 0x4c, 0x0e, 0x53, 0x3b, 0x3d, 0x1c, 0x59, 0x1d, 0xcc, 0x9b, 0x9a, 0xeb,
|
||||
0x36, 0x8a, 0xc4, 0x7e, 0xe4, 0x78, 0xb8, 0x6d, 0xc0, 0xd7, 0x1e, 0x36, 0x80, 0xd6, 0x76, 0xb0,
|
||||
0x67, 0x65, 0xc7, 0x95, 0x8e, 0x0c, 0x30, 0xbe, 0x18, 0xf8, 0x7b, 0x98, 0xb0, 0x09, 0x22, 0x7c,
|
||||
0x3f, 0xc6, 0x34, 0x82, 0x15, 0xd0, 0x17, 0x3b, 0xb6, 0x69, 0xcc, 0x18, 0xb3, 0x43, 0x95, 0x67,
|
||||
0x3e, 0x6e, 0x14, 0xcf, 0x34, 0x1b, 0xc5, 0xbe, 0xbb, 0x2b, 0x4b, 0x47, 0x8d, 0xe2, 0x95, 0x6e,
|
||||
0x48, 0xd1, 0x61, 0x88, 0x69, 0xf9, 0xee, 0xca, 0x12, 0x62, 0x83, 0xe1, 0x4b, 0x60, 0xdc, 0xc6,
|
||||
0xd4, 0x21, 0xd8, 0x5e, 0xd8, 0x58, 0xb9, 0x27, 0xf4, 0x9b, 0x39, 0xae, 0xf1, 0xa2, 0xd4, 0x38,
|
||||
0xbe, 0x94, 0x15, 0x40, 0xed, 0x63, 0xe0, 0x26, 0x18, 0x0c, 0xb6, 0xde, 0xc6, 0xb5, 0x88, 0x9a,
|
||||
0x7d, 0x33, 0x7d, 0xb3, 0xc3, 0xf3, 0x4f, 0x97, 0xd3, 0xc5, 0x53, 0x26, 0xf0, 0x15, 0x93, 0x93,
|
||||
0x2d, 0x23, 0x6b, 0x7f, 0x39, 0x59, 0xb4, 0xca, 0xa8, 0x44, 0x1b, 0x5c, 0x17, 0x5a, 0x50, 0xa2,
|
||||
0xae, 0xf4, 0x9b, 0x1c, 0x80, 0xfa, 0xe4, 0x69, 0x18, 0xf8, 0x14, 0xf7, 0x64, 0xf6, 0x14, 0x8c,
|
||||
0xd5, 0xb8, 0xe6, 0x08, 0xdb, 0x12, 0xd7, 0xcc, 0x1d, 0xc7, 0x7a, 0x53, 0xe2, 0x8f, 0x2d, 0x66,
|
||||
0xd4, 0xa1, 0x36, 0x00, 0x78, 0x07, 0x0c, 0x10, 0x4c, 0x63, 0x37, 0x32, 0xfb, 0x66, 0x8c, 0xd9,
|
||||
0xe1, 0xf9, 0xeb, 0x5d, 0xa1, 0x78, 0x68, 0xb3, 0xe0, 0x2b, 0xef, 0x3d, 0x5b, 0xae, 0x46, 0x56,
|
||||
0x14, 0xd3, 0xca, 0x39, 0x89, 0x34, 0x80, 0xb8, 0x0e, 0x24, 0x75, 0x95, 0xfe, 0x67, 0x80, 0x31,
|
||||
0xdd, 0x4b, 0x7b, 0x0e, 0xde, 0x87, 0x04, 0x0c, 0x12, 0x11, 0x2c, 0xdc, 0x4f, 0xc3, 0xf3, 0xb7,
|
||||
0xca, 0xc7, 0xdd, 0x51, 0xe5, 0xb6, 0xf8, 0xab, 0x0c, 0xb3, 0xe5, 0x92, 0x7f, 0x50, 0x02, 0x04,
|
||||
0xf7, 0x40, 0x81, 0xc8, 0x35, 0xe2, 0x81, 0x34, 0x3c, 0xbf, 0xda, 0x1b, 0x50, 0xa1, 0xb3, 0x32,
|
||||
0xd2, 0x6c, 0x14, 0x0b, 0xc9, 0x3f, 0xa4, 0xb0, 0x4a, 0xbf, 0xca, 0x81, 0xe9, 0xc5, 0x98, 0x46,
|
||||
0x81, 0x87, 0x30, 0x0d, 0x62, 0x52, 0xc3, 0x8b, 0x81, 0x1b, 0x7b, 0xfe, 0x12, 0xde, 0x76, 0x7c,
|
||||
0x27, 0x62, 0x31, 0x3a, 0x03, 0xfa, 0x7d, 0xcb, 0xc3, 0x32, 0x66, 0x46, 0xa4, 0x27, 0xfb, 0x6f,
|
||||
0x5b, 0x1e, 0x46, 0x9c, 0xc3, 0x24, 0x58, 0x88, 0xc8, 0x1d, 0xa0, 0x24, 0xee, 0x1c, 0x86, 0x18,
|
||||
0x71, 0x0e, 0xbc, 0x06, 0x06, 0xb6, 0x03, 0xe2, 0x59, 0x62, 0xf5, 0x86, 0xd2, 0xf5, 0xb8, 0xc9,
|
||||
0xa9, 0x48, 0x72, 0xe1, 0xf3, 0x60, 0xd8, 0xc6, 0xb4, 0x46, 0x9c, 0x90, 0x41, 0x9b, 0xfd, 0x5c,
|
||||
0xf8, 0xbc, 0x14, 0x1e, 0x5e, 0x4a, 0x59, 0x48, 0x97, 0x83, 0xd7, 0x41, 0x21, 0x24, 0x4e, 0x40,
|
||||
0x9c, 0xe8, 0xd0, 0xcc, 0xcf, 0x18, 0xb3, 0xf9, 0xca, 0x98, 0x1c, 0x53, 0xd8, 0x90, 0x74, 0xa4,
|
||||
0x24, 0x98, 0xf4, 0xdb, 0x34, 0xf0, 0x37, 0xac, 0x68, 0xc7, 0x1c, 0xe0, 0x08, 0x4a, 0xfa, 0x95,
|
||||
0xea, 0xfa, 0x6d, 0x46, 0x47, 0x4a, 0xa2, 0xf4, 0x17, 0x03, 0x98, 0x59, 0x0f, 0x25, 0xee, 0x85,
|
||||
0x37, 0x41, 0x81, 0x46, 0x2c, 0xe7, 0xd4, 0x0f, 0xa5, 0x7f, 0x9e, 0x4c, 0x54, 0x55, 0x25, 0xfd,
|
||||
0xa8, 0x51, 0x9c, 0x4c, 0x47, 0x24, 0x54, 0xee, 0x1b, 0x35, 0x96, 0x85, 0xdc, 0x3e, 0xde, 0xda,
|
||||
0x09, 0x82, 0x5d, 0xb9, 0xfa, 0x27, 0x08, 0xb9, 0x57, 0x85, 0xa2, 0x14, 0x53, 0x84, 0x9c, 0x24,
|
||||
0xa3, 0x04, 0xa8, 0xf4, 0xdf, 0x5c, 0x76, 0x62, 0xda, 0xa2, 0xbf, 0x05, 0x0a, 0x6c, 0x0b, 0xd9,
|
||||
0x56, 0x64, 0xc9, 0x4d, 0xf0, 0xcc, 0xa3, 0x6d, 0x38, 0xb1, 0x5f, 0xd7, 0x70, 0x64, 0x55, 0xa0,
|
||||
0x74, 0x05, 0x48, 0x69, 0x48, 0x69, 0x85, 0x07, 0xa0, 0x9f, 0x86, 0xb8, 0x26, 0xe7, 0x7b, 0xef,
|
||||
0x04, 0xd1, 0xde, 0x65, 0x0e, 0xd5, 0x10, 0xd7, 0xd2, 0x60, 0x64, 0xff, 0x10, 0x47, 0x84, 0xef,
|
||||
0x18, 0x60, 0x80, 0xf2, 0xbc, 0x20, 0x73, 0xc9, 0xe6, 0x29, 0x80, 0x67, 0xf2, 0x8e, 0xf8, 0x8f,
|
||||
0x24, 0x6e, 0xe9, 0x5f, 0x39, 0x70, 0xa5, 0xdb, 0xd0, 0xc5, 0xc0, 0xb7, 0xc5, 0x22, 0xac, 0xc8,
|
||||
0x7d, 0x25, 0x22, 0xeb, 0x79, 0x7d, 0x5f, 0x1d, 0x35, 0x8a, 0x8f, 0x3f, 0x54, 0x81, 0xb6, 0x01,
|
||||
0xbf, 0xae, 0xa6, 0x2c, 0x36, 0xe9, 0x95, 0x56, 0xc3, 0x8e, 0x1a, 0xc5, 0x51, 0x35, 0xac, 0xd5,
|
||||
0x56, 0xb8, 0x07, 0xa0, 0x6b, 0xd1, 0xe8, 0x0e, 0xb1, 0x7c, 0x2a, 0xd4, 0x3a, 0x1e, 0x96, 0x9e,
|
||||
0x7b, 0xf2, 0xd1, 0x82, 0x82, 0x8d, 0xa8, 0x4c, 0x49, 0x48, 0xb8, 0xda, 0xa6, 0x0d, 0x75, 0x40,
|
||||
0x60, 0x39, 0x83, 0x60, 0x8b, 0xaa, 0x34, 0xa0, 0xe5, 0x70, 0x46, 0x45, 0x92, 0x0b, 0x9f, 0x00,
|
||||
0x83, 0x1e, 0xa6, 0xd4, 0xaa, 0x63, 0xbe, 0xf7, 0x87, 0xd2, 0x43, 0x71, 0x4d, 0x90, 0x51, 0xc2,
|
||||
0x2f, 0xfd, 0xdb, 0x00, 0x97, 0xba, 0x79, 0x6d, 0xd5, 0xa1, 0x11, 0xfc, 0x4e, 0x5b, 0xd8, 0x97,
|
||||
0x1f, 0x6d, 0x86, 0x6c, 0x34, 0x0f, 0x7a, 0x95, 0x4a, 0x12, 0x8a, 0x16, 0xf2, 0xfb, 0x20, 0xef,
|
||||
0x44, 0xd8, 0x4b, 0x4e, 0x4b, 0xd4, 0xfb, 0xb0, 0xab, 0x9c, 0x95, 0xf0, 0xf9, 0x15, 0x06, 0x84,
|
||||
0x04, 0x5e, 0xe9, 0xa3, 0x1c, 0xb8, 0xdc, 0x6d, 0x08, 0xcb, 0xe3, 0x94, 0x39, 0x3b, 0x74, 0x63,
|
||||
0x62, 0xb9, 0x32, 0xd8, 0x94, 0xb3, 0x37, 0x38, 0x15, 0x49, 0x2e, 0xcb, 0x9d, 0xd4, 0xf1, 0xeb,
|
||||
0xb1, 0x6b, 0x11, 0x19, 0x49, 0x6a, 0xc2, 0x55, 0x49, 0x47, 0x4a, 0x02, 0x96, 0x01, 0xa0, 0x3b,
|
||||
0x01, 0x89, 0x38, 0x06, 0xaf, 0x70, 0x86, 0x2a, 0xe7, 0x58, 0x46, 0xa8, 0x2a, 0x2a, 0xd2, 0x24,
|
||||
0xd8, 0x41, 0xb2, 0xeb, 0xf8, 0xb6, 0x5c, 0x70, 0xb5, 0x77, 0x6f, 0x39, 0xbe, 0x8d, 0x38, 0x87,
|
||||
0xe1, 0xbb, 0x0e, 0x8d, 0x18, 0x45, 0xae, 0x76, 0x8b, 0xc3, 0xb9, 0xa4, 0x92, 0x60, 0xf8, 0x35,
|
||||
0x96, 0x60, 0x03, 0xe2, 0x60, 0x6a, 0x0e, 0xa4, 0xf8, 0x8b, 0x8a, 0x8a, 0x34, 0x89, 0xd2, 0x5f,
|
||||
0xfb, 0xbb, 0xc7, 0x07, 0x4b, 0x20, 0xf0, 0x2a, 0xc8, 0xd7, 0x49, 0x10, 0x87, 0xd2, 0x4b, 0xca,
|
||||
0xdb, 0x2f, 0x31, 0x22, 0x12, 0x3c, 0xf8, 0x3d, 0x90, 0xf7, 0xe5, 0x84, 0x59, 0x04, 0xbd, 0xda,
|
||||
0xfb, 0x65, 0xe6, 0xde, 0x4a, 0xd1, 0x85, 0x23, 0x05, 0x28, 0x7c, 0x0e, 0xe4, 0x69, 0x2d, 0x08,
|
||||
0xb1, 0x74, 0xe2, 0x74, 0x22, 0x54, 0x65, 0xc4, 0xa3, 0x46, 0xf1, 0x6c, 0xa2, 0x8e, 0x13, 0x90,
|
||||
0x10, 0x86, 0x3f, 0x32, 0x40, 0x41, 0x1e, 0x17, 0xd4, 0x1c, 0xe4, 0xe1, 0xf9, 0x5a, 0xef, 0xed,
|
||||
0x96, 0x65, 0x6f, 0xba, 0x66, 0x92, 0x40, 0x91, 0x02, 0x87, 0x3f, 0x30, 0x00, 0xa8, 0xa9, 0xb3,
|
||||
0xcb, 0x1c, 0xe2, 0x3e, 0xec, 0xd9, 0x56, 0xd1, 0x4e, 0x45, 0x11, 0x08, 0x69, 0xa9, 0xa4, 0xa1,
|
||||
0xc2, 0x2a, 0x98, 0x08, 0x09, 0xe6, 0xba, 0xef, 0xfa, 0xbb, 0x7e, 0xb0, 0xef, 0xdf, 0x74, 0xb0,
|
||||
0x6b, 0x53, 0x13, 0xcc, 0x18, 0xb3, 0x85, 0xca, 0x65, 0x69, 0xff, 0xc4, 0x46, 0x27, 0x21, 0xd4,
|
||||
0x79, 0x6c, 0xe9, 0xdd, 0xbe, 0x6c, 0xad, 0x95, 0x3d, 0x2f, 0xe0, 0xfb, 0x62, 0xf2, 0x22, 0x0f,
|
||||
0x53, 0xd3, 0xe0, 0x0b, 0xf1, 0x46, 0xef, 0x17, 0x42, 0xe5, 0xfa, 0xf4, 0x90, 0x56, 0x24, 0x8a,
|
||||
0x34, 0x13, 0xe0, 0x07, 0x06, 0x38, 0x6b, 0xd5, 0x6a, 0x38, 0x8c, 0xb0, 0x2d, 0xb6, 0x71, 0xee,
|
||||
0x74, 0xa3, 0x7a, 0x42, 0x1a, 0x74, 0x76, 0x41, 0x47, 0x45, 0xad, 0x46, 0xc0, 0x17, 0xc1, 0x39,
|
||||
0x1a, 0x05, 0x04, 0xdb, 0x49, 0x04, 0xc9, 0xec, 0x02, 0x9b, 0x8d, 0xe2, 0xb9, 0x6a, 0x0b, 0x07,
|
||||
0x65, 0x24, 0x4b, 0x9f, 0xe4, 0x41, 0xf1, 0x21, 0x11, 0xfa, 0x08, 0x45, 0xef, 0x35, 0x30, 0xc0,
|
||||
0x67, 0x6a, 0x73, 0x87, 0x14, 0xb4, 0xa3, 0x9e, 0x53, 0x91, 0xe4, 0xb2, 0xe3, 0x89, 0xe1, 0xb3,
|
||||
0xe3, 0xa9, 0x8f, 0x0b, 0xaa, 0xe3, 0xa9, 0x2a, 0xc8, 0x28, 0xe1, 0xc3, 0x79, 0x00, 0x6c, 0x1c,
|
||||
0x12, 0xcc, 0x32, 0x92, 0x6d, 0x0e, 0x72, 0x69, 0xb5, 0x3e, 0x4b, 0x8a, 0x83, 0x34, 0x29, 0x78,
|
||||
0x13, 0xc0, 0xe4, 0x9f, 0x13, 0xf8, 0xaf, 0x5a, 0xc4, 0x77, 0xfc, 0xba, 0x59, 0xe0, 0x66, 0x4f,
|
||||
0xb2, 0xd3, 0x76, 0xa9, 0x8d, 0x8b, 0x3a, 0x8c, 0x80, 0x7b, 0x60, 0x40, 0x5c, 0xa3, 0x79, 0xde,
|
||||
0xe8, 0xe1, 0x8e, 0xbb, 0x67, 0xb9, 0x8e, 0xcd, 0xa1, 0x2a, 0x80, 0xbb, 0x87, 0xa3, 0x20, 0x89,
|
||||
0x06, 0xdf, 0x33, 0xc0, 0x08, 0x8d, 0xb7, 0x88, 0x94, 0xa6, 0x3c, 0xab, 0x0f, 0xcf, 0xdf, 0xe9,
|
||||
0x15, 0x7c, 0x55, 0xd3, 0x5d, 0x19, 0x6b, 0x36, 0x8a, 0x23, 0x3a, 0x05, 0xb5, 0x60, 0xc3, 0x3f,
|
||||
0x1a, 0xc0, 0xb4, 0x6c, 0x11, 0xfa, 0x96, 0xbb, 0x41, 0x1c, 0x3f, 0xc2, 0x44, 0x5c, 0x88, 0xc4,
|
||||
0xf1, 0xd1, 0xc3, 0x5a, 0x31, 0x7b, 0xcf, 0xaa, 0xcc, 0xc8, 0x95, 0x36, 0x17, 0xba, 0x58, 0x80,
|
||||
0xba, 0xda, 0x56, 0xfa, 0x8f, 0x91, 0x4d, 0x2d, 0xda, 0x2c, 0xab, 0x35, 0xcb, 0xc5, 0x70, 0x09,
|
||||
0x8c, 0xb1, 0xea, 0x17, 0xe1, 0xd0, 0x75, 0x6a, 0x16, 0xe5, 0xb7, 0x1f, 0x11, 0xdd, 0xea, 0x1a,
|
||||
0x5e, 0xcd, 0xf0, 0x51, 0xdb, 0x08, 0xf8, 0x0a, 0x80, 0xa2, 0x2c, 0x6c, 0xd1, 0x23, 0x2a, 0x01,
|
||||
0x55, 0xe0, 0x55, 0xdb, 0x24, 0x50, 0x87, 0x51, 0x70, 0x11, 0x8c, 0xbb, 0xd6, 0x16, 0x76, 0xab,
|
||||
0xd8, 0xc5, 0xb5, 0x28, 0x20, 0x5c, 0x95, 0xb8, 0x1f, 0x4e, 0x34, 0x1b, 0xc5, 0xf1, 0xd5, 0x2c,
|
||||
0x13, 0xb5, 0xcb, 0x97, 0xae, 0x64, 0xf7, 0xb2, 0x3e, 0x71, 0x51, 0x6c, 0x7f, 0x98, 0x03, 0x53,
|
||||
0xdd, 0x83, 0x02, 0x7e, 0x5f, 0x95, 0xc6, 0xa2, 0xe2, 0x7b, 0xed, 0x14, 0x42, 0x4f, 0x5e, 0x07,
|
||||
0x40, 0xfb, 0x55, 0x00, 0x1e, 0xb2, 0xf3, 0xda, 0x72, 0x93, 0x6b, 0xff, 0xe6, 0x69, 0xa0, 0x33,
|
||||
0xfd, 0x95, 0x21, 0x51, 0x05, 0x58, 0x2e, 0x3f, 0xf4, 0x2d, 0x17, 0x97, 0x3e, 0x6a, 0xbb, 0xda,
|
||||
0xa6, 0x9b, 0x15, 0xfe, 0xd8, 0x00, 0xa3, 0x41, 0x88, 0xfd, 0x85, 0x8d, 0x95, 0x7b, 0x5f, 0x15,
|
||||
0x9b, 0x56, 0x3a, 0x68, 0xe5, 0xf8, 0x26, 0xb2, 0xfb, 0xb5, 0xd0, 0xb5, 0x41, 0x82, 0x90, 0x56,
|
||||
0xce, 0x37, 0x1b, 0xc5, 0xd1, 0xf5, 0x56, 0x14, 0x94, 0x85, 0x2d, 0x79, 0x60, 0x62, 0xf9, 0x20,
|
||||
0xc2, 0xc4, 0xb7, 0xdc, 0xa5, 0xa0, 0x16, 0x7b, 0xd8, 0x8f, 0x84, 0x8d, 0x99, 0x76, 0x81, 0xf1,
|
||||
0x88, 0xed, 0x82, 0xcb, 0xa0, 0x2f, 0x26, 0xae, 0x8c, 0xda, 0x61, 0xd5, 0x04, 0x43, 0xab, 0x88,
|
||||
0xd1, 0x4b, 0x57, 0x40, 0x3f, 0xb3, 0x13, 0x5e, 0x04, 0x7d, 0xc4, 0xda, 0xe7, 0x5a, 0x47, 0x2a,
|
||||
0x83, 0x4c, 0x04, 0x59, 0xfb, 0x88, 0xd1, 0x4a, 0xff, 0x98, 0x01, 0xa3, 0x99, 0xb9, 0xc0, 0x29,
|
||||
0x90, 0x53, 0x9d, 0x35, 0x20, 0x95, 0xe6, 0x56, 0x96, 0x50, 0xce, 0xb1, 0xe1, 0x0b, 0x2a, 0xbb,
|
||||
0x0a, 0xd0, 0xa2, 0x3a, 0x2c, 0x38, 0x95, 0x95, 0x65, 0xa9, 0x3a, 0x66, 0x48, 0x92, 0x1e, 0x99,
|
||||
0x0d, 0x78, 0x5b, 0xee, 0x0a, 0x61, 0x03, 0xde, 0x46, 0x8c, 0x76, 0xdc, 0x5e, 0x49, 0xd2, 0xac,
|
||||
0xc9, 0x3f, 0x42, 0xb3, 0x66, 0xe0, 0x81, 0xcd, 0x9a, 0xab, 0x20, 0x1f, 0x39, 0x91, 0x8b, 0xf9,
|
||||
0x49, 0xa5, 0x15, 0xc3, 0x77, 0x18, 0x11, 0x09, 0x1e, 0xc4, 0x60, 0xd0, 0xc6, 0xdb, 0x56, 0xec,
|
||||
0x46, 0xfc, 0x50, 0x1a, 0x9e, 0xff, 0xd6, 0xc9, 0xa2, 0x47, 0x34, 0x33, 0x96, 0x84, 0x4a, 0x94,
|
||||
0xe8, 0x86, 0x8f, 0x83, 0x41, 0xcf, 0x3a, 0x70, 0xbc, 0xd8, 0xe3, 0x15, 0xa3, 0x21, 0xc4, 0xd6,
|
||||
0x04, 0x09, 0x25, 0x3c, 0x96, 0x04, 0xf1, 0x41, 0xcd, 0x8d, 0xa9, 0xb3, 0x87, 0x25, 0x53, 0x96,
|
||||
0x74, 0x2a, 0x09, 0x2e, 0x67, 0xf8, 0xa8, 0x6d, 0x04, 0x07, 0x73, 0x7c, 0x3e, 0x78, 0x58, 0x03,
|
||||
0x13, 0x24, 0x94, 0xf0, 0x5a, 0xc1, 0xa4, 0xfc, 0x48, 0x37, 0x30, 0x39, 0xb8, 0x6d, 0x04, 0x7c,
|
||||
0x0a, 0x0c, 0x79, 0xd6, 0xc1, 0x2a, 0xf6, 0xeb, 0xd1, 0x8e, 0x79, 0x76, 0xc6, 0x98, 0xed, 0xab,
|
||||
0x9c, 0x6d, 0x36, 0x8a, 0x43, 0x6b, 0x09, 0x11, 0xa5, 0x7c, 0x2e, 0xec, 0xf8, 0x52, 0xf8, 0x9c,
|
||||
0x26, 0x9c, 0x10, 0x51, 0xca, 0x67, 0x95, 0x49, 0x68, 0x45, 0x6c, 0x5f, 0x99, 0xa3, 0xad, 0x17,
|
||||
0xe7, 0x0d, 0x41, 0x46, 0x09, 0x1f, 0xce, 0x82, 0x82, 0x67, 0x1d, 0xf0, 0x3b, 0xa5, 0x39, 0xc6,
|
||||
0xd5, 0xf2, 0x86, 0xe2, 0x9a, 0xa4, 0x21, 0xc5, 0xe5, 0x92, 0x8e, 0x2f, 0x24, 0xc7, 0x35, 0x49,
|
||||
0x49, 0x43, 0x8a, 0xcb, 0xe2, 0x37, 0xf6, 0x9d, 0xfb, 0x31, 0x16, 0xc2, 0x90, 0x7b, 0x46, 0xc5,
|
||||
0xef, 0xdd, 0x94, 0x85, 0x74, 0x39, 0x76, 0xa7, 0xf3, 0x62, 0x37, 0x72, 0x42, 0x17, 0xaf, 0x6f,
|
||||
0x9b, 0xe7, 0xb9, 0xff, 0x79, 0x29, 0xbf, 0xa6, 0xa8, 0x48, 0x93, 0x80, 0x6f, 0x81, 0x7e, 0xec,
|
||||
0xc7, 0x9e, 0x79, 0x81, 0x1f, 0xdf, 0x27, 0x8d, 0x3e, 0xb5, 0x5f, 0x96, 0xfd, 0xd8, 0x43, 0x5c,
|
||||
0x33, 0x7c, 0x01, 0x9c, 0xf5, 0xac, 0x03, 0x96, 0x04, 0x30, 0x89, 0xd8, 0x45, 0x73, 0x82, 0xcf,
|
||||
0x7b, 0x9c, 0x15, 0xb1, 0x6b, 0x3a, 0x03, 0xb5, 0xca, 0xf1, 0x81, 0x8e, 0xaf, 0x0d, 0x9c, 0xd4,
|
||||
0x06, 0xea, 0x0c, 0xd4, 0x2a, 0xc7, 0x9c, 0x4c, 0xf0, 0xfd, 0xd8, 0x21, 0xd8, 0x36, 0xbf, 0xc4,
|
||||
0xeb, 0x5e, 0xd9, 0xdf, 0x15, 0x34, 0xa4, 0xb8, 0xf0, 0x7e, 0xd2, 0x72, 0x30, 0xf9, 0xe6, 0xdb,
|
||||
0xe8, 0x59, 0xea, 0x5e, 0x27, 0x0b, 0x84, 0x58, 0x87, 0xe2, 0x54, 0xd1, 0x9b, 0x0d, 0xd0, 0x07,
|
||||
0x79, 0xcb, 0x75, 0xd7, 0xb7, 0xcd, 0x8b, 0xdc, 0xe3, 0x3d, 0x3c, 0x2d, 0x54, 0x86, 0x59, 0x60,
|
||||
0xfa, 0x91, 0x80, 0x61, 0x78, 0x81, 0xcf, 0x62, 0x61, 0xea, 0xd4, 0xf0, 0xd6, 0x99, 0x7e, 0x24,
|
||||
0x60, 0xf8, 0xfc, 0xfc, 0xc3, 0xf5, 0x6d, 0xf3, 0xb1, 0xd3, 0x9b, 0x1f, 0xd3, 0x8f, 0x04, 0x0c,
|
||||
0xb4, 0x41, 0x9f, 0x1f, 0x44, 0xe6, 0xa5, 0x5e, 0x9f, 0xbd, 0xfc, 0x34, 0xb9, 0x1d, 0x44, 0x88,
|
||||
0xa9, 0x87, 0x3f, 0x35, 0x00, 0x08, 0xd3, 0x48, 0xbc, 0x7c, 0xd2, 0x16, 0x40, 0x06, 0xad, 0x9c,
|
||||
0x46, 0xef, 0xb2, 0x1f, 0x91, 0xc3, 0xf4, 0x5e, 0xa3, 0x45, 0xb9, 0x66, 0x00, 0xfc, 0xa5, 0x01,
|
||||
0x2e, 0xe8, 0xe5, 0xae, 0xb2, 0x6c, 0x9a, 0xfb, 0x61, 0xbd, 0x87, 0x81, 0x5c, 0x09, 0x02, 0xb7,
|
||||
0x62, 0x36, 0x1b, 0xc5, 0x0b, 0x0b, 0x1d, 0x00, 0x51, 0x47, 0x33, 0xe0, 0x6f, 0x0d, 0x30, 0x2e,
|
||||
0xb3, 0xa3, 0x66, 0x5c, 0x91, 0xbb, 0xed, 0xad, 0x1e, 0xba, 0x2d, 0x0b, 0x21, 0xbc, 0xa7, 0xbe,
|
||||
0x32, 0xb6, 0xf1, 0x51, 0xbb, 0x55, 0xf0, 0x0f, 0x06, 0x18, 0xb1, 0x71, 0x88, 0x7d, 0x1b, 0xfb,
|
||||
0x35, 0x66, 0xe6, 0xcc, 0x49, 0xfb, 0x0a, 0x59, 0x33, 0x97, 0x34, 0xed, 0xc2, 0xc2, 0xb2, 0xb4,
|
||||
0x70, 0x44, 0x67, 0x1d, 0x35, 0x8a, 0x93, 0xe9, 0x50, 0x9d, 0x83, 0x5a, 0x0c, 0x84, 0x3f, 0x33,
|
||||
0xc0, 0x68, 0xea, 0x76, 0x71, 0x40, 0x5c, 0x39, 0x9d, 0x85, 0xe7, 0x25, 0xe8, 0x42, 0x2b, 0x16,
|
||||
0xca, 0x82, 0xc3, 0xdf, 0x19, 0xac, 0xda, 0x4a, 0xee, 0x6a, 0xd4, 0x2c, 0x71, 0x0f, 0xbe, 0xde,
|
||||
0x4b, 0x0f, 0x2a, 0xe5, 0xc2, 0x81, 0xd7, 0xd3, 0x4a, 0x4e, 0x71, 0x8e, 0x1a, 0xc5, 0x09, 0xdd,
|
||||
0x7f, 0x8a, 0x81, 0x74, 0xe3, 0xe0, 0xbb, 0x06, 0x18, 0xc1, 0x69, 0xc1, 0x4c, 0xcd, 0xab, 0x27,
|
||||
0x75, 0x5d, 0xc7, 0xf2, 0x5b, 0x5c, 0xa7, 0x35, 0x16, 0x45, 0x2d, 0xb0, 0xac, 0xf6, 0xc3, 0x07,
|
||||
0x96, 0x17, 0xba, 0xd8, 0xfc, 0x72, 0xef, 0x6a, 0xbf, 0x65, 0xa1, 0x12, 0x25, 0xba, 0xe1, 0x75,
|
||||
0x50, 0xf0, 0x63, 0xd7, 0xb5, 0xb6, 0x5c, 0x6c, 0x3e, 0xce, 0xab, 0x08, 0xd5, 0x5f, 0xbc, 0x2d,
|
||||
0xe9, 0x48, 0x49, 0xc0, 0x6d, 0x30, 0x73, 0x70, 0x4b, 0x3d, 0xbe, 0xe8, 0xd8, 0xc0, 0x33, 0xaf,
|
||||
0x71, 0x2d, 0x53, 0xcd, 0x46, 0x71, 0x72, 0xb3, 0x73, 0x8b, 0xef, 0xa1, 0x3a, 0xe0, 0x1b, 0xe0,
|
||||
0x31, 0x4d, 0x66, 0xd9, 0xdb, 0xc2, 0xb6, 0x8d, 0xed, 0xe4, 0xa2, 0x65, 0x7e, 0x85, 0x43, 0xa8,
|
||||
// ... tail of the old gzipped FileDescriptorProto byte array ...
// 3137 bytes of a gzipped FileDescriptorProto
// ... regenerated gzipped FileDescriptorProto byte array ...
}
func (m *ConversionRequest) Marshal() (dAtA []byte, err error) {
@ -2633,6 +2635,16 @@ func (m *ValidationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if m.OptionalOldSelf != nil {
i--
if *m.OptionalOldSelf {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i--
dAtA[i] = 0x30
}
i -= len(m.FieldPath)
copy(dAtA[i:], m.FieldPath)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath)))
@ -3367,6 +3379,9 @@ func (m *ValidationRule) Size() (n int) {
}
l = len(m.FieldPath)
n += 1 + l + sovGenerated(uint64(l))
if m.OptionalOldSelf != nil {
n += 2
}
return n
}

@ -3845,6 +3860,7 @@ func (this *ValidationRule) String() string {
`MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`,
`Reason:` + valueToStringGenerated(this.Reason) + `,`,
`FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`,
`OptionalOldSelf:` + valueToStringGenerated(this.OptionalOldSelf) + `,`,
`}`,
}, "")
return s
@ -9008,6 +9024,27 @@ func (m *ValidationRule) Unmarshal(dAtA []byte) error {
}
m.FieldPath = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 6:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field OptionalOldSelf", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
b := bool(v != 0)
m.OptionalOldSelf = &b
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
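For orientation, the hunks above encode the new optionalOldSelf field as protobuf field number 6 with wire type 0 (varint): the tag byte is (6 << 3) | 0 = 0x30, followed by 0 or 1 for the bool, which is exactly what the varint loop in the Unmarshal case decodes. A minimal, self-contained sketch of that framing follows; the helper name is illustrative and not part of the vendored package, and the generated marshaler actually writes the same bytes back-to-front into a pre-sized buffer.

package main

import "fmt"

// encodeBoolField appends a protobuf bool field with the given field number,
// mirroring the bytes the generated code produces for OptionalOldSelf.
func encodeBoolField(buf []byte, fieldNumber int, v bool) []byte {
    tag := byte(fieldNumber<<3 | 0) // wire type 0 = varint
    val := byte(0)
    if v {
        val = 1
    }
    return append(buf, tag, val)
}

func main() {
    b := encodeBoolField(nil, 6, true)
    fmt.Printf("% x\n", b) // 30 01 — tag 0x30 for field 6, varint 1 for true
}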
30 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto generated vendored
@ -658,6 +658,18 @@ message ValidationRule {
// - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values
// are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with
// non-intersecting keys are appended, retaining their partial order.
//
// If `rule` makes use of the `oldSelf` variable it is implicitly a
// `transition rule`.
//
// By default, the `oldSelf` variable is the same type as `self`.
// When `optionalOldSelf` is true, the `oldSelf` variable is a CEL optional
// variable whose value() is the same type as `self`.
// See the documentation for the `optionalOldSelf` field for details.
//
// Transition rules by default are applied only on UPDATE requests and are
// skipped if an old value could not be found. You can opt a transition
// rule into unconditional evaluation by setting `optionalOldSelf` to true.
optional string rule = 1;

// Message represents the message displayed when validation fails. The message is required if the Rule contains
@ -698,6 +710,24 @@ message ValidationRule {
// e.g. for attribute `foo.34$` appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']`
// +optional
optional string fieldPath = 5;

// optionalOldSelf is used to opt a transition rule into evaluation
// even when the object is first created, or if the old object is
// missing the value.
//
// When enabled `oldSelf` will be a CEL optional whose value will be
// `None` if there is no old value, or when the object is initially created.
//
// You may check for presence of oldSelf using `oldSelf.hasValue()` and
// unwrap it after checking using `oldSelf.value()`. Check the CEL
// documentation for Optional types for more information:
// https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes
//
// May not be set unless `oldSelf` is used in `rule`.
//
// +featureGate=CRDValidationRatcheting
// +optional
optional bool optionalOldSelf = 6;
}
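To make the comment block above concrete: a plain transition rule is evaluated only on UPDATE and is skipped when there is no old value, while a rule written against the optional form can run on CREATE as well. The two expressions below are illustrative only (they assume `self` is a numeric field) and are not taken from this diff.

package main

import "fmt"

func main() {
    // Implicit transition rule: references oldSelf directly, so it is
    // skipped whenever no old value exists (e.g. on CREATE).
    classic := "self >= oldSelf"

    // With optionalOldSelf: true, oldSelf is a CEL optional; the rule checks
    // presence with hasValue() and unwraps with value(), so it can also
    // evaluate when the object is first created.
    ratcheted := "!oldSelf.hasValue() || self >= oldSelf.value()"

    fmt.Println(classic)
    fmt.Println(ratcheted)
}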
// WebhookClientConfig contains the information to make a TLS connection with the webhook.
31 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go generated vendored
@ -249,6 +249,19 @@ type ValidationRule struct {
// - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values
// are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with
// non-intersecting keys are appended, retaining their partial order.
//
// If `rule` makes use of the `oldSelf` variable it is implicitly a
// `transition rule`.
//
// By default, the `oldSelf` variable is the same type as `self`.
// When `optionalOldSelf` is true, the `oldSelf` variable is a CEL optional
// variable whose value() is the same type as `self`.
// See the documentation for the `optionalOldSelf` field for details.
//
// Transition rules by default are applied only on UPDATE requests and are
// skipped if an old value could not be found. You can opt a transition
// rule into unconditional evaluation by setting `optionalOldSelf` to true.
//
Rule string `json:"rule" protobuf:"bytes,1,opt,name=rule"`
// Message represents the message displayed when validation fails. The message is required if the Rule contains
// line breaks. The message must not contain line breaks.
@ -285,6 +298,24 @@ type ValidationRule struct {
// e.g. for attribute `foo.34$` appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']`
// +optional
FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,5,opt,name=fieldPath"`

// optionalOldSelf is used to opt a transition rule into evaluation
// even when the object is first created, or if the old object is
// missing the value.
//
// When enabled `oldSelf` will be a CEL optional whose value will be
// `None` if there is no old value, or when the object is initially created.
//
// You may check for presence of oldSelf using `oldSelf.hasValue()` and
// unwrap it after checking using `oldSelf.value()`. Check the CEL
// documentation for Optional types for more information:
// https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes
//
// May not be set unless `oldSelf` is used in `rule`.
//
// +featureGate=CRDValidationRatcheting
// +optional
OptionalOldSelf *bool `json:"optionalOldSelf,omitempty" protobuf:"bytes,6,opt,name=optionalOldSelf"`
}
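As a usage sketch for the Go type above (illustrative only; the CEL expression assumes a numeric field, and the surrounding CRD schema is omitted), a rule that ratchets a value so it may never decrease while still being checked on CREATE could be built like this:

package main

import (
    "fmt"

    apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
)

func main() {
    optional := true
    rule := apiextensionsv1.ValidationRule{
        // Runs even on CREATE because oldSelf becomes a CEL optional when
        // OptionalOldSelf is set (CRDValidationRatcheting feature gate).
        Rule:            "!oldSelf.hasValue() || self >= oldSelf.value()",
        Message:         "value may not decrease",
        OptionalOldSelf: &optional,
    }
    fmt.Printf("%+v\n", rule)
}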
// JSON represents any valid JSON value.
@ -1261,6 +1261,7 @@ func autoConvert_v1_ValidationRule_To_apiextensions_ValidationRule(in *Validatio
out.MessageExpression = in.MessageExpression
out.Reason = (*apiextensions.FieldValueErrorReason)(unsafe.Pointer(in.Reason))
out.FieldPath = in.FieldPath
out.OptionalOldSelf = (*bool)(unsafe.Pointer(in.OptionalOldSelf))
return nil
}

@ -1275,6 +1276,7 @@ func autoConvert_apiextensions_ValidationRule_To_v1_ValidationRule(in *apiextens
out.MessageExpression = in.MessageExpression
out.Reason = (*FieldValueErrorReason)(unsafe.Pointer(in.Reason))
out.FieldPath = in.FieldPath
out.OptionalOldSelf = (*bool)(unsafe.Pointer(in.OptionalOldSelf))
return nil
}

@ -619,6 +619,11 @@ func (in *ValidationRule) DeepCopyInto(out *ValidationRule) {
*out = new(FieldValueErrorReason)
**out = **in
}
if in.OptionalOldSelf != nil {
in, out := &in.OptionalOldSelf, &out.OptionalOldSelf
*out = new(bool)
**out = **in
}
return
}

@ -536,6 +536,11 @@ func (in *ValidationRule) DeepCopyInto(out *ValidationRule) {
*out = new(FieldValueErrorReason)
**out = **in
}
if in.OptionalOldSelf != nil {
in, out := &in.OptionalOldSelf, &out.OptionalOldSelf
*out = new(bool)
**out = **in
}
return
}
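The DeepCopyInto hunks above allocate a fresh *bool for OptionalOldSelf instead of copying the pointer, so the copy cannot alias the source. A small illustrative sketch of the difference, using a local type rather than the vendored one:

package main

import "fmt"

type rule struct {
    OptionalOldSelf *bool
}

func main() {
    v := true
    src := rule{OptionalOldSelf: &v}

    shallow := src // copies the pointer: both structs share one bool
    deep := rule{}
    if src.OptionalOldSelf != nil { // same guard as the generated DeepCopyInto
        b := *src.OptionalOldSelf
        deep.OptionalOldSelf = &b
    }

    *src.OptionalOldSelf = false
    fmt.Println(*shallow.OptionalOldSelf, *deep.OptionalOldSelf) // false true
}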
37 vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go generated vendored
@ -22,14 +22,15 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// SetStatusCondition sets the corresponding condition in conditions to newCondition.
// SetStatusCondition sets the corresponding condition in conditions to newCondition and returns true
// if the conditions are changed by this call.
// conditions must be non-nil.
// 1. if the condition of the specified type already exists (all fields of the existing condition are updated to
// newCondition, LastTransitionTime is set to now if the new status differs from the old status)
// 2. if a condition of the specified type does not exist (LastTransitionTime is set to now() if unset, and newCondition is appended)
func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) {
func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) (changed bool) {
if conditions == nil {
return
return false
}
existingCondition := FindStatusCondition(*conditions, newCondition.Type)
if existingCondition == nil {
@ -37,7 +38,7 @@ func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Cond
newCondition.LastTransitionTime = metav1.NewTime(time.Now())
}
*conditions = append(*conditions, newCondition)
return
return true
}

if existingCondition.Status != newCondition.Status {
@ -47,18 +48,31 @@ func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Cond
} else {
existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
}
changed = true
}

existingCondition.Reason = newCondition.Reason
existingCondition.Message = newCondition.Message
existingCondition.ObservedGeneration = newCondition.ObservedGeneration
if existingCondition.Reason != newCondition.Reason {
existingCondition.Reason = newCondition.Reason
changed = true
}
if existingCondition.Message != newCondition.Message {
existingCondition.Message = newCondition.Message
changed = true
}
if existingCondition.ObservedGeneration != newCondition.ObservedGeneration {
existingCondition.ObservedGeneration = newCondition.ObservedGeneration
changed = true
}

return changed
}

// RemoveStatusCondition removes the corresponding conditionType from conditions.
// RemoveStatusCondition removes the corresponding conditionType from conditions if present. Returns
// true if it was present and got removed.
// conditions must be non-nil.
func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) {
func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) (removed bool) {
if conditions == nil || len(*conditions) == 0 {
return
return false
}
newConditions := make([]metav1.Condition, 0, len(*conditions)-1)
for _, condition := range *conditions {
@ -67,7 +81,10 @@ func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string)
}
}

removed = len(*conditions) != len(newConditions)
*conditions = newConditions

return removed
}
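With the change above, SetStatusCondition and RemoveStatusCondition now report whether they actually modified the slice, which lets a caller skip a status update when nothing changed. A hedged usage sketch (the condition values are illustrative, not taken from ceph-csi code):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    var conditions []metav1.Condition

    ready := metav1.Condition{
        Type:    "Ready",
        Status:  metav1.ConditionTrue,
        Reason:  "Provisioned",
        Message: "volume is ready",
    }

    changed := meta.SetStatusCondition(&conditions, ready)
    fmt.Println("changed:", changed) // true on first call

    // Setting an identical condition again reports no change, so a
    // controller could skip writing status back to the API server.
    changed = meta.SetStatusCondition(&conditions, ready)
    fmt.Println("changed:", changed) // false

    removed := meta.RemoveStatusCondition(&conditions, "Ready")
    fmt.Println("removed:", removed) // true
}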
// FindStatusCondition finds the conditionType in conditions.
38 vendor/k8s.io/apimachinery/pkg/api/resource/amount.go generated vendored
@ -203,6 +203,44 @@ func (a *int64Amount) Sub(b int64Amount) bool {
return a.Add(int64Amount{value: -b.value, scale: b.scale})
}

// Mul multiplies the provided b to the current amount, or
// returns false if overflow or underflow would result.
func (a *int64Amount) Mul(b int64) bool {
switch {
case a.value == 0:
return true
case b == 0:
a.value = 0
a.scale = 0
return true
case a.scale == 0:
c, ok := int64Multiply(a.value, b)
if !ok {
return false
}
a.value = c
case a.scale > 0:
c, ok := int64Multiply(a.value, b)
if !ok {
return false
}
if _, ok = positiveScaleInt64(c, a.scale); !ok {
return false
}
a.value = c
default:
c, ok := int64Multiply(a.value, b)
if !ok {
return false
}
if _, ok = negativeScaleInt64(c, -a.scale); !ok {
return false
}
a.value = c
}
return true
}
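int64Amount.Mul above leans on the package's internal overflow helpers (int64Multiply and the scale helpers), returning false instead of silently wrapping. The vendored helper is not shown in this hunk and may be implemented differently; the following is only a standalone sketch of one common way to detect int64 multiplication overflow:

package main

import "fmt"

// mulInt64Checked multiplies a and b and reports whether the product
// fits in an int64 without wrapping.
func mulInt64Checked(a, b int64) (int64, bool) {
    if a == 0 || b == 0 {
        return 0, true
    }
    c := a * b
    // If the multiplication wrapped, dividing back does not recover a.
    if c/b != a {
        return 0, false
    }
    return c, true
}

func main() {
    fmt.Println(mulInt64Checked(1<<40, 1<<20)) // 1152921504606846976 true
    fmt.Println(mulInt64Checked(1<<40, 1<<30)) // 0 false (would overflow int64)
}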
// AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision
// was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6.
func (a int64Amount) AsScale(scale Scale) (int64Amount, bool) {
10 vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go generated vendored
@ -592,6 +592,16 @@ func (q *Quantity) Sub(y Quantity) {
q.ToDec().d.Dec.Sub(q.d.Dec, y.AsDec())
}

// Mul multiplies the provided y to the current value.
// It will return false if the result is inexact. Otherwise, it will return true.
func (q *Quantity) Mul(y int64) bool {
q.s = ""
if q.d.Dec == nil && q.i.Mul(y) {
return true
}
return q.ToDec().d.Dec.Mul(q.d.Dec, inf.NewDec(y, inf.Scale(0))).UnscaledBig().IsInt64()
}
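The new Quantity.Mul keeps the fast integer path when possible and reports false when the product can no longer be represented exactly as an int64-backed value. A hedged usage sketch (quantities chosen for illustration; expected outputs noted in comments):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    q := resource.MustParse("1500m") // 1.5 in decimal notation
    exact := q.Mul(4)
    fmt.Println(q.String(), exact) // expected: 6 true

    big := resource.MustParse("5000000000000000000") // near the int64 limit
    exact = big.Mul(4)
    fmt.Println(exact) // expected: false, the product overflows the exact path
}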
// Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the
// quantity is greater than y.
func (q *Quantity) Cmp(y Quantity) int {
2 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go generated vendored
@ -173,7 +173,7 @@ func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]s
if str, ok := v.(string); ok {
strMap[k] = str
} else {
return nil, false, fmt.Errorf("%v accessor error: contains non-string key in the map: %v is of the type %T, expected string", jsonPath(fields), v, v)
return nil, false, fmt.Errorf("%v accessor error: contains non-string value in the map under key %q: %v is of the type %T, expected string", jsonPath(fields), k, v, v)
}
}
return strMap, true, nil
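The reworded error above now names the offending key. A hedged usage sketch showing when NestedStringMap reports such an error (the example object is illustrative):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
    obj := map[string]interface{}{
        "metadata": map[string]interface{}{
            "labels": map[string]interface{}{
                "app":      "ceph-csi",
                "replicas": 3, // not a string: triggers the accessor error
            },
        },
    }

    labels, found, err := unstructured.NestedStringMap(obj, "metadata", "labels")
    fmt.Println(labels, found, err)
    // err names the key "replicas" and the offending type int
}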