mirror of
https://github.com/ceph/ceph-csi.git
synced 2025-01-18 02:39:30 +00:00
rebase: bump k8s.io/api in /api in the k8s-dependencies group
Bumps the k8s-dependencies group in /api with 1 update: [k8s.io/api](https://github.com/kubernetes/api). Updates `k8s.io/api` from 0.31.3 to 0.32.0 - [Commits](https://github.com/kubernetes/api/compare/v0.31.3...v0.32.0) --- updated-dependencies: - dependency-name: k8s.io/api dependency-type: direct:production update-type: version-update:semver-minor dependency-group: k8s-dependencies ... Signed-off-by: dependabot[bot] <support@github.com>
This commit is contained in:
parent
bdd7c5a666
commit
d4ec76ca2f
16
api/go.mod
16
api/go.mod
@ -6,7 +6,7 @@ require (
|
||||
github.com/ghodss/yaml v1.0.0
|
||||
github.com/openshift/api v0.0.0-20240115183315-0793e918179d
|
||||
github.com/stretchr/testify v1.10.0
|
||||
k8s.io/api v0.31.3
|
||||
k8s.io/api v0.32.0
|
||||
)
|
||||
|
||||
require (
|
||||
@ -16,20 +16,22 @@ require (
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
golang.org/x/net v0.26.0 // indirect
|
||||
golang.org/x/text v0.16.0 // indirect
|
||||
golang.org/x/net v0.30.0 // indirect
|
||||
golang.org/x/text v0.19.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apimachinery v0.31.3 // indirect
|
||||
k8s.io/apimachinery v0.32.0 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
)
|
||||
|
||||
// version 3.9 is really old, don't use that!
|
||||
|
30
api/go.sum
30
api/go.sum
@ -1,3 +1,4 @@
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
@ -55,8 +56,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
|
||||
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
|
||||
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
|
||||
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@ -65,8 +66,8 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
|
||||
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
@ -80,22 +81,21 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8=
|
||||
k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE=
|
||||
k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4=
|
||||
k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
|
||||
k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE=
|
||||
k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0=
|
||||
k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg=
|
||||
k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
|
4
api/vendor/golang.org/x/net/LICENSE
generated
vendored
4
api/vendor/golang.org/x/net/LICENSE
generated
vendored
@ -1,4 +1,4 @@
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
Copyright 2009 The Go Authors.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
* Neither the name of Google LLC nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
|
122
api/vendor/golang.org/x/net/http2/config.go
generated
vendored
Normal file
122
api/vendor/golang.org/x/net/http2/config.go
generated
vendored
Normal file
@ -0,0 +1,122 @@
|
||||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"math"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// http2Config is a package-internal version of net/http.HTTP2Config.
|
||||
//
|
||||
// http.HTTP2Config was added in Go 1.24.
|
||||
// When running with a version of net/http that includes HTTP2Config,
|
||||
// we merge the configuration with the fields in Transport or Server
|
||||
// to produce an http2Config.
|
||||
//
|
||||
// Zero valued fields in http2Config are interpreted as in the
|
||||
// net/http.HTTPConfig documentation.
|
||||
//
|
||||
// Precedence order for reconciling configurations is:
|
||||
//
|
||||
// - Use the net/http.{Server,Transport}.HTTP2Config value, when non-zero.
|
||||
// - Otherwise use the http2.{Server.Transport} value.
|
||||
// - If the resulting value is zero or out of range, use a default.
|
||||
type http2Config struct {
|
||||
MaxConcurrentStreams uint32
|
||||
MaxDecoderHeaderTableSize uint32
|
||||
MaxEncoderHeaderTableSize uint32
|
||||
MaxReadFrameSize uint32
|
||||
MaxUploadBufferPerConnection int32
|
||||
MaxUploadBufferPerStream int32
|
||||
SendPingTimeout time.Duration
|
||||
PingTimeout time.Duration
|
||||
WriteByteTimeout time.Duration
|
||||
PermitProhibitedCipherSuites bool
|
||||
CountError func(errType string)
|
||||
}
|
||||
|
||||
// configFromServer merges configuration settings from
|
||||
// net/http.Server.HTTP2Config and http2.Server.
|
||||
func configFromServer(h1 *http.Server, h2 *Server) http2Config {
|
||||
conf := http2Config{
|
||||
MaxConcurrentStreams: h2.MaxConcurrentStreams,
|
||||
MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
|
||||
MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
|
||||
MaxReadFrameSize: h2.MaxReadFrameSize,
|
||||
MaxUploadBufferPerConnection: h2.MaxUploadBufferPerConnection,
|
||||
MaxUploadBufferPerStream: h2.MaxUploadBufferPerStream,
|
||||
SendPingTimeout: h2.ReadIdleTimeout,
|
||||
PingTimeout: h2.PingTimeout,
|
||||
WriteByteTimeout: h2.WriteByteTimeout,
|
||||
PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites,
|
||||
CountError: h2.CountError,
|
||||
}
|
||||
fillNetHTTPServerConfig(&conf, h1)
|
||||
setConfigDefaults(&conf, true)
|
||||
return conf
|
||||
}
|
||||
|
||||
// configFromServer merges configuration settings from h2 and h2.t1.HTTP2
|
||||
// (the net/http Transport).
|
||||
func configFromTransport(h2 *Transport) http2Config {
|
||||
conf := http2Config{
|
||||
MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
|
||||
MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
|
||||
MaxReadFrameSize: h2.MaxReadFrameSize,
|
||||
SendPingTimeout: h2.ReadIdleTimeout,
|
||||
PingTimeout: h2.PingTimeout,
|
||||
WriteByteTimeout: h2.WriteByteTimeout,
|
||||
}
|
||||
|
||||
// Unlike most config fields, where out-of-range values revert to the default,
|
||||
// Transport.MaxReadFrameSize clips.
|
||||
if conf.MaxReadFrameSize < minMaxFrameSize {
|
||||
conf.MaxReadFrameSize = minMaxFrameSize
|
||||
} else if conf.MaxReadFrameSize > maxFrameSize {
|
||||
conf.MaxReadFrameSize = maxFrameSize
|
||||
}
|
||||
|
||||
if h2.t1 != nil {
|
||||
fillNetHTTPTransportConfig(&conf, h2.t1)
|
||||
}
|
||||
setConfigDefaults(&conf, false)
|
||||
return conf
|
||||
}
|
||||
|
||||
func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) {
|
||||
if *v < minval || *v > maxval {
|
||||
*v = defval
|
||||
}
|
||||
}
|
||||
|
||||
func setConfigDefaults(conf *http2Config, server bool) {
|
||||
setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams)
|
||||
setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
|
||||
setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
|
||||
if server {
|
||||
setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20)
|
||||
} else {
|
||||
setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow)
|
||||
}
|
||||
if server {
|
||||
setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20)
|
||||
} else {
|
||||
setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow)
|
||||
}
|
||||
setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize)
|
||||
setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second)
|
||||
}
|
||||
|
||||
// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header
|
||||
// to an HTTP/2 MAX_HEADER_LIST_SIZE value.
|
||||
func adjustHTTP1MaxHeaderSize(n int64) int64 {
|
||||
// http2's count is in a slightly different unit and includes 32 bytes per pair.
|
||||
// So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
|
||||
const perFieldOverhead = 32 // per http2 spec
|
||||
const typicalHeaders = 10 // conservative
|
||||
return n + typicalHeaders*perFieldOverhead
|
||||
}
|
61
api/vendor/golang.org/x/net/http2/config_go124.go
generated
vendored
Normal file
61
api/vendor/golang.org/x/net/http2/config_go124.go
generated
vendored
Normal file
@ -0,0 +1,61 @@
|
||||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.24
|
||||
|
||||
package http2
|
||||
|
||||
import "net/http"
|
||||
|
||||
// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2.
|
||||
func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
|
||||
fillNetHTTPConfig(conf, srv.HTTP2)
|
||||
}
|
||||
|
||||
// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2.
|
||||
func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
|
||||
fillNetHTTPConfig(conf, tr.HTTP2)
|
||||
}
|
||||
|
||||
func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
|
||||
if h2 == nil {
|
||||
return
|
||||
}
|
||||
if h2.MaxConcurrentStreams != 0 {
|
||||
conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
|
||||
}
|
||||
if h2.MaxEncoderHeaderTableSize != 0 {
|
||||
conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
|
||||
}
|
||||
if h2.MaxDecoderHeaderTableSize != 0 {
|
||||
conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
|
||||
}
|
||||
if h2.MaxConcurrentStreams != 0 {
|
||||
conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
|
||||
}
|
||||
if h2.MaxReadFrameSize != 0 {
|
||||
conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
|
||||
}
|
||||
if h2.MaxReceiveBufferPerConnection != 0 {
|
||||
conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
|
||||
}
|
||||
if h2.MaxReceiveBufferPerStream != 0 {
|
||||
conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
|
||||
}
|
||||
if h2.SendPingTimeout != 0 {
|
||||
conf.SendPingTimeout = h2.SendPingTimeout
|
||||
}
|
||||
if h2.PingTimeout != 0 {
|
||||
conf.PingTimeout = h2.PingTimeout
|
||||
}
|
||||
if h2.WriteByteTimeout != 0 {
|
||||
conf.WriteByteTimeout = h2.WriteByteTimeout
|
||||
}
|
||||
if h2.PermitProhibitedCipherSuites {
|
||||
conf.PermitProhibitedCipherSuites = true
|
||||
}
|
||||
if h2.CountError != nil {
|
||||
conf.CountError = h2.CountError
|
||||
}
|
||||
}
|
16
api/vendor/golang.org/x/net/http2/config_pre_go124.go
generated
vendored
Normal file
16
api/vendor/golang.org/x/net/http2/config_pre_go124.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.24
|
||||
|
||||
package http2
|
||||
|
||||
import "net/http"
|
||||
|
||||
// Pre-Go 1.24 fallback.
|
||||
// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24.
|
||||
|
||||
func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {}
|
||||
|
||||
func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {}
|
49
api/vendor/golang.org/x/net/http2/http2.go
generated
vendored
49
api/vendor/golang.org/x/net/http2/http2.go
generated
vendored
@ -19,8 +19,9 @@ import (
|
||||
"bufio"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"sort"
|
||||
@ -238,12 +239,18 @@ func (cw closeWaiter) Wait() {
|
||||
// idle memory usage with many connections.
|
||||
type bufferedWriter struct {
|
||||
_ incomparable
|
||||
w io.Writer // immutable
|
||||
group synctestGroupInterface // immutable
|
||||
conn net.Conn // immutable
|
||||
bw *bufio.Writer // non-nil when data is buffered
|
||||
byteTimeout time.Duration // immutable, WriteByteTimeout
|
||||
}
|
||||
|
||||
func newBufferedWriter(w io.Writer) *bufferedWriter {
|
||||
return &bufferedWriter{w: w}
|
||||
func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter {
|
||||
return &bufferedWriter{
|
||||
group: group,
|
||||
conn: conn,
|
||||
byteTimeout: timeout,
|
||||
}
|
||||
}
|
||||
|
||||
// bufWriterPoolBufferSize is the size of bufio.Writer's
|
||||
@ -270,7 +277,7 @@ func (w *bufferedWriter) Available() int {
|
||||
func (w *bufferedWriter) Write(p []byte) (n int, err error) {
|
||||
if w.bw == nil {
|
||||
bw := bufWriterPool.Get().(*bufio.Writer)
|
||||
bw.Reset(w.w)
|
||||
bw.Reset((*bufferedWriterTimeoutWriter)(w))
|
||||
w.bw = bw
|
||||
}
|
||||
return w.bw.Write(p)
|
||||
@ -288,6 +295,38 @@ func (w *bufferedWriter) Flush() error {
|
||||
return err
|
||||
}
|
||||
|
||||
type bufferedWriterTimeoutWriter bufferedWriter
|
||||
|
||||
func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) {
|
||||
return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p)
|
||||
}
|
||||
|
||||
// writeWithByteTimeout writes to conn.
|
||||
// If more than timeout passes without any bytes being written to the connection,
|
||||
// the write fails.
|
||||
func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
|
||||
if timeout <= 0 {
|
||||
return conn.Write(p)
|
||||
}
|
||||
for {
|
||||
var now time.Time
|
||||
if group == nil {
|
||||
now = time.Now()
|
||||
} else {
|
||||
now = group.Now()
|
||||
}
|
||||
conn.SetWriteDeadline(now.Add(timeout))
|
||||
nn, err := conn.Write(p[n:])
|
||||
n += nn
|
||||
if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
|
||||
// Either we finished the write, made no progress, or hit the deadline.
|
||||
// Whichever it is, we're done now.
|
||||
conn.SetWriteDeadline(time.Time{})
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func mustUint31(v int32) uint32 {
|
||||
if v < 0 || v > 2147483647 {
|
||||
panic("out of range")
|
||||
|
173
api/vendor/golang.org/x/net/http2/server.go
generated
vendored
173
api/vendor/golang.org/x/net/http2/server.go
generated
vendored
@ -29,6 +29,7 @@ import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
@ -56,6 +57,10 @@ const (
|
||||
firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
|
||||
handlerChunkWriteSize = 4 << 10
|
||||
defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
|
||||
|
||||
// maxQueuedControlFrames is the maximum number of control frames like
|
||||
// SETTINGS, PING and RST_STREAM that will be queued for writing before
|
||||
// the connection is closed to prevent memory exhaustion attacks.
|
||||
maxQueuedControlFrames = 10000
|
||||
)
|
||||
|
||||
@ -127,6 +132,22 @@ type Server struct {
|
||||
// If zero or negative, there is no timeout.
|
||||
IdleTimeout time.Duration
|
||||
|
||||
// ReadIdleTimeout is the timeout after which a health check using a ping
|
||||
// frame will be carried out if no frame is received on the connection.
|
||||
// If zero, no health check is performed.
|
||||
ReadIdleTimeout time.Duration
|
||||
|
||||
// PingTimeout is the timeout after which the connection will be closed
|
||||
// if a response to a ping is not received.
|
||||
// If zero, a default of 15 seconds is used.
|
||||
PingTimeout time.Duration
|
||||
|
||||
// WriteByteTimeout is the timeout after which a connection will be
|
||||
// closed if no data can be written to it. The timeout begins when data is
|
||||
// available to write, and is extended whenever any bytes are written.
|
||||
// If zero or negative, there is no timeout.
|
||||
WriteByteTimeout time.Duration
|
||||
|
||||
// MaxUploadBufferPerConnection is the size of the initial flow
|
||||
// control window for each connections. The HTTP/2 spec does not
|
||||
// allow this to be smaller than 65535 or larger than 2^32-1.
|
||||
@ -189,57 +210,6 @@ func (s *Server) afterFunc(d time.Duration, f func()) timer {
|
||||
return timeTimer{time.AfterFunc(d, f)}
|
||||
}
|
||||
|
||||
func (s *Server) initialConnRecvWindowSize() int32 {
|
||||
if s.MaxUploadBufferPerConnection >= initialWindowSize {
|
||||
return s.MaxUploadBufferPerConnection
|
||||
}
|
||||
return 1 << 20
|
||||
}
|
||||
|
||||
func (s *Server) initialStreamRecvWindowSize() int32 {
|
||||
if s.MaxUploadBufferPerStream > 0 {
|
||||
return s.MaxUploadBufferPerStream
|
||||
}
|
||||
return 1 << 20
|
||||
}
|
||||
|
||||
func (s *Server) maxReadFrameSize() uint32 {
|
||||
if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
|
||||
return v
|
||||
}
|
||||
return defaultMaxReadFrameSize
|
||||
}
|
||||
|
||||
func (s *Server) maxConcurrentStreams() uint32 {
|
||||
if v := s.MaxConcurrentStreams; v > 0 {
|
||||
return v
|
||||
}
|
||||
return defaultMaxStreams
|
||||
}
|
||||
|
||||
func (s *Server) maxDecoderHeaderTableSize() uint32 {
|
||||
if v := s.MaxDecoderHeaderTableSize; v > 0 {
|
||||
return v
|
||||
}
|
||||
return initialHeaderTableSize
|
||||
}
|
||||
|
||||
func (s *Server) maxEncoderHeaderTableSize() uint32 {
|
||||
if v := s.MaxEncoderHeaderTableSize; v > 0 {
|
||||
return v
|
||||
}
|
||||
return initialHeaderTableSize
|
||||
}
|
||||
|
||||
// maxQueuedControlFrames is the maximum number of control frames like
|
||||
// SETTINGS, PING and RST_STREAM that will be queued for writing before
|
||||
// the connection is closed to prevent memory exhaustion attacks.
|
||||
func (s *Server) maxQueuedControlFrames() int {
|
||||
// TODO: if anybody asks, add a Server field, and remember to define the
|
||||
// behavior of negative values.
|
||||
return maxQueuedControlFrames
|
||||
}
|
||||
|
||||
type serverInternalState struct {
|
||||
mu sync.Mutex
|
||||
activeConns map[*serverConn]struct{}
|
||||
@ -440,13 +410,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
|
||||
baseCtx, cancel := serverConnBaseContext(c, opts)
|
||||
defer cancel()
|
||||
|
||||
http1srv := opts.baseConfig()
|
||||
conf := configFromServer(http1srv, s)
|
||||
sc := &serverConn{
|
||||
srv: s,
|
||||
hs: opts.baseConfig(),
|
||||
hs: http1srv,
|
||||
conn: c,
|
||||
baseCtx: baseCtx,
|
||||
remoteAddrStr: c.RemoteAddr().String(),
|
||||
bw: newBufferedWriter(c),
|
||||
bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout),
|
||||
handler: opts.handler(),
|
||||
streams: make(map[uint32]*stream),
|
||||
readFrameCh: make(chan readFrameResult),
|
||||
@ -456,9 +428,12 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
|
||||
bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
|
||||
doneServing: make(chan struct{}),
|
||||
clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
|
||||
advMaxStreams: s.maxConcurrentStreams(),
|
||||
advMaxStreams: conf.MaxConcurrentStreams,
|
||||
initialStreamSendWindowSize: initialWindowSize,
|
||||
initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
|
||||
maxFrameSize: initialMaxFrameSize,
|
||||
pingTimeout: conf.PingTimeout,
|
||||
countErrorFunc: conf.CountError,
|
||||
serveG: newGoroutineLock(),
|
||||
pushEnabled: true,
|
||||
sawClientPreface: opts.SawClientPreface,
|
||||
@ -491,15 +466,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
|
||||
sc.flow.add(initialWindowSize)
|
||||
sc.inflow.init(initialWindowSize)
|
||||
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
|
||||
sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize())
|
||||
sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize)
|
||||
|
||||
fr := NewFramer(sc.bw, c)
|
||||
if s.CountError != nil {
|
||||
fr.countError = s.CountError
|
||||
if conf.CountError != nil {
|
||||
fr.countError = conf.CountError
|
||||
}
|
||||
fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil)
|
||||
fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil)
|
||||
fr.MaxHeaderListSize = sc.maxHeaderListSize()
|
||||
fr.SetMaxReadFrameSize(s.maxReadFrameSize())
|
||||
fr.SetMaxReadFrameSize(conf.MaxReadFrameSize)
|
||||
sc.framer = fr
|
||||
|
||||
if tc, ok := c.(connectionStater); ok {
|
||||
@ -532,7 +507,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
|
||||
// So for now, do nothing here again.
|
||||
}
|
||||
|
||||
if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
|
||||
if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
|
||||
// "Endpoints MAY choose to generate a connection error
|
||||
// (Section 5.4.1) of type INADEQUATE_SECURITY if one of
|
||||
// the prohibited cipher suites are negotiated."
|
||||
@ -569,7 +544,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
|
||||
opts.UpgradeRequest = nil
|
||||
}
|
||||
|
||||
sc.serve()
|
||||
sc.serve(conf)
|
||||
}
|
||||
|
||||
func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) {
|
||||
@ -609,6 +584,7 @@ type serverConn struct {
|
||||
tlsState *tls.ConnectionState // shared by all handlers, like net/http
|
||||
remoteAddrStr string
|
||||
writeSched WriteScheduler
|
||||
countErrorFunc func(errType string)
|
||||
|
||||
// Everything following is owned by the serve loop; use serveG.check():
|
||||
serveG goroutineLock // used to verify funcs are on serve()
|
||||
@ -628,6 +604,7 @@ type serverConn struct {
|
||||
streams map[uint32]*stream
|
||||
unstartedHandlers []unstartedHandler
|
||||
initialStreamSendWindowSize int32
|
||||
initialStreamRecvWindowSize int32
|
||||
maxFrameSize int32
|
||||
peerMaxHeaderListSize uint32 // zero means unknown (default)
|
||||
canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
|
||||
@ -638,9 +615,14 @@ type serverConn struct {
|
||||
inGoAway bool // we've started to or sent GOAWAY
|
||||
inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
|
||||
needToSendGoAway bool // we need to schedule a GOAWAY frame write
|
||||
pingSent bool
|
||||
sentPingData [8]byte
|
||||
goAwayCode ErrCode
|
||||
shutdownTimer timer // nil until used
|
||||
idleTimer timer // nil if unused
|
||||
readIdleTimeout time.Duration
|
||||
pingTimeout time.Duration
|
||||
readIdleTimer timer // nil if unused
|
||||
|
||||
// Owned by the writeFrameAsync goroutine:
|
||||
headerWriteBuf bytes.Buffer
|
||||
@ -655,11 +637,7 @@ func (sc *serverConn) maxHeaderListSize() uint32 {
|
||||
if n <= 0 {
|
||||
n = http.DefaultMaxHeaderBytes
|
||||
}
|
||||
// http2's count is in a slightly different unit and includes 32 bytes per pair.
|
||||
// So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
|
||||
const perFieldOverhead = 32 // per http2 spec
|
||||
const typicalHeaders = 10 // conservative
|
||||
return uint32(n + typicalHeaders*perFieldOverhead)
|
||||
return uint32(adjustHTTP1MaxHeaderSize(int64(n)))
|
||||
}
|
||||
|
||||
func (sc *serverConn) curOpenStreams() uint32 {
|
||||
@ -923,7 +901,7 @@ func (sc *serverConn) notePanic() {
|
||||
}
|
||||
}
|
||||
|
||||
func (sc *serverConn) serve() {
|
||||
func (sc *serverConn) serve(conf http2Config) {
|
||||
sc.serveG.check()
|
||||
defer sc.notePanic()
|
||||
defer sc.conn.Close()
|
||||
@ -937,18 +915,18 @@ func (sc *serverConn) serve() {
|
||||
|
||||
sc.writeFrame(FrameWriteRequest{
|
||||
write: writeSettings{
|
||||
{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
|
||||
{SettingMaxFrameSize, conf.MaxReadFrameSize},
|
||||
{SettingMaxConcurrentStreams, sc.advMaxStreams},
|
||||
{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
|
||||
{SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()},
|
||||
{SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
|
||||
{SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize},
|
||||
{SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)},
|
||||
},
|
||||
})
|
||||
sc.unackedSettings++
|
||||
|
||||
// Each connection starts with initialWindowSize inflow tokens.
|
||||
// If a higher value is configured, we add more tokens.
|
||||
if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
|
||||
if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 {
|
||||
sc.sendWindowUpdate(nil, int(diff))
|
||||
}
|
||||
|
||||
@ -968,11 +946,18 @@ func (sc *serverConn) serve() {
|
||||
defer sc.idleTimer.Stop()
|
||||
}
|
||||
|
||||
if conf.SendPingTimeout > 0 {
|
||||
sc.readIdleTimeout = conf.SendPingTimeout
|
||||
sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
|
||||
defer sc.readIdleTimer.Stop()
|
||||
}
|
||||
|
||||
go sc.readFrames() // closed by defer sc.conn.Close above
|
||||
|
||||
settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
|
||||
defer settingsTimer.Stop()
|
||||
|
||||
lastFrameTime := sc.srv.now()
|
||||
loopNum := 0
|
||||
for {
|
||||
loopNum++
|
||||
@ -986,6 +971,7 @@ func (sc *serverConn) serve() {
|
||||
case res := <-sc.wroteFrameCh:
|
||||
sc.wroteFrame(res)
|
||||
case res := <-sc.readFrameCh:
|
||||
lastFrameTime = sc.srv.now()
|
||||
// Process any written frames before reading new frames from the client since a
|
||||
// written frame could have triggered a new stream to be started.
|
||||
if sc.writingFrameAsync {
|
||||
@ -1017,6 +1003,8 @@ func (sc *serverConn) serve() {
|
||||
case idleTimerMsg:
|
||||
sc.vlogf("connection is idle")
|
||||
sc.goAway(ErrCodeNo)
|
||||
case readIdleTimerMsg:
|
||||
sc.handlePingTimer(lastFrameTime)
|
||||
case shutdownTimerMsg:
|
||||
sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
|
||||
return
|
||||
@ -1039,7 +1027,7 @@ func (sc *serverConn) serve() {
|
||||
// If the peer is causing us to generate a lot of control frames,
|
||||
// but not reading them from us, assume they are trying to make us
|
||||
// run out of memory.
|
||||
if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() {
|
||||
if sc.queuedControlFrames > maxQueuedControlFrames {
|
||||
sc.vlogf("http2: too many control frames in send queue, closing connection")
|
||||
return
|
||||
}
|
||||
@ -1055,12 +1043,39 @@ func (sc *serverConn) serve() {
|
||||
}
|
||||
}
|
||||
|
||||
func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
|
||||
if sc.pingSent {
|
||||
sc.vlogf("timeout waiting for PING response")
|
||||
sc.conn.Close()
|
||||
return
|
||||
}
|
||||
|
||||
pingAt := lastFrameReadTime.Add(sc.readIdleTimeout)
|
||||
now := sc.srv.now()
|
||||
if pingAt.After(now) {
|
||||
// We received frames since arming the ping timer.
|
||||
// Reset it for the next possible timeout.
|
||||
sc.readIdleTimer.Reset(pingAt.Sub(now))
|
||||
return
|
||||
}
|
||||
|
||||
sc.pingSent = true
|
||||
// Ignore crypto/rand.Read errors: It generally can't fail, and worse case if it does
|
||||
// is we send a PING frame containing 0s.
|
||||
_, _ = rand.Read(sc.sentPingData[:])
|
||||
sc.writeFrame(FrameWriteRequest{
|
||||
write: &writePing{data: sc.sentPingData},
|
||||
})
|
||||
sc.readIdleTimer.Reset(sc.pingTimeout)
|
||||
}
|
||||
|
||||
type serverMessage int
|
||||
|
||||
// Message values sent to serveMsgCh.
|
||||
var (
|
||||
settingsTimerMsg = new(serverMessage)
|
||||
idleTimerMsg = new(serverMessage)
|
||||
readIdleTimerMsg = new(serverMessage)
|
||||
shutdownTimerMsg = new(serverMessage)
|
||||
gracefulShutdownMsg = new(serverMessage)
|
||||
handlerDoneMsg = new(serverMessage)
|
||||
@ -1068,6 +1083,7 @@ var (
|
||||
|
||||
func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
|
||||
func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) }
|
||||
func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) }
|
||||
func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
|
||||
|
||||
func (sc *serverConn) sendServeMsg(msg interface{}) {
|
||||
@ -1320,6 +1336,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
|
||||
sc.writingFrame = false
|
||||
sc.writingFrameAsync = false
|
||||
|
||||
if res.err != nil {
|
||||
sc.conn.Close()
|
||||
}
|
||||
|
||||
wr := res.wr
|
||||
|
||||
if writeEndsStream(wr.write) {
|
||||
@ -1594,6 +1614,11 @@ func (sc *serverConn) processFrame(f Frame) error {
|
||||
func (sc *serverConn) processPing(f *PingFrame) error {
|
||||
sc.serveG.check()
|
||||
if f.IsAck() {
|
||||
if sc.pingSent && sc.sentPingData == f.Data {
|
||||
// This is a response to a PING we sent.
|
||||
sc.pingSent = false
|
||||
sc.readIdleTimer.Reset(sc.readIdleTimeout)
|
||||
}
|
||||
// 6.7 PING: " An endpoint MUST NOT respond to PING frames
|
||||
// containing this flag."
|
||||
return nil
|
||||
@ -2160,7 +2185,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
|
||||
st.cw.Init()
|
||||
st.flow.conn = &sc.flow // link to conn-level counter
|
||||
st.flow.add(sc.initialStreamSendWindowSize)
|
||||
st.inflow.init(sc.srv.initialStreamRecvWindowSize())
|
||||
st.inflow.init(sc.initialStreamRecvWindowSize)
|
||||
if sc.hs.WriteTimeout > 0 {
|
||||
st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
|
||||
}
|
||||
@ -3301,7 +3326,7 @@ func (sc *serverConn) countError(name string, err error) error {
|
||||
if sc == nil || sc.srv == nil {
|
||||
return err
|
||||
}
|
||||
f := sc.srv.CountError
|
||||
f := sc.countErrorFunc
|
||||
if f == nil {
|
||||
return err
|
||||
}
|
||||
|
105
api/vendor/golang.org/x/net/http2/transport.go
generated
vendored
105
api/vendor/golang.org/x/net/http2/transport.go
generated
vendored
@ -25,7 +25,6 @@ import (
|
||||
"net/http"
|
||||
"net/http/httptrace"
|
||||
"net/textproto"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
@ -227,40 +226,26 @@ func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (co
|
||||
}
|
||||
|
||||
func (t *Transport) maxHeaderListSize() uint32 {
|
||||
if t.MaxHeaderListSize == 0 {
|
||||
n := int64(t.MaxHeaderListSize)
|
||||
if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 {
|
||||
n = t.t1.MaxResponseHeaderBytes
|
||||
if n > 0 {
|
||||
n = adjustHTTP1MaxHeaderSize(n)
|
||||
}
|
||||
}
|
||||
if n <= 0 {
|
||||
return 10 << 20
|
||||
}
|
||||
if t.MaxHeaderListSize == 0xffffffff {
|
||||
if n >= 0xffffffff {
|
||||
return 0
|
||||
}
|
||||
return t.MaxHeaderListSize
|
||||
}
|
||||
|
||||
func (t *Transport) maxFrameReadSize() uint32 {
|
||||
if t.MaxReadFrameSize == 0 {
|
||||
return 0 // use the default provided by the peer
|
||||
}
|
||||
if t.MaxReadFrameSize < minMaxFrameSize {
|
||||
return minMaxFrameSize
|
||||
}
|
||||
if t.MaxReadFrameSize > maxFrameSize {
|
||||
return maxFrameSize
|
||||
}
|
||||
return t.MaxReadFrameSize
|
||||
return uint32(n)
|
||||
}
|
||||
|
||||
func (t *Transport) disableCompression() bool {
|
||||
return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
|
||||
}
|
||||
|
||||
func (t *Transport) pingTimeout() time.Duration {
|
||||
if t.PingTimeout == 0 {
|
||||
return 15 * time.Second
|
||||
}
|
||||
return t.PingTimeout
|
||||
|
||||
}
|
||||
|
||||
// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
|
||||
// It returns an error if t1 has already been HTTP/2-enabled.
|
||||
//
|
||||
@ -375,6 +360,9 @@ type ClientConn struct {
|
||||
peerMaxHeaderListSize uint64
|
||||
peerMaxHeaderTableSize uint32
|
||||
initialWindowSize uint32
|
||||
initialStreamRecvWindowSize int32
|
||||
readIdleTimeout time.Duration
|
||||
pingTimeout time.Duration
|
||||
|
||||
// reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
|
||||
// Write to reqHeaderMu to lock it, read from it to unlock.
|
||||
@ -499,6 +487,7 @@ func (cs *clientStream) closeReqBodyLocked() {
|
||||
}
|
||||
|
||||
type stickyErrWriter struct {
|
||||
group synctestGroupInterface
|
||||
conn net.Conn
|
||||
timeout time.Duration
|
||||
err *error
|
||||
@ -508,23 +497,10 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
|
||||
if *sew.err != nil {
|
||||
return 0, *sew.err
|
||||
}
|
||||
for {
|
||||
if sew.timeout != 0 {
|
||||
sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout))
|
||||
}
|
||||
nn, err := sew.conn.Write(p[n:])
|
||||
n += nn
|
||||
if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) {
|
||||
// Keep extending the deadline so long as we're making progress.
|
||||
continue
|
||||
}
|
||||
if sew.timeout != 0 {
|
||||
sew.conn.SetWriteDeadline(time.Time{})
|
||||
}
|
||||
n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p)
|
||||
*sew.err = err
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
|
||||
// noCachedConnError is the concrete type of ErrNoCachedConn, which
|
||||
// needs to be detected by net/http regardless of whether it's its
|
||||
@ -758,25 +734,12 @@ func (t *Transport) expectContinueTimeout() time.Duration {
|
||||
return t.t1.ExpectContinueTimeout
|
||||
}
|
||||
|
||||
func (t *Transport) maxDecoderHeaderTableSize() uint32 {
|
||||
if v := t.MaxDecoderHeaderTableSize; v > 0 {
|
||||
return v
|
||||
}
|
||||
return initialHeaderTableSize
|
||||
}
|
||||
|
||||
func (t *Transport) maxEncoderHeaderTableSize() uint32 {
|
||||
if v := t.MaxEncoderHeaderTableSize; v > 0 {
|
||||
return v
|
||||
}
|
||||
return initialHeaderTableSize
|
||||
}
|
||||
|
||||
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
|
||||
return t.newClientConn(c, t.disableKeepAlives())
|
||||
}
|
||||
|
||||
func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
|
||||
conf := configFromTransport(t)
|
||||
cc := &ClientConn{
|
||||
t: t,
|
||||
tconn: c,
|
||||
@ -784,18 +747,23 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
|
||||
nextStreamID: 1,
|
||||
maxFrameSize: 16 << 10, // spec default
|
||||
initialWindowSize: 65535, // spec default
|
||||
initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
|
||||
maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
|
||||
peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
|
||||
streams: make(map[uint32]*clientStream),
|
||||
singleUse: singleUse,
|
||||
wantSettingsAck: true,
|
||||
readIdleTimeout: conf.SendPingTimeout,
|
||||
pingTimeout: conf.PingTimeout,
|
||||
pings: make(map[[8]byte]chan struct{}),
|
||||
reqHeaderMu: make(chan struct{}, 1),
|
||||
}
|
||||
var group synctestGroupInterface
|
||||
if t.transportTestHooks != nil {
|
||||
t.markNewGoroutine()
|
||||
t.transportTestHooks.newclientconn(cc)
|
||||
c = cc.tconn
|
||||
group = t.group
|
||||
}
|
||||
if VerboseLogs {
|
||||
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
|
||||
@ -807,30 +775,25 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
|
||||
// TODO: adjust this writer size to account for frame size +
|
||||
// MTU + crypto/tls record padding.
|
||||
cc.bw = bufio.NewWriter(stickyErrWriter{
|
||||
group: group,
|
||||
conn: c,
|
||||
timeout: t.WriteByteTimeout,
|
||||
timeout: conf.WriteByteTimeout,
|
||||
err: &cc.werr,
|
||||
})
|
||||
cc.br = bufio.NewReader(c)
|
||||
cc.fr = NewFramer(cc.bw, cc.br)
|
||||
if t.maxFrameReadSize() != 0 {
|
||||
cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize())
|
||||
}
|
||||
cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize)
|
||||
if t.CountError != nil {
|
||||
cc.fr.countError = t.CountError
|
||||
}
|
||||
maxHeaderTableSize := t.maxDecoderHeaderTableSize()
|
||||
maxHeaderTableSize := conf.MaxDecoderHeaderTableSize
|
||||
cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil)
|
||||
cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
|
||||
|
||||
cc.henc = hpack.NewEncoder(&cc.hbuf)
|
||||
cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize())
|
||||
cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize)
|
||||
cc.peerMaxHeaderTableSize = initialHeaderTableSize
|
||||
|
||||
if t.AllowHTTP {
|
||||
cc.nextStreamID = 3
|
||||
}
|
||||
|
||||
if cs, ok := c.(connectionStater); ok {
|
||||
state := cs.ConnectionState()
|
||||
cc.tlsState = &state
|
||||
@ -838,11 +801,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
|
||||
|
||||
initialSettings := []Setting{
|
||||
{ID: SettingEnablePush, Val: 0},
|
||||
{ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
|
||||
}
|
||||
if max := t.maxFrameReadSize(); max != 0 {
|
||||
initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max})
|
||||
{ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)},
|
||||
}
|
||||
initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize})
|
||||
if max := t.maxHeaderListSize(); max != 0 {
|
||||
initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
|
||||
}
|
||||
@ -852,8 +813,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
|
||||
|
||||
cc.bw.Write(clientPreface)
|
||||
cc.fr.WriteSettings(initialSettings...)
|
||||
cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
|
||||
cc.inflow.init(transportDefaultConnFlow + initialWindowSize)
|
||||
cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection))
|
||||
cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize)
|
||||
cc.bw.Flush()
|
||||
if cc.werr != nil {
|
||||
cc.Close()
|
||||
@ -871,7 +832,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
|
||||
}
|
||||
|
||||
func (cc *ClientConn) healthCheck() {
|
||||
pingTimeout := cc.t.pingTimeout()
|
||||
pingTimeout := cc.pingTimeout
|
||||
// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
|
||||
// trigger the healthCheck again if there is no frame received.
|
||||
ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout)
|
||||
@ -2203,7 +2164,7 @@ type resAndError struct {
|
||||
func (cc *ClientConn) addStreamLocked(cs *clientStream) {
|
||||
cs.flow.add(int32(cc.initialWindowSize))
|
||||
cs.flow.setConnFlow(&cc.flow)
|
||||
cs.inflow.init(transportDefaultStreamFlow)
|
||||
cs.inflow.init(cc.initialStreamRecvWindowSize)
|
||||
cs.ID = cc.nextStreamID
|
||||
cc.nextStreamID += 2
|
||||
cc.streams[cs.ID] = cs
|
||||
@ -2349,7 +2310,7 @@ func (cc *ClientConn) countReadFrameError(err error) {
|
||||
func (rl *clientConnReadLoop) run() error {
|
||||
cc := rl.cc
|
||||
gotSettings := false
|
||||
readIdleTimeout := cc.t.ReadIdleTimeout
|
||||
readIdleTimeout := cc.readIdleTimeout
|
||||
var t timer
|
||||
if readIdleTimeout != 0 {
|
||||
t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck)
|
||||
|
10
api/vendor/golang.org/x/net/http2/write.go
generated
vendored
10
api/vendor/golang.org/x/net/http2/write.go
generated
vendored
@ -131,6 +131,16 @@ func (se StreamError) writeFrame(ctx writeContext) error {
|
||||
|
||||
func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
|
||||
|
||||
type writePing struct {
|
||||
data [8]byte
|
||||
}
|
||||
|
||||
func (w writePing) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WritePing(false, w.data)
|
||||
}
|
||||
|
||||
func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max }
|
||||
|
||||
type writePingAck struct{ pf *PingFrame }
|
||||
|
||||
func (w writePingAck) writeFrame(ctx writeContext) error {
|
||||
|
4
api/vendor/golang.org/x/text/LICENSE
generated
vendored
4
api/vendor/golang.org/x/text/LICENSE
generated
vendored
@ -1,4 +1,4 @@
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
Copyright 2009 The Go Authors.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
* Neither the name of Google LLC nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
|
4
api/vendor/k8s.io/api/core/v1/annotation_key_constants.go
generated
vendored
4
api/vendor/k8s.io/api/core/v1/annotation_key_constants.go
generated
vendored
@ -23,7 +23,7 @@ const (
|
||||
// webhook backend fails.
|
||||
ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open"
|
||||
|
||||
// MirrorAnnotationKey represents the annotation key set by kubelets when creating mirror pods
|
||||
// MirrorPodAnnotationKey represents the annotation key set by kubelets when creating mirror pods
|
||||
MirrorPodAnnotationKey string = "kubernetes.io/config.mirror"
|
||||
|
||||
// TolerationsAnnotationKey represents the key of tolerations data (json serialized)
|
||||
@ -80,7 +80,7 @@ const (
|
||||
// This annotation can be attached to node.
|
||||
ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl"
|
||||
|
||||
// annotation key prefix used to identify non-convertible json paths.
|
||||
// NonConvertibleAnnotationPrefix is the annotation key prefix used to identify non-convertible json paths.
|
||||
NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io"
|
||||
|
||||
kubectlPrefix = "kubectl.kubernetes.io/"
|
||||
|
2159
api/vendor/k8s.io/api/core/v1/generated.pb.go
generated
vendored
2159
api/vendor/k8s.io/api/core/v1/generated.pb.go
generated
vendored
File diff suppressed because it is too large
Load Diff
249
api/vendor/k8s.io/api/core/v1/generated.proto
generated
vendored
249
api/vendor/k8s.io/api/core/v1/generated.proto
generated
vendored
@ -181,7 +181,6 @@ message AzureFileVolumeSource {
|
||||
}
|
||||
|
||||
// Binding ties one object to another; for example, a pod is bound to a node by a scheduler.
|
||||
// Deprecated in 1.7, please use the bindings subresource of pods instead.
|
||||
message Binding {
|
||||
// Standard object's metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
@ -192,7 +191,7 @@ message Binding {
|
||||
optional ObjectReference target = 2;
|
||||
}
|
||||
|
||||
// Represents storage that is managed by an external CSI volume driver (Beta feature)
|
||||
// Represents storage that is managed by an external CSI volume driver
|
||||
message CSIPersistentVolumeSource {
|
||||
// driver is the name of the driver to use for this volume.
|
||||
// Required.
|
||||
@ -1071,7 +1070,7 @@ message ContainerStatus {
|
||||
// AllocatedResources represents the compute resources allocated for this container by the
|
||||
// node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission
|
||||
// and after successfully admitting desired pod resize.
|
||||
// +featureGate=InPlacePodVerticalScaling
|
||||
// +featureGate=InPlacePodVerticalScalingAllocatedStatus
|
||||
// +optional
|
||||
map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> allocatedResources = 10;
|
||||
|
||||
@ -1870,6 +1869,7 @@ message GCEPersistentDiskVolumeSource {
|
||||
optional bool readOnly = 4;
|
||||
}
|
||||
|
||||
// GRPCAction specifies an action involving a GRPC service.
|
||||
message GRPCAction {
|
||||
// Port number of the gRPC service. Number must be in the range 1 to 65535.
|
||||
optional int32 port = 1;
|
||||
@ -2203,21 +2203,21 @@ message Lifecycle {
|
||||
// LifecycleHandler defines a specific action that should be taken in a lifecycle
|
||||
// hook. One and only one of the fields, except TCPSocket must be specified.
|
||||
message LifecycleHandler {
|
||||
// Exec specifies the action to take.
|
||||
// Exec specifies a command to execute in the container.
|
||||
// +optional
|
||||
optional ExecAction exec = 1;
|
||||
|
||||
// HTTPGet specifies the http request to perform.
|
||||
// HTTPGet specifies an HTTP GET request to perform.
|
||||
// +optional
|
||||
optional HTTPGetAction httpGet = 2;
|
||||
|
||||
// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
|
||||
// for the backward compatibility. There are no validation of this field and
|
||||
// lifecycle hooks will fail in runtime when tcp handler is specified.
|
||||
// for backward compatibility. There is no validation of this field and
|
||||
// lifecycle hooks will fail at runtime when it is specified.
|
||||
// +optional
|
||||
optional TCPSocketAction tcpSocket = 3;
|
||||
|
||||
// Sleep represents the duration that the container should sleep before being terminated.
|
||||
// Sleep represents a duration that the container should sleep.
|
||||
// +featureGate=PodLifecycleSleepAction
|
||||
// +optional
|
||||
optional SleepAction sleep = 4;
|
||||
@ -2346,13 +2346,23 @@ message LoadBalancerStatus {
|
||||
|
||||
// LocalObjectReference contains enough information to let you locate the
|
||||
// referenced object inside the same namespace.
|
||||
// ---
|
||||
// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
|
||||
// 1. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular
|
||||
// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
|
||||
// Those cannot be well described when embedded.
|
||||
// 2. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
|
||||
// 3. We cannot easily change it. Because this type is embedded in many locations, updates to this type
|
||||
// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.
|
||||
//
|
||||
// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
|
||||
// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
|
||||
// +structType=atomic
|
||||
message LocalObjectReference {
|
||||
// Name of the referent.
|
||||
// This field is effectively required, but due to backwards compatibility is
|
||||
// allowed to be empty. Instances of this type with an empty value here are
|
||||
// almost certainly wrong.
|
||||
// TODO: Add other useful fields. apiVersion, kind, uid?
|
||||
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
// +optional
|
||||
// +default=""
|
||||
@ -2361,7 +2371,7 @@ message LocalObjectReference {
|
||||
optional string name = 1;
|
||||
}
|
||||
|
||||
// Local represents directly-attached storage with node affinity (Beta feature)
|
||||
// Local represents directly-attached storage with node affinity
|
||||
message LocalVolumeSource {
|
||||
// path of the full path to the volume on the node.
|
||||
// It can be either a directory or block device (disk, partition, ...).
|
||||
@ -2438,12 +2448,15 @@ message NamespaceCondition {
|
||||
// Status of the condition, one of True, False, Unknown.
|
||||
optional string status = 2;
|
||||
|
||||
// Last time the condition transitioned from one status to another.
|
||||
// +optional
|
||||
optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
|
||||
|
||||
// Unique, one-word, CamelCase reason for the condition's last transition.
|
||||
// +optional
|
||||
optional string reason = 5;
|
||||
|
||||
// Human-readable message indicating details about last transition.
|
||||
// +optional
|
||||
optional string message = 6;
|
||||
}
|
||||
@ -2783,7 +2796,7 @@ message NodeStatus {
|
||||
optional string phase = 3;
|
||||
|
||||
// Conditions is an array of current observed node conditions.
|
||||
// More info: https://kubernetes.io/docs/concepts/nodes/node/#condition
|
||||
// More info: https://kubernetes.io/docs/reference/node/node-status/#condition
|
||||
// +optional
|
||||
// +patchMergeKey=type
|
||||
// +patchStrategy=merge
|
||||
@ -2793,7 +2806,7 @@ message NodeStatus {
|
||||
|
||||
// List of addresses reachable to the node.
|
||||
// Queried from cloud provider, if available.
|
||||
// More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses
|
||||
// More info: https://kubernetes.io/docs/reference/node/node-status/#addresses
|
||||
// Note: This field is declared as mergeable, but the merge key is not sufficiently
|
||||
// unique, which can cause data corruption when it is merged. Callers should instead
|
||||
// use a full-replacement patch. See https://pr.k8s.io/79391 for an example.
|
||||
@ -2813,7 +2826,7 @@ message NodeStatus {
|
||||
optional NodeDaemonEndpoints daemonEndpoints = 6;
|
||||
|
||||
// Set of ids/uuids to uniquely identify the node.
|
||||
// More info: https://kubernetes.io/docs/concepts/nodes/node/#info
|
||||
// More info: https://kubernetes.io/docs/reference/node/node-status/#info
|
||||
// +optional
|
||||
optional NodeSystemInfo nodeInfo = 7;
|
||||
|
||||
@ -3001,8 +3014,13 @@ message PersistentVolumeClaim {
|
||||
|
||||
// PersistentVolumeClaimCondition contains details about state of pvc
|
||||
message PersistentVolumeClaimCondition {
|
||||
// Type is the type of the condition.
|
||||
// More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=set%20to%20%27ResizeStarted%27.-,PersistentVolumeClaimCondition,-contains%20details%20about
|
||||
optional string type = 1;
|
||||
|
||||
// Status is the status of the condition.
|
||||
// Can be True, False, Unknown.
|
||||
// More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=state%20of%20pvc-,conditions.status,-(string)%2C%20required
|
||||
optional string status = 2;
|
||||
|
||||
// lastProbeTime is the time we probed the condition.
|
||||
@ -3280,12 +3298,16 @@ message PersistentVolumeList {
message PersistentVolumeSource {
// gcePersistentDisk represents a GCE Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
// Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
// gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// +optional
optional GCEPersistentDiskVolumeSource gcePersistentDisk = 1;

// awsElasticBlockStore represents an AWS Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
// Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
// awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// +optional
optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 2;
@ -3300,6 +3322,7 @@ message PersistentVolumeSource {

// glusterfs represents a Glusterfs volume that is attached to a host and
// exposed to the pod. Provisioned by an admin.
// Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
// More info: https://examples.k8s.io/volumes/glusterfs/README.md
// +optional
optional GlusterfsPersistentVolumeSource glusterfs = 4;
@ -3310,6 +3333,7 @@ message PersistentVolumeSource {
optional NFSVolumeSource nfs = 5;

// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
// Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
// More info: https://examples.k8s.io/volumes/rbd/README.md
// +optional
optional RBDPersistentVolumeSource rbd = 6;
@ -3320,11 +3344,14 @@ message PersistentVolumeSource {
optional ISCSIPersistentVolumeSource iscsi = 7;

// cinder represents a cinder volume attached and mounted on kubelets host machine.
// Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
// are redirected to the cinder.csi.openstack.org CSI driver.
// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
// +optional
optional CinderPersistentVolumeSource cinder = 8;

// cephFS represents a Ceph FS mount on the host that shares a pod's lifetime
// cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
// Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
// +optional
optional CephFSPersistentVolumeSource cephfs = 9;

@ -3332,39 +3359,53 @@ message PersistentVolumeSource {
// +optional
optional FCVolumeSource fc = 10;

// flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running
// flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running.
// Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
// +optional
optional FlockerVolumeSource flocker = 11;

// flexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin.
// Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
// +optional
optional FlexPersistentVolumeSource flexVolume = 12;

// azureFile represents an Azure File Service mount on the host and bind mount to the pod.
// Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
// are redirected to the file.csi.azure.com CSI driver.
// +optional
optional AzureFilePersistentVolumeSource azureFile = 13;

// vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
// vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
// Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
// are redirected to the csi.vsphere.vmware.com CSI driver.
// +optional
optional VsphereVirtualDiskVolumeSource vsphereVolume = 14;

// quobyte represents a Quobyte mount on the host that shares a pod's lifetime
// quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
// Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
// +optional
optional QuobyteVolumeSource quobyte = 15;

// azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
// Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
// are redirected to the disk.csi.azure.com CSI driver.
// +optional
optional AzureDiskVolumeSource azureDisk = 16;

// photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
// photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
// Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 17;

// portworxVolume represents a portworx volume attached and mounted on kubelets host machine
// portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
// Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
// are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
// is on.
// +optional
optional PortworxVolumeSource portworxVolume = 18;

// scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
// Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
// +optional
optional ScaleIOPersistentVolumeSource scaleIO = 19;

@ -3372,12 +3413,13 @@ message PersistentVolumeSource {
// +optional
optional LocalVolumeSource local = 20;

// storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod
// storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod.
// Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
// More info: https://examples.k8s.io/volumes/storageos/README.md
// +optional
optional StorageOSPersistentVolumeSource storageos = 21;

// csi represents storage that is handled by an external CSI driver (Beta feature).
// csi represents storage that is handled by an external CSI driver.
// +optional
optional CSIPersistentVolumeSource csi = 22;
}
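
A hedged Go sketch (not part of the vendored diff): with every in-tree block-storage source above deprecated, a new PersistentVolume should set only the csi source. The driver name rbd.csi.ceph.com matches this repository's RBD driver; the object name, volume handle, capacity, and fsType are illustrative placeholders.

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// examplePV builds a PV backed by the csi source rather than a deprecated
// in-tree source such as rbd; all concrete values are placeholders.
func examplePV() *corev1.PersistentVolume {
	return &corev1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "rbd-pv-example"},
		Spec: corev1.PersistentVolumeSpec{
			Capacity: corev1.ResourceList{
				corev1.ResourceStorage: resource.MustParse("1Gi"),
			},
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			PersistentVolumeSource: corev1.PersistentVolumeSource{
				CSI: &corev1.CSIPersistentVolumeSource{
					Driver:       "rbd.csi.ceph.com",
					VolumeHandle: "example-volume-handle",
					FSType:      "ext4",
				},
			},
		},
	}
}
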
@ -3710,9 +3752,11 @@ message PodDNSConfig {

// PodDNSConfigOption defines DNS resolver options of a pod.
message PodDNSConfigOption {
// Name is this DNS resolver option's name.
// Required.
optional string name = 1;

// Value is this DNS resolver option's value.
// +optional
optional string value = 2;
}
@ -3803,7 +3847,8 @@ message PodLogOptions {
optional bool timestamps = 6;

// If set, the number of lines from the end of the logs to show. If not specified,
// logs are shown from the creation of the container or sinceSeconds or sinceTime
// logs are shown from the creation of the container or sinceSeconds or sinceTime.
// Note that when "TailLines" is specified, "Stream" can only be set to nil or "All".
// +optional
optional int64 tailLines = 7;

@ -3821,6 +3866,14 @@ message PodLogOptions {
// the actual log data coming from the real kubelet).
// +optional
optional bool insecureSkipTLSVerifyBackend = 9;

// Specify which container log stream to return to the client.
// Acceptable values are "All", "Stdout" and "Stderr". If not specified, "All" is used, and both stdout and stderr
// are returned interleaved.
// Note that when "TailLines" is specified, "Stream" can only be set to nil or "All".
// +featureGate=PodLogsQuerySplitStreams
// +optional
optional string stream = 10;
}
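
A hedged client-go sketch of the new stream field above; it assumes the PodLogsQuerySplitStreams feature gate is enabled on the cluster, and the pod and namespace names are placeholders.

package example

import (
	"context"
	"io"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
)

// stderrOnly requests just the stderr stream of a container's logs.
func stderrOnly(ctx context.Context, cs kubernetes.Interface) (io.ReadCloser, error) {
	stream := "Stderr" // one of "All", "Stdout", "Stderr"
	opts := &corev1.PodLogOptions{Stream: &stream}
	return cs.CoreV1().Pods("default").GetLogs("example-pod", opts).Stream(ctx)
}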

// PodOS defines the OS parameters of a pod.
@ -4029,6 +4082,33 @@ message PodSecurityContext {
// Note that this field cannot be set when spec.os.name is windows.
// +optional
optional AppArmorProfile appArmorProfile = 11;

// seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
// It has no effect on nodes that do not support SELinux or to volumes does not support SELinux.
// Valid values are "MountOption" and "Recursive".
//
// "Recursive" means relabeling of all files on all Pod volumes by the container runtime.
// This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.
//
// "MountOption" mounts all eligible Pod volumes with `-o context` mount option.
// This requires all Pods that share the same volume to use the same SELinux label.
// It is not possible to share the same volume among privileged and unprivileged Pods.
// Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes
// whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their
// CSIDriver instance. Other volumes are always re-labelled recursively.
// "MountOption" value is allowed only when SELinuxMount feature gate is enabled.
//
// If not specified and SELinuxMount feature gate is enabled, "MountOption" is used.
// If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes
// and "Recursive" for all other volumes.
//
// This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.
//
// All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state.
// Note that this field cannot be set when spec.os.name is windows.
// +featureGate=SELinuxChangePolicy
// +optional
optional string seLinuxChangePolicy = 13;
}
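
A minimal Go sketch of opting a pod into recursive SELinux relabeling via the new field; it only takes effect with the SELinuxChangePolicy feature gate enabled and an SELinux label set on the pod. The helper name is ours, not part of the diff.

package example

import corev1 "k8s.io/api/core/v1"

// recursiveRelabel sets seLinuxChangePolicy to "Recursive" on a pod,
// using the PodSELinuxChangePolicy constant added in this version bump.
func recursiveRelabel(pod *corev1.Pod) {
	policy := corev1.SELinuxChangePolicyRecursive
	if pod.Spec.SecurityContext == nil {
		pod.Spec.SecurityContext = &corev1.PodSecurityContext{}
	}
	pod.Spec.SecurityContext.SELinuxChangePolicy = &policy
}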

// Describes the class of pods that should avoid this node.
@ -4386,6 +4466,21 @@ message PodSpec {
// +featureGate=DynamicResourceAllocation
// +optional
repeated PodResourceClaim resourceClaims = 39;

// Resources is the total amount of CPU and Memory resources required by all
// containers in the pod. It supports specifying Requests and Limits for
// "cpu" and "memory" resource names only. ResourceClaims are not supported.
//
// This field enables fine-grained control over resource allocation for the
// entire pod, allowing resource sharing among containers in a pod.
// TODO: For beta graduation, expand this comment with a detailed explanation.
//
// This is an alpha field and requires enabling the PodLevelResources feature
// gate.
//
// +featureGate=PodLevelResources
// +optional
optional ResourceRequirements resources = 40;
}
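
A hedged sketch of the new pod-level resources field (alpha, PodLevelResources feature gate); per the comment above, only "cpu" and "memory" are accepted, and the quantities here are placeholders.

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// podLevelResources sets pod-scoped requests and limits shared by all
// containers in the pod.
func podLevelResources(spec *corev1.PodSpec) {
	spec.Resources = &corev1.ResourceRequirements{
		Requests: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("500m"),
			corev1.ResourceMemory: resource.MustParse("256Mi"),
		},
		Limits: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("1"),
			corev1.ResourceMemory: resource.MustParse("512Mi"),
		},
	}
}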

// PodStatus represents information about the status of a pod. Status may trail the actual
@ -4477,14 +4572,26 @@ message PodStatus {
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7;

// The list has one entry per init container in the manifest. The most recent successful
// Statuses of init containers in this pod. The most recent successful non-restartable
// init container will have ready = true, the most recently started container will have
// startTime set.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
// Each init container in the pod should have at most one status in this list,
// and all statuses should be for containers in the pod.
// However this is not enforced.
// If a status for a non-existent container is present in the list, or the list has duplicate names,
// the behavior of various Kubernetes components is not defined and those statuses might be
// ignored.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status
// +listType=atomic
repeated ContainerStatus initContainerStatuses = 10;

// The list has one entry per container in the manifest.
// Statuses of containers in this pod.
// Each container in the pod should have at most one status in this list,
// and all statuses should be for containers in the pod.
// However this is not enforced.
// If a status for a non-existent container is present in the list, or the list has duplicate names,
// the behavior of various Kubernetes components is not defined and those statuses might be
// ignored.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
// +optional
// +listType=atomic
@ -4496,7 +4603,14 @@ message PodStatus {
// +optional
optional string qosClass = 9;

// Status for any ephemeral containers that have run in this pod.
// Statuses for any ephemeral containers that have run in this pod.
// Each ephemeral container in the pod should have at most one status in this list,
// and all statuses should be for containers in the pod.
// However this is not enforced.
// If a status for a non-existent container is present in the list, or the list has duplicate names,
// the behavior of various Kubernetes components is not defined and those statuses might be
// ignored.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
// +optional
// +listType=atomic
repeated ContainerStatus ephemeralContainerStatuses = 13;
@ -4571,6 +4685,7 @@ message PodTemplateSpec {
optional PodSpec spec = 2;
}

// PortStatus represents the error condition of a service port
message PortStatus {
// Port is the port number of the service port of which status is recorded here
optional int32 port = 1;
@ -4695,19 +4810,19 @@ message Probe {
// ProbeHandler defines a specific action that should be taken in a probe.
// One and only one of the fields must be specified.
message ProbeHandler {
// Exec specifies the action to take.
// Exec specifies a command to execute in the container.
// +optional
optional ExecAction exec = 1;

// HTTPGet specifies the http request to perform.
// HTTPGet specifies an HTTP GET request to perform.
// +optional
optional HTTPGetAction httpGet = 2;

// TCPSocket specifies an action involving a TCP port.
// TCPSocket specifies a connection to a TCP port.
// +optional
optional TCPSocketAction tcpSocket = 3;

// GRPC specifies an action involving a GRPC port.
// GRPC specifies a GRPC HealthCheckRequest.
// +optional
optional GRPCAction grpc = 4;
}
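
A short Go sketch of the "exactly one handler" rule documented above: this Probe sets only the HTTPGet field of the embedded ProbeHandler; path, port, and timings are illustrative.

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// httpLiveness builds a liveness probe with a single handler (HTTPGet).
func httpLiveness() *corev1.Probe {
	return &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Path: "/healthz",
				Port: intstr.FromInt32(8080),
			},
		},
		InitialDelaySeconds: 5,
		PeriodSeconds:       10,
	}
}
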
@ -5036,7 +5151,7 @@ message ResourceFieldSelector {
}

// ResourceHealth represents the health of a resource. It has the latest device health information.
// This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP.
// This is a part of KEP https://kep.k8s.io/4680.
message ResourceHealth {
// ResourceID is the unique identifier of the resource. See the ResourceID type for more information.
optional string resourceID = 1;
@ -5145,15 +5260,18 @@ message ResourceRequirements {
repeated ResourceClaim claims = 3;
}

// ResourceStatus represents the status of a single resource allocated to a Pod.
message ResourceStatus {
// Name of the resource. Must be unique within the pod and match one of the resources from the pod spec.
// Name of the resource. Must be unique within the pod and in case of non-DRA resource, match one of the resources from the pod spec.
// For DRA resources, the value must be "claim:<claim_name>/<request>".
// When this status is reported about a container, the "claim_name" and "request" must match one of the claims of this container.
// +required
optional string name = 1;

// List of unique Resources health. Each element in the list contains an unique resource ID and resource health.
// At a minimum, ResourceID must uniquely identify the Resource
// allocated to the Pod on the Node for the lifetime of a Pod.
// See ResourceID type for it's definition.
// List of unique resources health. Each element in the list contains an unique resource ID and its health.
// At a minimum, for the lifetime of a Pod, resource ID must uniquely identify the resource allocated to the Pod on the Node.
// If other Pod on the same Node reports the status with the same resource ID, it must be the same resource they share.
// See ResourceID type definition for a specific format it has in various use cases.
// +listType=map
// +listMapKey=resourceID
repeated ResourceHealth resources = 2;
@ -5611,6 +5729,8 @@ message ServiceAccount {

// Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use.
// Pods are only limited to this list if this service account has a "kubernetes.io/enforce-mountable-secrets" annotation set to "true".
// The "kubernetes.io/enforce-mountable-secrets" annotation is deprecated since v1.32.
// Prefer separate namespaces to isolate access to mounted secrets.
// This field should not be used to find auto-generated service account token secrets for use outside of pods.
// Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created.
// More info: https://kubernetes.io/docs/concepts/configuration/secret
@ -5996,7 +6116,7 @@ message ServiceSpec {
// not set, the implementation will apply its default routing strategy. If set
// to "PreferClose", implementations should prioritize endpoints that are
// topologically close (e.g., same zone).
// This is an alpha field and requires enabling ServiceTrafficDistribution feature.
// This is a beta field and requires enabling ServiceTrafficDistribution feature.
// +featureGate=ServiceTrafficDistribution
// +optional
optional string trafficDistribution = 23;
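
A hedged Go sketch of opting a Service into the traffic-distribution routing described above; the field moves to beta in this release but still requires the ServiceTrafficDistribution feature gate.

package example

import corev1 "k8s.io/api/core/v1"

// preferClose asks implementations to prioritize topologically close
// endpoints (e.g., same zone) for this Service.
func preferClose(svc *corev1.Service) {
	td := corev1.ServiceTrafficDistributionPreferClose // "PreferClose"
	svc.Spec.TrafficDistribution = &td
}
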
@ -6323,6 +6443,20 @@ message TopologySpreadConstraint {

// TypedLocalObjectReference contains enough information to let you locate the
// typed referenced object inside the same namespace.
// ---
// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
// 1. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular
// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
// Those cannot be well described when embedded.
// 2. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
// 3. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity
// during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple
// and the version of the actual struct is irrelevant.
// 4. We cannot easily change it. Because this type is embedded in many locations, updates to this type
// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.
//
// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
// +structType=atomic
message TypedLocalObjectReference {
// APIGroup is the group for the resource being referenced.
@ -6338,6 +6472,7 @@ message TypedLocalObjectReference {
optional string name = 3;
}

// TypedObjectReference contains enough information to let you locate the typed referenced object
message TypedObjectReference {
// APIGroup is the group for the resource being referenced.
// If APIGroup is not specified, the specified Kind must be in the core API group.
@ -6538,18 +6673,22 @@ message VolumeSource {

// gcePersistentDisk represents a GCE Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
// Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
// gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// +optional
optional GCEPersistentDiskVolumeSource gcePersistentDisk = 3;

// awsElasticBlockStore represents an AWS Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
// Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
// awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// +optional
optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 4;

// gitRepo represents a git repository at a particular revision.
// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
// Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
// into the Pod's container.
// +optional
@ -6572,6 +6711,7 @@ message VolumeSource {
optional ISCSIVolumeSource iscsi = 8;

// glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
// Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
// More info: https://examples.k8s.io/volumes/glusterfs/README.md
// +optional
optional GlusterfsVolumeSource glusterfs = 9;
@ -6583,25 +6723,31 @@ message VolumeSource {
optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10;

// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
// Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
// More info: https://examples.k8s.io/volumes/rbd/README.md
// +optional
optional RBDVolumeSource rbd = 11;

// flexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin.
// Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
// +optional
optional FlexVolumeSource flexVolume = 12;

// cinder represents a cinder volume attached and mounted on kubelets host machine.
// Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
// are redirected to the cinder.csi.openstack.org CSI driver.
// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
// +optional
optional CinderVolumeSource cinder = 13;

// cephFS represents a Ceph FS mount on the host that shares a pod's lifetime
// cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
// Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
// +optional
optional CephFSVolumeSource cephfs = 14;

// flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running
// flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
// Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
// +optional
optional FlockerVolumeSource flocker = 15;

@ -6614,6 +6760,8 @@ message VolumeSource {
optional FCVolumeSource fc = 17;

// azureFile represents an Azure File Service mount on the host and bind mount to the pod.
// Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
// are redirected to the file.csi.azure.com CSI driver.
// +optional
optional AzureFileVolumeSource azureFile = 18;

@ -6621,37 +6769,48 @@ message VolumeSource {
// +optional
optional ConfigMapVolumeSource configMap = 19;

// vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
// vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
// Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
// are redirected to the csi.vsphere.vmware.com CSI driver.
// +optional
optional VsphereVirtualDiskVolumeSource vsphereVolume = 20;

// quobyte represents a Quobyte mount on the host that shares a pod's lifetime
// quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
// Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
// +optional
optional QuobyteVolumeSource quobyte = 21;

// azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
// Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
// are redirected to the disk.csi.azure.com CSI driver.
// +optional
optional AzureDiskVolumeSource azureDisk = 22;

// photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
// photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
// Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 23;

// projected items for all in one resources secrets, configmaps, and downward API
optional ProjectedVolumeSource projected = 26;

// portworxVolume represents a portworx volume attached and mounted on kubelets host machine
// portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
// Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
// are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
// is on.
// +optional
optional PortworxVolumeSource portworxVolume = 24;

// scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
// Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
// +optional
optional ScaleIOVolumeSource scaleIO = 25;

// storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
// Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
// +optional
optional StorageOSVolumeSource storageos = 27;

// csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).
// csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.
// +optional
optional CSIVolumeSource csi = 28;

2
api/vendor/k8s.io/api/core/v1/objectreference.go
generated
vendored
@ -20,7 +20,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
)

// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that
// SetGroupVersionKind allows clients to preemptively get a reference to an API object and pass it to places that
// intend only to get a reference to that object. This simplifies the event recording interface.
func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) {
obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
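
A short usage sketch of the method whose doc comment is corrected above; the reference names are placeholders.

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// podReference fills an ObjectReference's APIVersion and Kind from a
// GroupVersionKind via SetGroupVersionKind.
func podReference(name, namespace string) *corev1.ObjectReference {
	ref := &corev1.ObjectReference{Name: name, Namespace: namespace}
	ref.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"})
	return ref
}
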
282
api/vendor/k8s.io/api/core/v1/types.go
generated
vendored
@ -63,16 +63,20 @@ type VolumeSource struct {
EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"`
// gcePersistentDisk represents a GCE Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
// Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
// gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// +optional
GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"`
// awsElasticBlockStore represents an AWS Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
// Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
// awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// +optional
AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"`
// gitRepo represents a git repository at a particular revision.
// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
// Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
// into the Pod's container.
// +optional
@ -91,6 +95,7 @@ type VolumeSource struct {
// +optional
ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"`
// glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
// Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
// More info: https://examples.k8s.io/volumes/glusterfs/README.md
// +optional
Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"`
@ -100,21 +105,27 @@ type VolumeSource struct {
// +optional
PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"`
// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
// Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
// More info: https://examples.k8s.io/volumes/rbd/README.md
// +optional
RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"`
// flexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin.
// Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
// +optional
FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
// cinder represents a cinder volume attached and mounted on kubelets host machine.
// Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
// are redirected to the cinder.csi.openstack.org CSI driver.
// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
// +optional
Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"`
// cephFS represents a Ceph FS mount on the host that shares a pod's lifetime
// cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
// Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
// +optional
CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"`
// flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running
// flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
// Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
// +optional
Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"`
// downwardAPI represents downward API about the pod that should populate this volume
@ -124,34 +135,47 @@ type VolumeSource struct {
// +optional
FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"`
// azureFile represents an Azure File Service mount on the host and bind mount to the pod.
// Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
// are redirected to the file.csi.azure.com CSI driver.
// +optional
AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"`
// configMap represents a configMap that should populate this volume
// +optional
ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"`
// vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
// vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
// Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
// are redirected to the csi.vsphere.vmware.com CSI driver.
// +optional
VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"`
// quobyte represents a Quobyte mount on the host that shares a pod's lifetime
// quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
// Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
// +optional
Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"`
// azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
// Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
// are redirected to the disk.csi.azure.com CSI driver.
// +optional
AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"`
// photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
// photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
// Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"`
// projected items for all in one resources secrets, configmaps, and downward API
Projected *ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"`
// portworxVolume represents a portworx volume attached and mounted on kubelets host machine
// portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
// Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
// are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
// is on.
// +optional
PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"`
// scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
// Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
// +optional
ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"`
// storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
// Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
// +optional
StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"`
// csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).
// csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.
// +optional
CSI *CSIVolumeSource `json:"csi,omitempty" protobuf:"bytes,28,opt,name=csi"`
// ephemeral represents a volume that is handled by a cluster storage driver.
@ -219,11 +243,15 @@ type PersistentVolumeClaimVolumeSource struct {
type PersistentVolumeSource struct {
// gcePersistentDisk represents a GCE Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
// Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
// gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// +optional
GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"`
// awsElasticBlockStore represents an AWS Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
// Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
// awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// +optional
AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"`
@ -236,6 +264,7 @@ type PersistentVolumeSource struct {
HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"`
// glusterfs represents a Glusterfs volume that is attached to a host and
// exposed to the pod. Provisioned by an admin.
// Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
// More info: https://examples.k8s.io/volumes/glusterfs/README.md
// +optional
Glusterfs *GlusterfsPersistentVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"`
@ -244,6 +273,7 @@ type PersistentVolumeSource struct {
// +optional
NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"`
// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
// Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
// More info: https://examples.k8s.io/volumes/rbd/README.md
// +optional
RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"`
@ -252,50 +282,68 @@ type PersistentVolumeSource struct {
// +optional
ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"`
// cinder represents a cinder volume attached and mounted on kubelets host machine.
// Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
// are redirected to the cinder.csi.openstack.org CSI driver.
// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
// +optional
Cinder *CinderPersistentVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"`
// cephFS represents a Ceph FS mount on the host that shares a pod's lifetime
// cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
// Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
// +optional
CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"`
// fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
// +optional
FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"`
// flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running
// flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running.
// Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
// +optional
Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"`
// flexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin.
// Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
// +optional
FlexVolume *FlexPersistentVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
// azureFile represents an Azure File Service mount on the host and bind mount to the pod.
// Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
// are redirected to the file.csi.azure.com CSI driver.
// +optional
AzureFile *AzureFilePersistentVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"`
// vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
// vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
// Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
// are redirected to the csi.vsphere.vmware.com CSI driver.
// +optional
VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"`
// quobyte represents a Quobyte mount on the host that shares a pod's lifetime
// quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
// Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
// +optional
Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"`
// azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
// Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
// are redirected to the disk.csi.azure.com CSI driver.
// +optional
AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"`
// photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
// photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
// Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"`
// portworxVolume represents a portworx volume attached and mounted on kubelets host machine
// portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
// Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
// are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
// is on.
// +optional
PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"`
// scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
// Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
// +optional
ScaleIO *ScaleIOPersistentVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"`
// local represents directly-attached storage with node affinity
// +optional
Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"`
// storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod
// storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod.
// Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
// More info: https://examples.k8s.io/volumes/storageos/README.md
// +optional
StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"`
// csi represents storage that is handled by an external CSI driver (Beta feature).
// csi represents storage that is handled by an external CSI driver.
// +optional
CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"`
}
@ -582,6 +630,7 @@ type PersistentVolumeClaimSpec struct {
VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"`
}

// TypedObjectReference contains enough information to let you locate the typed referenced object
type TypedObjectReference struct {
// APIGroup is the group for the resource being referenced.
// If APIGroup is not specified, the specified Kind must be in the core API group.
@ -688,7 +737,12 @@ type ModifyVolumeStatus struct {

// PersistentVolumeClaimCondition contains details about state of pvc
type PersistentVolumeClaimCondition struct {
// Type is the type of the condition.
// More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=set%20to%20%27ResizeStarted%27.-,PersistentVolumeClaimCondition,-contains%20details%20about
Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"`
// Status is the status of the condition.
// Can be True, False, Unknown.
// More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=state%20of%20pvc-,conditions.status,-(string)%2C%20required
Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
// lastProbeTime is the time we probed the condition.
// +optional
@ -2015,7 +2069,7 @@ type KeyToPath struct {
Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"`
}

// Local represents directly-attached storage with node affinity (Beta feature)
// Local represents directly-attached storage with node affinity
type LocalVolumeSource struct {
// path of the full path to the volume on the node.
// It can be either a directory or block device (disk, partition, ...).
@ -2029,7 +2083,7 @@ type LocalVolumeSource struct {
FSType *string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
}
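
A hedged sketch of the LocalVolumeSource fields above; the device path and fsType are placeholders, and a real local PV must also set spec.nodeAffinity to pin it to the owning node.

package example

import corev1 "k8s.io/api/core/v1"

// localPVSource points a PV at a directory or block device on the node.
func localPVSource(devicePath string) corev1.PersistentVolumeSource {
	fsType := "xfs" // illustrative; leave FSType nil to use the default
	return corev1.PersistentVolumeSource{
		Local: &corev1.LocalVolumeSource{
			Path:   devicePath,
			FSType: &fsType,
		},
	}
}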

// Represents storage that is managed by an external CSI volume driver (Beta feature)
// Represents storage that is managed by an external CSI volume driver
type CSIPersistentVolumeSource struct {
// driver is the name of the driver to use for this volume.
// Required.
@ -2476,6 +2530,7 @@ type TCPSocketAction struct {
Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
}

// GRPCAction specifies an action involving a GRPC service.
type GRPCAction struct {
// Port number of the gRPC service. Number must be in the range 1 to 65535.
Port int32 `json:"port" protobuf:"bytes,1,opt,name=port"`
@ -2891,17 +2946,16 @@ type Container struct {
// ProbeHandler defines a specific action that should be taken in a probe.
// One and only one of the fields must be specified.
type ProbeHandler struct {
// Exec specifies the action to take.
// Exec specifies a command to execute in the container.
// +optional
Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"`
// HTTPGet specifies the http request to perform.
// HTTPGet specifies an HTTP GET request to perform.
// +optional
HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"`
// TCPSocket specifies an action involving a TCP port.
// TCPSocket specifies a connection to a TCP port.
// +optional
TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"`

// GRPC specifies an action involving a GRPC port.
// GRPC specifies a GRPC HealthCheckRequest.
// +optional
GRPC *GRPCAction `json:"grpc,omitempty" protobuf:"bytes,4,opt,name=grpc"`
}
@ -2909,18 +2963,18 @@ type ProbeHandler struct {
// LifecycleHandler defines a specific action that should be taken in a lifecycle
// hook. One and only one of the fields, except TCPSocket must be specified.
type LifecycleHandler struct {
// Exec specifies the action to take.
// Exec specifies a command to execute in the container.
// +optional
Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"`
// HTTPGet specifies the http request to perform.
// HTTPGet specifies an HTTP GET request to perform.
// +optional
HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"`
// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
// for the backward compatibility. There are no validation of this field and
// lifecycle hooks will fail in runtime when tcp handler is specified.
// for backward compatibility. There is no validation of this field and
// lifecycle hooks will fail at runtime when it is specified.
// +optional
TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"`
// Sleep represents the duration that the container should sleep before being terminated.
// Sleep represents a duration that the container should sleep.
// +featureGate=PodLifecycleSleepAction
// +optional
Sleep *SleepAction `json:"sleep,omitempty" protobuf:"bytes,4,opt,name=sleep"`
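
A hedged sketch of the Sleep lifecycle handler above (PodLifecycleSleepAction feature gate): exactly one handler field is set, here a 5-second pause before termination; the duration is illustrative.

package example

import corev1 "k8s.io/api/core/v1"

// sleepPreStop attaches a preStop hook that sleeps before the container
// is terminated.
func sleepPreStop(c *corev1.Container) {
	c.Lifecycle = &corev1.Lifecycle{
		PreStop: &corev1.LifecycleHandler{
			Sleep: &corev1.SleepAction{Seconds: 5},
		},
	}
}
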
@ -3071,7 +3125,7 @@ type ContainerStatus {
// AllocatedResources represents the compute resources allocated for this container by the
// node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission
// and after successfully admitting desired pod resize.
// +featureGate=InPlacePodVerticalScaling
// +featureGate=InPlacePodVerticalScalingAllocatedStatus
// +optional
AllocatedResources ResourceList `json:"allocatedResources,omitempty" protobuf:"bytes,10,rep,name=allocatedResources,casttype=ResourceList,castkey=ResourceName"`
// Resources represents the compute resource requests and limits that have been successfully
@ -3102,14 +3156,17 @@ type ContainerStatus {
AllocatedResourcesStatus []ResourceStatus `json:"allocatedResourcesStatus,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,14,rep,name=allocatedResourcesStatus"`
}

// ResourceStatus represents the status of a single resource allocated to a Pod.
type ResourceStatus struct {
// Name of the resource. Must be unique within the pod and match one of the resources from the pod spec.
// Name of the resource. Must be unique within the pod and in case of non-DRA resource, match one of the resources from the pod spec.
// For DRA resources, the value must be "claim:<claim_name>/<request>".
// When this status is reported about a container, the "claim_name" and "request" must match one of the claims of this container.
// +required
Name ResourceName `json:"name" protobuf:"bytes,1,opt,name=name"`
// List of unique Resources health. Each element in the list contains an unique resource ID and resource health.
// At a minimum, ResourceID must uniquely identify the Resource
// allocated to the Pod on the Node for the lifetime of a Pod.
// See ResourceID type for it's definition.
// List of unique resources health. Each element in the list contains an unique resource ID and its health.
// At a minimum, for the lifetime of a Pod, resource ID must uniquely identify the resource allocated to the Pod on the Node.
// If other Pod on the same Node reports the status with the same resource ID, it must be the same resource they share.
// See ResourceID type definition for a specific format it has in various use cases.
// +listType=map
// +listMapKey=resourceID
Resources []ResourceHealth `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"`
@ -3126,16 +3183,16 @@ const (
// ResourceID is calculated based on the source of this resource health information.
// For DevicePlugin:
//
// deviceplugin:DeviceID, where DeviceID is from the Device structure of DevicePlugin's ListAndWatchResponse type: https://github.com/kubernetes/kubernetes/blob/eda1c780543a27c078450e2f17d674471e00f494/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1alpha/api.proto#L61-L73
// DeviceID, where DeviceID is from the Device structure of DevicePlugin's ListAndWatchResponse type: https://github.com/kubernetes/kubernetes/blob/eda1c780543a27c078450e2f17d674471e00f494/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1alpha/api.proto#L61-L73
//
// DevicePlugin ID is usually a constant for the lifetime of a Node and typically can be used to uniquely identify the device on the node.
// For DRA:
//
// dra:<driver name>/<pool name>/<device name>: such a device can be looked up in the information published by that DRA driver to learn more about it. It is designed to be globally unique in a cluster.
// <driver name>/<pool name>/<device name>: such a device can be looked up in the information published by that DRA driver to learn more about it. It is designed to be globally unique in a cluster.
type ResourceID string

// ResourceHealth represents the health of a resource. It has the latest device health information.
// This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP.
// This is a part of KEP https://kep.k8s.io/4680.
type ResourceHealth struct {
// ResourceID is the unique identifier of the resource. See the ResourceID type for more information.
ResourceID ResourceID `json:"resourceID" protobuf:"bytes,1,opt,name=resourceID"`
@ -3237,7 +3294,7 @@ const (
// during scheduling, for example due to nodeAffinity parsing errors.
PodReasonSchedulerError = "SchedulerError"

// TerminationByKubelet reason in DisruptionTarget pod condition indicates that the termination
// PodReasonTerminationByKubelet reason in DisruptionTarget pod condition indicates that the termination
// is initiated by kubelet
PodReasonTerminationByKubelet = "TerminationByKubelet"

@ -4030,6 +4087,20 @@ type PodSpec {
// +featureGate=DynamicResourceAllocation
// +optional
ResourceClaims []PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"`
// Resources is the total amount of CPU and Memory resources required by all
// containers in the pod. It supports specifying Requests and Limits for
// "cpu" and "memory" resource names only. ResourceClaims are not supported.
//
// This field enables fine-grained control over resource allocation for the
// entire pod, allowing resource sharing among containers in a pod.
// TODO: For beta graduation, expand this comment with a detailed explanation.
//
// This is an alpha field and requires enabling the PodLevelResources feature
// gate.
//
// +featureGate=PodLevelResources
// +optional
Resources *ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,40,opt,name=resources"`
}

// PodResourceClaim references exactly one ResourceClaim, either directly
@ -4308,6 +4379,22 @@ const (
SupplementalGroupsPolicyStrict SupplementalGroupsPolicy = "Strict"
)

// PodSELinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
type PodSELinuxChangePolicy string

const (
// Recursive relabeling of all Pod volumes by the container runtime.
// This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.
SELinuxChangePolicyRecursive PodSELinuxChangePolicy = "Recursive"
// MountOption mounts all eligible Pod volumes with `-o context` mount option.
// This requires all Pods that share the same volume to use the same SELinux label.
|
||||
// It is not possible to share the same volume among privileged and unprivileged Pods.
|
||||
// Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes
|
||||
// whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their
|
||||
// CSIDriver instance. Other volumes are always re-labelled recursively.
|
||||
SELinuxChangePolicyMountOption PodSELinuxChangePolicy = "MountOption"
|
||||
)
|
||||
|
||||
// PodSecurityContext holds pod-level security attributes and common container settings.
|
||||
// Some fields are also present in container.securityContext. Field values of
|
||||
// container.securityContext take precedence over field values of PodSecurityContext.
|
||||
@ -4406,6 +4493,32 @@ type PodSecurityContext struct {
|
||||
// Note that this field cannot be set when spec.os.name is windows.
|
||||
// +optional
|
||||
AppArmorProfile *AppArmorProfile `json:"appArmorProfile,omitempty" protobuf:"bytes,11,opt,name=appArmorProfile"`
|
||||
// seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
|
||||
// It has no effect on nodes that do not support SELinux or to volumes does not support SELinux.
|
||||
// Valid values are "MountOption" and "Recursive".
|
||||
//
|
||||
// "Recursive" means relabeling of all files on all Pod volumes by the container runtime.
|
||||
// This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.
|
||||
//
|
||||
// "MountOption" mounts all eligible Pod volumes with `-o context` mount option.
|
||||
// This requires all Pods that share the same volume to use the same SELinux label.
|
||||
// It is not possible to share the same volume among privileged and unprivileged Pods.
|
||||
// Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes
|
||||
// whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their
|
||||
// CSIDriver instance. Other volumes are always re-labelled recursively.
|
||||
// "MountOption" value is allowed only when SELinuxMount feature gate is enabled.
|
||||
//
|
||||
// If not specified and SELinuxMount feature gate is enabled, "MountOption" is used.
|
||||
// If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes
|
||||
// and "Recursive" for all other volumes.
|
||||
//
|
||||
// This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.
|
||||
//
|
||||
// All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state.
|
||||
// Note that this field cannot be set when spec.os.name is windows.
|
||||
// +featureGate=SELinuxChangePolicy
|
||||
// +optional
|
||||
SELinuxChangePolicy *PodSELinuxChangePolicy `json:"seLinuxChangePolicy,omitempty" protobuf:"bytes,13,opt,name=seLinuxChangePolicy"`
|
||||
}
|
||||
|
||||
// SeccompProfile defines a pod/container's seccomp profile settings.
|
||||
@ -4513,8 +4626,10 @@ type PodDNSConfig struct {
|
||||
|
||||
// PodDNSConfigOption defines DNS resolver options of a pod.
|
||||
type PodDNSConfigOption struct {
|
||||
// Name is this DNS resolver option's name.
|
||||
// Required.
|
||||
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
|
||||
// Value is this DNS resolver option's value.
|
||||
// +optional
|
||||
Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
|
||||
}
|
||||
@ -4807,24 +4922,45 @@ type PodStatus struct {
|
||||
// +optional
|
||||
StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"`
|
||||
|
||||
// The list has one entry per init container in the manifest. The most recent successful
|
||||
// Statuses of init containers in this pod. The most recent successful non-restartable
|
||||
// init container will have ready = true, the most recently started container will have
|
||||
// startTime set.
|
||||
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
|
||||
// Each init container in the pod should have at most one status in this list,
|
||||
// and all statuses should be for containers in the pod.
|
||||
// However this is not enforced.
|
||||
// If a status for a non-existent container is present in the list, or the list has duplicate names,
|
||||
// the behavior of various Kubernetes components is not defined and those statuses might be
|
||||
// ignored.
|
||||
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status
|
||||
// +listType=atomic
|
||||
InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"`
|
||||
|
||||
// The list has one entry per container in the manifest.
|
||||
// Statuses of containers in this pod.
|
||||
// Each container in the pod should have at most one status in this list,
|
||||
// and all statuses should be for containers in the pod.
|
||||
// However this is not enforced.
|
||||
// If a status for a non-existent container is present in the list, or the list has duplicate names,
|
||||
// the behavior of various Kubernetes components is not defined and those statuses might be
|
||||
// ignored.
|
||||
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
|
||||
// +optional
|
||||
// +listType=atomic
|
||||
ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"`
|
||||
|
||||
// The Quality of Service (QOS) classification assigned to the pod based on resource requirements
|
||||
// See PodQOSClass type for available QOS classes
|
||||
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes
|
||||
// +optional
|
||||
QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"`
|
||||
// Status for any ephemeral containers that have run in this pod.
|
||||
|
||||
// Statuses for any ephemeral containers that have run in this pod.
|
||||
// Each ephemeral container in the pod should have at most one status in this list,
|
||||
// and all statuses should be for containers in the pod.
|
||||
// However this is not enforced.
|
||||
// If a status for a non-existent container is present in the list, or the list has duplicate names,
|
||||
// the behavior of various Kubernetes components is not defined and those statuses might be
|
||||
// ignored.
|
||||
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
|
||||
// +optional
|
||||
// +listType=atomic
|
||||
EphemeralContainerStatuses []ContainerStatus `json:"ephemeralContainerStatuses,omitempty" protobuf:"bytes,13,rep,name=ephemeralContainerStatuses"`
|
||||
@ -4867,6 +5003,7 @@ type PodStatusResult struct {
|
||||
|
||||
// +genclient
|
||||
// +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers
|
||||
// +genclient:method=UpdateResize,verb=update,subresource=resize
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||
|
||||
@ -5558,7 +5695,7 @@ type ServiceSpec struct {
|
||||
// not set, the implementation will apply its default routing strategy. If set
|
||||
// to "PreferClose", implementations should prioritize endpoints that are
|
||||
// topologically close (e.g., same zone).
|
||||
// This is an alpha field and requires enabling ServiceTrafficDistribution feature.
|
||||
// This is a beta field and requires enabling ServiceTrafficDistribution feature.
|
||||
// +featureGate=ServiceTrafficDistribution
|
||||
// +optional
|
||||
TrafficDistribution *string `json:"trafficDistribution,omitempty" protobuf:"bytes,23,opt,name=trafficDistribution"`
|
||||
@ -5692,6 +5829,8 @@ type ServiceAccount struct {
|
||||
|
||||
// Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use.
|
||||
// Pods are only limited to this list if this service account has a "kubernetes.io/enforce-mountable-secrets" annotation set to "true".
|
||||
// The "kubernetes.io/enforce-mountable-secrets" annotation is deprecated since v1.32.
|
||||
// Prefer separate namespaces to isolate access to mounted secrets.
|
||||
// This field should not be used to find auto-generated service account token secrets for use outside of pods.
|
||||
// Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created.
|
||||
// More info: https://kubernetes.io/docs/concepts/configuration/secret
|
||||
@ -6092,7 +6231,7 @@ type NodeStatus struct {
|
||||
// +optional
|
||||
Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"`
|
||||
// Conditions is an array of current observed node conditions.
|
||||
// More info: https://kubernetes.io/docs/concepts/nodes/node/#condition
|
||||
// More info: https://kubernetes.io/docs/reference/node/node-status/#condition
|
||||
// +optional
|
||||
// +patchMergeKey=type
|
||||
// +patchStrategy=merge
|
||||
@ -6101,7 +6240,7 @@ type NodeStatus struct {
|
||||
Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
|
||||
// List of addresses reachable to the node.
|
||||
// Queried from cloud provider, if available.
|
||||
// More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses
|
||||
// More info: https://kubernetes.io/docs/reference/node/node-status/#addresses
|
||||
// Note: This field is declared as mergeable, but the merge key is not sufficiently
|
||||
// unique, which can cause data corruption when it is merged. Callers should instead
|
||||
// use a full-replacement patch. See https://pr.k8s.io/79391 for an example.
|
||||
@ -6119,7 +6258,7 @@ type NodeStatus struct {
|
||||
// +optional
|
||||
DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"`
|
||||
// Set of ids/uuids to uniquely identify the node.
|
||||
// More info: https://kubernetes.io/docs/concepts/nodes/node/#info
|
||||
// More info: https://kubernetes.io/docs/reference/node/node-status/#info
|
||||
// +optional
|
||||
NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
|
||||
// List of container images on this node
|
||||
@ -6454,10 +6593,13 @@ type NamespaceCondition struct {
|
||||
Type NamespaceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NamespaceConditionType"`
|
||||
// Status of the condition, one of True, False, Unknown.
|
||||
Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
|
||||
// Last time the condition transitioned from one status to another.
|
||||
// +optional
|
||||
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
|
||||
// Unique, one-word, CamelCase reason for the condition's last transition.
|
||||
// +optional
|
||||
Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
|
||||
// Human-readable message indicating details about last transition.
|
||||
// +optional
|
||||
Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
|
||||
}
|
||||
@ -6508,7 +6650,6 @@ type NamespaceList struct {
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||
|
||||
// Binding ties one object to another; for example, a pod is bound to a node by a scheduler.
|
||||
// Deprecated in 1.7, please use the bindings subresource of pods instead.
|
||||
type Binding struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
@ -6528,6 +6669,15 @@ type Preconditions struct {
|
||||
UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
|
||||
}
|
||||
|
||||
const (
|
||||
// LogStreamStdout is the stream type for stdout.
|
||||
LogStreamStdout = "Stdout"
|
||||
// LogStreamStderr is the stream type for stderr.
|
||||
LogStreamStderr = "Stderr"
|
||||
// LogStreamAll represents the combined stdout and stderr.
|
||||
LogStreamAll = "All"
|
||||
)
|
||||
|
||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.0
|
||||
@ -6562,7 +6712,8 @@ type PodLogOptions struct {
|
||||
// +optional
|
||||
Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
|
||||
// If set, the number of lines from the end of the logs to show. If not specified,
|
||||
// logs are shown from the creation of the container or sinceSeconds or sinceTime
|
||||
// logs are shown from the creation of the container or sinceSeconds or sinceTime.
|
||||
// Note that when "TailLines" is specified, "Stream" can only be set to nil or "All".
|
||||
// +optional
|
||||
TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"`
|
||||
// If set, the number of bytes to read from the server before terminating the
|
||||
@ -6579,6 +6730,14 @@ type PodLogOptions struct {
|
||||
// the actual log data coming from the real kubelet).
|
||||
// +optional
|
||||
InsecureSkipTLSVerifyBackend bool `json:"insecureSkipTLSVerifyBackend,omitempty" protobuf:"varint,9,opt,name=insecureSkipTLSVerifyBackend"`
|
||||
|
||||
// Specify which container log stream to return to the client.
|
||||
// Acceptable values are "All", "Stdout" and "Stderr". If not specified, "All" is used, and both stdout and stderr
|
||||
// are returned interleaved.
|
||||
// Note that when "TailLines" is specified, "Stream" can only be set to nil or "All".
|
||||
// +featureGate=PodLogsQuerySplitStreams
|
||||
// +optional
|
||||
Stream *string `json:"stream,omitempty" protobuf:"varint,10,opt,name=stream"`
|
||||
}
|
||||
|
||||
// +k8s:conversion-gen:explicit-from=net/url.Values
|
||||
@ -6779,13 +6938,23 @@ type ObjectReference struct {
|
||||
|
||||
// LocalObjectReference contains enough information to let you locate the
|
||||
// referenced object inside the same namespace.
|
||||
// ---
|
||||
// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
|
||||
// 1. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular
|
||||
// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
|
||||
// Those cannot be well described when embedded.
|
||||
// 2. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
|
||||
// 3. We cannot easily change it. Because this type is embedded in many locations, updates to this type
|
||||
// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.
|
||||
//
|
||||
// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
|
||||
// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
|
||||
// +structType=atomic
|
||||
type LocalObjectReference struct {
|
||||
// Name of the referent.
|
||||
// This field is effectively required, but due to backwards compatibility is
|
||||
// allowed to be empty. Instances of this type with an empty value here are
|
||||
// almost certainly wrong.
|
||||
// TODO: Add other useful fields. apiVersion, kind, uid?
|
||||
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
|
||||
// +optional
|
||||
// +default=""
|
||||
@ -6796,6 +6965,20 @@ type LocalObjectReference struct {
|
||||
|
||||
// TypedLocalObjectReference contains enough information to let you locate the
|
||||
// typed referenced object inside the same namespace.
|
||||
// ---
|
||||
// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
|
||||
// 1. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular
|
||||
// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
|
||||
// Those cannot be well described when embedded.
|
||||
// 2. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
|
||||
// 3. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity
|
||||
// during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple
|
||||
// and the version of the actual struct is irrelevant.
|
||||
// 4. We cannot easily change it. Because this type is embedded in many locations, updates to this type
|
||||
// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.
|
||||
//
|
||||
// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
|
||||
// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
|
||||
// +structType=atomic
|
||||
type TypedLocalObjectReference struct {
|
||||
// APIGroup is the group for the resource being referenced.
|
||||
@ -7729,7 +7912,6 @@ const (
|
||||
)
|
||||
|
||||
// PortStatus represents the error condition of a service port
|
||||
|
||||
type PortStatus struct {
|
||||
// Port is the port number of the service port of which status is recorded here
|
||||
Port int32 `json:"port" protobuf:"varint,1,opt,name=port"`
|
||||
|
131
api/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
generated
vendored
131
api/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
generated
vendored
@ -117,7 +117,7 @@ func (AzureFileVolumeSource) SwaggerDoc() map[string]string {
|
||||
}
|
||||
|
||||
var map_Binding = map[string]string{
|
||||
"": "Binding ties one object to another; for example, a pod is bound to a node by a scheduler. Deprecated in 1.7, please use the bindings subresource of pods instead.",
|
||||
"": "Binding ties one object to another; for example, a pod is bound to a node by a scheduler.",
|
||||
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
|
||||
"target": "The target object that you want to bind to the standard object.",
|
||||
}
|
||||
@ -127,7 +127,7 @@ func (Binding) SwaggerDoc() map[string]string {
|
||||
}
|
||||
|
||||
var map_CSIPersistentVolumeSource = map[string]string{
|
||||
"": "Represents storage that is managed by an external CSI volume driver (Beta feature)",
|
||||
"": "Represents storage that is managed by an external CSI volume driver",
|
||||
"driver": "driver is the name of the driver to use for this volume. Required.",
|
||||
"volumeHandle": "volumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required.",
|
||||
"readOnly": "readOnly value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).",
|
||||
@ -802,6 +802,7 @@ func (GCEPersistentDiskVolumeSource) SwaggerDoc() map[string]string {
|
||||
}
|
||||
|
||||
var map_GRPCAction = map[string]string{
|
||||
"": "GRPCAction specifies an action involving a GRPC service.",
|
||||
"port": "Port number of the gRPC service. Number must be in the range 1 to 65535.",
|
||||
"service": "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\nIf this is not specified, the default behavior is defined by gRPC.",
|
||||
}
|
||||
@ -967,10 +968,10 @@ func (Lifecycle) SwaggerDoc() map[string]string {
|
||||
|
||||
var map_LifecycleHandler = map[string]string{
|
||||
"": "LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified.",
|
||||
"exec": "Exec specifies the action to take.",
|
||||
"httpGet": "HTTPGet specifies the http request to perform.",
|
||||
"tcpSocket": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.",
|
||||
"sleep": "Sleep represents the duration that the container should sleep before being terminated.",
|
||||
"exec": "Exec specifies a command to execute in the container.",
|
||||
"httpGet": "HTTPGet specifies an HTTP GET request to perform.",
|
||||
"tcpSocket": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for backward compatibility. There is no validation of this field and lifecycle hooks will fail at runtime when it is specified.",
|
||||
"sleep": "Sleep represents a duration that the container should sleep.",
|
||||
}
|
||||
|
||||
func (LifecycleHandler) SwaggerDoc() map[string]string {
|
||||
@ -1062,7 +1063,7 @@ func (LocalObjectReference) SwaggerDoc() map[string]string {
|
||||
}
|
||||
|
||||
var map_LocalVolumeSource = map[string]string{
|
||||
"": "Local represents directly-attached storage with node affinity (Beta feature)",
|
||||
"": "Local represents directly-attached storage with node affinity",
|
||||
"path": "path of the full path to the volume on the node. It can be either a directory or block device (disk, partition, ...).",
|
||||
"fsType": "fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default value is to auto-select a filesystem if unspecified.",
|
||||
}
|
||||
@ -1107,6 +1108,9 @@ var map_NamespaceCondition = map[string]string{
|
||||
"": "NamespaceCondition contains details about state of namespace.",
|
||||
"type": "Type of namespace controller condition.",
|
||||
"status": "Status of the condition, one of True, False, Unknown.",
|
||||
"lastTransitionTime": "Last time the condition transitioned from one status to another.",
|
||||
"reason": "Unique, one-word, CamelCase reason for the condition's last transition.",
|
||||
"message": "Human-readable message indicating details about last transition.",
|
||||
}
|
||||
|
||||
func (NamespaceCondition) SwaggerDoc() map[string]string {
|
||||
@ -1315,10 +1319,10 @@ var map_NodeStatus = map[string]string{
|
||||
"capacity": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/reference/node/node-status/#capacity",
|
||||
"allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.",
|
||||
"phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.",
|
||||
"conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition",
|
||||
"addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).",
|
||||
"conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/reference/node/node-status/#condition",
|
||||
"addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/reference/node/node-status/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).",
|
||||
"daemonEndpoints": "Endpoints of daemons running on the Node.",
|
||||
"nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info",
|
||||
"nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/reference/node/node-status/#info",
|
||||
"images": "List of container images on this node",
|
||||
"volumesInUse": "List of attachable volumes in use (mounted) by the node.",
|
||||
"volumesAttached": "List of volumes that are attached to the node.",
|
||||
@ -1398,6 +1402,8 @@ func (PersistentVolumeClaim) SwaggerDoc() map[string]string {
|
||||
|
||||
var map_PersistentVolumeClaimCondition = map[string]string{
|
||||
"": "PersistentVolumeClaimCondition contains details about state of pvc",
|
||||
"type": "Type is the type of the condition. More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=set%20to%20%27ResizeStarted%27.-,PersistentVolumeClaimCondition,-contains%20details%20about",
|
||||
"status": "Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=state%20of%20pvc-,conditions.status,-(string)%2C%20required",
|
||||
"lastProbeTime": "lastProbeTime is the time we probed the condition.",
|
||||
"lastTransitionTime": "lastTransitionTime is the time the condition transitioned from one status to another.",
|
||||
"reason": "reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"Resizing\" that means the underlying persistent volume is being resized.",
|
||||
@ -1483,28 +1489,28 @@ func (PersistentVolumeList) SwaggerDoc() map[string]string {
|
||||
|
||||
var map_PersistentVolumeSource = map[string]string{
|
||||
"": "PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. Exactly one of its members must be set.",
|
||||
"gcePersistentDisk": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
|
||||
"awsElasticBlockStore": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
|
||||
"gcePersistentDisk": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
|
||||
"awsElasticBlockStore": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
|
||||
"hostPath": "hostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
|
||||
"glusterfs": "glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
|
||||
"glusterfs": "glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
|
||||
"nfs": "nfs represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
|
||||
"rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md",
|
||||
"rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. More info: https://examples.k8s.io/volumes/rbd/README.md",
|
||||
"iscsi": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.",
|
||||
"cinder": "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
|
||||
"cephfs": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
|
||||
"cinder": "cinder represents a cinder volume attached and mounted on kubelets host machine. Deprecated: Cinder is deprecated. All operations for the in-tree cinder type are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
|
||||
"cephfs": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.",
|
||||
"fc": "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
|
||||
"flocker": "flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running",
|
||||
"flexVolume": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
|
||||
"azureFile": "azureFile represents an Azure File Service mount on the host and bind mount to the pod.",
|
||||
"vsphereVolume": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
|
||||
"quobyte": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
|
||||
"azureDisk": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
|
||||
"photonPersistentDisk": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
|
||||
"portworxVolume": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine",
|
||||
"scaleIO": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
|
||||
"flocker": "flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running. Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.",
|
||||
"flexVolume": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.",
|
||||
"azureFile": "azureFile represents an Azure File Service mount on the host and bind mount to the pod. Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type are redirected to the file.csi.azure.com CSI driver.",
|
||||
"vsphereVolume": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type are redirected to the csi.vsphere.vmware.com CSI driver.",
|
||||
"quobyte": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime. Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.",
|
||||
"azureDisk": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type are redirected to the disk.csi.azure.com CSI driver.",
|
||||
"photonPersistentDisk": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.",
|
||||
"portworxVolume": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine. Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate is on.",
|
||||
"scaleIO": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.",
|
||||
"local": "local represents directly-attached storage with node affinity",
|
||||
"storageos": "storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://examples.k8s.io/volumes/storageos/README.md",
|
||||
"csi": "csi represents storage that is handled by an external CSI driver (Beta feature).",
|
||||
"storageos": "storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod. Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. More info: https://examples.k8s.io/volumes/storageos/README.md",
|
||||
"csi": "csi represents storage that is handled by an external CSI driver.",
|
||||
}
|
||||
|
||||
func (PersistentVolumeSource) SwaggerDoc() map[string]string {
|
||||
@ -1635,7 +1641,8 @@ func (PodDNSConfig) SwaggerDoc() map[string]string {
|
||||
|
||||
var map_PodDNSConfigOption = map[string]string{
|
||||
"": "PodDNSConfigOption defines DNS resolver options of a pod.",
|
||||
"name": "Required.",
|
||||
"name": "Name is this DNS resolver option's name. Required.",
|
||||
"value": "Value is this DNS resolver option's value.",
|
||||
}
|
||||
|
||||
func (PodDNSConfigOption) SwaggerDoc() map[string]string {
|
||||
@ -1683,9 +1690,10 @@ var map_PodLogOptions = map[string]string{
|
||||
"sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
|
||||
"sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
|
||||
"timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.",
|
||||
"tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime",
|
||||
"tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime. Note that when \"TailLines\" is specified, \"Stream\" can only be set to nil or \"All\".",
|
||||
"limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.",
|
||||
"insecureSkipTLSVerifyBackend": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet).",
|
||||
"stream": "Specify which container log stream to return to the client. Acceptable values are \"All\", \"Stdout\" and \"Stderr\". If not specified, \"All\" is used, and both stdout and stderr are returned interleaved. Note that when \"TailLines\" is specified, \"Stream\" can only be set to nil or \"All\".",
|
||||
}
|
||||
|
||||
func (PodLogOptions) SwaggerDoc() map[string]string {
|
||||
@ -1772,6 +1780,7 @@ var map_PodSecurityContext = map[string]string{
|
||||
"fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.",
|
||||
"seccompProfile": "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.",
|
||||
"appArmorProfile": "appArmorProfile is the AppArmor options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.",
|
||||
"seLinuxChangePolicy": "seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. Valid values are \"MountOption\" and \"Recursive\".\n\n\"Recursive\" means relabeling of all files on all Pod volumes by the container runtime. This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.\n\n\"MountOption\" mounts all eligible Pod volumes with `-o context` mount option. This requires all Pods that share the same volume to use the same SELinux label. It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their CSIDriver instance. Other volumes are always re-labelled recursively. \"MountOption\" value is allowed only when SELinuxMount feature gate is enabled.\n\nIf not specified and SELinuxMount feature gate is enabled, \"MountOption\" is used. If not specified and SELinuxMount feature gate is disabled, \"MountOption\" is used for ReadWriteOncePod volumes and \"Recursive\" for all other volumes.\n\nThis field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.\n\nAll Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. Note that this field cannot be set when spec.os.name is windows.",
|
||||
}
|
||||
|
||||
func (PodSecurityContext) SwaggerDoc() map[string]string {
|
||||
@ -1828,6 +1837,7 @@ var map_PodSpec = map[string]string{
|
||||
"hostUsers": "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.",
|
||||
"schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.",
|
||||
"resourceClaims": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.",
|
||||
"resources": "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\" and \"memory\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate.",
|
||||
}
|
||||
|
||||
func (PodSpec) SwaggerDoc() map[string]string {
|
||||
@ -1846,10 +1856,10 @@ var map_PodStatus = map[string]string{
|
||||
"podIP": "podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.",
|
||||
"podIPs": "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.",
|
||||
"startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.",
|
||||
"initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
|
||||
"containerStatuses": "The list has one entry per container in the manifest. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
|
||||
"initContainerStatuses": "Statuses of init containers in this pod. The most recent successful non-restartable init container will have ready = true, the most recently started container will have startTime set. Each init container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status",
|
||||
"containerStatuses": "Statuses of containers in this pod. Each container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
|
||||
"qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes",
|
||||
"ephemeralContainerStatuses": "Status for any ephemeral containers that have run in this pod.",
|
||||
"ephemeralContainerStatuses": "Statuses for any ephemeral containers that have run in this pod. Each ephemeral container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
|
||||
"resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\"",
|
||||
"resourceClaimStatuses": "Status of resource claims.",
|
||||
}
|
||||
@ -1899,6 +1909,7 @@ func (PodTemplateSpec) SwaggerDoc() map[string]string {
|
||||
}
|
||||
|
||||
var map_PortStatus = map[string]string{
|
||||
"": "PortStatus represents the error condition of a service port",
|
||||
"port": "Port is the port number of the service port of which status is recorded here",
|
||||
"protocol": "Protocol is the protocol of the service port of which status is recorded here The supported values are: \"TCP\", \"UDP\", \"SCTP\"",
|
||||
"error": "Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.",
|
||||
@ -1966,10 +1977,10 @@ func (Probe) SwaggerDoc() map[string]string {
|
||||
|
||||
var map_ProbeHandler = map[string]string{
|
||||
"": "ProbeHandler defines a specific action that should be taken in a probe. One and only one of the fields must be specified.",
|
||||
"exec": "Exec specifies the action to take.",
|
||||
"httpGet": "HTTPGet specifies the http request to perform.",
|
||||
"tcpSocket": "TCPSocket specifies an action involving a TCP port.",
|
||||
"grpc": "GRPC specifies an action involving a GRPC port.",
|
||||
"exec": "Exec specifies a command to execute in the container.",
|
||||
"httpGet": "HTTPGet specifies an HTTP GET request to perform.",
|
||||
"tcpSocket": "TCPSocket specifies a connection to a TCP port.",
|
||||
"grpc": "GRPC specifies a GRPC HealthCheckRequest.",
|
||||
}
|
||||
|
||||
func (ProbeHandler) SwaggerDoc() map[string]string {
|
||||
@ -2125,7 +2136,7 @@ func (ResourceFieldSelector) SwaggerDoc() map[string]string {
|
||||
}
|
||||
|
||||
var map_ResourceHealth = map[string]string{
|
||||
"": "ResourceHealth represents the health of a resource. It has the latest device health information. This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP.",
|
||||
"": "ResourceHealth represents the health of a resource. It has the latest device health information. This is a part of KEP https://kep.k8s.io/4680.",
|
||||
"resourceID": "ResourceID is the unique identifier of the resource. See the ResourceID type for more information.",
|
||||
"health": "Health of the resource. can be one of:\n - Healthy: operates as normal\n - Unhealthy: reported unhealthy. We consider this a temporary health issue\n since we do not have a mechanism today to distinguish\n temporary and permanent issues.\n - Unknown: The status cannot be determined.\n For example, Device Plugin got unregistered and hasn't been re-registered since.\n\nIn future we may want to introduce the PermanentlyUnhealthy Status.",
|
||||
}
|
||||
@ -2188,8 +2199,9 @@ func (ResourceRequirements) SwaggerDoc() map[string]string {
|
||||
}
|
||||
|
||||
var map_ResourceStatus = map[string]string{
|
||||
"name": "Name of the resource. Must be unique within the pod and match one of the resources from the pod spec.",
|
||||
"resources": "List of unique Resources health. Each element in the list contains an unique resource ID and resource health. At a minimum, ResourceID must uniquely identify the Resource allocated to the Pod on the Node for the lifetime of a Pod. See ResourceID type for it's definition.",
|
||||
"": "ResourceStatus represents the status of a single resource allocated to a Pod.",
|
||||
"name": "Name of the resource. Must be unique within the pod and in case of non-DRA resource, match one of the resources from the pod spec. For DRA resources, the value must be \"claim:<claim_name>/<request>\". When this status is reported about a container, the \"claim_name\" and \"request\" must match one of the claims of this container.",
|
||||
"resources": "List of unique resources health. Each element in the list contains an unique resource ID and its health. At a minimum, for the lifetime of a Pod, resource ID must uniquely identify the resource allocated to the Pod on the Node. If other Pod on the same Node reports the status with the same resource ID, it must be the same resource they share. See ResourceID type definition for a specific format it has in various use cases.",
|
||||
}
|
||||
|
||||
func (ResourceStatus) SwaggerDoc() map[string]string {
|
||||
@ -2391,7 +2403,7 @@ func (Service) SwaggerDoc() map[string]string {
|
||||
var map_ServiceAccount = map[string]string{
|
||||
"": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets",
|
||||
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
|
||||
"secrets": "Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. Pods are only limited to this list if this service account has a \"kubernetes.io/enforce-mountable-secrets\" annotation set to \"true\". This field should not be used to find auto-generated service account token secrets for use outside of pods. Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. More info: https://kubernetes.io/docs/concepts/configuration/secret",
|
||||
"secrets": "Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. Pods are only limited to this list if this service account has a \"kubernetes.io/enforce-mountable-secrets\" annotation set to \"true\". The \"kubernetes.io/enforce-mountable-secrets\" annotation is deprecated since v1.32. Prefer separate namespaces to isolate access to mounted secrets. This field should not be used to find auto-generated service account token secrets for use outside of pods. Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. More info: https://kubernetes.io/docs/concepts/configuration/secret",
|
||||
"imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod",
|
||||
"automountServiceAccountToken": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.",
|
||||
}
|
||||
@ -2475,7 +2487,7 @@ var map_ServiceSpec = map[string]string{
	"allocateLoadBalancerNodePorts": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type.",
	"loadBalancerClass": "loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \"internal-vip\" or \"example.com/internal-vip\". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.",
	"internalTrafficPolicy": "InternalTrafficPolicy describes how nodes distribute service traffic they receive on the ClusterIP. If set to \"Local\", the proxy will assume that pods only want to talk to endpoints of the service on the same node as the pod, dropping the traffic if there are no local endpoints. The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features).",
	"trafficDistribution": "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are topologically close (e.g., same zone). This is an alpha field and requires enabling ServiceTrafficDistribution feature.",
	"trafficDistribution": "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are topologically close (e.g., same zone). This is a beta field and requires enabling ServiceTrafficDistribution feature.",
}

func (ServiceSpec) SwaggerDoc() map[string]string {
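trafficDistribution graduating from alpha to beta is the substantive change here. A minimal sketch of setting the field, assuming the PreferClose constant exported by k8s.io/api core/v1 and a cluster with the ServiceTrafficDistribution feature gate enabled:

import corev1 "k8s.io/api/core/v1"

// preferClose hints that traffic for svc should stay topologically close
// (e.g. same zone); implementations may treat this as advisory only.
func preferClose(svc *corev1.Service) {
	td := corev1.ServiceTrafficDistributionPreferClose // "PreferClose"
	svc.Spec.TrafficDistribution = &td
}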
@ -2628,6 +2640,7 @@ func (TypedLocalObjectReference) SwaggerDoc() map[string]string {
}

var map_TypedObjectReference = map[string]string{
	"": "TypedObjectReference contains enough information to let you locate the typed referenced object",
	"apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.",
	"kind": "Kind is the type of resource being referenced",
	"name": "Name is the name of resource being referenced",
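For context, a sketch of a TypedObjectReference pointing at a non-core object, where APIGroup is therefore required; the group, kind, and name are illustrative:

import corev1 "k8s.io/api/core/v1"

// snapshotRef references a VolumeSnapshot outside the core API group,
// so APIGroup must be set.
func snapshotRef() corev1.TypedObjectReference {
	apiGroup := "snapshot.storage.k8s.io"
	return corev1.TypedObjectReference{
		APIGroup: &apiGroup,
		Kind:     "VolumeSnapshot",
		Name:     "my-snapshot", // hypothetical
	}
}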
@ -2720,32 +2733,32 @@ var map_VolumeSource = map[string]string{
	"": "Represents the source of a volume to mount. Only one of its members may be specified.",
	"hostPath": "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
	"emptyDir": "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir",
	"gcePersistentDisk": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
	"awsElasticBlockStore": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
	"gitRepo": "gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
	"gcePersistentDisk": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
	"awsElasticBlockStore": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
	"gitRepo": "gitRepo represents a git repository at a particular revision. Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
	"secret": "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
	"nfs": "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
	"iscsi": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md",
	"glusterfs": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
	"glusterfs": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
	"persistentVolumeClaim": "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
	"rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md",
	"flexVolume": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
	"cinder": "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
	"cephfs": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
	"flocker": "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running",
	"rbd": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. More info: https://examples.k8s.io/volumes/rbd/README.md",
	"flexVolume": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.",
	"cinder": "cinder represents a cinder volume attached and mounted on kubelets host machine. Deprecated: Cinder is deprecated. All operations for the in-tree cinder type are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
	"cephfs": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.",
	"flocker": "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.",
	"downwardAPI": "downwardAPI represents downward API about the pod that should populate this volume",
	"fc": "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
	"azureFile": "azureFile represents an Azure File Service mount on the host and bind mount to the pod.",
	"azureFile": "azureFile represents an Azure File Service mount on the host and bind mount to the pod. Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type are redirected to the file.csi.azure.com CSI driver.",
	"configMap": "configMap represents a configMap that should populate this volume",
	"vsphereVolume": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
	"quobyte": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
	"azureDisk": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
	"photonPersistentDisk": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
	"vsphereVolume": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type are redirected to the csi.vsphere.vmware.com CSI driver.",
	"quobyte": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime. Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.",
	"azureDisk": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type are redirected to the disk.csi.azure.com CSI driver.",
	"photonPersistentDisk": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.",
	"projected": "projected items for all in one resources secrets, configmaps, and downward API",
	"portworxVolume": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine",
	"scaleIO": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
	"storageos": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
	"csi": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).",
	"portworxVolume": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine. Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate is on.",
	"scaleIO": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.",
	"storageos": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.",
	"csi": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.",
	"ephemeral": "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.",
	"image": "image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. The volume is resolved at pod startup depending on which PullPolicy value is provided:\n\n- Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.\n\nThe volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.",
}

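Most of this hunk documents in-tree volume types now redirected to CSI drivers; the "image" entry is the newest addition. A sketch of declaring such a volume, assuming the ImageVolumeSource type shipped with recent k8s.io/api releases; the reference is hypothetical:

import corev1 "k8s.io/api/core/v1"

// imageVolume mounts an OCI artifact read-only into the pod, per the
// "image" field documented above.
func imageVolume() corev1.Volume {
	return corev1.Volume{
		Name: "models", // hypothetical volume name
		VolumeSource: corev1.VolumeSource{
			Image: &corev1.ImageVolumeSource{
				Reference:  "registry.example.com/models:v1", // hypothetical OCI reference
				PullPolicy: corev1.PullIfNotPresent,
			},
		},
	}
}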
15
api/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
generated
vendored
@ -3935,6 +3935,11 @@ func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) {
		*out = new(int64)
		**out = **in
	}
	if in.Stream != nil {
		in, out := &in.Stream, &out.Stream
		*out = new(string)
		**out = **in
	}
	return
}

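The generated code above follows a fixed pattern for pointer fields: allocate a new pointer, then copy the pointed-to value so the copy shares no memory with the original. A hand-written equivalent for a single *string field, for illustration:

// deepCopyStringPtr mirrors the generated idiom: *out = new(string)
// followed by **out = **in.
func deepCopyStringPtr(in *string) *string {
	if in == nil {
		return nil
	}
	out := new(string)
	*out = *in
	return out
}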
@ -4169,6 +4174,11 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
		*out = new(AppArmorProfile)
		(*in).DeepCopyInto(*out)
	}
	if in.SELinuxChangePolicy != nil {
		in, out := &in.SELinuxChangePolicy, &out.SELinuxChangePolicy
		*out = new(PodSELinuxChangePolicy)
		**out = **in
	}
	return
}

@ -4361,6 +4371,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Resources != nil {
		in, out := &in.Resources, &out.Resources
		*out = new(ResourceRequirements)
		(*in).DeepCopyInto(*out)
	}
	return
}

4
api/vendor/k8s.io/api/storage/v1/generated.proto
generated
vendored
@ -491,8 +491,8 @@ message VolumeAttachmentList {
}

// VolumeAttachmentSource represents a volume that should be attached.
// Right now only PersistenVolumes can be attached via external attacher,
// in future we may allow also inline volumes in pods.
// Right now only PersistentVolumes can be attached via external attacher,
// in the future we may allow also inline volumes in pods.
// Exactly one member can be set.
message VolumeAttachmentSource {
  // persistentVolumeName represents the name of the persistent volume to attach.

6
api/vendor/k8s.io/api/storage/v1/types.go
generated
vendored
@ -169,8 +169,8 @@ type VolumeAttachmentSpec struct {
}

// VolumeAttachmentSource represents a volume that should be attached.
// Right now only PersistenVolumes can be attached via external attacher,
// in future we may allow also inline volumes in pods.
// Right now only PersistentVolumes can be attached via external attacher,
// in the future we may allow also inline volumes in pods.
// Exactly one member can be set.
type VolumeAttachmentSource struct {
	// persistentVolumeName represents the name of the persistent volume to attach.
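Since exactly one member may be set, a sketch of the only variant currently supported, attaching by persistent volume name; the helper is illustrative:

import storagev1 "k8s.io/api/storage/v1"

// pvSource builds a VolumeAttachmentSource that names an existing
// PersistentVolume; InlineVolumeSpec is left nil.
func pvSource(pvName string) storagev1.VolumeAttachmentSource {
	return storagev1.VolumeAttachmentSource{PersistentVolumeName: &pvName}
}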
@ -433,7 +433,7 @@ const (
	// ReadWriteOnceWithFSTypeFSGroupPolicy indicates that each volume will be examined
	// to determine if the volume ownership and permissions
	// should be modified. If a fstype is defined and the volume's access mode
	// contains ReadWriteOnce, then the defined fsGroup will be applied.
	// contains ReadWriteOnce or ReadWriteOncePod, then the defined fsGroup will be applied.
	// This mode should be defined if it's expected that the
	// fsGroup may need to be modified depending on the pod's SecurityPolicy.
	// This is the default behavior if no other FSGroupPolicy is defined.

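A sketch of opting a CSIDriver into this policy, assuming the exported FSGroupPolicy constant in k8s.io/api storage/v1:

import storagev1 "k8s.io/api/storage/v1"

// useFSTypePolicy applies fsGroup only when an fstype is defined and the
// access mode includes ReadWriteOnce (or, per this change, ReadWriteOncePod).
func useFSTypePolicy(d *storagev1.CSIDriver) {
	p := storagev1.ReadWriteOnceWithFSTypeFSGroupPolicy
	d.Spec.FSGroupPolicy = &p
}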
2
api/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go
generated
vendored
@ -185,7 +185,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string {
}

var map_VolumeAttachmentSource = map[string]string{
	"": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.",
	"": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistentVolumes can be attached via external attacher, in the future we may allow also inline volumes in pods. Exactly one member can be set.",
	"persistentVolumeName": "persistentVolumeName represents the name of the persistent volume to attach.",
}

39
api/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
generated
vendored
@ -20,7 +20,7 @@ import (
	"bytes"
	"errors"
	"fmt"
	"math"
	math "math"
	"math/big"
	"strconv"
	"strings"
@ -460,9 +460,10 @@ func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) {
	}
}

// AsApproximateFloat64 returns a float64 representation of the quantity which may
// lose precision. If the value of the quantity is outside the range of a float64
// +Inf/-Inf will be returned.
// AsApproximateFloat64 returns a float64 representation of the quantity which
// may lose precision. If precision matter more than performance, see
// AsFloat64Slow. If the value of the quantity is outside the range of a
// float64 +Inf/-Inf will be returned.
func (q *Quantity) AsApproximateFloat64() float64 {
	var base float64
	var exponent int
@ -480,6 +481,36 @@ func (q *Quantity) AsApproximateFloat64() float64 {
	return base * math.Pow10(exponent)
}

// AsFloat64Slow returns a float64 representation of the quantity. This is
// more precise than AsApproximateFloat64 but significantly slower. If the
// value of the quantity is outside the range of a float64 +Inf/-Inf will be
// returned.
func (q *Quantity) AsFloat64Slow() float64 {
	infDec := q.AsDec()

	var absScale int64
	if infDec.Scale() < 0 {
		absScale = int64(-infDec.Scale())
	} else {
		absScale = int64(infDec.Scale())
	}
	pow10AbsScale := big.NewInt(10)
	pow10AbsScale = pow10AbsScale.Exp(pow10AbsScale, big.NewInt(absScale), nil)

	var resultBigFloat *big.Float
	if infDec.Scale() < 0 {
		resultBigInt := new(big.Int).Mul(infDec.UnscaledBig(), pow10AbsScale)
		resultBigFloat = new(big.Float).SetInt(resultBigInt)
	} else {
		pow10AbsScaleFloat := new(big.Float).SetInt(pow10AbsScale)
		resultBigFloat = new(big.Float).SetInt(infDec.UnscaledBig())
		resultBigFloat = resultBigFloat.Quo(resultBigFloat, pow10AbsScaleFloat)
	}

	result, _ := resultBigFloat.Float64()
	return result
}

// AsInt64 returns a representation of the current value as an int64 if a fast conversion
// is possible. If false is returned, callers must use the inf.Dec form of this quantity.
func (q *Quantity) AsInt64() (int64, bool) {

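A small usage sketch of the two conversions; AsFloat64Slow requires apimachinery v0.32.0+, and the sample quantity is arbitrary:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// A decimal value with no exact float64 representation; the fast and
	// slow paths can differ in the least-significant bits.
	q := resource.MustParse("123.456789012345678901")
	fmt.Println(q.AsApproximateFloat64()) // fast: base * math.Pow10(exponent)
	fmt.Println(q.AsFloat64Slow())        // exact decimal first, then one rounding
}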
3
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
generated
vendored
@ -11,6 +11,7 @@ reviewers:
- luxas
- janetkuo
- justinsb
- ncdc
- soltysh
- dims
emeritus_reviewers:
- ncdc

400
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
generated
vendored
@ -1355,187 +1355,190 @@ func init() {
}

var fileDescriptor_a8431b6e0aeeb761 = []byte{
	// 2873 bytes of a gzipped FileDescriptorProto
	// (old descriptor bytes not reproduced here)
	// 2928 bytes of a gzipped FileDescriptorProto
	// (new descriptor bytes not reproduced here)
}

func (m *APIGroup) Marshal() (dAtA []byte, err error) {

@ -1983,6 +1986,16 @@ func (m *DeleteOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if m.IgnoreStoreReadErrorWithClusterBreakingPotential != nil {
i--
if *m.IgnoreStoreReadErrorWithClusterBreakingPotential {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i--
dAtA[i] = 0x30
}
if len(m.DryRun) > 0 {
for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.DryRun[iNdEx])

@ -3773,6 +3786,9 @@ func (m *DeleteOptions) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
if m.IgnoreStoreReadErrorWithClusterBreakingPotential != nil {
n += 2
}
return n
}

@ -4506,6 +4522,7 @@ func (this *DeleteOptions) String() string {
`OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`,
`PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`,
`DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`,
`IgnoreStoreReadErrorWithClusterBreakingPotential:` + valueToStringGenerated(this.IgnoreStoreReadErrorWithClusterBreakingPotential) + `,`,
`}`,
}, "")
return s

@ -6456,6 +6473,27 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error {
}
m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 6:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field IgnoreStoreReadErrorWithClusterBreakingPotential", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
b := bool(v != 0)
m.IgnoreStoreReadErrorWithClusterBreakingPotential = &b
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
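
The case 6 branch above is standard protobuf varint decoding: bytes are consumed 7 bits at a time until one without the continuation bit (0x80) appears, and the accumulated integer is then interpreted as a bool. A minimal standalone sketch of that wire format, not part of this commit (the buffer and names are illustrative only):

package main

import "fmt"

// decodeVarintBool decodes a protobuf varint from the front of data and
// interprets it as a bool, returning the value and the bytes consumed.
func decodeVarintBool(data []byte) (bool, int, error) {
	var v uint64
	for i := 0; i < len(data); i++ {
		b := data[i]
		v |= uint64(b&0x7F) << (7 * uint(i))
		if b < 0x80 { // no continuation bit: the varint ends here
			return v != 0, i + 1, nil
		}
	}
	return false, 0, fmt.Errorf("truncated varint")
}

func main() {
	// In the generated Marshal code, dAtA[i] = 0x30 is the key byte for
	// field 6 with wire type 0 (varint); the value byte follows on the wire.
	val, n, err := decodeVarintBool([]byte{0x01})
	fmt.Println(val, n, err) // true 1 <nil>
}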
15
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
generated
vendored
@ -315,6 +315,21 @@ message DeleteOptions {
// +optional
// +listType=atomic
repeated string dryRun = 5;

// if set to true, it will trigger an unsafe deletion of the resource in
// case the normal deletion flow fails with a corrupt object error.
// A resource is considered corrupt if it can not be retrieved from
// the underlying storage successfully because of a) its data can
// not be transformed e.g. decryption failure, or b) it fails
// to decode into an object.
// NOTE: unsafe deletion ignores finalizer constraints, skips
// precondition checks, and removes the object from the storage.
// WARNING: This may potentially break the cluster if the workload
// associated with the resource being unsafe-deleted relies on normal
// deletion flow. Use only if you REALLY know what you are doing.
// The default value is false, and the user must opt in to enable it
// +optional
optional bool ignoreStoreReadErrorWithClusterBreakingPotential = 6;
}

// Duration is a wrapper around time.Duration which supports correct
45
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
generated
vendored
@ -439,6 +439,20 @@ const (
//
// The annotation is added to a "Bookmark" event.
InitialEventsAnnotationKey = "k8s.io/initial-events-end"

// InitialEventsListBlueprintAnnotationKey is the name of the key
// where an empty, versioned list is encoded in the requested format
// (e.g., protobuf, JSON, CBOR), then base64-encoded and stored as a string.
//
// This encoding matches the request encoding format, which may be
// protobuf, JSON, CBOR, or others, depending on what the client requested.
// This ensures that the reconstructed list can be processed through the
// same decoder chain that would handle a standard LIST call response.
//
// The annotation is added to a "Bookmark" event and is used by clients
// to guarantee the format consistency when reconstructing
// the list during WatchList processing.
InitialEventsListBlueprintAnnotationKey = "kubernetes.io/initial-events-list-blueprint"
)

// resourceVersionMatch specifies how the resourceVersion parameter is applied. resourceVersionMatch

@ -546,6 +560,21 @@ type DeleteOptions struct {
// +optional
// +listType=atomic
DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"`

// if set to true, it will trigger an unsafe deletion of the resource in
// case the normal deletion flow fails with a corrupt object error.
// A resource is considered corrupt if it can not be retrieved from
// the underlying storage successfully because of a) its data can
// not be transformed e.g. decryption failure, or b) it fails
// to decode into an object.
// NOTE: unsafe deletion ignores finalizer constraints, skips
// precondition checks, and removes the object from the storage.
// WARNING: This may potentially break the cluster if the workload
// associated with the resource being unsafe-deleted relies on normal
// deletion flow. Use only if you REALLY know what you are doing.
// The default value is false, and the user must opt in to enable it
// +optional
IgnoreStoreReadErrorWithClusterBreakingPotential *bool `json:"ignoreStoreReadErrorWithClusterBreakingPotential,omitempty" protobuf:"varint,6,opt,name=ignoreStoreReadErrorWithClusterBreakingPotential"`
}

const (

@ -902,6 +931,22 @@ const (
// Status code 500
StatusReasonServerTimeout StatusReason = "ServerTimeout"

// StatusReasonStoreReadError means that the server encountered an error while
// retrieving resources from the backend object store.
// This may be due to backend database error, or because processing of the read
// resource failed.
// Details:
// "kind" string - the kind attribute of the resource being acted on.
// "name" string - the prefix where the reading error(s) occurred
// "causes" []StatusCause
// - (optional):
//   - "type" CauseType - CauseTypeUnexpectedServerResponse
//   - "message" string - the error message from the store backend
//   - "field" string - the full path with the key of the resource that failed reading
//
// Status code 500
StatusReasonStoreReadError StatusReason = "StorageReadError"

// StatusReasonTimeout means that the request could not be completed within the given time.
// Clients can get this response only when they specified a timeout param in the request,
// or if the server cannot complete the operation within a reasonable amount of time.
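
Taken together, the hunks above add an opt-in escape hatch: a corrupt object (one that can no longer be read or decoded from the backing store) can be force-deleted, and reads that hit such objects surface as the new StorageReadError status reason. A minimal sketch of how a caller might populate the new field, not part of this commit; it only uses the vendored metav1 and k8s.io/utils/ptr packages:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

func main() {
	// The field is a *bool that defaults to nil (false); callers must opt in
	// explicitly, accepting that finalizers and preconditions are skipped.
	opts := metav1.DeleteOptions{
		IgnoreStoreReadErrorWithClusterBreakingPotential: ptr.To(true),
	}
	fmt.Println(*opts.IgnoreStoreReadErrorWithClusterBreakingPotential) // true
}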
1
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
generated
vendored
@ -129,6 +129,7 @@ var map_DeleteOptions = map[string]string{
"orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
"propagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
"dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
"ignoreStoreReadErrorWithClusterBreakingPotential": "if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it",
}

func (DeleteOptions) SwaggerDoc() map[string]string {
7
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go
generated
vendored
@ -339,6 +339,13 @@ func autoConvert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptio
} else {
out.DryRun = nil
}
if values, ok := map[string][]string(*in)["ignoreStoreReadErrorWithClusterBreakingPotential"]; ok && len(values) > 0 {
if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.IgnoreStoreReadErrorWithClusterBreakingPotential, s); err != nil {
return err
}
} else {
out.IgnoreStoreReadErrorWithClusterBreakingPotential = nil
}
return nil
}

5
api/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
generated
vendored
@ -290,6 +290,11 @@ func (in *DeleteOptions) DeepCopyInto(out *DeleteOptions) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.IgnoreStoreReadErrorWithClusterBreakingPotential != nil {
in, out := &in.IgnoreStoreReadErrorWithClusterBreakingPotential, &out.IgnoreStoreReadErrorWithClusterBreakingPotential
*out = new(bool)
**out = **in
}
return
}

4
api/vendor/k8s.io/apimachinery/pkg/labels/selector.go
generated
vendored
@ -18,6 +18,7 @@ package labels

import (
"fmt"
"slices"
"sort"
"strconv"
"strings"

@ -27,7 +28,6 @@ import (
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/klog/v2"
stringslices "k8s.io/utils/strings/slices"
)

var (

@ -313,7 +313,7 @@ func (r Requirement) Equal(x Requirement) bool {
if r.operator != x.operator {
return false
}
return stringslices.Equal(r.strValues, x.strValues)
return slices.Equal(r.strValues, x.strValues)
}

// Empty returns true if the internalSelector doesn't restrict selection space
18
api/vendor/k8s.io/apimachinery/pkg/runtime/helper.go
generated
vendored
@ -284,3 +284,21 @@ func (e *encoderWithAllocator) Encode(obj Object, w io.Writer) error {
func (e *encoderWithAllocator) Identifier() Identifier {
return e.encoder.Identifier()
}

type nondeterministicEncoderToEncoderAdapter struct {
NondeterministicEncoder
}

func (e nondeterministicEncoderToEncoderAdapter) Encode(obj Object, w io.Writer) error {
return e.EncodeNondeterministic(obj, w)
}

// UseNondeterministicEncoding returns an Encoder that encodes objects using the provided Encoder's
// EncodeNondeterministic method if it implements NondeterministicEncoder, otherwise it returns the
// provided Encoder as-is.
func UseNondeterministicEncoding(encoder Encoder) Encoder {
if nondeterministic, ok := encoder.(NondeterministicEncoder); ok {
return nondeterministicEncoderToEncoderAdapter{nondeterministic}
}
return encoder
}
13
api/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
generated
vendored
@ -69,6 +69,19 @@ type Encoder interface {
Identifier() Identifier
}

// NondeterministicEncoder is implemented by Encoders that can serialize objects more efficiently in
// cases where the output does not need to be deterministic.
type NondeterministicEncoder interface {
Encoder

// EncodeNondeterministic writes an object to the stream. Unlike the Encode method of
// Encoder, EncodeNondeterministic does not guarantee that any two invocations will write
// the same sequence of bytes to the io.Writer. Any differences will not be significant to a
// generic decoder. For example, map entries and struct fields might be encoded in any
// order.
EncodeNondeterministic(Object, io.Writer) error
}

// MemoryAllocator is responsible for allocating memory.
// By encapsulating memory allocation into its own interface, we can reuse the memory
// across many operations in places we know it can significantly improve the performance.
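
The adapter in helper.go and the interface above form a simple capability check: UseNondeterministicEncoding only changes behavior when the wrapped encoder actually implements NondeterministicEncoder. A sketch of an implementing type, not part of this commit; shuffleEncoder and its pass-through body are hypothetical:

package sketch

import (
	"io"

	"k8s.io/apimachinery/pkg/runtime"
)

// shuffleEncoder embeds a runtime.Encoder (providing Encode and Identifier)
// and adds the optional nondeterministic fast path.
type shuffleEncoder struct{ runtime.Encoder }

// EncodeNondeterministic is free to emit map entries and struct fields in any
// order; here it simply delegates, which is a valid (if pointless) implementation.
func (e shuffleEncoder) EncodeNondeterministic(obj runtime.Object, w io.Writer) error {
	return e.Encode(obj, w)
}

// pick returns an Encoder whose Encode uses the nondeterministic path when
// available, and the original encoder otherwise.
func pick(base runtime.Encoder) runtime.Encoder {
	return runtime.UseNondeterministicEncoding(shuffleEncoder{base})
}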
25
api/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go
generated
vendored
@ -23,14 +23,39 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes"
)

// Marshal serializes a value to CBOR. If there is more than one way to encode the value, it will
// make the same choice as the CBOR implementation of runtime.Serializer.
//
// Note: Support for CBOR is at an alpha stage. If the value (or, for composite types, any of its
// nested values) implement any of the interfaces encoding.TextMarshaler, encoding.TextUnmarshaler,
// encoding/json.Marshaler, or encoding/json.Unmarshaler, a non-nil error will be returned unless
// the value also implements the corresponding CBOR interfaces. This limitation will ultimately be
// removed in favor of automatic transcoding to CBOR.
func Marshal(src interface{}) ([]byte, error) {
if err := modes.RejectCustomMarshalers(src); err != nil {
return nil, err
}
return modes.Encode.Marshal(src)
}

// Unmarshal deserializes from CBOR into an addressable value. If there is more than one way to
// unmarshal a value, it will make the same choice as the CBOR implementation of runtime.Serializer.
//
// Note: Support for CBOR is at an alpha stage. If the value (or, for composite types, any of its
// nested values) implement any of the interfaces encoding.TextMarshaler, encoding.TextUnmarshaler,
// encoding/json.Marshaler, or encoding/json.Unmarshaler, a non-nil error will be returned unless
// the value also implements the corresponding CBOR interfaces. This limitation will ultimately be
// removed in favor of automatic transcoding to CBOR.
func Unmarshal(src []byte, dst interface{}) error {
if err := modes.RejectCustomMarshalers(dst); err != nil {
return err
}
return modes.Decode.Unmarshal(src, dst)
}

// Diagnose accepts well-formed CBOR bytes and returns a string representing the same data item in
// human-readable diagnostic notation (RFC 8949 Section 8). The diagnostic notation is not meant to
// be parsed.
func Diagnose(src []byte) (string, error) {
return modes.Diagnostic.Diagnose(src)
}
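
A usage sketch for these helpers, not part of this commit. The struct and values are illustrative, and per the comments above a type with custom JSON or text marshalers would be rejected:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
)

type replicaSpec struct {
	Name     string `json:"name"`
	Replicas int32  `json:"replicas"`
}

func main() {
	// Marshal makes the same encoding choices as the CBOR runtime.Serializer.
	b, err := direct.Marshal(replicaSpec{Name: "web", Replicas: 3})
	if err != nil {
		panic(err)
	}

	// Diagnose renders the raw bytes in RFC 8949 diagnostic notation, which
	// is handy for debugging but is not meant to be parsed.
	diag, _ := direct.Diagnose(b)
	fmt.Println(diag)

	var out replicaSpec
	if err := direct.Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out)
}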
2
api/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go
generated
vendored
@ -105,7 +105,7 @@ var Encode = EncMode{
var EncodeNondeterministic = EncMode{
delegate: func() cbor.UserBufferEncMode {
opts := Encode.options()
opts.Sort = cbor.SortNone // TODO: Use cbor.SortFastShuffle after bump to v2.7.0.
opts.Sort = cbor.SortFastShuffle
em, err := opts.UserBufferEncMode()
if err != nil {
panic(err)
3
api/vendor/k8s.io/apimachinery/pkg/runtime/types.go
generated
vendored
@ -46,7 +46,8 @@ const (
ContentTypeJSON string = "application/json"
ContentTypeYAML string = "application/yaml"
ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf"
ContentTypeCBOR string = "application/cbor"
ContentTypeCBOR string = "application/cbor" // RFC 8949
ContentTypeCBORSequence string = "application/cbor-seq" // RFC 8742
)

// RawExtension is used to hold extensions in external versions.
4
api/vendor/k8s.io/apimachinery/pkg/types/patch.go
generated
vendored
@ -25,5 +25,7 @@ const (
JSONPatchType PatchType = "application/json-patch+json"
MergePatchType PatchType = "application/merge-patch+json"
StrategicMergePatchType PatchType = "application/strategic-merge-patch+json"
ApplyPatchType PatchType = "application/apply-patch+yaml"
ApplyPatchType PatchType = ApplyYAMLPatchType
ApplyYAMLPatchType PatchType = "application/apply-patch+yaml"
ApplyCBORPatchType PatchType = "application/apply-patch+cbor"
)
4
api/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
generated
vendored
@ -45,7 +45,7 @@ var PanicHandlers = []func(context.Context, interface{}){logPanic}
//
// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully.
//
// TODO(pohly): logcheck:context // HandleCrashWithContext should be used instead of HandleCrash in code which supports contextual logging.
// Contextual logging: HandleCrashWithContext should be used instead of HandleCrash in code which supports contextual logging.
func HandleCrash(additionalHandlers ...func(interface{})) {
if r := recover(); r != nil {
additionalHandlersWithContext := make([]func(context.Context, interface{}), len(additionalHandlers))

@ -146,7 +146,7 @@ type ErrorHandler func(ctx context.Context, err error, msg string, keysAndValues
// is preferable to logging the error - the default behavior is to log but the
// errors may be sent to a remote server for analysis.
//
// TODO(pohly): logcheck:context // HandleErrorWithContext should be used instead of HandleError in code which supports contextual logging.
// Contextual logging: HandleErrorWithContext should be used instead of HandleError in code which supports contextual logging.
func HandleError(err error) {
// this is sometimes called with a nil error. We probably shouldn't fail and should do nothing instead
if err == nil {
26
api/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
generated
vendored
@ -220,26 +220,24 @@ func Forbidden(field *Path, detail string) *Error {
return &Error{ErrorTypeForbidden, field.String(), "", detail}
}

// TooLong returns a *Error indicating "too long". This is used to
// report that the given value is too long. This is similar to
// Invalid, but the returned error will not include the too-long
// value.
// TooLong returns a *Error indicating "too long". This is used to report that
// the given value is too long. This is similar to Invalid, but the returned
// error will not include the too-long value. If maxLength is negative, it will
// be included in the message. The value argument is not used.
func TooLong(field *Path, value interface{}, maxLength int) *Error {
return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d bytes", maxLength)}
}

// TooLongMaxLength returns a *Error indicating "too long". This is used to
// report that the given value is too long. This is similar to
// Invalid, but the returned error will not include the too-long
// value. If maxLength is negative, no max length will be included in the message.
func TooLongMaxLength(field *Path, value interface{}, maxLength int) *Error {
var msg string
if maxLength >= 0 {
msg = fmt.Sprintf("may not be longer than %d", maxLength)
msg = fmt.Sprintf("may not be more than %d bytes", maxLength)
} else {
msg = "value is too long"
}
return &Error{ErrorTypeTooLong, field.String(), value, msg}
return &Error{ErrorTypeTooLong, field.String(), "<value omitted>", msg}
}

// TooLongMaxLength returns a *Error indicating "too long".
// Deprecated: Use TooLong instead.
func TooLongMaxLength(field *Path, value interface{}, maxLength int) *Error {
return TooLong(field, "", maxLength)
}

// TooMany returns a *Error indicating "too many". This is used to
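
The net effect of this hunk: TooLong now builds the message itself, always omits the offending value, and TooLongMaxLength becomes a thin deprecated wrapper. A migration sketch, not part of this commit (validateName and maxLen are illustrative):

package sketch

import "k8s.io/apimachinery/pkg/util/validation/field"

func validateName(p *field.Path, name string) *field.Error {
	const maxLen = 63
	if len(name) > maxLen {
		// Previously: field.TooLongMaxLength(p, name, maxLen), now deprecated.
		// The returned error reads "may not be more than 63 bytes" and never
		// echoes the too-long value back to the caller.
		return field.TooLong(p, name, maxLen)
	}
	return nil
}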
19
api/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
generated
vendored
@ -175,6 +175,8 @@ func IsValidLabelValue(value string) []string {
}

const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?"
const dns1123LabelFmtWithUnderscore string = "_?[a-z0-9]([-_a-z0-9]*[a-z0-9])?"

const dns1123LabelErrMsg string = "a lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character"

// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123)

@ -204,10 +206,14 @@ func IsDNS1123Label(value string) []string {
const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*"
const dns1123SubdomainErrorMsg string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character"

const dns1123SubdomainFmtWithUnderscore string = dns1123LabelFmtWithUnderscore + "(\\." + dns1123LabelFmtWithUnderscore + ")*"
const dns1123SubdomainErrorMsgFG string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '_', '-' or '.', and must start and end with an alphanumeric character"

// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123)
const DNS1123SubdomainMaxLength int = 253

var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$")
var dns1123SubdomainRegexpWithUnderscore = regexp.MustCompile("^" + dns1123SubdomainFmtWithUnderscore + "$")

// IsDNS1123Subdomain tests for a string that conforms to the definition of a
// subdomain in DNS (RFC 1123).

@ -222,6 +228,19 @@ func IsDNS1123Subdomain(value string) []string {
return errs
}

// IsDNS1123SubdomainWithUnderscore tests for a string that conforms to the definition of a
// subdomain in DNS (RFC 1123), but allows the use of an underscore in the string
func IsDNS1123SubdomainWithUnderscore(value string) []string {
var errs []string
if len(value) > DNS1123SubdomainMaxLength {
errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength))
}
if !dns1123SubdomainRegexpWithUnderscore.MatchString(value) {
errs = append(errs, RegexError(dns1123SubdomainErrorMsgFG, dns1123SubdomainFmt, "example.com"))
}
return errs
}

const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?"
const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character"
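
A usage sketch for the new validator, not part of this commit; the hostnames are illustrative. Note the underscore-tolerant pattern still requires each label to end with a lowercase alphanumeric character:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	// Accepted: underscores may appear inside (or lead) a label.
	fmt.Println(validation.IsDNS1123SubdomainWithUnderscore("kafka_topic.example.com")) // []

	// Rejected: uppercase characters are still not allowed.
	fmt.Println(validation.IsDNS1123SubdomainWithUnderscore("Kafka_Topic.example.com"))
}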
82
api/vendor/k8s.io/utils/strings/slices/slices.go
generated
vendored
@ -1,82 +0,0 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package slices defines various functions useful with slices of string type.
// The goal is to be as close as possible to
// https://github.com/golang/go/issues/45955. Ideal would be if we can just
// replace "stringslices" if the "slices" package becomes standard.
package slices

// Equal reports whether two slices are equal: the same length and all
// elements equal. If the lengths are different, Equal returns false.
// Otherwise, the elements are compared in index order, and the
// comparison stops at the first unequal pair.
func Equal(s1, s2 []string) bool {
if len(s1) != len(s2) {
return false
}
for i, n := range s1 {
if n != s2[i] {
return false
}
}
return true
}

// Filter appends to d each element e of s for which keep(e) returns true.
// It returns the modified d. d may be s[:0], in which case the kept
// elements will be stored in the same slice.
// if the slices overlap in some other way, the results are unspecified.
// To create a new slice with the filtered results, pass nil for d.
func Filter(d, s []string, keep func(string) bool) []string {
for _, n := range s {
if keep(n) {
d = append(d, n)
}
}
return d
}

// Contains reports whether v is present in s.
func Contains(s []string, v string) bool {
return Index(s, v) >= 0
}

// Index returns the index of the first occurrence of v in s, or -1 if
// not present.
func Index(s []string, v string) int {
// "Contains" may be replaced with "Index(s, v) >= 0":
// https://github.com/golang/go/issues/45955#issuecomment-873377947
for i, n := range s {
if n == v {
return i
}
}
return -1
}

// Functions below are not in https://github.com/golang/go/issues/45955

// Clone returns a new clone of s.
func Clone(s []string) []string {
// https://github.com/go101/go101/wiki/There-is-not-a-perfect-way-to-clone-slices-in-Go
if s == nil {
return nil
}
c := make([]string, len(s))
copy(c, s)
return c
}
26
api/vendor/modules.txt
vendored
@ -21,6 +21,8 @@ github.com/google/gofuzz/bytesource
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
# github.com/kr/text v0.2.0
## explicit
# github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
## explicit
github.com/modern-go/concurrent

@ -41,13 +43,13 @@ github.com/stretchr/testify/require
# github.com/x448/float16 v0.8.4
## explicit; go 1.11
github.com/x448/float16
# golang.org/x/net v0.26.0
# golang.org/x/net v0.30.0
## explicit; go 1.18
golang.org/x/net/http/httpguts
golang.org/x/net/http2
golang.org/x/net/http2/hpack
golang.org/x/net/idna
# golang.org/x/text v0.16.0
# golang.org/x/text v0.19.0
## explicit; go 1.18
golang.org/x/text/secure/bidirule
golang.org/x/text/transform

@ -62,13 +64,13 @@ gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.1
## explicit
gopkg.in/yaml.v3
# k8s.io/api v0.31.3
## explicit; go 1.22.0
# k8s.io/api v0.32.0
## explicit; go 1.23.0
k8s.io/api/core/v1
k8s.io/api/rbac/v1
k8s.io/api/storage/v1
# k8s.io/apimachinery v0.31.3
## explicit; go 1.22.0
# k8s.io/apimachinery v0.32.0
## explicit; go 1.23.0
k8s.io/apimachinery/pkg/api/resource
k8s.io/apimachinery/pkg/apis/meta/v1
k8s.io/apimachinery/pkg/conversion

@ -101,16 +103,18 @@ k8s.io/klog/v2/internal/dbg
k8s.io/klog/v2/internal/serialize
k8s.io/klog/v2/internal/severity
k8s.io/klog/v2/internal/sloghandler
# k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
# k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
## explicit; go 1.18
k8s.io/utils/internal/third_party/forked/golang/net
k8s.io/utils/net
k8s.io/utils/ptr
k8s.io/utils/strings/slices
# sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd
## explicit; go 1.18
# sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3
## explicit; go 1.21
sigs.k8s.io/json
sigs.k8s.io/json/internal/golang/encoding/json
# sigs.k8s.io/structured-merge-diff/v4 v4.4.1
# sigs.k8s.io/structured-merge-diff/v4 v4.4.2
## explicit; go 1.13
sigs.k8s.io/structured-merge-diff/v4/value
# sigs.k8s.io/yaml v1.4.0
## explicit; go 1.12
sigs.k8s.io/yaml/goyaml.v2
2
api/vendor/sigs.k8s.io/json/Makefile
generated
vendored
@ -19,7 +19,7 @@ vet:
go vet sigs.k8s.io/json

@echo "checking for external dependencies"
@deps=$$(go mod graph); \
@deps=$$(go list -f '{{ if not (or .Standard .Module.Main) }}{{.ImportPath}}{{ end }}' -deps sigs.k8s.io/json/... || true); \
if [ -n "$${deps}" ]; then \
echo "only stdlib dependencies allowed, found:"; \
echo "$${deps}"; \
2
api/vendor/sigs.k8s.io/json/OWNERS
generated
vendored
@ -2,5 +2,5 @@

approvers:
- deads2k
- lavalamp
- jpbetz
- liggitt
140
api/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go
generated
vendored
@ -21,10 +21,10 @@ import (

// Unmarshal parses the JSON-encoded data and stores the result
// in the value pointed to by v. If v is nil or not a pointer,
// Unmarshal returns an InvalidUnmarshalError.
// Unmarshal returns an [InvalidUnmarshalError].
//
// Unmarshal uses the inverse of the encodings that
// Marshal uses, allocating maps, slices, and pointers as necessary,
// [Marshal] uses, allocating maps, slices, and pointers as necessary,
// with the following additional rules:
//
// To unmarshal JSON into a pointer, Unmarshal first handles the case of

@ -33,28 +33,28 @@ import (
// the value pointed at by the pointer. If the pointer is nil, Unmarshal
// allocates a new value for it to point to.
//
// To unmarshal JSON into a value implementing the Unmarshaler interface,
// Unmarshal calls that value's UnmarshalJSON method, including
// To unmarshal JSON into a value implementing [Unmarshaler],
// Unmarshal calls that value's [Unmarshaler.UnmarshalJSON] method, including
// when the input is a JSON null.
// Otherwise, if the value implements encoding.TextUnmarshaler
// and the input is a JSON quoted string, Unmarshal calls that value's
// UnmarshalText method with the unquoted form of the string.
// Otherwise, if the value implements [encoding.TextUnmarshaler]
// and the input is a JSON quoted string, Unmarshal calls
// [encoding.TextUnmarshaler.UnmarshalText] with the unquoted form of the string.
//
// To unmarshal JSON into a struct, Unmarshal matches incoming object
// keys to the keys used by Marshal (either the struct field name or its tag),
// keys to the keys used by [Marshal] (either the struct field name or its tag),
// preferring an exact match but also accepting a case-insensitive match. By
// default, object keys which don't have a corresponding struct field are
// ignored (see Decoder.DisallowUnknownFields for an alternative).
// ignored (see [Decoder.DisallowUnknownFields] for an alternative).
//
// To unmarshal JSON into an interface value,
// Unmarshal stores one of these in the interface value:
//
// bool, for JSON booleans
// float64, for JSON numbers
// string, for JSON strings
// []interface{}, for JSON arrays
// map[string]interface{}, for JSON objects
// nil for JSON null
// - bool, for JSON booleans
// - float64, for JSON numbers
// - string, for JSON strings
// - []interface{}, for JSON arrays
// - map[string]interface{}, for JSON objects
// - nil for JSON null
//
// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
// to zero and then appends each element to the slice.

@ -72,16 +72,15 @@ import (
// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal
// reuses the existing map, keeping existing entries. Unmarshal then stores
// key-value pairs from the JSON object into the map. The map's key type must
// either be any string type, an integer, implement json.Unmarshaler, or
// implement encoding.TextUnmarshaler.
// either be any string type, an integer, or implement [encoding.TextUnmarshaler].
//
// If the JSON-encoded data contain a syntax error, Unmarshal returns a SyntaxError.
// If the JSON-encoded data contain a syntax error, Unmarshal returns a [SyntaxError].
//
// If a JSON value is not appropriate for a given target type,
// or if a JSON number overflows the target type, Unmarshal
// skips that field and completes the unmarshaling as best it can.
// If no more serious errors are encountered, Unmarshal returns
// an UnmarshalTypeError describing the earliest such error. In any
// an [UnmarshalTypeError] describing the earliest such error. In any
// case, it's not guaranteed that all the remaining fields following
// the problematic one will be unmarshaled into the target object.
//

@ -119,7 +118,7 @@ func Unmarshal(data []byte, v any, opts ...UnmarshalOpt) error {
// a JSON value. UnmarshalJSON must copy the JSON data
// if it wishes to retain the data after returning.
//
// By convention, to approximate the behavior of Unmarshal itself,
// By convention, to approximate the behavior of [Unmarshal] itself,
// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op.
type Unmarshaler interface {
UnmarshalJSON([]byte) error

@ -157,8 +156,8 @@ func (e *UnmarshalFieldError) Error() string {
return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
}

// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
// (The argument to Unmarshal must be a non-nil pointer.)
// An InvalidUnmarshalError describes an invalid argument passed to [Unmarshal].
// (The argument to [Unmarshal] must be a non-nil pointer.)
type InvalidUnmarshalError struct {
Type reflect.Type
}

@ -573,17 +572,10 @@ func (d *decodeState) array(v reflect.Value) error {
break
}

// Get element of array, growing if necessary.
// Expand slice length, growing the slice if necessary.
if v.Kind() == reflect.Slice {
// Grow slice if necessary
if i >= v.Cap() {
newcap := v.Cap() + v.Cap()/2
if newcap < 4 {
newcap = 4
}
newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
reflect.Copy(newv, v)
v.Set(newv)
v.Grow(1)
}
if i >= v.Len() {
v.SetLen(i + 1)

@ -620,13 +612,11 @@ func (d *decodeState) array(v reflect.Value) error {

if i < v.Len() {
if v.Kind() == reflect.Array {
// Array. Zero the rest.
z := reflect.Zero(v.Type().Elem())
for ; i < v.Len(); i++ {
v.Index(i).Set(z)
v.Index(i).SetZero() // zero remainder of array
}
} else {
v.SetLen(i)
v.SetLen(i) // truncate the slice
}
}
if i == 0 && v.Kind() == reflect.Slice {

@ -636,7 +626,7 @@ func (d *decodeState) array(v reflect.Value) error {
}

var nullLiteral = []byte("null")
var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
var textUnmarshalerType = reflect.TypeFor[encoding.TextUnmarshaler]()

// object consumes an object from d.data[d.off-1:], decoding into v.
// The first byte ('{') of the object has been read already.

@ -776,7 +766,7 @@ func (d *decodeState) object(v reflect.Value) error {
if !mapElem.IsValid() {
mapElem = reflect.New(elemType).Elem()
} else {
mapElem.Set(reflect.Zero(elemType))
mapElem.SetZero()
}
subv = mapElem
if checkDuplicateField != nil {

@ -784,28 +774,14 @@ func (d *decodeState) object(v reflect.Value) error {
}
d.appendStrictFieldStackKey(string(key))
} else {
var f *field
if i, ok := fields.nameIndex[string(key)]; ok {
// Found an exact name match.
f = &fields.list[i]
if checkDuplicateField != nil {
checkDuplicateField(i, f.name)
}
} else if !d.caseSensitive {
// Fall back to the expensive case-insensitive
// linear search.
for i := range fields.list {
ff := &fields.list[i]
if ff.equalFold(ff.nameBytes, key) {
f = ff
if checkDuplicateField != nil {
checkDuplicateField(i, f.name)
}
break
}
}
f := fields.byExactName[string(key)]
if f == nil && !d.caseSensitive {
f = fields.byFoldedName[string(foldName(key))]
}
if f != nil {
if checkDuplicateField != nil {
checkDuplicateField(f.listIndex, f.name)
}
subv = v
destring = f.quoted
for _, i := range f.index {

@ -874,33 +850,35 @@ func (d *decodeState) object(v reflect.Value) error {
if v.Kind() == reflect.Map {
kt := t.Key()
var kv reflect.Value
switch {
case reflect.PointerTo(kt).Implements(textUnmarshalerType):
if reflect.PointerTo(kt).Implements(textUnmarshalerType) {
kv = reflect.New(kt)
if err := d.literalStore(item, kv, true); err != nil {
return err
}
kv = kv.Elem()
case kt.Kind() == reflect.String:
kv = reflect.ValueOf(key).Convert(kt)
default:
} else {
switch kt.Kind() {
case reflect.String:
kv = reflect.New(kt).Elem()
kv.SetString(string(key))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
s := string(key)
n, err := strconv.ParseInt(s, 10, 64)
if err != nil || reflect.Zero(kt).OverflowInt(n) {
if err != nil || kt.OverflowInt(n) {
d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
break
}
kv = reflect.ValueOf(n).Convert(kt)
kv = reflect.New(kt).Elem()
kv.SetInt(n)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
s := string(key)
n, err := strconv.ParseUint(s, 10, 64)
if err != nil || reflect.Zero(kt).OverflowUint(n) {
if err != nil || kt.OverflowUint(n) {
d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
break
}
kv = reflect.ValueOf(n).Convert(kt)
kv = reflect.New(kt).Elem()
kv.SetUint(n)
default:
panic("json: Unexpected key type") // should never occur
}

@ -950,12 +928,12 @@ func (d *decodeState) convertNumber(s string) (any, error) {

f, err := strconv.ParseFloat(s, 64)
if err != nil {
return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeOf(0.0), Offset: int64(d.off)}
return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeFor[float64](), Offset: int64(d.off)}
}
return f, nil
}

var numberType = reflect.TypeOf(Number(""))
var numberType = reflect.TypeFor[Number]()

// literalStore decodes a literal stored in item into v.
//

@ -965,7 +943,7 @@ var numberType = reflect.TypeOf(Number(""))
func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) error {
// Check for unmarshaler.
if len(item) == 0 {
//Empty string given
// Empty string given.
d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
return nil
}

@ -1012,7 +990,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
}
switch v.Kind() {
case reflect.Interface, reflect.Pointer, reflect.Map, reflect.Slice:
v.Set(reflect.Zero(v.Type()))
v.SetZero()
// otherwise, ignore null for primitives/string
}
case 't', 'f': // true, false

@ -1064,10 +1042,11 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
}
v.SetBytes(b[:n])
case reflect.String:
if v.Type() == numberType && !isValidNumber(string(s)) {
t := string(s)
if v.Type() == numberType && !isValidNumber(t) {
return fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)
}
v.SetString(string(s))
v.SetString(t)
case reflect.Interface:
if v.NumMethod() == 0 {
v.Set(reflect.ValueOf(string(s)))

@ -1083,13 +1062,12 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
}
panic(phasePanicMsg)
}
s := string(item)
switch v.Kind() {
default:
if v.Kind() == reflect.String && v.Type() == numberType {
// s must be a valid number, because it's
// already been tokenized.
v.SetString(s)
v.SetString(string(item))
break
}
if fromQuoted {

@ -1097,7 +1075,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
}
d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())})
case reflect.Interface:
n, err := d.convertNumber(s)
n, err := d.convertNumber(string(item))
if err != nil {
d.saveError(err)
break

@ -1109,25 +1087,25 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
v.Set(reflect.ValueOf(n))

case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
n, err := strconv.ParseInt(s, 10, 64)
n, err := strconv.ParseInt(string(item), 10, 64)
if err != nil || v.OverflowInt(n) {
d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())})
break
}
v.SetInt(n)

case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
n, err := strconv.ParseUint(s, 10, 64)
n, err := strconv.ParseUint(string(item), 10, 64)
if err != nil || v.OverflowUint(n) {
d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())})
break
}
v.SetUint(n)

case reflect.Float32, reflect.Float64:
n, err := strconv.ParseFloat(s, v.Type().Bits())
n, err := strconv.ParseFloat(string(item), v.Type().Bits())
if err != nil || v.OverflowFloat(n) {
d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
d.saveError(&UnmarshalTypeError{Value: "number " + string(item), Type: v.Type(), Offset: int64(d.readIndex())})
break
}
v.SetFloat(n)
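
The doc-comment churn above is mostly godoc link syntax, but the listed defaults are the observable behavior. Since this package is a fork of the standard encoding/json, the same contract can be shown with the stdlib (a sketch, not part of this commit):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Unmarshaling into an interface value yields the documented defaults:
	// map[string]interface{} for objects, float64 for numbers, bool, string,
	// []interface{}, and nil for null.
	var v any
	if err := json.Unmarshal([]byte(`{"replicas": 3, "paused": false}`), &v); err != nil {
		panic(err)
	}
	m := v.(map[string]interface{})
	fmt.Printf("%T %T\n", m["replicas"], m["paused"]) // float64 bool
}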
488
api/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go
generated
vendored
@ -12,12 +12,13 @@ package json
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"encoding"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"sort"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@ -28,29 +29,30 @@ import (
|
||||
// Marshal returns the JSON encoding of v.
|
||||
//
|
||||
// Marshal traverses the value v recursively.
|
||||
// If an encountered value implements the Marshaler interface
|
||||
// and is not a nil pointer, Marshal calls its MarshalJSON method
|
||||
// to produce JSON. If no MarshalJSON method is present but the
|
||||
// value implements encoding.TextMarshaler instead, Marshal calls
|
||||
// its MarshalText method and encodes the result as a JSON string.
|
||||
// If an encountered value implements [Marshaler]
|
||||
// and is not a nil pointer, Marshal calls [Marshaler.MarshalJSON]
|
||||
// to produce JSON. If no [Marshaler.MarshalJSON] method is present but the
|
||||
// value implements [encoding.TextMarshaler] instead, Marshal calls
|
||||
// [encoding.TextMarshaler.MarshalText] and encodes the result as a JSON string.
|
||||
// The nil pointer exception is not strictly necessary
|
||||
// but mimics a similar, necessary exception in the behavior of
|
||||
// UnmarshalJSON.
|
||||
// [Unmarshaler.UnmarshalJSON].
|
||||
//
|
||||
// Otherwise, Marshal uses the following type-dependent default encodings:
|
||||
//
|
||||
// Boolean values encode as JSON booleans.
|
||||
//
|
||||
// Floating point, integer, and Number values encode as JSON numbers.
|
||||
// Floating point, integer, and [Number] values encode as JSON numbers.
|
||||
// NaN and +/-Inf values will return an [UnsupportedValueError].
|
||||
//
|
||||
// String values encode as JSON strings coerced to valid UTF-8,
|
||||
// replacing invalid bytes with the Unicode replacement rune.
|
||||
// So that the JSON will be safe to embed inside HTML <script> tags,
|
||||
// the string is encoded using HTMLEscape,
|
||||
// the string is encoded using [HTMLEscape],
|
||||
// which replaces "<", ">", "&", U+2028, and U+2029 are escaped
|
||||
// to "\u003c","\u003e", "\u0026", "\u2028", and "\u2029".
|
||||
// This replacement can be disabled when using an Encoder,
|
||||
// by calling SetEscapeHTML(false).
|
||||
// This replacement can be disabled when using an [Encoder],
|
||||
// by calling [Encoder.SetEscapeHTML](false).
|
||||
//
|
||||
// Array and slice values encode as JSON arrays, except that
|
||||
// []byte encodes as a base64-encoded string, and a nil slice
|
||||
@ -107,7 +109,7 @@ import (
|
||||
// only Unicode letters, digits, and ASCII punctuation except quotation
|
||||
// marks, backslash, and comma.
|
||||
//
|
||||
// Anonymous struct fields are usually marshaled as if their inner exported fields
|
||||
// Embedded struct fields are usually marshaled as if their inner exported fields
|
||||
// were fields in the outer struct, subject to the usual Go visibility rules amended
|
||||
// as described in the next paragraph.
|
||||
// An anonymous struct field with a name given in its JSON tag is treated as
|
||||
@ -134,11 +136,11 @@ import (
|
||||
// a JSON tag of "-".
|
||||
//
|
||||
// Map values encode as JSON objects. The map's key type must either be a
|
||||
// string, an integer type, or implement encoding.TextMarshaler. The map keys
|
||||
// string, an integer type, or implement [encoding.TextMarshaler]. The map keys
|
||||
// are sorted and used as JSON object keys by applying the following rules,
|
||||
// subject to the UTF-8 coercion described for string values above:
|
||||
// - keys of any string type are used directly
|
||||
// - encoding.TextMarshalers are marshaled
|
||||
// - keys that implement [encoding.TextMarshaler] are marshaled
|
||||
// - integer keys are converted to strings
|
||||
//
|
||||
// Pointer values encode as the value pointed to.
|
||||
@ -149,13 +151,14 @@ import (
|
||||
//
|
||||
// Channel, complex, and function values cannot be encoded in JSON.
|
||||
// Attempting to encode such a value causes Marshal to return
|
||||
// an UnsupportedTypeError.
|
||||
// an [UnsupportedTypeError].
|
||||
//
|
||||
// JSON cannot represent cyclic data structures and Marshal does not
|
||||
// handle them. Passing cyclic structures to Marshal will result in
|
||||
// an error.
|
||||
func Marshal(v any) ([]byte, error) {
|
||||
e := newEncodeState()
|
||||
defer encodeStatePool.Put(e)
|
||||
|
||||
err := e.marshal(v, encOpts{escapeHTML: true})
|
||||
if err != nil {
|
||||
@ -163,12 +166,10 @@ func Marshal(v any) ([]byte, error) {
|
||||
}
|
||||
buf := append([]byte(nil), e.Bytes()...)
|
||||
|
||||
encodeStatePool.Put(e)
|
||||
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// MarshalIndent is like Marshal but applies Indent to format the output.
|
||||
// MarshalIndent is like [Marshal] but applies [Indent] to format the output.
|
||||
// Each JSON element in the output will begin on a new line beginning with prefix
|
||||
// followed by one or more copies of indent according to the indentation nesting.
|
||||
func MarshalIndent(v any, prefix, indent string) ([]byte, error) {
|
||||
@ -176,47 +177,12 @@ func MarshalIndent(v any, prefix, indent string) ([]byte, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
err = Indent(&buf, b, prefix, indent)
|
||||
b2 := make([]byte, 0, indentGrowthFactor*len(b))
|
||||
b2, err = appendIndent(b2, b, prefix, indent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
|
||||
// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
|
||||
// so that the JSON will be safe to embed inside HTML <script> tags.
|
||||
// For historical reasons, web browsers don't honor standard HTML
|
||||
// escaping within <script> tags, so an alternative JSON encoding must
|
||||
// be used.
|
||||
func HTMLEscape(dst *bytes.Buffer, src []byte) {
|
||||
// The characters can only appear in string literals,
|
||||
// so just scan the string one byte at a time.
|
||||
start := 0
|
||||
for i, c := range src {
|
||||
if c == '<' || c == '>' || c == '&' {
|
||||
if start < i {
|
||||
dst.Write(src[start:i])
|
||||
}
|
||||
dst.WriteString(`\u00`)
|
||||
dst.WriteByte(hex[c>>4])
|
||||
dst.WriteByte(hex[c&0xF])
|
||||
start = i + 1
|
||||
}
|
||||
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
|
||||
if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
|
||||
if start < i {
|
||||
dst.Write(src[start:i])
|
||||
}
|
||||
dst.WriteString(`\u202`)
|
||||
dst.WriteByte(hex[src[i+2]&0xF])
|
||||
start = i + 3
|
||||
}
|
||||
}
|
||||
if start < len(src) {
|
||||
dst.Write(src[start:])
|
||||
}
|
||||
return b2, nil
|
||||
}

// Marshaler is the interface implemented by types that
@@ -225,7 +191,7 @@ type Marshaler interface {
MarshalJSON() ([]byte, error)
}
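
A type opts into custom encoding by implementing this single method. A small illustrative sketch (the Celsius type and its output format are assumptions for the example, not part of this package):

package main

import (
	"encoding/json"
	"fmt"
)

// Celsius demonstrates a custom Marshaler implementation.
type Celsius float64

// MarshalJSON must return valid JSON; the encoder re-validates and compacts it.
func (c Celsius) MarshalJSON() ([]byte, error) {
	return []byte(fmt.Sprintf(`{"value":%g,"unit":"C"}`, float64(c))), nil
}

func main() {
	b, _ := json.Marshal(map[string]Celsius{"cpu": 71.5})
	fmt.Println(string(b)) // {"cpu":{"value":71.5,"unit":"C"}}
}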

// An UnsupportedTypeError is returned by Marshal when attempting
// An UnsupportedTypeError is returned by [Marshal] when attempting
// to encode an unsupported value type.
type UnsupportedTypeError struct {
Type reflect.Type
@@ -235,7 +201,7 @@ func (e *UnsupportedTypeError) Error() string {
return "json: unsupported type: " + e.Type.String()
}

// An UnsupportedValueError is returned by Marshal when attempting
// An UnsupportedValueError is returned by [Marshal] when attempting
// to encode an unsupported value.
type UnsupportedValueError struct {
Value reflect.Value
@@ -246,9 +212,9 @@ func (e *UnsupportedValueError) Error() string {
return "json: unsupported value: " + e.Str
}

// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when
// Before Go 1.2, an InvalidUTF8Error was returned by [Marshal] when
// attempting to encode a string value with invalid UTF-8 sequences.
// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by
// As of Go 1.2, [Marshal] instead coerces the string to valid UTF-8 by
// replacing invalid bytes with the Unicode replacement rune U+FFFD.
//
// Deprecated: No longer used; kept for compatibility.
@@ -260,7 +226,8 @@ func (e *InvalidUTF8Error) Error() string {
return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
}

// A MarshalerError represents an error from calling a MarshalJSON or MarshalText method.
// A MarshalerError represents an error from calling a
// [Marshaler.MarshalJSON] or [encoding.TextMarshaler.MarshalText] method.
type MarshalerError struct {
Type reflect.Type
Err error
@@ -280,12 +247,11 @@ func (e *MarshalerError) Error() string {
// Unwrap returns the underlying error.
func (e *MarshalerError) Unwrap() error { return e.Err }

var hex = "0123456789abcdef"
const hex = "0123456789abcdef"

// An encodeState encodes JSON into a bytes.Buffer.
type encodeState struct {
bytes.Buffer // accumulated output
scratch [64]byte

// Keep track of what pointers we've seen in the current recursive call
// path, to avoid cycles that could lead to a stack overflow. Only do
@@ -341,16 +307,12 @@ func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Pointer:
return v.IsNil()
case reflect.Bool,
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
reflect.Float32, reflect.Float64,
reflect.Interface, reflect.Pointer:
return v.IsZero()
}
return false
}
@@ -407,8 +369,8 @@ func typeEncoder(t reflect.Type) encoderFunc {
}

var (
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
marshalerType = reflect.TypeFor[Marshaler]()
textMarshalerType = reflect.TypeFor[encoding.TextMarshaler]()
)

// newTypeEncoder constructs an encoderFunc for a type.
@@ -477,8 +439,10 @@ func marshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
}
b, err := m.MarshalJSON()
if err == nil {
// copy JSON into buffer, checking validity.
err = compact(&e.Buffer, b, opts.escapeHTML)
e.Grow(len(b))
out := e.AvailableBuffer()
out, err = appendCompact(out, b, opts.escapeHTML)
e.Buffer.Write(out)
}
if err != nil {
e.error(&MarshalerError{v.Type(), err, "MarshalJSON"})
@@ -494,8 +458,10 @@ func addrMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
m := va.Interface().(Marshaler)
b, err := m.MarshalJSON()
if err == nil {
// copy JSON into buffer, checking validity.
err = compact(&e.Buffer, b, opts.escapeHTML)
e.Grow(len(b))
out := e.AvailableBuffer()
out, err = appendCompact(out, b, opts.escapeHTML)
e.Buffer.Write(out)
}
if err != nil {
e.error(&MarshalerError{v.Type(), err, "MarshalJSON"})
@@ -516,7 +482,7 @@ func textMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
if err != nil {
e.error(&MarshalerError{v.Type(), err, "MarshalText"})
}
e.stringBytes(b, opts.escapeHTML)
e.Write(appendString(e.AvailableBuffer(), b, opts.escapeHTML))
}

func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
@@ -530,43 +496,31 @@ func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
if err != nil {
e.error(&MarshalerError{v.Type(), err, "MarshalText"})
}
e.stringBytes(b, opts.escapeHTML)
e.Write(appendString(e.AvailableBuffer(), b, opts.escapeHTML))
}

func boolEncoder(e *encodeState, v reflect.Value, opts encOpts) {
if opts.quoted {
e.WriteByte('"')
}
if v.Bool() {
e.WriteString("true")
} else {
e.WriteString("false")
}
if opts.quoted {
e.WriteByte('"')
}
b := e.AvailableBuffer()
b = mayAppendQuote(b, opts.quoted)
b = strconv.AppendBool(b, v.Bool())
b = mayAppendQuote(b, opts.quoted)
e.Write(b)
}
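
The rewrite above is an instance of Go 1.21's bytes.Buffer.AvailableBuffer pattern: borrow the buffer's spare capacity as a zero-length slice, fill it with append-style helpers, then Write it back, which is a cheap in-place extension when the slice came from the same buffer. A standalone sketch of the same idiom:

package main

import (
	"bytes"
	"fmt"
	"strconv"
)

func main() {
	var buf bytes.Buffer
	for _, v := range []bool{true, false} {
		b := buf.AvailableBuffer() // zero-length slice over buf's spare capacity
		b = append(b, '"')
		b = strconv.AppendBool(b, v) // append-style encoders avoid intermediate strings
		b = append(b, '"', ',')
		buf.Write(b) // typically just extends buf in place; no extra copy
	}
	fmt.Println(buf.String()) // "true","false",
}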

func intEncoder(e *encodeState, v reflect.Value, opts encOpts) {
b := strconv.AppendInt(e.scratch[:0], v.Int(), 10)
if opts.quoted {
e.WriteByte('"')
}
b := e.AvailableBuffer()
b = mayAppendQuote(b, opts.quoted)
b = strconv.AppendInt(b, v.Int(), 10)
b = mayAppendQuote(b, opts.quoted)
e.Write(b)
if opts.quoted {
e.WriteByte('"')
}
}

func uintEncoder(e *encodeState, v reflect.Value, opts encOpts) {
b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10)
if opts.quoted {
e.WriteByte('"')
}
b := e.AvailableBuffer()
b = mayAppendQuote(b, opts.quoted)
b = strconv.AppendUint(b, v.Uint(), 10)
b = mayAppendQuote(b, opts.quoted)
e.Write(b)
if opts.quoted {
e.WriteByte('"')
}
}

type floatEncoder int // number of bits
@@ -582,7 +536,8 @@ func (bits floatEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
// See golang.org/issue/6384 and golang.org/issue/14135.
// Like fmt %g, but the exponent cutoffs are different
// and exponents themselves are not padded to two digits.
b := e.scratch[:0]
b := e.AvailableBuffer()
b = mayAppendQuote(b, opts.quoted)
abs := math.Abs(f)
fmt := byte('f')
// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
@@ -600,14 +555,8 @@ func (bits floatEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
b = b[:n-1]
}
}

if opts.quoted {
e.WriteByte('"')
}
b = mayAppendQuote(b, opts.quoted)
e.Write(b)
if opts.quoted {
e.WriteByte('"')
}
}

var (
@@ -626,24 +575,18 @@ func stringEncoder(e *encodeState, v reflect.Value, opts encOpts) {
if !isValidNumber(numStr) {
e.error(fmt.Errorf("json: invalid number literal %q", numStr))
}
if opts.quoted {
e.WriteByte('"')
}
e.WriteString(numStr)
if opts.quoted {
e.WriteByte('"')
}
b := e.AvailableBuffer()
b = mayAppendQuote(b, opts.quoted)
b = append(b, numStr...)
b = mayAppendQuote(b, opts.quoted)
e.Write(b)
return
}
if opts.quoted {
e2 := newEncodeState()
// Since we encode the string twice, we only need to escape HTML
// the first time.
e2.string(v.String(), opts.escapeHTML)
e.stringBytes(e2.Bytes(), false)
encodeStatePool.Put(e2)
b := appendString(nil, v.String(), opts.escapeHTML)
e.Write(appendString(e.AvailableBuffer(), b, false)) // no need to escape again since it is already escaped
} else {
e.string(v.String(), opts.escapeHTML)
e.Write(appendString(e.AvailableBuffer(), v.String(), opts.escapeHTML))
}
}
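
The opts.quoted branches exist to support the ",string" struct-tag option, which wraps an encoded numeric or boolean value in quotes. A small usage sketch against the standard library (the struct shape is an illustrative assumption):

package main

import (
	"encoding/json"
	"fmt"
)

type Quota struct {
	// The ",string" option asks the encoder to emit the int as a JSON string.
	MaxBytes int64 `json:"maxBytes,string"`
}

func main() {
	b, _ := json.Marshal(Quota{MaxBytes: 1 << 30})
	fmt.Println(string(b)) // {"maxBytes":"1073741824"}
}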

@@ -725,7 +668,8 @@ type structEncoder struct {

type structFields struct {
list []field
nameIndex map[string]int
byExactName map[string]*field
byFoldedName map[string]*field
}

func (se structEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
@@ -793,22 +737,26 @@ func (me mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
e.WriteByte('{')

// Extract and sort the keys.
sv := make([]reflectWithString, v.Len())
mi := v.MapRange()
var (
sv = make([]reflectWithString, v.Len())
mi = v.MapRange()
err error
)
for i := 0; mi.Next(); i++ {
sv[i].k = mi.Key()
sv[i].v = mi.Value()
if err := sv[i].resolve(); err != nil {
if sv[i].ks, err = resolveKeyName(mi.Key()); err != nil {
e.error(fmt.Errorf("json: encoding error for type %q: %q", v.Type().String(), err.Error()))
}
sv[i].v = mi.Value()
}
sort.Slice(sv, func(i, j int) bool { return sv[i].ks < sv[j].ks })
slices.SortFunc(sv, func(i, j reflectWithString) int {
return strings.Compare(i.ks, j.ks)
})

for i, kv := range sv {
if i > 0 {
e.WriteByte(',')
}
e.string(kv.ks, opts.escapeHTML)
e.Write(appendString(e.AvailableBuffer(), kv.ks, opts.escapeHTML))
e.WriteByte(':')
me.elemEnc(e, kv.v, opts)
}
@@ -835,29 +783,13 @@ func encodeByteSlice(e *encodeState, v reflect.Value, _ encOpts) {
e.WriteString("null")
return
}

s := v.Bytes()
e.WriteByte('"')
encodedLen := base64.StdEncoding.EncodedLen(len(s))
if encodedLen <= len(e.scratch) {
// If the encoded bytes fit in e.scratch, avoid an extra
// allocation and use the cheaper Encoding.Encode.
dst := e.scratch[:encodedLen]
base64.StdEncoding.Encode(dst, s)
e.Write(dst)
} else if encodedLen <= 1024 {
// The encoded bytes are short enough to allocate for, and
// Encoding.Encode is still cheaper.
dst := make([]byte, encodedLen)
base64.StdEncoding.Encode(dst, s)
e.Write(dst)
} else {
// The encoded bytes are too long to cheaply allocate, and
// Encoding.Encode is no longer noticeably cheaper.
enc := base64.NewEncoder(base64.StdEncoding, e)
enc.Write(s)
enc.Close()
}
e.WriteByte('"')
b := e.AvailableBuffer()
b = append(b, '"')
b = base64.StdEncoding.AppendEncode(b, s)
b = append(b, '"')
e.Write(b)
}
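
The three-way size heuristic collapses into one call because Go 1.22 added base64.Encoding.AppendEncode, which grows dst as needed and encodes in place. A standalone sketch of the idiom (assumes Go 1.22+):

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	raw := []byte{0xde, 0xad, 0xbe, 0xef}
	b := []byte(`"`)                            // opening quote, as the encoder does
	b = base64.StdEncoding.AppendEncode(b, raw) // requires Go 1.22+
	b = append(b, '"')
	fmt.Println(string(b)) // "3q2+7w=="
}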

// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil.
@@ -997,78 +929,77 @@ func typeByIndex(t reflect.Type, index []int) reflect.Type {
}

type reflectWithString struct {
k reflect.Value
v reflect.Value
ks string
}

func (w *reflectWithString) resolve() error {
if w.k.Kind() == reflect.String {
w.ks = w.k.String()
return nil
func resolveKeyName(k reflect.Value) (string, error) {
if k.Kind() == reflect.String {
return k.String(), nil
}
if tm, ok := w.k.Interface().(encoding.TextMarshaler); ok {
if w.k.Kind() == reflect.Pointer && w.k.IsNil() {
return nil
if tm, ok := k.Interface().(encoding.TextMarshaler); ok {
if k.Kind() == reflect.Pointer && k.IsNil() {
return "", nil
}
buf, err := tm.MarshalText()
w.ks = string(buf)
return err
return string(buf), err
}
switch w.k.Kind() {
switch k.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
w.ks = strconv.FormatInt(w.k.Int(), 10)
return nil
return strconv.FormatInt(k.Int(), 10), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
w.ks = strconv.FormatUint(w.k.Uint(), 10)
return nil
return strconv.FormatUint(k.Uint(), 10), nil
}
panic("unexpected map key type")
}
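
resolveKeyName is what lets non-string map keys round-trip: integers are formatted as decimal strings, and any key type implementing encoding.TextMarshaler supplies its own text. The standard library behaves the same way:

package main

import (
	"encoding/json"
	"fmt"
	"net/netip"
)

func main() {
	// Integer keys are converted to strings; note keys sort as strings, so "443" < "80".
	byPort := map[int]string{80: "http", 443: "https"}
	b, _ := json.Marshal(byPort)
	fmt.Println(string(b)) // {"443":"https","80":"http"}

	// netip.Addr implements encoding.TextMarshaler, so it works as a map key too.
	byAddr := map[netip.Addr]string{netip.MustParseAddr("10.0.0.1"): "node-a"}
	b, _ = json.Marshal(byAddr)
	fmt.Println(string(b)) // {"10.0.0.1":"node-a"}
}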

// NOTE: keep in sync with stringBytes below.
func (e *encodeState) string(s string, escapeHTML bool) {
e.WriteByte('"')
func appendString[Bytes []byte | string](dst []byte, src Bytes, escapeHTML bool) []byte {
dst = append(dst, '"')
start := 0
for i := 0; i < len(s); {
if b := s[i]; b < utf8.RuneSelf {
for i := 0; i < len(src); {
if b := src[i]; b < utf8.RuneSelf {
if htmlSafeSet[b] || (!escapeHTML && safeSet[b]) {
i++
continue
}
if start < i {
e.WriteString(s[start:i])
}
e.WriteByte('\\')
dst = append(dst, src[start:i]...)
switch b {
case '\\', '"':
e.WriteByte(b)
dst = append(dst, '\\', b)
case '\b':
dst = append(dst, '\\', 'b')
case '\f':
dst = append(dst, '\\', 'f')
case '\n':
e.WriteByte('n')
dst = append(dst, '\\', 'n')
case '\r':
e.WriteByte('r')
dst = append(dst, '\\', 'r')
case '\t':
e.WriteByte('t')
dst = append(dst, '\\', 't')
default:
// This encodes bytes < 0x20 except for \t, \n and \r.
// This encodes bytes < 0x20 except for \b, \f, \n, \r and \t.
// If escapeHTML is set, it also escapes <, >, and &
// because they can lead to security holes when
// user-controlled strings are rendered into JSON
// and served to some browsers.
e.WriteString(`u00`)
e.WriteByte(hex[b>>4])
e.WriteByte(hex[b&0xF])
dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF])
}
i++
start = i
continue
}
c, size := utf8.DecodeRuneInString(s[i:])
if c == utf8.RuneError && size == 1 {
if start < i {
e.WriteString(s[start:i])
// TODO(https://go.dev/issue/56948): Use generic utf8 functionality.
// For now, cast only a small portion of byte slices to a string
// so that it can be stack allocated. This slows down []byte slightly
// due to the extra copy, but keeps string performance roughly the same.
n := len(src) - i
if n > utf8.UTFMax {
n = utf8.UTFMax
}
e.WriteString(`\ufffd`)
c, size := utf8.DecodeRuneInString(string(src[i : i+n]))
if c == utf8.RuneError && size == 1 {
dst = append(dst, src[start:i]...)
dst = append(dst, `\ufffd`...)
i += size
start = i
continue
@@ -1079,102 +1010,27 @@ func (e *encodeState) string(s string, escapeHTML bool) {
// but don't work in JSONP, which has to be evaluated as JavaScript,
// and can lead to security holes there. It is valid JSON to
// escape them, so we do so unconditionally.
// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
// See https://en.wikipedia.org/wiki/JSON#Safety.
if c == '\u2028' || c == '\u2029' {
if start < i {
e.WriteString(s[start:i])
}
e.WriteString(`\u202`)
e.WriteByte(hex[c&0xF])
dst = append(dst, src[start:i]...)
dst = append(dst, '\\', 'u', '2', '0', '2', hex[c&0xF])
i += size
start = i
continue
}
i += size
}
if start < len(s) {
e.WriteString(s[start:])
}
e.WriteByte('"')
}

// NOTE: keep in sync with string above.
func (e *encodeState) stringBytes(s []byte, escapeHTML bool) {
e.WriteByte('"')
start := 0
for i := 0; i < len(s); {
if b := s[i]; b < utf8.RuneSelf {
if htmlSafeSet[b] || (!escapeHTML && safeSet[b]) {
i++
continue
}
if start < i {
e.Write(s[start:i])
}
e.WriteByte('\\')
switch b {
case '\\', '"':
e.WriteByte(b)
case '\n':
e.WriteByte('n')
case '\r':
e.WriteByte('r')
case '\t':
e.WriteByte('t')
default:
// This encodes bytes < 0x20 except for \t, \n and \r.
// If escapeHTML is set, it also escapes <, >, and &
// because they can lead to security holes when
// user-controlled strings are rendered into JSON
// and served to some browsers.
e.WriteString(`u00`)
e.WriteByte(hex[b>>4])
e.WriteByte(hex[b&0xF])
}
i++
start = i
continue
}
c, size := utf8.DecodeRune(s[i:])
if c == utf8.RuneError && size == 1 {
if start < i {
e.Write(s[start:i])
}
e.WriteString(`\ufffd`)
i += size
start = i
continue
}
// U+2028 is LINE SEPARATOR.
// U+2029 is PARAGRAPH SEPARATOR.
// They are both technically valid characters in JSON strings,
// but don't work in JSONP, which has to be evaluated as JavaScript,
// and can lead to security holes there. It is valid JSON to
// escape them, so we do so unconditionally.
// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
if c == '\u2028' || c == '\u2029' {
if start < i {
e.Write(s[start:i])
}
e.WriteString(`\u202`)
e.WriteByte(hex[c&0xF])
i += size
start = i
continue
}
i += size
}
if start < len(s) {
e.Write(s[start:])
}
e.WriteByte('"')
dst = append(dst, src[start:]...)
dst = append(dst, '"')
return dst
}
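
appendString is a generic ([]byte | string) rewrite of the former string/stringBytes pair, so only one copy of the escaping rules is left to keep in sync. Its escapeHTML flag corresponds to the public Encoder.SetEscapeHTML switch; a quick sketch of the observable difference:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	var safe, raw bytes.Buffer

	enc := json.NewEncoder(&safe) // HTML escaping is on by default
	enc.Encode("<script>&\u2028")

	enc = json.NewEncoder(&raw)
	enc.SetEscapeHTML(false) // keep <, >, & literal; U+2028/U+2029 are still escaped
	enc.Encode("<script>&\u2028")

	fmt.Print(safe.String()) // "\u003cscript\u003e\u0026\u2028"
	fmt.Print(raw.String())  // "<script>&\u2028"
}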

// A field represents a single field found in a struct.
type field struct {
name string
nameBytes []byte // []byte(name)
equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent

listIndex int // tracks the index of this field in the list of fields for a struct

nameNonEsc string // `"` + name + `":`
nameEscHTML string // `"` + HTMLEscape(name) + `":`
@@ -1188,25 +1044,6 @@ type field struct {
encoder encoderFunc
}

// byIndex sorts field by index sequence.
type byIndex []field

func (x byIndex) Len() int { return len(x) }

func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}

// typeFields returns a list of fields that JSON should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
@@ -1224,8 +1061,8 @@ func typeFields(t reflect.Type) structFields {
// Fields found.
var fields []field

// Buffer to run HTMLEscape on field names.
var nameEscBuf bytes.Buffer
// Buffer to run appendHTMLEscape on field names.
var nameEscBuf []byte

for len(next) > 0 {
current, next = next, current[:0]
@@ -1301,21 +1138,17 @@ func typeFields(t reflect.Type) structFields {
quoted: quoted,
}
field.nameBytes = []byte(field.name)
field.equalFold = foldFunc(field.nameBytes)

// Build nameEscHTML and nameNonEsc ahead of time.
nameEscBuf.Reset()
nameEscBuf.WriteString(`"`)
HTMLEscape(&nameEscBuf, field.nameBytes)
nameEscBuf.WriteString(`":`)
field.nameEscHTML = nameEscBuf.String()
nameEscBuf = appendHTMLEscape(nameEscBuf[:0], field.nameBytes)
field.nameEscHTML = `"` + string(nameEscBuf) + `":`
field.nameNonEsc = `"` + field.name + `":`

fields = append(fields, field)
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// It only cares about the distinction between 1 and 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
@@ -1331,21 +1164,23 @@ func typeFields(t reflect.Type) structFields {
}
}

sort.Slice(fields, func(i, j int) bool {
x := fields
slices.SortFunc(fields, func(a, b field) int {
// sort field by name, breaking ties with depth, then
// breaking ties with "name came from json tag", then
// breaking ties with index sequence.
if x[i].name != x[j].name {
return x[i].name < x[j].name
if c := strings.Compare(a.name, b.name); c != 0 {
return c
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
if c := cmp.Compare(len(a.index), len(b.index)); c != 0 {
return c
}
if x[i].tag != x[j].tag {
return x[i].tag
if a.tag != b.tag {
if a.tag {
return -1
}
return byIndex(x).Less(i, j)
return +1
}
return slices.Compare(a.index, b.index)
})

// Delete all fields that are hidden by the Go rules for embedded fields,
@@ -1377,17 +1212,25 @@ func typeFields(t reflect.Type) structFields {
}

fields = out
sort.Sort(byIndex(fields))
slices.SortFunc(fields, func(i, j field) int {
return slices.Compare(i.index, j.index)
})

for i := range fields {
f := &fields[i]
f.encoder = typeEncoder(typeByIndex(t, f.index))
}
nameIndex := make(map[string]int, len(fields))
exactNameIndex := make(map[string]*field, len(fields))
foldedNameIndex := make(map[string]*field, len(fields))
for i, field := range fields {
nameIndex[field.name] = i
fields[i].listIndex = i
exactNameIndex[field.name] = &fields[i]
// For historical reasons, first folded match takes precedence.
if _, ok := foldedNameIndex[string(foldName(field.nameBytes))]; !ok {
foldedNameIndex[string(foldName(field.nameBytes))] = &fields[i]
}
return structFields{fields, nameIndex}
}
return structFields{fields, exactNameIndex, foldedNameIndex}
}

// dominantField looks through the fields, all of which are known to
@@ -1416,3 +1259,10 @@ func cachedTypeFields(t reflect.Type) structFields {
f, _ := fieldCache.LoadOrStore(t, typeFields(t))
return f.(structFields)
}

func mayAppendQuote(b []byte, quoted bool) []byte {
if quoted {
b = append(b, '"')
}
return b
}
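
The field-sorting change above is the standard Go 1.21 migration from sort.Slice to slices.SortFunc: the comparator returns a three-way int, and ties cascade through cmp.Compare and slices.Compare. The same shape in isolation, with an illustrative type:

package main

import (
	"cmp"
	"fmt"
	"slices"
)

type entry struct {
	name  string
	depth int
}

func main() {
	es := []entry{{"b", 1}, {"a", 2}, {"a", 1}}
	slices.SortFunc(es, func(x, y entry) int {
		// Sort by name, breaking ties with depth, as typeFields does.
		if c := cmp.Compare(x.name, y.name); c != 0 {
			return c
		}
		return cmp.Compare(x.depth, y.depth)
	})
	fmt.Println(es) // [{a 1} {a 2} {b 1}]
}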

150 api/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go generated vendored
@@ -5,140 +5,44 @@
package json

import (
"bytes"
"unicode"
"unicode/utf8"
)

const (
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
kelvin = '\u212a'
smallLongEss = '\u017f'
)

// foldFunc returns one of four different case folding equivalence
// functions, from most general (and slow) to fastest:
//
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
// 3) asciiEqualFold, no special, but includes non-letters (including _)
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
//   - S maps to s and to U+017F 'ſ' Latin small letter long s
//   - k maps to K and to U+212A 'K' Kelvin sign
//
// See https://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
func foldFunc(s []byte) func(s, t []byte) bool {
nonLetter := false
special := false // special letter
for _, b := range s {
if b >= utf8.RuneSelf {
return bytes.EqualFold
}
upper := b & caseMask
if upper < 'A' || upper > 'Z' {
nonLetter = true
} else if upper == 'K' || upper == 'S' {
// See above for why these letters are special.
special = true
}
}
if special {
return equalFoldRight
}
if nonLetter {
return asciiEqualFold
}
return simpleLetterEqualFold
// foldName returns a folded string such that foldName(x) == foldName(y)
// is identical to bytes.EqualFold(x, y).
func foldName(in []byte) []byte {
// This is inlinable to take advantage of "function outlining".
var arr [32]byte // large enough for most JSON names
return appendFoldedName(arr[:0], in)
}

// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
for _, sb := range s {
if len(t) == 0 {
return false
func appendFoldedName(out, in []byte) []byte {
for i := 0; i < len(in); {
// Handle single-byte ASCII.
if c := in[i]; c < utf8.RuneSelf {
if 'a' <= c && c <= 'z' {
c -= 'a' - 'A'
}
tb := t[0]
if tb < utf8.RuneSelf {
if sb != tb {
sbUpper := sb & caseMask
if 'A' <= sbUpper && sbUpper <= 'Z' {
if sbUpper != tb&caseMask {
return false
}
} else {
return false
}
}
t = t[1:]
out = append(out, c)
i++
continue
}
// sb is ASCII and t is not. t must be either kelvin
// sign or long s; sb must be s, S, k, or K.
tr, size := utf8.DecodeRune(t)
switch sb {
case 's', 'S':
if tr != smallLongEss {
return false
// Handle multi-byte Unicode.
r, n := utf8.DecodeRune(in[i:])
out = utf8.AppendRune(out, foldRune(r))
i += n
}
case 'k', 'K':
if tr != kelvin {
return false
}
default:
return false
}
t = t[size:]

}
if len(t) > 0 {
return false
}
return true
return out
}

// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
// foldRune returns the smallest rune for all runes in the same fold set.
func foldRune(r rune) rune {
for {
r2 := unicode.SimpleFold(r)
if r2 <= r {
return r2
}
for i, sb := range s {
tb := t[i]
if sb == tb {
continue
}
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
if sb&caseMask != tb&caseMask {
return false
}
} else {
return false
r = r2
}
}
return true
}

// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, b := range s {
if b&caseMask != t[i]&caseMask {
return false
}
}
return true
}
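
foldName canonicalizes a name by mapping every rune to the smallest member of its Unicode simple-fold set, so two names compare equal under bytes.EqualFold exactly when their folded forms are byte-identical. The special cases that motivated the old four-function scheme fall out naturally:

package main

import (
	"fmt"
	"unicode"
)

// foldRune mirrors the helper above: walk the SimpleFold cycle until it wraps.
func foldRune(r rune) rune {
	for {
		r2 := unicode.SimpleFold(r)
		if r2 <= r {
			return r2
		}
		r = r2
	}
}

func main() {
	// 'K', 'k', and U+212A KELVIN SIGN share one fold set...
	fmt.Println(foldRune('K'), foldRune('k'), foldRune('\u212a')) // 75 75 75
	// ...as do 'S', 's', and U+017F LATIN SMALL LETTER LONG S.
	fmt.Println(foldRune('S') == foldRune('\u017f')) // true
}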

119 api/vendor/sigs.k8s.io/json/internal/golang/encoding/json/indent.go generated vendored
@@ -4,38 +4,67 @@

package json

import (
"bytes"
)
import "bytes"

// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
// so that the JSON will be safe to embed inside HTML <script> tags.
// For historical reasons, web browsers don't honor standard HTML
// escaping within <script> tags, so an alternative JSON encoding must be used.
func HTMLEscape(dst *bytes.Buffer, src []byte) {
dst.Grow(len(src))
dst.Write(appendHTMLEscape(dst.AvailableBuffer(), src))
}

func appendHTMLEscape(dst, src []byte) []byte {
// The characters can only appear in string literals,
// so just scan the string one byte at a time.
start := 0
for i, c := range src {
if c == '<' || c == '>' || c == '&' {
dst = append(dst, src[start:i]...)
dst = append(dst, '\\', 'u', '0', '0', hex[c>>4], hex[c&0xF])
start = i + 1
}
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
dst = append(dst, src[start:i]...)
dst = append(dst, '\\', 'u', '2', '0', '2', hex[src[i+2]&0xF])
start = i + len("\u2029")
}
}
return append(dst, src[start:]...)
}
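
HTMLEscape is now a thin wrapper over the append-based helper. Its effect is easiest to see from the public entry point, which this fork shares with the standard library:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	src := []byte(`{"msg":"<b>hi</b> & bye"}`)
	var dst bytes.Buffer
	json.HTMLEscape(&dst, src)
	fmt.Println(dst.String()) // {"msg":"\u003cb\u003ehi\u003c/b\u003e \u0026 bye"}
}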

// Compact appends to dst the JSON-encoded src with
// insignificant space characters elided.
func Compact(dst *bytes.Buffer, src []byte) error {
return compact(dst, src, false)
dst.Grow(len(src))
b := dst.AvailableBuffer()
b, err := appendCompact(b, src, false)
dst.Write(b)
return err
}

func compact(dst *bytes.Buffer, src []byte, escape bool) error {
origLen := dst.Len()
func appendCompact(dst, src []byte, escape bool) ([]byte, error) {
origLen := len(dst)
scan := newScanner()
defer freeScanner(scan)
start := 0
for i, c := range src {
if escape && (c == '<' || c == '>' || c == '&') {
if start < i {
dst.Write(src[start:i])
dst = append(dst, src[start:i]...)
}
dst.WriteString(`\u00`)
dst.WriteByte(hex[c>>4])
dst.WriteByte(hex[c&0xF])
dst = append(dst, '\\', 'u', '0', '0', hex[c>>4], hex[c&0xF])
start = i + 1
}
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
if escape && c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
if start < i {
dst.Write(src[start:i])
dst = append(dst, src[start:i]...)
}
dst.WriteString(`\u202`)
dst.WriteByte(hex[src[i+2]&0xF])
dst = append(dst, '\\', 'u', '2', '0', '2', hex[src[i+2]&0xF])
start = i + 3
}
v := scan.step(scan, c)
@@ -44,29 +73,37 @@ func compact(dst *bytes.Buffer, src []byte, escape bool) error {
break
}
if start < i {
dst.Write(src[start:i])
dst = append(dst, src[start:i]...)
}
start = i + 1
}
}
if scan.eof() == scanError {
dst.Truncate(origLen)
return scan.err
return dst[:origLen], scan.err
}
if start < len(src) {
dst.Write(src[start:])
dst = append(dst, src[start:]...)
}
return nil
return dst, nil
}

func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
dst.WriteByte('\n')
dst.WriteString(prefix)
func appendNewline(dst []byte, prefix, indent string, depth int) []byte {
dst = append(dst, '\n')
dst = append(dst, prefix...)
for i := 0; i < depth; i++ {
dst.WriteString(indent)
dst = append(dst, indent...)
}
return dst
}

// indentGrowthFactor specifies the growth factor of indenting JSON input.
// Empirically, the growth factor was measured to be between 1.4x to 1.8x
// for some set of compacted JSON with the indent being a single tab.
// Specify a growth factor slightly larger than what is observed
// to reduce probability of allocation in appendIndent.
// A factor no higher than 2 ensures that wasted space never exceeds 50%.
const indentGrowthFactor = 2

// Indent appends to dst an indented form of the JSON-encoded src.
// Each element in a JSON object or array begins on a new,
// indented line beginning with prefix followed by one or more
@@ -79,7 +116,15 @@ func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
// For example, if src has no trailing spaces, neither will dst;
// if src ends in a trailing newline, so will dst.
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
origLen := dst.Len()
dst.Grow(indentGrowthFactor * len(src))
b := dst.AvailableBuffer()
b, err := appendIndent(b, src, prefix, indent)
dst.Write(b)
return err
}

func appendIndent(dst, src []byte, prefix, indent string) ([]byte, error) {
origLen := len(dst)
scan := newScanner()
defer freeScanner(scan)
needIndent := false
@@ -96,13 +141,13 @@ func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
if needIndent && v != scanEndObject && v != scanEndArray {
needIndent = false
depth++
newline(dst, prefix, indent, depth)
dst = appendNewline(dst, prefix, indent, depth)
}

// Emit semantically uninteresting bytes
// (in particular, punctuation in strings) unmodified.
if v == scanContinue {
dst.WriteByte(c)
dst = append(dst, c)
continue
}

@@ -111,33 +156,27 @@ func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
case '{', '[':
// delay indent so that empty object and array are formatted as {} and [].
needIndent = true
dst.WriteByte(c)

dst = append(dst, c)
case ',':
dst.WriteByte(c)
newline(dst, prefix, indent, depth)

dst = append(dst, c)
dst = appendNewline(dst, prefix, indent, depth)
case ':':
dst.WriteByte(c)
dst.WriteByte(' ')

dst = append(dst, c, ' ')
case '}', ']':
if needIndent {
// suppress indent in empty object/array
needIndent = false
} else {
depth--
newline(dst, prefix, indent, depth)
dst = appendNewline(dst, prefix, indent, depth)
}
dst.WriteByte(c)

dst = append(dst, c)
default:
dst.WriteByte(c)
dst = append(dst, c)
}
}
if scan.eof() == scanError {
dst.Truncate(origLen)
return scan.err
return dst[:origLen], scan.err
}
return nil
return dst, nil
}
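
Compact and Indent keep their bytes.Buffer signatures but now stage work in the buffer's spare capacity via AvailableBuffer. A round-trip sketch with the standard library:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	src := []byte(`{"a": [1, 2, 3]}`)

	var compacted bytes.Buffer
	if err := json.Compact(&compacted, src); err != nil {
		panic(err)
	}
	fmt.Println(compacted.String()) // {"a":[1,2,3]}

	var indented bytes.Buffer
	if err := json.Indent(&indented, compacted.Bytes(), "", "  "); err != nil {
		panic(err)
	}
	fmt.Println(indented.String())
}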

4 api/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go generated vendored
@@ -43,7 +43,7 @@ func checkValid(data []byte, scan *scanner) error {
}

// A SyntaxError is a description of a JSON syntax error.
// Unmarshal will return a SyntaxError if the JSON can't be parsed.
// [Unmarshal] will return a SyntaxError if the JSON can't be parsed.
type SyntaxError struct {
msg string // description of error
Offset int64 // error occurred after reading Offset bytes
@@ -594,7 +594,7 @@ func (s *scanner) error(c byte, context string) int {
return scanError
}

// quoteChar formats c as a quoted character literal
// quoteChar formats c as a quoted character literal.
func quoteChar(c byte) string {
// special cases - different from quoted strings
if c == '\'' {

41 api/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go generated vendored
@@ -32,7 +32,7 @@ func NewDecoder(r io.Reader) *Decoder {
}

// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
// [Number] instead of as a float64.
func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
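
UseNumber matters downstream in this same commit: reflectcache.go decodes with UseNumber so large integers survive as json.Number instead of lossy float64. A minimal illustration:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	dec := json.NewDecoder(strings.NewReader(`{"id": 9007199254740993}`))
	dec.UseNumber() // keep numbers as json.Number instead of float64
	var m map[string]any
	dec.Decode(&m)
	n := m["id"].(json.Number)
	i, _ := n.Int64()
	fmt.Println(i) // 9007199254740993, exact; float64 would round this value
}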

// DisallowUnknownFields causes the Decoder to return an error when the destination
@@ -43,7 +43,7 @@ func (dec *Decoder) DisallowUnknownFields() { dec.d.disallowUnknownFields = true
// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about
// See the documentation for [Unmarshal] for details about
// the conversion of JSON into a Go value.
func (dec *Decoder) Decode(v any) error {
if dec.err != nil {
@@ -78,7 +78,7 @@ func (dec *Decoder) Decode(v any) error {
}

// Buffered returns a reader of the data remaining in the Decoder's
// buffer. The reader is valid until the next call to Decode.
// buffer. The reader is valid until the next call to [Decoder.Decode].
func (dec *Decoder) Buffered() io.Reader {
return bytes.NewReader(dec.buf[dec.scanp:])
}
@@ -182,7 +182,7 @@ type Encoder struct {
err error
escapeHTML bool

indentBuf *bytes.Buffer
indentBuf []byte
indentPrefix string
indentValue string
}
@@ -193,15 +193,19 @@ func NewEncoder(w io.Writer) *Encoder {
}

// Encode writes the JSON encoding of v to the stream,
// with insignificant space characters elided,
// followed by a newline character.
//
// See the documentation for Marshal for details about the
// See the documentation for [Marshal] for details about the
// conversion of Go values to JSON.
func (enc *Encoder) Encode(v any) error {
if enc.err != nil {
return enc.err
}

e := newEncodeState()
defer encodeStatePool.Put(e)

err := e.marshal(v, encOpts{escapeHTML: enc.escapeHTML})
if err != nil {
return err
@@ -217,20 +221,15 @@ func (enc *Encoder) Encode(v any) error {

b := e.Bytes()
if enc.indentPrefix != "" || enc.indentValue != "" {
if enc.indentBuf == nil {
enc.indentBuf = new(bytes.Buffer)
}
enc.indentBuf.Reset()
err = Indent(enc.indentBuf, b, enc.indentPrefix, enc.indentValue)
enc.indentBuf, err = appendIndent(enc.indentBuf[:0], b, enc.indentPrefix, enc.indentValue)
if err != nil {
return err
}
b = enc.indentBuf.Bytes()
b = enc.indentBuf
}
if _, err = enc.w.Write(b); err != nil {
enc.err = err
}
encodeStatePool.Put(e)
return err
}
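
From the caller's side nothing changes: the Encoder still indents via SetIndent; only its scratch storage became a reusable []byte. Typical use:

package main

import (
	"encoding/json"
	"os"
)

func main() {
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ") // triggers the appendIndent path on every Encode
	enc.Encode(map[string]int{"replicas": 3})
}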

@@ -255,7 +254,7 @@ func (enc *Encoder) SetEscapeHTML(on bool) {

/*
// RawMessage is a raw encoded JSON value.
// It implements Marshaler and Unmarshaler and can
// It implements [Marshaler] and [Unmarshaler] and can
// be used to delay JSON decoding or precompute a JSON encoding.
type RawMessage []byte

@@ -283,12 +282,12 @@ var _ Unmarshaler = (*RawMessage)(nil)
/*
// A Token holds a value of one of these types:
//
// Delim, for the four JSON delimiters [ ] { }
// bool, for JSON booleans
// float64, for JSON numbers
// Number, for JSON numbers
// string, for JSON string literals
// nil, for JSON null
//   - [Delim], for the four JSON delimiters [ ] { }
//   - bool, for JSON booleans
//   - float64, for JSON numbers
//   - [Number], for JSON numbers
//   - string, for JSON string literals
//   - nil, for JSON null
type Token any
*/

@@ -361,14 +360,14 @@ func (d Delim) String() string {
*/

// Token returns the next JSON token in the input stream.
// At the end of the input stream, Token returns nil, io.EOF.
// At the end of the input stream, Token returns nil, [io.EOF].
//
// Token guarantees that the delimiters [ ] { } it returns are
// properly nested and matched: if Token encounters an unexpected
// delimiter in the input, it will return an error.
//
// The input stream consists of basic JSON values—bool, string,
// number, and null—along with delimiters [ ] { } of type Delim
// number, and null—along with delimiters [ ] { } of type [Delim]
// to mark the start and end of arrays and objects.
// Commas and colons are elided.
func (dec *Decoder) Token() (Token, error) {

51 api/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go generated vendored
@@ -19,7 +19,9 @@ package value
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"reflect"
"sort"
"sync"
@@ -184,6 +186,11 @@ func (e TypeReflectCacheEntry) ToUnstructured(sv reflect.Value) (interface{}, er
// This is based on https://github.com/kubernetes/kubernetes/blob/82c9e5c814eb7acc6cc0a090c057294d0667ad66/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L505
// and is intended to replace it.

// Check if the object is a nil pointer.
if sv.Kind() == reflect.Ptr && sv.IsNil() {
// We're done - we don't need to store anything.
return nil, nil
}
// Check if the object has a custom string converter and use it if available, since it is much more efficient
// than round tripping through json.
if converter, ok := e.getUnstructuredConverter(sv); ok {
@@ -191,11 +198,6 @@ func (e TypeReflectCacheEntry) ToUnstructured(sv reflect.Value) (interface{}, er
}
// Check if the object has a custom JSON marshaller/unmarshaller.
if marshaler, ok := e.getJsonMarshaler(sv); ok {
if sv.Kind() == reflect.Ptr && sv.IsNil() {
// We're done - we don't need to store anything.
return nil, nil
}

data, err := marshaler.MarshalJSON()
if err != nil {
return nil, err
@@ -379,8 +381,6 @@ const maxDepth = 10000
// unmarshal unmarshals the given data
// If v is a *map[string]interface{}, numbers are converted to int64 or float64
func unmarshal(data []byte, v interface{}) error {
switch v := v.(type) {
case *map[string]interface{}:
// Build a decoder from the given data
decoder := json.NewDecoder(bytes.NewBuffer(data))
// Preserve numbers, rather than casting to float64 automatically
@@ -389,26 +389,41 @@ func unmarshal(data []byte, v interface{}) error {
if err := decoder.Decode(v); err != nil {
return err
}
// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
next := decoder.InputOffset()
if _, err := decoder.Token(); !errors.Is(err, io.EOF) {
tail := bytes.TrimLeft(data[next:], " \t\r\n")
return fmt.Errorf("unexpected trailing data at offset %d", len(data)-len(tail))
}

// If the decode succeeds, post-process the object to convert json.Number objects to int64 or float64
switch v := v.(type) {
case *map[string]interface{}:
return convertMapNumbers(*v, 0)

case *[]interface{}:
// Build a decoder from the given data
decoder := json.NewDecoder(bytes.NewBuffer(data))
// Preserve numbers, rather than casting to float64 automatically
decoder.UseNumber()
// Run the decode
if err := decoder.Decode(v); err != nil {
return err
}
// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
return convertSliceNumbers(*v, 0)

case *interface{}:
return convertInterfaceNumbers(v, 0)

default:
return json.Unmarshal(data, v)
return nil
}
}
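
The new guard rejects input with trailing non-whitespace after the first JSON value: InputOffset marks where Decode stopped, and a follow-up Token call must hit io.EOF. The same check in isolation (decodeStrict is a hypothetical helper; the error wording above is this file's, not the standard library's):

package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
)

// decodeStrict decodes exactly one JSON value and rejects trailing data,
// mirroring the check added to unmarshal above.
func decodeStrict(data []byte, v any) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	dec.UseNumber()
	if err := dec.Decode(v); err != nil {
		return err
	}
	if _, err := dec.Token(); !errors.Is(err, io.EOF) {
		return fmt.Errorf("unexpected trailing data")
	}
	return nil
}

func main() {
	var m map[string]any
	fmt.Println(decodeStrict([]byte(`{"a":1}`), &m))         // <nil>
	fmt.Println(decodeStrict([]byte(`{"a":1} garbage`), &m)) // unexpected trailing data
}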

func convertInterfaceNumbers(v *interface{}, depth int) error {
var err error
switch v2 := (*v).(type) {
case json.Number:
*v, err = convertNumber(v2)
case map[string]interface{}:
err = convertMapNumbers(v2, depth+1)
case []interface{}:
err = convertSliceNumbers(v2, depth+1)
}
return err
}

// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64.
// values which are map[string]interface{} or []interface{} are recursively visited
func convertMapNumbers(m map[string]interface{}, depth int) error {

2 api/vendor/sigs.k8s.io/structured-merge-diff/v4/value/value.go generated vendored
@@ -23,7 +23,7 @@ import (
"strings"

jsoniter "github.com/json-iterator/go"
"gopkg.in/yaml.v2"
yaml "sigs.k8s.io/yaml/goyaml.v2"
)

var (

306 api/vendor/sigs.k8s.io/yaml/LICENSE generated vendored Normal file
@@ -0,0 +1,306 @@
[Standard license texts for the newly vendored sigs.k8s.io/yaml module: the MIT License (c) 2014 Sam Ghods; the BSD-3-Clause license (c) 2012 The Go Authors; the MIT license (c) 2006-2011 Kirill Simonov covering the libyaml-derived files of the forked go-yaml.v3; and the Apache License 2.0 (c) 2011-2019 Canonical Ltd covering the remaining go-yaml files and the forked go-yaml.v2.]
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
201 api/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE generated vendored Normal file
@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
31 api/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml generated vendored Normal file
@@ -0,0 +1,31 @@
The following files were ported to Go from C files of libyaml, and thus
are still covered by their original copyright and license:

    apic.go
    emitterc.go
    parserc.go
    readerc.go
    scannerc.go
    writerc.go
    yamlh.go
    yamlprivateh.go

Copyright (c) 2006 Kirill Simonov

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
13 api/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE generated vendored Normal file
@@ -0,0 +1,13 @@
Copyright 2011-2016 Canonical Ltd.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
24 api/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS generated vendored Normal file
@@ -0,0 +1,24 @@
# See the OWNERS docs at https://go.k8s.io/owners

approvers:
- dims
- jpbetz
- smarterclayton
- deads2k
- sttts
- liggitt
- natasha41575
- knverey
reviewers:
- dims
- thockin
- jpbetz
- smarterclayton
- deads2k
- derekwaynecarr
- mikedanese
- liggitt
- sttts
- tallclair
labels:
- sig/api-machinery
143 api/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md generated vendored Normal file
@@ -0,0 +1,143 @@
# go-yaml fork

This package is a fork of the go-yaml library and is intended solely for consumption
by kubernetes projects. In this fork, we plan to support only critical changes required for
kubernetes, such as small bug fixes and regressions. Larger, general-purpose feature requests
should be made in the upstream go-yaml library, and we will reject such changes in this fork
unless we are pulling them from upstream.

This fork is based on v2.4.0: https://github.com/go-yaml/yaml/releases/tag/v2.4.0

# YAML support for the Go language

Introduction
------------

The yaml package enables Go programs to comfortably encode and decode YAML
values. It was developed within [Canonical](https://www.canonical.com) as
part of the [juju](https://juju.ubuntu.com) project, and is based on a
pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
C library to parse and generate YAML data quickly and reliably.

Compatibility
-------------

The yaml package supports most of YAML 1.1 and 1.2, including support for
anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
implemented, and base-60 floats from YAML 1.1 are purposefully not
supported since they're a poor design and are gone in YAML 1.2.

Installation and usage
----------------------

The import path for the package is *gopkg.in/yaml.v2*.

To install it, run:

    go get gopkg.in/yaml.v2

API documentation
-----------------

If opened in a browser, the import path itself leads to the API documentation:

  * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)

API stability
-------------

The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).


License
-------

The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.


Example
-------

```Go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

var data = `
a: Easy!
b:
  c: 2
  d: [3, 4]
`

// Note: struct fields must be public in order for unmarshal to
// correctly populate the data.
type T struct {
	A string
	B struct {
		RenamedC int   `yaml:"c"`
		D        []int `yaml:",flow"`
	}
}

func main() {
	t := T{}

	err := yaml.Unmarshal([]byte(data), &t)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- t:\n%v\n\n", t)

	d, err := yaml.Marshal(&t)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- t dump:\n%s\n\n", string(d))

	m := make(map[interface{}]interface{})

	err = yaml.Unmarshal([]byte(data), &m)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- m:\n%v\n\n", m)

	d, err = yaml.Marshal(&m)
	if err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Printf("--- m dump:\n%s\n\n", string(d))
}
```

This example will generate the following output:

```
--- t:
{Easy! {2 [3 4]}}

--- t dump:
a: Easy!
b:
  c: 2
  d: [3, 4]


--- m:
map[a:Easy! b:map[c:2 d:[3 4]]]

--- m dump:
a: Easy!
b:
  c: 2
  d:
  - 3
  - 4
```
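Editorial note on the README above: since this fork tracks the v2.4.0 API, the strict decoding variant is worth a mention next to the basic example. The sketch below is illustrative and not part of the vendored file; the `Config` type, its fields, and the deliberately misspelled key are hypothetical, but `yaml.Unmarshal` and `yaml.UnmarshalStrict` are real v2 entry points.

```Go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Config is a hypothetical target type for this sketch.
type Config struct {
	Name     string `yaml:"name"`
	Replicas int    `yaml:"replicas"`
}

func main() {
	// "replcas" is misspelled on purpose: plain Unmarshal silently drops
	// unknown keys, while UnmarshalStrict reports them as an error.
	doc := []byte("name: demo\nreplcas: 3\n")

	var lax Config
	if err := yaml.Unmarshal(doc, &lax); err != nil {
		fmt.Println("unmarshal:", err)
	}
	fmt.Printf("lax: %+v\n", lax) // Replicas stays 0; the typo goes unnoticed.

	var strict Config
	if err := yaml.UnmarshalStrict(doc, &strict); err != nil {
		fmt.Println("strict:", err) // Reports the unknown field "replcas".
	}
}
```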
744 api/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go generated vendored Normal file
@@ -0,0 +1,744 @@
package yaml

import (
	"io"
)

func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
	//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))

	// Check if we can move the queue at the beginning of the buffer.
	if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
		if parser.tokens_head != len(parser.tokens) {
			copy(parser.tokens, parser.tokens[parser.tokens_head:])
		}
		parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
		parser.tokens_head = 0
	}
	parser.tokens = append(parser.tokens, *token)
	if pos < 0 {
		return
	}
	copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
	parser.tokens[parser.tokens_head+pos] = *token
}

// Create a new parser object.
func yaml_parser_initialize(parser *yaml_parser_t) bool {
	*parser = yaml_parser_t{
		raw_buffer: make([]byte, 0, input_raw_buffer_size),
		buffer:     make([]byte, 0, input_buffer_size),
	}
	return true
}

// Destroy a parser object.
func yaml_parser_delete(parser *yaml_parser_t) {
	*parser = yaml_parser_t{}
}

// String read handler.
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
	if parser.input_pos == len(parser.input) {
		return 0, io.EOF
	}
	n = copy(buffer, parser.input[parser.input_pos:])
	parser.input_pos += n
	return n, nil
}

// Reader read handler.
func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
	return parser.input_reader.Read(buffer)
}

// Set a string input.
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
	if parser.read_handler != nil {
		panic("must set the input source only once")
	}
	parser.read_handler = yaml_string_read_handler
	parser.input = input
	parser.input_pos = 0
}

// Set a file input.
func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
	if parser.read_handler != nil {
		panic("must set the input source only once")
	}
	parser.read_handler = yaml_reader_read_handler
	parser.input_reader = r
}

// Set the source encoding.
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
	if parser.encoding != yaml_ANY_ENCODING {
		panic("must set the encoding only once")
	}
	parser.encoding = encoding
}

var disableLineWrapping = false

// Create a new emitter object.
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
	*emitter = yaml_emitter_t{
		buffer:     make([]byte, output_buffer_size),
		raw_buffer: make([]byte, 0, output_raw_buffer_size),
		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
		events:     make([]yaml_event_t, 0, initial_queue_size),
	}
	if disableLineWrapping {
		emitter.best_width = -1
	}
}

// Destroy an emitter object.
func yaml_emitter_delete(emitter *yaml_emitter_t) {
	*emitter = yaml_emitter_t{}
}

// String write handler.
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
	*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
	return nil
}

// yaml_writer_write_handler uses emitter.output_writer to write the
// emitted text.
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
	_, err := emitter.output_writer.Write(buffer)
	return err
}

// Set a string output.
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
	if emitter.write_handler != nil {
		panic("must set the output target only once")
	}
	emitter.write_handler = yaml_string_write_handler
	emitter.output_buffer = output_buffer
}

// Set a file output.
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
	if emitter.write_handler != nil {
		panic("must set the output target only once")
	}
	emitter.write_handler = yaml_writer_write_handler
	emitter.output_writer = w
}

// Set the output encoding.
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
	if emitter.encoding != yaml_ANY_ENCODING {
		panic("must set the output encoding only once")
	}
	emitter.encoding = encoding
}

// Set the canonical output style.
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
	emitter.canonical = canonical
}

// Set the indentation increment.
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
	if indent < 2 || indent > 9 {
		indent = 2
	}
	emitter.best_indent = indent
}

// Set the preferred line width.
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
	if width < 0 {
		width = -1
	}
	emitter.best_width = width
}

// Set if unescaped non-ASCII characters are allowed.
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
	emitter.unicode = unicode
}

// Set the preferred line break character.
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
	emitter.line_break = line_break
}

///*
// * Destroy a token object.
// */
//
//YAML_DECLARE(void)
//yaml_token_delete(yaml_token_t *token)
//{
//    assert(token); // Non-NULL token object expected.
//
//    switch (token.type)
//    {
//    case YAML_TAG_DIRECTIVE_TOKEN:
//        yaml_free(token.data.tag_directive.handle);
//        yaml_free(token.data.tag_directive.prefix);
//        break;
//
//    case YAML_ALIAS_TOKEN:
//        yaml_free(token.data.alias.value);
//        break;
//
//    case YAML_ANCHOR_TOKEN:
//        yaml_free(token.data.anchor.value);
//        break;
//
//    case YAML_TAG_TOKEN:
//        yaml_free(token.data.tag.handle);
//        yaml_free(token.data.tag.suffix);
//        break;
//
//    case YAML_SCALAR_TOKEN:
//        yaml_free(token.data.scalar.value);
//        break;
//
//    default:
//        break;
//    }
//
//    memset(token, 0, sizeof(yaml_token_t));
//}
//
///*
// * Check if a string is a valid UTF-8 sequence.
// *
// * Check 'reader.c' for more details on UTF-8 encoding.
// */
//
//static int
//yaml_check_utf8(yaml_char_t *start, size_t length)
//{
//    yaml_char_t *end = start+length;
//    yaml_char_t *pointer = start;
//
//    while (pointer < end) {
//        unsigned char octet;
//        unsigned int width;
//        unsigned int value;
//        size_t k;
//
//        octet = pointer[0];
//        width = (octet & 0x80) == 0x00 ? 1 :
//                (octet & 0xE0) == 0xC0 ? 2 :
//                (octet & 0xF0) == 0xE0 ? 3 :
//                (octet & 0xF8) == 0xF0 ? 4 : 0;
//        value = (octet & 0x80) == 0x00 ? octet & 0x7F :
//                (octet & 0xE0) == 0xC0 ? octet & 0x1F :
//                (octet & 0xF0) == 0xE0 ? octet & 0x0F :
//                (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
//        if (!width) return 0;
//        if (pointer+width > end) return 0;
//        for (k = 1; k < width; k ++) {
//            octet = pointer[k];
//            if ((octet & 0xC0) != 0x80) return 0;
//            value = (value << 6) + (octet & 0x3F);
//        }
//        if (!((width == 1) ||
//            (width == 2 && value >= 0x80) ||
//            (width == 3 && value >= 0x800) ||
//            (width == 4 && value >= 0x10000))) return 0;
//
//        pointer += width;
//    }
//
//    return 1;
//}
//

// Create STREAM-START.
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
	*event = yaml_event_t{
		typ:      yaml_STREAM_START_EVENT,
		encoding: encoding,
	}
}

// Create STREAM-END.
func yaml_stream_end_event_initialize(event *yaml_event_t) {
	*event = yaml_event_t{
		typ: yaml_STREAM_END_EVENT,
	}
}

// Create DOCUMENT-START.
func yaml_document_start_event_initialize(
	event *yaml_event_t,
	version_directive *yaml_version_directive_t,
	tag_directives []yaml_tag_directive_t,
	implicit bool,
) {
	*event = yaml_event_t{
		typ:               yaml_DOCUMENT_START_EVENT,
		version_directive: version_directive,
		tag_directives:    tag_directives,
		implicit:          implicit,
	}
}

// Create DOCUMENT-END.
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
	*event = yaml_event_t{
		typ:      yaml_DOCUMENT_END_EVENT,
		implicit: implicit,
	}
}

///*
// * Create ALIAS.
// */
//
//YAML_DECLARE(int)
//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
//{
//    mark yaml_mark_t = { 0, 0, 0 }
//    anchor_copy *yaml_char_t = NULL
//
//    assert(event) // Non-NULL event object is expected.
//    assert(anchor) // Non-NULL anchor is expected.
//
//    if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
//
//    anchor_copy = yaml_strdup(anchor)
//    if (!anchor_copy)
//        return 0
//
//    ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
//
//    return 1
//}

// Create SCALAR.
func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
	*event = yaml_event_t{
		typ:             yaml_SCALAR_EVENT,
		anchor:          anchor,
		tag:             tag,
		value:           value,
		implicit:        plain_implicit,
		quoted_implicit: quoted_implicit,
		style:           yaml_style_t(style),
	}
	return true
}

// Create SEQUENCE-START.
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
	*event = yaml_event_t{
		typ:      yaml_SEQUENCE_START_EVENT,
		anchor:   anchor,
		tag:      tag,
		implicit: implicit,
		style:    yaml_style_t(style),
	}
	return true
}

// Create SEQUENCE-END.
func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
	*event = yaml_event_t{
		typ: yaml_SEQUENCE_END_EVENT,
	}
	return true
}

// Create MAPPING-START.
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
	*event = yaml_event_t{
		typ:      yaml_MAPPING_START_EVENT,
		anchor:   anchor,
		tag:      tag,
		implicit: implicit,
		style:    yaml_style_t(style),
	}
}

// Create MAPPING-END.
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
	*event = yaml_event_t{
		typ: yaml_MAPPING_END_EVENT,
	}
}

// Destroy an event object.
func yaml_event_delete(event *yaml_event_t) {
	*event = yaml_event_t{}
}

///*
// * Create a document object.
// */
//
//YAML_DECLARE(int)
//yaml_document_initialize(document *yaml_document_t,
//        version_directive *yaml_version_directive_t,
//        tag_directives_start *yaml_tag_directive_t,
//        tag_directives_end *yaml_tag_directive_t,
//        start_implicit int, end_implicit int)
//{
//    struct {
//        error yaml_error_type_t
//    } context
//    struct {
//        start *yaml_node_t
//        end *yaml_node_t
//        top *yaml_node_t
//    } nodes = { NULL, NULL, NULL }
//    version_directive_copy *yaml_version_directive_t = NULL
//    struct {
//        start *yaml_tag_directive_t
//        end *yaml_tag_directive_t
//        top *yaml_tag_directive_t
//    } tag_directives_copy = { NULL, NULL, NULL }
//    value yaml_tag_directive_t = { NULL, NULL }
//    mark yaml_mark_t = { 0, 0, 0 }
//
//    assert(document) // Non-NULL document object is expected.
//    assert((tag_directives_start && tag_directives_end) ||
//        (tag_directives_start == tag_directives_end))
//        // Valid tag directives are expected.
//
//    if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
//
//    if (version_directive) {
//        version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
//        if (!version_directive_copy) goto error
//        version_directive_copy.major = version_directive.major
//        version_directive_copy.minor = version_directive.minor
//    }
//
//    if (tag_directives_start != tag_directives_end) {
//        tag_directive *yaml_tag_directive_t
//        if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
//            goto error
//        for (tag_directive = tag_directives_start
//            tag_directive != tag_directives_end; tag_directive ++) {
//            assert(tag_directive.handle)
//            assert(tag_directive.prefix)
//            if (!yaml_check_utf8(tag_directive.handle,
//                strlen((char *)tag_directive.handle)))
//                goto error
//            if (!yaml_check_utf8(tag_directive.prefix,
//                strlen((char *)tag_directive.prefix)))
//                goto error
//            value.handle = yaml_strdup(tag_directive.handle)
//            value.prefix = yaml_strdup(tag_directive.prefix)
//            if (!value.handle || !value.prefix) goto error
//            if (!PUSH(&context, tag_directives_copy, value))
//                goto error
//            value.handle = NULL
//            value.prefix = NULL
//        }
//    }
//
//    DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
//        tag_directives_copy.start, tag_directives_copy.top,
//        start_implicit, end_implicit, mark, mark)
//
//    return 1
//
//error:
//    STACK_DEL(&context, nodes)
//    yaml_free(version_directive_copy)
//    while (!STACK_EMPTY(&context, tag_directives_copy)) {
//        value yaml_tag_directive_t = POP(&context, tag_directives_copy)
//        yaml_free(value.handle)
//        yaml_free(value.prefix)
//    }
//    STACK_DEL(&context, tag_directives_copy)
//    yaml_free(value.handle)
//    yaml_free(value.prefix)
//
//    return 0
//}
//
///*
// * Destroy a document object.
// */
//
//YAML_DECLARE(void)
//yaml_document_delete(document *yaml_document_t)
//{
//    struct {
//        error yaml_error_type_t
//    } context
//    tag_directive *yaml_tag_directive_t
//
//    context.error = YAML_NO_ERROR // Eliminate a compiler warning.
//
//    assert(document) // Non-NULL document object is expected.
//
//    while (!STACK_EMPTY(&context, document.nodes)) {
//        node yaml_node_t = POP(&context, document.nodes)
//        yaml_free(node.tag)
//        switch (node.type) {
//        case YAML_SCALAR_NODE:
//            yaml_free(node.data.scalar.value)
//            break
//        case YAML_SEQUENCE_NODE:
//            STACK_DEL(&context, node.data.sequence.items)
//            break
//        case YAML_MAPPING_NODE:
//            STACK_DEL(&context, node.data.mapping.pairs)
//            break
//        default:
//            assert(0) // Should not happen.
//        }
//    }
//    STACK_DEL(&context, document.nodes)
//
//    yaml_free(document.version_directive)
//    for (tag_directive = document.tag_directives.start
//        tag_directive != document.tag_directives.end
//        tag_directive++) {
//        yaml_free(tag_directive.handle)
//        yaml_free(tag_directive.prefix)
//    }
//    yaml_free(document.tag_directives.start)
//
//    memset(document, 0, sizeof(yaml_document_t))
//}
//
///**
// * Get a document node.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_node(document *yaml_document_t, index int)
//{
//    assert(document) // Non-NULL document object is expected.
//
//    if (index > 0 && document.nodes.start + index <= document.nodes.top) {
//        return document.nodes.start + index - 1
//    }
//    return NULL
//}
//
///**
// * Get the root object.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_root_node(document *yaml_document_t)
//{
//    assert(document) // Non-NULL document object is expected.
//
//    if (document.nodes.top != document.nodes.start) {
//        return document.nodes.start
//    }
//    return NULL
//}
//
///*
// * Add a scalar node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_scalar(document *yaml_document_t,
//    tag *yaml_char_t, value *yaml_char_t, length int,
//    style yaml_scalar_style_t)
//{
//    struct {
//        error yaml_error_type_t
//    } context
//    mark yaml_mark_t = { 0, 0, 0 }
//    tag_copy *yaml_char_t = NULL
//    value_copy *yaml_char_t = NULL
//    node yaml_node_t
//
//    assert(document) // Non-NULL document object is expected.
//    assert(value) // Non-NULL value is expected.
//
//    if (!tag) {
//        tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
//    }
//
//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
//    tag_copy = yaml_strdup(tag)
//    if (!tag_copy) goto error
//
//    if (length < 0) {
//        length = strlen((char *)value)
//    }
//
//    if (!yaml_check_utf8(value, length)) goto error
//    value_copy = yaml_malloc(length+1)
//    if (!value_copy) goto error
//    memcpy(value_copy, value, length)
//    value_copy[length] = '\0'
//
//    SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
//    if (!PUSH(&context, document.nodes, node)) goto error
//
//    return document.nodes.top - document.nodes.start
//
//error:
//    yaml_free(tag_copy)
//    yaml_free(value_copy)
//
//    return 0
//}
//
///*
// * Add a sequence node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_sequence(document *yaml_document_t,
//    tag *yaml_char_t, style yaml_sequence_style_t)
//{
//    struct {
//        error yaml_error_type_t
//    } context
//    mark yaml_mark_t = { 0, 0, 0 }
//    tag_copy *yaml_char_t = NULL
//    struct {
//        start *yaml_node_item_t
//        end *yaml_node_item_t
//        top *yaml_node_item_t
//    } items = { NULL, NULL, NULL }
//    node yaml_node_t
//
//    assert(document) // Non-NULL document object is expected.
//
//    if (!tag) {
//        tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
//    }
//
//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
//    tag_copy = yaml_strdup(tag)
//    if (!tag_copy) goto error
//
//    if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
//
//    SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
//        style, mark, mark)
//    if (!PUSH(&context, document.nodes, node)) goto error
//
//    return document.nodes.top - document.nodes.start
//
//error:
//    STACK_DEL(&context, items)
//    yaml_free(tag_copy)
//
//    return 0
//}
//
///*
// * Add a mapping node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_mapping(document *yaml_document_t,
//    tag *yaml_char_t, style yaml_mapping_style_t)
//{
//    struct {
//        error yaml_error_type_t
//    } context
//    mark yaml_mark_t = { 0, 0, 0 }
//    tag_copy *yaml_char_t = NULL
//    struct {
//        start *yaml_node_pair_t
//        end *yaml_node_pair_t
//        top *yaml_node_pair_t
//    } pairs = { NULL, NULL, NULL }
//    node yaml_node_t
//
//    assert(document) // Non-NULL document object is expected.
//
//    if (!tag) {
//        tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
//    }
//
//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
//    tag_copy = yaml_strdup(tag)
//    if (!tag_copy) goto error
//
//    if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
//
//    MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
//        style, mark, mark)
//    if (!PUSH(&context, document.nodes, node)) goto error
//
//    return document.nodes.top - document.nodes.start
//
//error:
//    STACK_DEL(&context, pairs)
//    yaml_free(tag_copy)
//
//    return 0
//}
//
///*
// * Append an item to a sequence node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_sequence_item(document *yaml_document_t,
//    sequence int, item int)
//{
//    struct {
//        error yaml_error_type_t
//    } context
//
//    assert(document) // Non-NULL document is required.
//    assert(sequence > 0
//        && document.nodes.start + sequence <= document.nodes.top)
//        // Valid sequence id is required.
//    assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
//        // A sequence node is required.
//    assert(item > 0 && document.nodes.start + item <= document.nodes.top)
//        // Valid item id is required.
//
//    if (!PUSH(&context,
//        document.nodes.start[sequence-1].data.sequence.items, item))
//        return 0
//
//    return 1
//}
//
///*
// * Append a pair of a key and a value to a mapping node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_mapping_pair(document *yaml_document_t,
//    mapping int, key int, value int)
//{
//    struct {
//        error yaml_error_type_t
//    } context
//
//    pair yaml_node_pair_t
//
//    assert(document) // Non-NULL document is required.
//    assert(mapping > 0
//        && document.nodes.start + mapping <= document.nodes.top)
//        // Valid mapping id is required.
//    assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
//        // A mapping node is required.
//    assert(key > 0 && document.nodes.start + key <= document.nodes.top)
//        // Valid key id is required.
//    assert(value > 0 && document.nodes.start + value <= document.nodes.top)
//        // Valid value id is required.
//
//    pair.key = key
//    pair.value = value
//
//    if (!PUSH(&context,
//        document.nodes.start[mapping-1].data.mapping.pairs, pair))
//        return 0
//
//    return 1
//}
//
//
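Editorial note between the vendored files: the handlers in apic.go above are unexported; callers reach them through the package's public streaming types. The sketch below is illustrative only (it is not part of this diff), but it uses the real v2 API: a `Decoder`'s `io.Reader` is installed via `yaml_parser_set_input_reader`, and an `Encoder`'s `io.Writer` via `yaml_emitter_set_output_writer`, as shown above.

```Go
package main

import (
	"fmt"
	"log"
	"os"
	"strings"

	"gopkg.in/yaml.v2"
)

func main() {
	// Decoder: reads a multi-document stream; each Read on the underlying
	// reader ends up in the internal yaml_reader_read_handler.
	dec := yaml.NewDecoder(strings.NewReader("a: 1\n---\na: 2\n"))
	for {
		var doc map[string]int
		if err := dec.Decode(&doc); err != nil {
			break // io.EOF once the stream is exhausted
		}
		fmt.Println(doc)
	}

	// Encoder: writes go through the internal yaml_writer_write_handler.
	enc := yaml.NewEncoder(os.Stdout)
	defer enc.Close()
	if err := enc.Encode(map[string]int{"a": 3}); err != nil {
		log.Fatal(err)
	}
}
```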
815 api/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go generated vendored Normal file
@@ -0,0 +1,815 @@
package yaml

import (
	"encoding"
	"encoding/base64"
	"fmt"
	"io"
	"math"
	"reflect"
	"strconv"
	"time"
)

const (
	documentNode = 1 << iota
	mappingNode
	sequenceNode
	scalarNode
	aliasNode
)

type node struct {
	kind         int
	line, column int
	tag          string
	// For an alias node, alias holds the resolved alias.
	alias    *node
	value    string
	implicit bool
	children []*node
	anchors  map[string]*node
}

// ----------------------------------------------------------------------------
// Parser, produces a node tree out of a libyaml event stream.

type parser struct {
	parser   yaml_parser_t
	event    yaml_event_t
	doc      *node
	doneInit bool
}

func newParser(b []byte) *parser {
	p := parser{}
	if !yaml_parser_initialize(&p.parser) {
		panic("failed to initialize YAML emitter")
	}
	if len(b) == 0 {
		b = []byte{'\n'}
	}
	yaml_parser_set_input_string(&p.parser, b)
	return &p
}

func newParserFromReader(r io.Reader) *parser {
	p := parser{}
	if !yaml_parser_initialize(&p.parser) {
		panic("failed to initialize YAML emitter")
	}
	yaml_parser_set_input_reader(&p.parser, r)
	return &p
}

func (p *parser) init() {
	if p.doneInit {
		return
	}
	p.expect(yaml_STREAM_START_EVENT)
	p.doneInit = true
}

func (p *parser) destroy() {
	if p.event.typ != yaml_NO_EVENT {
		yaml_event_delete(&p.event)
	}
	yaml_parser_delete(&p.parser)
}

// expect consumes an event from the event stream and
// checks that it's of the expected type.
func (p *parser) expect(e yaml_event_type_t) {
	if p.event.typ == yaml_NO_EVENT {
		if !yaml_parser_parse(&p.parser, &p.event) {
			p.fail()
		}
	}
	if p.event.typ == yaml_STREAM_END_EVENT {
		failf("attempted to go past the end of stream; corrupted value?")
	}
	if p.event.typ != e {
		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
		p.fail()
	}
	yaml_event_delete(&p.event)
	p.event.typ = yaml_NO_EVENT
}

// peek peeks at the next event in the event stream,
// puts the results into p.event and returns the event type.
func (p *parser) peek() yaml_event_type_t {
	if p.event.typ != yaml_NO_EVENT {
		return p.event.typ
	}
	if !yaml_parser_parse(&p.parser, &p.event) {
		p.fail()
	}
	return p.event.typ
}

func (p *parser) fail() {
	var where string
	var line int
	if p.parser.problem_mark.line != 0 {
		line = p.parser.problem_mark.line
		// Scanner errors don't iterate line before returning error
		if p.parser.error == yaml_SCANNER_ERROR {
			line++
		}
	} else if p.parser.context_mark.line != 0 {
		line = p.parser.context_mark.line
	}
	if line != 0 {
		where = "line " + strconv.Itoa(line) + ": "
	}
	var msg string
	if len(p.parser.problem) > 0 {
		msg = p.parser.problem
	} else {
		msg = "unknown problem parsing YAML content"
	}
	failf("%s%s", where, msg)
}

func (p *parser) anchor(n *node, anchor []byte) {
	if anchor != nil {
		p.doc.anchors[string(anchor)] = n
	}
}

func (p *parser) parse() *node {
	p.init()
	switch p.peek() {
	case yaml_SCALAR_EVENT:
		return p.scalar()
	case yaml_ALIAS_EVENT:
		return p.alias()
	case yaml_MAPPING_START_EVENT:
		return p.mapping()
	case yaml_SEQUENCE_START_EVENT:
		return p.sequence()
	case yaml_DOCUMENT_START_EVENT:
		return p.document()
	case yaml_STREAM_END_EVENT:
		// Happens when attempting to decode an empty buffer.
		return nil
	default:
		panic("attempted to parse unknown event: " + p.event.typ.String())
	}
}

func (p *parser) node(kind int) *node {
	return &node{
		kind:   kind,
		line:   p.event.start_mark.line,
		column: p.event.start_mark.column,
	}
}

func (p *parser) document() *node {
	n := p.node(documentNode)
	n.anchors = make(map[string]*node)
	p.doc = n
	p.expect(yaml_DOCUMENT_START_EVENT)
	n.children = append(n.children, p.parse())
	p.expect(yaml_DOCUMENT_END_EVENT)
	return n
}

func (p *parser) alias() *node {
	n := p.node(aliasNode)
	n.value = string(p.event.anchor)
	n.alias = p.doc.anchors[n.value]
	if n.alias == nil {
		failf("unknown anchor '%s' referenced", n.value)
	}
	p.expect(yaml_ALIAS_EVENT)
	return n
}

func (p *parser) scalar() *node {
	n := p.node(scalarNode)
	n.value = string(p.event.value)
	n.tag = string(p.event.tag)
	n.implicit = p.event.implicit
	p.anchor(n, p.event.anchor)
	p.expect(yaml_SCALAR_EVENT)
	return n
}

func (p *parser) sequence() *node {
	n := p.node(sequenceNode)
	p.anchor(n, p.event.anchor)
	p.expect(yaml_SEQUENCE_START_EVENT)
	for p.peek() != yaml_SEQUENCE_END_EVENT {
		n.children = append(n.children, p.parse())
	}
	p.expect(yaml_SEQUENCE_END_EVENT)
	return n
}

func (p *parser) mapping() *node {
	n := p.node(mappingNode)
	p.anchor(n, p.event.anchor)
	p.expect(yaml_MAPPING_START_EVENT)
	for p.peek() != yaml_MAPPING_END_EVENT {
		n.children = append(n.children, p.parse(), p.parse())
	}
	p.expect(yaml_MAPPING_END_EVENT)
	return n
}

// ----------------------------------------------------------------------------
// Decoder, unmarshals a node into a provided value.

type decoder struct {
	doc     *node
	aliases map[*node]bool
	mapType reflect.Type
	terrors []string
	strict  bool

	decodeCount int
	aliasCount  int
	aliasDepth  int
}

var (
	mapItemType    = reflect.TypeOf(MapItem{})
	durationType   = reflect.TypeOf(time.Duration(0))
	defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
	ifaceType      = defaultMapType.Elem()
	timeType       = reflect.TypeOf(time.Time{})
	ptrTimeType    = reflect.TypeOf(&time.Time{})
)

func newDecoder(strict bool) *decoder {
	d := &decoder{mapType: defaultMapType, strict: strict}
	d.aliases = make(map[*node]bool)
	return d
}

func (d *decoder) terror(n *node, tag string, out reflect.Value) {
	if n.tag != "" {
		tag = n.tag
	}
	value := n.value
	if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
		if len(value) > 10 {
			value = " `" + value[:7] + "...`"
		} else {
			value = " `" + value + "`"
		}
	}
	d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
}

func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
	terrlen := len(d.terrors)
	err := u.UnmarshalYAML(func(v interface{}) (err error) {
		defer handleErr(&err)
		d.unmarshal(n, reflect.ValueOf(v))
		if len(d.terrors) > terrlen {
			issues := d.terrors[terrlen:]
			d.terrors = d.terrors[:terrlen]
			return &TypeError{issues}
		}
		return nil
	})
	if e, ok := err.(*TypeError); ok {
		d.terrors = append(d.terrors, e.Errors...)
		return false
	}
	if err != nil {
		fail(err)
	}
	return true
}

// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
// if a value is found to implement it.
// It returns the initialized and dereferenced out value, whether
// unmarshalling was already done by UnmarshalYAML, and if so whether
// its types unmarshalled appropriately.
//
// If n holds a null value, prepare returns before doing anything.
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
	if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
		return out, false, false
	}
	again := true
	for again {
		again = false
		if out.Kind() == reflect.Ptr {
			if out.IsNil() {
				out.Set(reflect.New(out.Type().Elem()))
			}
			out = out.Elem()
			again = true
		}
		if out.CanAddr() {
			if u, ok := out.Addr().Interface().(Unmarshaler); ok {
				good = d.callUnmarshaler(n, u)
				return out, true, good
			}
		}
	}
	return out, false, false
}

const (
	// 400,000 decode operations is ~500kb of dense object declarations, or
|
||||
// ~5kb of dense object declarations with 10000% alias expansion
|
||||
alias_ratio_range_low = 400000
|
||||
|
||||
// 4,000,000 decode operations is ~5MB of dense object declarations, or
|
||||
// ~4.5MB of dense object declarations with 10% alias expansion
|
||||
alias_ratio_range_high = 4000000
|
||||
|
||||
// alias_ratio_range is the range over which we scale allowed alias ratios
|
||||
alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
|
||||
)
|
||||
|
||||
func allowedAliasRatio(decodeCount int) float64 {
|
||||
switch {
|
||||
case decodeCount <= alias_ratio_range_low:
|
||||
// allow 99% to come from alias expansion for small-to-medium documents
|
||||
return 0.99
|
||||
case decodeCount >= alias_ratio_range_high:
|
||||
// allow 10% to come from alias expansion for very large documents
|
||||
return 0.10
|
||||
default:
|
||||
// scale smoothly from 99% down to 10% over the range.
|
||||
// this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
|
||||
// 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
|
||||
return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
|
||||
d.decodeCount++
|
||||
if d.aliasDepth > 0 {
|
||||
d.aliasCount++
|
||||
}
|
||||
if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
|
||||
failf("document contains excessive aliasing")
|
||||
}
|
||||
switch n.kind {
|
||||
case documentNode:
|
||||
return d.document(n, out)
|
||||
case aliasNode:
|
||||
return d.alias(n, out)
|
||||
}
|
||||
out, unmarshaled, good := d.prepare(n, out)
|
||||
if unmarshaled {
|
||||
return good
|
||||
}
|
||||
switch n.kind {
|
||||
case scalarNode:
|
||||
good = d.scalar(n, out)
|
||||
case mappingNode:
|
||||
good = d.mapping(n, out)
|
||||
case sequenceNode:
|
||||
good = d.sequence(n, out)
|
||||
default:
|
||||
panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
|
||||
}
|
||||
return good
|
||||
}
|
||||
|
||||
func (d *decoder) document(n *node, out reflect.Value) (good bool) {
|
||||
if len(n.children) == 1 {
|
||||
d.doc = n
|
||||
d.unmarshal(n.children[0], out)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
|
||||
if d.aliases[n] {
|
||||
// TODO this could actually be allowed in some circumstances.
|
||||
failf("anchor '%s' value contains itself", n.value)
|
||||
}
|
||||
d.aliases[n] = true
|
||||
d.aliasDepth++
|
||||
good = d.unmarshal(n.alias, out)
|
||||
d.aliasDepth--
|
||||
delete(d.aliases, n)
|
||||
return good
|
||||
}
|
||||
|
||||
var zeroValue reflect.Value
|
||||
|
||||
func resetMap(out reflect.Value) {
|
||||
for _, k := range out.MapKeys() {
|
||||
out.SetMapIndex(k, zeroValue)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) scalar(n *node, out reflect.Value) bool {
|
||||
var tag string
|
||||
var resolved interface{}
|
||||
if n.tag == "" && !n.implicit {
|
||||
tag = yaml_STR_TAG
|
||||
resolved = n.value
|
||||
} else {
|
||||
tag, resolved = resolve(n.tag, n.value)
|
||||
if tag == yaml_BINARY_TAG {
|
||||
data, err := base64.StdEncoding.DecodeString(resolved.(string))
|
||||
if err != nil {
|
||||
failf("!!binary value contains invalid base64 data")
|
||||
}
|
||||
resolved = string(data)
|
||||
}
|
||||
}
|
||||
if resolved == nil {
|
||||
if out.Kind() == reflect.Map && !out.CanAddr() {
|
||||
resetMap(out)
|
||||
} else {
|
||||
out.Set(reflect.Zero(out.Type()))
|
||||
}
|
||||
return true
|
||||
}
|
||||
if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
|
||||
// We've resolved to exactly the type we want, so use that.
|
||||
out.Set(resolvedv)
|
||||
return true
|
||||
}
|
||||
// Perhaps we can use the value as a TextUnmarshaler to
|
||||
// set its value.
|
||||
if out.CanAddr() {
|
||||
u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
|
||||
if ok {
|
||||
var text []byte
|
||||
if tag == yaml_BINARY_TAG {
|
||||
text = []byte(resolved.(string))
|
||||
} else {
|
||||
// We let any value be unmarshaled into TextUnmarshaler.
|
||||
// That might be more lax than we'd like, but the
|
||||
// TextUnmarshaler itself should bowl out any dubious values.
|
||||
text = []byte(n.value)
|
||||
}
|
||||
err := u.UnmarshalText(text)
|
||||
if err != nil {
|
||||
fail(err)
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
switch out.Kind() {
|
||||
case reflect.String:
|
||||
if tag == yaml_BINARY_TAG {
|
||||
out.SetString(resolved.(string))
|
||||
return true
|
||||
}
|
||||
if resolved != nil {
|
||||
out.SetString(n.value)
|
||||
return true
|
||||
}
|
||||
case reflect.Interface:
|
||||
if resolved == nil {
|
||||
out.Set(reflect.Zero(out.Type()))
|
||||
} else if tag == yaml_TIMESTAMP_TAG {
|
||||
// It looks like a timestamp but for backward compatibility
|
||||
// reasons we set it as a string, so that code that unmarshals
|
||||
// timestamp-like values into interface{} will continue to
|
||||
// see a string and not a time.Time.
|
||||
// TODO(v3) Drop this.
|
||||
out.Set(reflect.ValueOf(n.value))
|
||||
} else {
|
||||
out.Set(reflect.ValueOf(resolved))
|
||||
}
|
||||
return true
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
switch resolved := resolved.(type) {
|
||||
case int:
|
||||
if !out.OverflowInt(int64(resolved)) {
|
||||
out.SetInt(int64(resolved))
|
||||
return true
|
||||
}
|
||||
case int64:
|
||||
if !out.OverflowInt(resolved) {
|
||||
out.SetInt(resolved)
|
||||
return true
|
||||
}
|
||||
case uint64:
|
||||
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
|
||||
out.SetInt(int64(resolved))
|
||||
return true
|
||||
}
|
||||
case float64:
|
||||
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
|
||||
out.SetInt(int64(resolved))
|
||||
return true
|
||||
}
|
||||
case string:
|
||||
if out.Type() == durationType {
|
||||
d, err := time.ParseDuration(resolved)
|
||||
if err == nil {
|
||||
out.SetInt(int64(d))
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
switch resolved := resolved.(type) {
|
||||
case int:
|
||||
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
|
||||
out.SetUint(uint64(resolved))
|
||||
return true
|
||||
}
|
||||
case int64:
|
||||
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
|
||||
out.SetUint(uint64(resolved))
|
||||
return true
|
||||
}
|
||||
case uint64:
|
||||
if !out.OverflowUint(uint64(resolved)) {
|
||||
out.SetUint(uint64(resolved))
|
||||
return true
|
||||
}
|
||||
case float64:
|
||||
if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
|
||||
out.SetUint(uint64(resolved))
|
||||
return true
|
||||
}
|
||||
}
|
||||
case reflect.Bool:
|
||||
switch resolved := resolved.(type) {
|
||||
case bool:
|
||||
out.SetBool(resolved)
|
||||
return true
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
switch resolved := resolved.(type) {
|
||||
case int:
|
||||
out.SetFloat(float64(resolved))
|
||||
return true
|
||||
case int64:
|
||||
out.SetFloat(float64(resolved))
|
||||
return true
|
||||
case uint64:
|
||||
out.SetFloat(float64(resolved))
|
||||
return true
|
||||
case float64:
|
||||
out.SetFloat(resolved)
|
||||
return true
|
||||
}
|
||||
case reflect.Struct:
|
||||
if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
|
||||
out.Set(resolvedv)
|
||||
return true
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if out.Type().Elem() == reflect.TypeOf(resolved) {
|
||||
// TODO DOes this make sense? When is out a Ptr except when decoding a nil value?
|
||||
elem := reflect.New(out.Type().Elem())
|
||||
elem.Elem().Set(reflect.ValueOf(resolved))
|
||||
out.Set(elem)
|
||||
return true
|
||||
}
|
||||
}
|
||||
d.terror(n, tag, out)
|
||||
return false
|
||||
}
|
||||
|
||||
func settableValueOf(i interface{}) reflect.Value {
|
||||
v := reflect.ValueOf(i)
|
||||
sv := reflect.New(v.Type()).Elem()
|
||||
sv.Set(v)
|
||||
return sv
|
||||
}
|
||||
|
||||
func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
|
||||
l := len(n.children)
|
||||
|
||||
var iface reflect.Value
|
||||
switch out.Kind() {
|
||||
case reflect.Slice:
|
||||
out.Set(reflect.MakeSlice(out.Type(), l, l))
|
||||
case reflect.Array:
|
||||
if l != out.Len() {
|
||||
failf("invalid array: want %d elements but got %d", out.Len(), l)
|
||||
}
|
||||
case reflect.Interface:
|
||||
// No type hints. Will have to use a generic sequence.
|
||||
iface = out
|
||||
out = settableValueOf(make([]interface{}, l))
|
||||
default:
|
||||
d.terror(n, yaml_SEQ_TAG, out)
|
||||
return false
|
||||
}
|
||||
et := out.Type().Elem()
|
||||
|
||||
j := 0
|
||||
for i := 0; i < l; i++ {
|
||||
e := reflect.New(et).Elem()
|
||||
if ok := d.unmarshal(n.children[i], e); ok {
|
||||
out.Index(j).Set(e)
|
||||
j++
|
||||
}
|
||||
}
|
||||
if out.Kind() != reflect.Array {
|
||||
out.Set(out.Slice(0, j))
|
||||
}
|
||||
if iface.IsValid() {
|
||||
iface.Set(out)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
|
||||
switch out.Kind() {
|
||||
case reflect.Struct:
|
||||
return d.mappingStruct(n, out)
|
||||
case reflect.Slice:
|
||||
return d.mappingSlice(n, out)
|
||||
case reflect.Map:
|
||||
// okay
|
||||
case reflect.Interface:
|
||||
if d.mapType.Kind() == reflect.Map {
|
||||
iface := out
|
||||
out = reflect.MakeMap(d.mapType)
|
||||
iface.Set(out)
|
||||
} else {
|
||||
slicev := reflect.New(d.mapType).Elem()
|
||||
if !d.mappingSlice(n, slicev) {
|
||||
return false
|
||||
}
|
||||
out.Set(slicev)
|
||||
return true
|
||||
}
|
||||
default:
|
||||
d.terror(n, yaml_MAP_TAG, out)
|
||||
return false
|
||||
}
|
||||
outt := out.Type()
|
||||
kt := outt.Key()
|
||||
et := outt.Elem()
|
||||
|
||||
mapType := d.mapType
|
||||
if outt.Key() == ifaceType && outt.Elem() == ifaceType {
|
||||
d.mapType = outt
|
||||
}
|
||||
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.MakeMap(outt))
|
||||
}
|
||||
l := len(n.children)
|
||||
for i := 0; i < l; i += 2 {
|
||||
if isMerge(n.children[i]) {
|
||||
d.merge(n.children[i+1], out)
|
||||
continue
|
||||
}
|
||||
k := reflect.New(kt).Elem()
|
||||
if d.unmarshal(n.children[i], k) {
|
||||
kkind := k.Kind()
|
||||
if kkind == reflect.Interface {
|
||||
kkind = k.Elem().Kind()
|
||||
}
|
||||
if kkind == reflect.Map || kkind == reflect.Slice {
|
||||
failf("invalid map key: %#v", k.Interface())
|
||||
}
|
||||
e := reflect.New(et).Elem()
|
||||
if d.unmarshal(n.children[i+1], e) {
|
||||
d.setMapIndex(n.children[i+1], out, k, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
d.mapType = mapType
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
|
||||
if d.strict && out.MapIndex(k) != zeroValue {
|
||||
d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
|
||||
return
|
||||
}
|
||||
out.SetMapIndex(k, v)
|
||||
}
|
||||
|
||||
func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
|
||||
outt := out.Type()
|
||||
if outt.Elem() != mapItemType {
|
||||
d.terror(n, yaml_MAP_TAG, out)
|
||||
return false
|
||||
}
|
||||
|
||||
mapType := d.mapType
|
||||
d.mapType = outt
|
||||
|
||||
var slice []MapItem
|
||||
var l = len(n.children)
|
||||
for i := 0; i < l; i += 2 {
|
||||
if isMerge(n.children[i]) {
|
||||
d.merge(n.children[i+1], out)
|
||||
continue
|
||||
}
|
||||
item := MapItem{}
|
||||
k := reflect.ValueOf(&item.Key).Elem()
|
||||
if d.unmarshal(n.children[i], k) {
|
||||
v := reflect.ValueOf(&item.Value).Elem()
|
||||
if d.unmarshal(n.children[i+1], v) {
|
||||
slice = append(slice, item)
|
||||
}
|
||||
}
|
||||
}
|
||||
out.Set(reflect.ValueOf(slice))
|
||||
d.mapType = mapType
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
|
||||
sinfo, err := getStructInfo(out.Type())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
name := settableValueOf("")
|
||||
l := len(n.children)
|
||||
|
||||
var inlineMap reflect.Value
|
||||
var elemType reflect.Type
|
||||
if sinfo.InlineMap != -1 {
|
||||
inlineMap = out.Field(sinfo.InlineMap)
|
||||
inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
|
||||
elemType = inlineMap.Type().Elem()
|
||||
}
|
||||
|
||||
var doneFields []bool
|
||||
if d.strict {
|
||||
doneFields = make([]bool, len(sinfo.FieldsList))
|
||||
}
|
||||
for i := 0; i < l; i += 2 {
|
||||
ni := n.children[i]
|
||||
if isMerge(ni) {
|
||||
d.merge(n.children[i+1], out)
|
||||
continue
|
||||
}
|
||||
if !d.unmarshal(ni, name) {
|
||||
continue
|
||||
}
|
||||
if info, ok := sinfo.FieldsMap[name.String()]; ok {
|
||||
if d.strict {
|
||||
if doneFields[info.Id] {
|
||||
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
|
||||
continue
|
||||
}
|
||||
doneFields[info.Id] = true
|
||||
}
|
||||
var field reflect.Value
|
||||
if info.Inline == nil {
|
||||
field = out.Field(info.Num)
|
||||
} else {
|
||||
field = out.FieldByIndex(info.Inline)
|
||||
}
|
||||
d.unmarshal(n.children[i+1], field)
|
||||
} else if sinfo.InlineMap != -1 {
|
||||
if inlineMap.IsNil() {
|
||||
inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
|
||||
}
|
||||
value := reflect.New(elemType).Elem()
|
||||
d.unmarshal(n.children[i+1], value)
|
||||
d.setMapIndex(n.children[i+1], inlineMap, name, value)
|
||||
} else if d.strict {
|
||||
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func failWantMap() {
|
||||
failf("map merge requires map or sequence of maps as the value")
|
||||
}
|
||||
|
||||
func (d *decoder) merge(n *node, out reflect.Value) {
|
||||
switch n.kind {
|
||||
case mappingNode:
|
||||
d.unmarshal(n, out)
|
||||
case aliasNode:
|
||||
if n.alias != nil && n.alias.kind != mappingNode {
|
||||
failWantMap()
|
||||
}
|
||||
d.unmarshal(n, out)
|
||||
case sequenceNode:
|
||||
// Step backwards as earlier nodes take precedence.
|
||||
for i := len(n.children) - 1; i >= 0; i-- {
|
||||
ni := n.children[i]
|
||||
if ni.kind == aliasNode {
|
||||
if ni.alias != nil && ni.alias.kind != mappingNode {
|
||||
failWantMap()
|
||||
}
|
||||
} else if ni.kind != mappingNode {
|
||||
failWantMap()
|
||||
}
|
||||
d.unmarshal(ni, out)
|
||||
}
|
||||
default:
|
||||
failWantMap()
|
||||
}
|
||||
}
|
||||
|
||||
func isMerge(n *node) bool {
|
||||
return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
|
||||
}
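For orientation (not part of the vendored diff itself): the strict flag threaded through newDecoder, setMapIndex, and mappingStruct above is what the package-level UnmarshalStrict entry point turns on. A minimal sketch, assuming the vendored copy is importable as sigs.k8s.io/yaml/goyaml.v2:

package main

import (
    "fmt"

    yaml "sigs.k8s.io/yaml/goyaml.v2"
)

func main() {
    // With plain Unmarshal the duplicate key "a" silently wins last;
    // in strict mode setMapIndex records it in d.terrors and it
    // surfaces as a *yaml.TypeError.
    doc := []byte("a: 1\nb: 2\na: 3\n")
    var out map[string]interface{}
    err := yaml.UnmarshalStrict(doc, &out)
    fmt.Println(err) // roughly: yaml: unmarshal errors: line 3: key "a" already set in map
}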
1685 api/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go generated vendored Normal file (file diff suppressed because it is too large)
390 api/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go generated vendored Normal file
@@ -0,0 +1,390 @@
package yaml

import (
    "encoding"
    "fmt"
    "io"
    "reflect"
    "regexp"
    "sort"
    "strconv"
    "strings"
    "time"
    "unicode/utf8"
)

// jsonNumber is the interface of the encoding/json.Number datatype.
// Repeating the interface here avoids a dependency on encoding/json, and also
// supports other libraries like jsoniter, which use a similar datatype with
// the same interface. Detecting this interface is useful when dealing with
// structures containing json.Number, which is a string under the hood. The
// encoder should prefer the use of Int64(), Float64() and string(), in that
// order, when encoding this type.
type jsonNumber interface {
    Float64() (float64, error)
    Int64() (int64, error)
    String() string
}

type encoder struct {
    emitter yaml_emitter_t
    event   yaml_event_t
    out     []byte
    flow    bool
    // doneInit holds whether the initial stream_start_event has been
    // emitted.
    doneInit bool
}

func newEncoder() *encoder {
    e := &encoder{}
    yaml_emitter_initialize(&e.emitter)
    yaml_emitter_set_output_string(&e.emitter, &e.out)
    yaml_emitter_set_unicode(&e.emitter, true)
    return e
}

func newEncoderWithWriter(w io.Writer) *encoder {
    e := &encoder{}
    yaml_emitter_initialize(&e.emitter)
    yaml_emitter_set_output_writer(&e.emitter, w)
    yaml_emitter_set_unicode(&e.emitter, true)
    return e
}

func (e *encoder) init() {
    if e.doneInit {
        return
    }
    yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
    e.emit()
    e.doneInit = true
}

func (e *encoder) finish() {
    e.emitter.open_ended = false
    yaml_stream_end_event_initialize(&e.event)
    e.emit()
}

func (e *encoder) destroy() {
    yaml_emitter_delete(&e.emitter)
}

func (e *encoder) emit() {
    // This will internally delete the e.event value.
    e.must(yaml_emitter_emit(&e.emitter, &e.event))
}

func (e *encoder) must(ok bool) {
    if !ok {
        msg := e.emitter.problem
        if msg == "" {
            msg = "unknown problem generating YAML content"
        }
        failf("%s", msg)
    }
}

func (e *encoder) marshalDoc(tag string, in reflect.Value) {
    e.init()
    yaml_document_start_event_initialize(&e.event, nil, nil, true)
    e.emit()
    e.marshal(tag, in)
    yaml_document_end_event_initialize(&e.event, true)
    e.emit()
}

func (e *encoder) marshal(tag string, in reflect.Value) {
    if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
        e.nilv()
        return
    }
    iface := in.Interface()
    switch m := iface.(type) {
    case jsonNumber:
        integer, err := m.Int64()
        if err == nil {
            // In this case the json.Number is a valid int64
            in = reflect.ValueOf(integer)
            break
        }
        float, err := m.Float64()
        if err == nil {
            // In this case the json.Number is a valid float64
            in = reflect.ValueOf(float)
            break
        }
        // fallback case - no number could be obtained
        in = reflect.ValueOf(m.String())
    case time.Time, *time.Time:
        // Although time.Time implements TextMarshaler,
        // we don't want to treat it as a string for YAML
        // purposes because YAML has special support for
        // timestamps.
    case Marshaler:
        v, err := m.MarshalYAML()
        if err != nil {
            fail(err)
        }
        if v == nil {
            e.nilv()
            return
        }
        in = reflect.ValueOf(v)
    case encoding.TextMarshaler:
        text, err := m.MarshalText()
        if err != nil {
            fail(err)
        }
        in = reflect.ValueOf(string(text))
    case nil:
        e.nilv()
        return
    }
    switch in.Kind() {
    case reflect.Interface:
        e.marshal(tag, in.Elem())
    case reflect.Map:
        e.mapv(tag, in)
    case reflect.Ptr:
        if in.Type() == ptrTimeType {
            e.timev(tag, in.Elem())
        } else {
            e.marshal(tag, in.Elem())
        }
    case reflect.Struct:
        if in.Type() == timeType {
            e.timev(tag, in)
        } else {
            e.structv(tag, in)
        }
    case reflect.Slice, reflect.Array:
        if in.Type().Elem() == mapItemType {
            e.itemsv(tag, in)
        } else {
            e.slicev(tag, in)
        }
    case reflect.String:
        e.stringv(tag, in)
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
        if in.Type() == durationType {
            e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
        } else {
            e.intv(tag, in)
        }
    case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
        e.uintv(tag, in)
    case reflect.Float32, reflect.Float64:
        e.floatv(tag, in)
    case reflect.Bool:
        e.boolv(tag, in)
    default:
        panic("cannot marshal type: " + in.Type().String())
    }
}

func (e *encoder) mapv(tag string, in reflect.Value) {
    e.mappingv(tag, func() {
        keys := keyList(in.MapKeys())
        sort.Sort(keys)
        for _, k := range keys {
            e.marshal("", k)
            e.marshal("", in.MapIndex(k))
        }
    })
}

func (e *encoder) itemsv(tag string, in reflect.Value) {
    e.mappingv(tag, func() {
        slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
        for _, item := range slice {
            e.marshal("", reflect.ValueOf(item.Key))
            e.marshal("", reflect.ValueOf(item.Value))
        }
    })
}

func (e *encoder) structv(tag string, in reflect.Value) {
    sinfo, err := getStructInfo(in.Type())
    if err != nil {
        panic(err)
    }
    e.mappingv(tag, func() {
        for _, info := range sinfo.FieldsList {
            var value reflect.Value
            if info.Inline == nil {
                value = in.Field(info.Num)
            } else {
                value = in.FieldByIndex(info.Inline)
            }
            if info.OmitEmpty && isZero(value) {
                continue
            }
            e.marshal("", reflect.ValueOf(info.Key))
            e.flow = info.Flow
            e.marshal("", value)
        }
        if sinfo.InlineMap >= 0 {
            m := in.Field(sinfo.InlineMap)
            if m.Len() > 0 {
                e.flow = false
                keys := keyList(m.MapKeys())
                sort.Sort(keys)
                for _, k := range keys {
                    if _, found := sinfo.FieldsMap[k.String()]; found {
                        panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
                    }
                    e.marshal("", k)
                    e.flow = false
                    e.marshal("", m.MapIndex(k))
                }
            }
        }
    })
}

func (e *encoder) mappingv(tag string, f func()) {
    implicit := tag == ""
    style := yaml_BLOCK_MAPPING_STYLE
    if e.flow {
        e.flow = false
        style = yaml_FLOW_MAPPING_STYLE
    }
    yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
    e.emit()
    f()
    yaml_mapping_end_event_initialize(&e.event)
    e.emit()
}

func (e *encoder) slicev(tag string, in reflect.Value) {
    implicit := tag == ""
    style := yaml_BLOCK_SEQUENCE_STYLE
    if e.flow {
        e.flow = false
        style = yaml_FLOW_SEQUENCE_STYLE
    }
    e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
    e.emit()
    n := in.Len()
    for i := 0; i < n; i++ {
        e.marshal("", in.Index(i))
    }
    e.must(yaml_sequence_end_event_initialize(&e.event))
    e.emit()
}

// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
//
// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
// in YAML 1.2 and by this package, but these should be marshalled quoted for
// the time being for compatibility with other parsers.
func isBase60Float(s string) (result bool) {
    // Fast path.
    if s == "" {
        return false
    }
    c := s[0]
    if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
        return false
    }
    // Do the full match.
    return base60float.MatchString(s)
}

// From http://yaml.org/type/float.html, except the regular expression there
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)

func (e *encoder) stringv(tag string, in reflect.Value) {
    var style yaml_scalar_style_t
    s := in.String()
    canUsePlain := true
    switch {
    case !utf8.ValidString(s):
        if tag == yaml_BINARY_TAG {
            failf("explicitly tagged !!binary data must be base64-encoded")
        }
        if tag != "" {
            failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
        }
        // It can't be encoded directly as YAML so use a binary tag
        // and encode it as base64.
        tag = yaml_BINARY_TAG
        s = encodeBase64(s)
    case tag == "":
        // Check to see if it would resolve to a specific
        // tag when encoded unquoted. If it doesn't,
        // there's no need to quote it.
        rtag, _ := resolve("", s)
        canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
    }
    // Note: it's possible for user code to emit invalid YAML
    // if they explicitly specify a tag and a string containing
    // text that's incompatible with that tag.
    switch {
    case strings.Contains(s, "\n"):
        style = yaml_LITERAL_SCALAR_STYLE
    case canUsePlain:
        style = yaml_PLAIN_SCALAR_STYLE
    default:
        style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
    }
    e.emitScalar(s, "", tag, style)
}

func (e *encoder) boolv(tag string, in reflect.Value) {
    var s string
    if in.Bool() {
        s = "true"
    } else {
        s = "false"
    }
    e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}

func (e *encoder) intv(tag string, in reflect.Value) {
    s := strconv.FormatInt(in.Int(), 10)
    e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}

func (e *encoder) uintv(tag string, in reflect.Value) {
    s := strconv.FormatUint(in.Uint(), 10)
    e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}

func (e *encoder) timev(tag string, in reflect.Value) {
    t := in.Interface().(time.Time)
    s := t.Format(time.RFC3339Nano)
    e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}

func (e *encoder) floatv(tag string, in reflect.Value) {
    // Issue #352: When formatting, use the precision of the underlying value
    precision := 64
    if in.Kind() == reflect.Float32 {
        precision = 32
    }

    s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
    switch s {
    case "+Inf":
        s = ".inf"
    case "-Inf":
        s = "-.inf"
    case "NaN":
        s = ".nan"
    }
    e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}

func (e *encoder) nilv() {
    e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
}

func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
    implicit := tag == ""
    e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
    e.emit()
}
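A hedged usage sketch (again not part of the diff): values whose element type is MapItem take the itemsv path above, so their key order is preserved, while ordinary maps go through mapv and are sorted. Assuming the same import path as before:

package main

import (
    "fmt"

    yaml "sigs.k8s.io/yaml/goyaml.v2"
)

func main() {
    // MapSlice is []MapItem, so Marshal emits keys in insertion order.
    ordered := yaml.MapSlice{
        {Key: "zebra", Value: 1},
        {Key: "alpha", Value: 2},
    }
    out, err := yaml.Marshal(ordered)
    if err != nil {
        panic(err)
    }
    fmt.Print(string(out))
    // zebra: 1
    // alpha: 2
}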
1095 api/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go generated vendored Normal file (file diff suppressed because it is too large)
412 api/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go generated vendored Normal file
@@ -0,0 +1,412 @@
package yaml

import (
    "io"
)

// Set the reader error and return false.
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
    parser.error = yaml_READER_ERROR
    parser.problem = problem
    parser.problem_offset = offset
    parser.problem_value = value
    return false
}

// Byte order marks.
const (
    bom_UTF8    = "\xef\xbb\xbf"
    bom_UTF16LE = "\xff\xfe"
    bom_UTF16BE = "\xfe\xff"
)

// Determine the input stream encoding by checking the BOM symbol. If no BOM is
// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
    // Ensure that we had enough bytes in the raw buffer.
    for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
        if !yaml_parser_update_raw_buffer(parser) {
            return false
        }
    }

    // Determine the encoding.
    buf := parser.raw_buffer
    pos := parser.raw_buffer_pos
    avail := len(buf) - pos
    if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
        parser.encoding = yaml_UTF16LE_ENCODING
        parser.raw_buffer_pos += 2
        parser.offset += 2
    } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
        parser.encoding = yaml_UTF16BE_ENCODING
        parser.raw_buffer_pos += 2
        parser.offset += 2
    } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
        parser.encoding = yaml_UTF8_ENCODING
        parser.raw_buffer_pos += 3
        parser.offset += 3
    } else {
        parser.encoding = yaml_UTF8_ENCODING
    }
    return true
}

// Update the raw buffer.
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
    size_read := 0

    // Return if the raw buffer is full.
    if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
        return true
    }

    // Return on EOF.
    if parser.eof {
        return true
    }

    // Move the remaining bytes in the raw buffer to the beginning.
    if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
        copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
    }
    parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
    parser.raw_buffer_pos = 0

    // Call the read handler to fill the buffer.
    size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
    parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
    if err == io.EOF {
        parser.eof = true
    } else if err != nil {
        return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
    }
    return true
}

// Ensure that the buffer contains at least `length` characters.
// Return true on success, false on failure.
//
// The length is supposed to be significantly less than the buffer size.
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
    if parser.read_handler == nil {
        panic("read handler must be set")
    }

    // [Go] This function was changed to guarantee the requested length size at EOF.
    // The fact we need to do this is pretty awful, but the description above implies
    // that this should be the case, and there are tests for it.

    // If the EOF flag is set and the raw buffer is empty, do nothing.
    if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
        // [Go] ACTUALLY! Read the documentation of this function above.
        // This is just broken. To return true, we need to have the
        // given length in the buffer. Not doing that means every single
        // check that calls this function to make sure the buffer has a
        // given length is Go) panicking; or C) accessing invalid memory.
        //return true
    }

    // Return if the buffer contains enough characters.
    if parser.unread >= length {
        return true
    }

    // Determine the input encoding if it is not known yet.
    if parser.encoding == yaml_ANY_ENCODING {
        if !yaml_parser_determine_encoding(parser) {
            return false
        }
    }

    // Move the unread characters to the beginning of the buffer.
    buffer_len := len(parser.buffer)
    if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
        copy(parser.buffer, parser.buffer[parser.buffer_pos:])
        buffer_len -= parser.buffer_pos
        parser.buffer_pos = 0
    } else if parser.buffer_pos == buffer_len {
        buffer_len = 0
        parser.buffer_pos = 0
    }

    // Open the whole buffer for writing, and cut it before returning.
    parser.buffer = parser.buffer[:cap(parser.buffer)]

    // Fill the buffer until it has enough characters.
    first := true
    for parser.unread < length {

        // Fill the raw buffer if necessary.
        if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
            if !yaml_parser_update_raw_buffer(parser) {
                parser.buffer = parser.buffer[:buffer_len]
                return false
            }
        }
        first = false

        // Decode the raw buffer.
    inner:
        for parser.raw_buffer_pos != len(parser.raw_buffer) {
            var value rune
            var width int

            raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos

            // Decode the next character.
            switch parser.encoding {
            case yaml_UTF8_ENCODING:
                // Decode a UTF-8 character. Check RFC 3629
                // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
                //
                // The following table (taken from the RFC) is used for
                // decoding.
                //
                //    Char. number range |        UTF-8 octet sequence
                //      (hexadecimal)    |              (binary)
                //   --------------------+------------------------------------
                //   0000 0000-0000 007F | 0xxxxxxx
                //   0000 0080-0000 07FF | 110xxxxx 10xxxxxx
                //   0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
                //   0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
                //
                // Additionally, the characters in the range 0xD800-0xDFFF
                // are prohibited as they are reserved for use with UTF-16
                // surrogate pairs.

                // Determine the length of the UTF-8 sequence.
                octet := parser.raw_buffer[parser.raw_buffer_pos]
                switch {
                case octet&0x80 == 0x00:
                    width = 1
                case octet&0xE0 == 0xC0:
                    width = 2
                case octet&0xF0 == 0xE0:
                    width = 3
                case octet&0xF8 == 0xF0:
                    width = 4
                default:
                    // The leading octet is invalid.
                    return yaml_parser_set_reader_error(parser,
                        "invalid leading UTF-8 octet",
                        parser.offset, int(octet))
                }

                // Check if the raw buffer contains an incomplete character.
                if width > raw_unread {
                    if parser.eof {
                        return yaml_parser_set_reader_error(parser,
                            "incomplete UTF-8 octet sequence",
                            parser.offset, -1)
                    }
                    break inner
                }

                // Decode the leading octet.
                switch {
                case octet&0x80 == 0x00:
                    value = rune(octet & 0x7F)
                case octet&0xE0 == 0xC0:
                    value = rune(octet & 0x1F)
                case octet&0xF0 == 0xE0:
                    value = rune(octet & 0x0F)
                case octet&0xF8 == 0xF0:
                    value = rune(octet & 0x07)
                default:
                    value = 0
                }

                // Check and decode the trailing octets.
                for k := 1; k < width; k++ {
                    octet = parser.raw_buffer[parser.raw_buffer_pos+k]

                    // Check if the octet is valid.
                    if (octet & 0xC0) != 0x80 {
                        return yaml_parser_set_reader_error(parser,
                            "invalid trailing UTF-8 octet",
                            parser.offset+k, int(octet))
                    }

                    // Decode the octet.
                    value = (value << 6) + rune(octet&0x3F)
                }

                // Check the length of the sequence against the value.
                switch {
                case width == 1:
                case width == 2 && value >= 0x80:
                case width == 3 && value >= 0x800:
                case width == 4 && value >= 0x10000:
                default:
                    return yaml_parser_set_reader_error(parser,
                        "invalid length of a UTF-8 sequence",
                        parser.offset, -1)
                }

                // Check the range of the value.
                if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
                    return yaml_parser_set_reader_error(parser,
                        "invalid Unicode character",
                        parser.offset, int(value))
                }

            case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
                var low, high int
                if parser.encoding == yaml_UTF16LE_ENCODING {
                    low, high = 0, 1
                } else {
                    low, high = 1, 0
                }

                // The UTF-16 encoding is not as simple as one might
                // naively think. Check RFC 2781
                // (http://www.ietf.org/rfc/rfc2781.txt).
                //
                // Normally, two subsequent bytes describe a Unicode
                // character. However a special technique (called a
                // surrogate pair) is used for specifying character
                // values larger than 0xFFFF.
                //
                // A surrogate pair consists of two pseudo-characters:
                //      high surrogate area (0xD800-0xDBFF)
                //      low surrogate area (0xDC00-0xDFFF)
                //
                // The following formulas are used for decoding
                // and encoding characters using surrogate pairs:
                //
                //  U  = U' + 0x10000   (0x01 00 00 <= U <= 0x10 FF FF)
                //  U' = yyyyyyyyyyxxxxxxxxxx   (0 <= U' <= 0x0F FF FF)
                //  W1 = 110110yyyyyyyyyy
                //  W2 = 110111xxxxxxxxxx
                //
                // where U is the character value, W1 is the high surrogate
                // area, W2 is the low surrogate area.

                // Check for incomplete UTF-16 character.
                if raw_unread < 2 {
                    if parser.eof {
                        return yaml_parser_set_reader_error(parser,
                            "incomplete UTF-16 character",
                            parser.offset, -1)
                    }
                    break inner
                }

                // Get the character.
                value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
                    (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)

                // Check for unexpected low surrogate area.
                if value&0xFC00 == 0xDC00 {
                    return yaml_parser_set_reader_error(parser,
                        "unexpected low surrogate area",
                        parser.offset, int(value))
                }

                // Check for a high surrogate area.
                if value&0xFC00 == 0xD800 {
                    width = 4

                    // Check for incomplete surrogate pair.
                    if raw_unread < 4 {
                        if parser.eof {
                            return yaml_parser_set_reader_error(parser,
                                "incomplete UTF-16 surrogate pair",
                                parser.offset, -1)
                        }
                        break inner
                    }

                    // Get the next character.
                    value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
                        (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)

                    // Check for a low surrogate area.
                    if value2&0xFC00 != 0xDC00 {
                        return yaml_parser_set_reader_error(parser,
                            "expected low surrogate area",
                            parser.offset+2, int(value2))
                    }

                    // Generate the value of the surrogate pair.
                    value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
                } else {
                    width = 2
                }

            default:
                panic("impossible")
            }

            // Check if the character is in the allowed range:
            //      #x9 | #xA | #xD | [#x20-#x7E]               (8 bit)
            //      | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD]    (16 bit)
            //      | [#x10000-#x10FFFF]                        (32 bit)
            switch {
            case value == 0x09:
            case value == 0x0A:
            case value == 0x0D:
            case value >= 0x20 && value <= 0x7E:
            case value == 0x85:
            case value >= 0xA0 && value <= 0xD7FF:
            case value >= 0xE000 && value <= 0xFFFD:
            case value >= 0x10000 && value <= 0x10FFFF:
            default:
                return yaml_parser_set_reader_error(parser,
                    "control characters are not allowed",
                    parser.offset, int(value))
            }

            // Move the raw pointers.
            parser.raw_buffer_pos += width
            parser.offset += width

            // Finally put the character into the buffer.
            if value <= 0x7F {
                // 0000 0000-0000 007F . 0xxxxxxx
                parser.buffer[buffer_len+0] = byte(value)
                buffer_len += 1
            } else if value <= 0x7FF {
                // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
                parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
                parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
                buffer_len += 2
            } else if value <= 0xFFFF {
                // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
                parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
                parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
                parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
                buffer_len += 3
            } else {
                // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
                parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
                parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
                parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
                parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
                buffer_len += 4
            }

            parser.unread++
        }

        // On EOF, put NUL into the buffer and return.
        if parser.eof {
            parser.buffer[buffer_len] = 0
            buffer_len++
            parser.unread++
            break
        }
    }
    // [Go] Read the documentation of this function above. To return true,
    // we need to have the given length in the buffer. Not doing that means
    // every single check that calls this function to make sure the buffer
    // has a given length is Go) panicking; or C) accessing invalid memory.
    // This happens here due to the EOF above breaking early.
    for buffer_len < length {
        parser.buffer[buffer_len] = 0
        buffer_len++
    }
    parser.buffer = parser.buffer[:buffer_len]
    return true
}
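Illustrative sketch (an assumption about the public behavior, not part of the diff): the BOM detection in yaml_parser_determine_encoding above means a UTF-16 input with a byte order mark should decode transparently, since the reader transcodes it to UTF-8 in parser.buffer:

package main

import (
    "fmt"

    yaml "sigs.k8s.io/yaml/goyaml.v2"
)

func main() {
    // "a: 1\n" encoded as UTF-16LE with a leading BOM (0xFF 0xFE).
    doc := []byte{0xff, 0xfe, 'a', 0, ':', 0, ' ', 0, '1', 0, '\n', 0}
    var v map[string]int
    if err := yaml.Unmarshal(doc, &v); err != nil {
        panic(err)
    }
    fmt.Println(v["a"]) // 1
}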
258 api/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go generated vendored Normal file
@@ -0,0 +1,258 @@
package yaml

import (
    "encoding/base64"
    "math"
    "regexp"
    "strconv"
    "strings"
    "time"
)

type resolveMapItem struct {
    value interface{}
    tag   string
}

var resolveTable = make([]byte, 256)
var resolveMap = make(map[string]resolveMapItem)

func init() {
    t := resolveTable
    t[int('+')] = 'S' // Sign
    t[int('-')] = 'S'
    for _, c := range "0123456789" {
        t[int(c)] = 'D' // Digit
    }
    for _, c := range "yYnNtTfFoO~" {
        t[int(c)] = 'M' // In map
    }
    t[int('.')] = '.' // Float (potentially in map)

    var resolveMapList = []struct {
        v   interface{}
        tag string
        l   []string
    }{
        {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
        {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
        {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
        {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
        {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
        {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
        {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
        {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
        {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
        {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
        {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
        {"<<", yaml_MERGE_TAG, []string{"<<"}},
    }

    m := resolveMap
    for _, item := range resolveMapList {
        for _, s := range item.l {
            m[s] = resolveMapItem{item.v, item.tag}
        }
    }
}

const longTagPrefix = "tag:yaml.org,2002:"

func shortTag(tag string) string {
    // TODO This can easily be made faster and produce less garbage.
    if strings.HasPrefix(tag, longTagPrefix) {
        return "!!" + tag[len(longTagPrefix):]
    }
    return tag
}

func longTag(tag string) string {
    if strings.HasPrefix(tag, "!!") {
        return longTagPrefix + tag[2:]
    }
    return tag
}

func resolvableTag(tag string) bool {
    switch tag {
    case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
        return true
    }
    return false
}

var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)

func resolve(tag string, in string) (rtag string, out interface{}) {
    if !resolvableTag(tag) {
        return tag, in
    }

    defer func() {
        switch tag {
        case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
            return
        case yaml_FLOAT_TAG:
            if rtag == yaml_INT_TAG {
                switch v := out.(type) {
                case int64:
                    rtag = yaml_FLOAT_TAG
                    out = float64(v)
                    return
                case int:
                    rtag = yaml_FLOAT_TAG
                    out = float64(v)
                    return
                }
            }
        }
        failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
    }()

    // Any data is accepted as a !!str or !!binary.
    // Otherwise, the prefix is enough of a hint about what it might be.
    hint := byte('N')
    if in != "" {
        hint = resolveTable[in[0]]
    }
    if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
        // Handle things we can lookup in a map.
        if item, ok := resolveMap[in]; ok {
            return item.tag, item.value
        }

        // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
        // are purposefully unsupported here. They're still quoted on
        // the way out for compatibility with other parsers, though.

        switch hint {
        case 'M':
            // We've already checked the map above.

        case '.':
            // Not in the map, so maybe a normal float.
            floatv, err := strconv.ParseFloat(in, 64)
            if err == nil {
                return yaml_FLOAT_TAG, floatv
            }

        case 'D', 'S':
            // Int, float, or timestamp.
            // Only try values as a timestamp if the value is unquoted or there's an explicit
            // !!timestamp tag.
            if tag == "" || tag == yaml_TIMESTAMP_TAG {
                t, ok := parseTimestamp(in)
                if ok {
                    return yaml_TIMESTAMP_TAG, t
                }
            }

            plain := strings.Replace(in, "_", "", -1)
            intv, err := strconv.ParseInt(plain, 0, 64)
            if err == nil {
                if intv == int64(int(intv)) {
                    return yaml_INT_TAG, int(intv)
                } else {
                    return yaml_INT_TAG, intv
                }
            }
            uintv, err := strconv.ParseUint(plain, 0, 64)
            if err == nil {
                return yaml_INT_TAG, uintv
            }
            if yamlStyleFloat.MatchString(plain) {
                floatv, err := strconv.ParseFloat(plain, 64)
                if err == nil {
                    return yaml_FLOAT_TAG, floatv
                }
            }
            if strings.HasPrefix(plain, "0b") {
                intv, err := strconv.ParseInt(plain[2:], 2, 64)
                if err == nil {
                    if intv == int64(int(intv)) {
                        return yaml_INT_TAG, int(intv)
                    } else {
                        return yaml_INT_TAG, intv
                    }
                }
                uintv, err := strconv.ParseUint(plain[2:], 2, 64)
                if err == nil {
                    return yaml_INT_TAG, uintv
                }
            } else if strings.HasPrefix(plain, "-0b") {
                intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
                if err == nil {
                    if true || intv == int64(int(intv)) {
                        return yaml_INT_TAG, int(intv)
                    } else {
                        return yaml_INT_TAG, intv
                    }
                }
            }
        default:
            panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
        }
    }
    return yaml_STR_TAG, in
}

// encodeBase64 encodes s as base64 that is broken up into multiple lines
// as appropriate for the resulting length.
func encodeBase64(s string) string {
    const lineLen = 70
    encLen := base64.StdEncoding.EncodedLen(len(s))
    lines := encLen/lineLen + 1
    buf := make([]byte, encLen*2+lines)
    in := buf[0:encLen]
    out := buf[encLen:]
    base64.StdEncoding.Encode(in, []byte(s))
    k := 0
    for i := 0; i < len(in); i += lineLen {
        j := i + lineLen
        if j > len(in) {
            j = len(in)
        }
        k += copy(out[k:], in[i:j])
        if lines > 1 {
            out[k] = '\n'
            k++
        }
    }
    return string(out[:k])
}

// This is a subset of the formats allowed by the regular expression
// defined at http://yaml.org/type/timestamp.html.
var allowedTimestampFormats = []string{
    "2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
    "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
    "2006-1-2 15:4:5.999999999",       // space separated with no time zone
    "2006-1-2",                        // date only
    // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
    // from the set of examples.
}

// parseTimestamp parses s as a timestamp string and
// returns the timestamp and reports whether it succeeded.
// Timestamp formats are defined at http://yaml.org/type/timestamp.html
func parseTimestamp(s string) (time.Time, bool) {
    // TODO write code to check all the formats supported by
    // http://yaml.org/type/timestamp.html instead of using time.Parse.

    // Quick check: all date formats start with YYYY-.
    i := 0
    for ; i < len(s); i++ {
        if c := s[i]; c < '0' || c > '9' {
            break
        }
    }
    if i != 4 || i == len(s) || s[i] != '-' {
        return time.Time{}, false
    }
    for _, format := range allowedTimestampFormats {
        if t, err := time.Parse(format, s); err == nil {
            return t, true
        }
    }
    return time.Time{}, false
}
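A small sketch of the YAML 1.1 resolution table above in action (hedged; the import path is an assumption as before): unquoted yes/off become booleans, ~ becomes nil, underscores are stripped from numbers, and timestamp-shaped scalars decoded into interface{} stay strings for backward compatibility (see the yaml_TIMESTAMP_TAG branch in decode.go above):

package main

import (
    "fmt"

    yaml "sigs.k8s.io/yaml/goyaml.v2"
)

func main() {
    doc := []byte("a: yes\nb: ~\nc: 1_000\nd: 2001-12-15\n")
    var v map[string]interface{}
    if err := yaml.Unmarshal(doc, &v); err != nil {
        panic(err)
    }
    fmt.Printf("%v %v %v %T\n", v["a"], v["b"], v["c"], v["d"])
    // true <nil> 1000 string
}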
2711 api/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go generated vendored Normal file (file diff suppressed because it is too large)
113 api/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go generated vendored Normal file
@@ -0,0 +1,113 @@
package yaml

import (
    "reflect"
    "unicode"
)

type keyList []reflect.Value

func (l keyList) Len() int      { return len(l) }
func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l keyList) Less(i, j int) bool {
    a := l[i]
    b := l[j]
    ak := a.Kind()
    bk := b.Kind()
    for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
        a = a.Elem()
        ak = a.Kind()
    }
    for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
        b = b.Elem()
        bk = b.Kind()
    }
    af, aok := keyFloat(a)
    bf, bok := keyFloat(b)
    if aok && bok {
        if af != bf {
            return af < bf
        }
        if ak != bk {
            return ak < bk
        }
        return numLess(a, b)
    }
    if ak != reflect.String || bk != reflect.String {
        return ak < bk
    }
    ar, br := []rune(a.String()), []rune(b.String())
    for i := 0; i < len(ar) && i < len(br); i++ {
        if ar[i] == br[i] {
            continue
        }
        al := unicode.IsLetter(ar[i])
        bl := unicode.IsLetter(br[i])
        if al && bl {
            return ar[i] < br[i]
        }
        if al || bl {
            return bl
        }
        var ai, bi int
        var an, bn int64
        if ar[i] == '0' || br[i] == '0' {
            for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
                if ar[j] != '0' {
                    an = 1
                    bn = 1
                    break
                }
            }
        }
        for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
            an = an*10 + int64(ar[ai]-'0')
        }
        for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
            bn = bn*10 + int64(br[bi]-'0')
        }
        if an != bn {
            return an < bn
        }
        if ai != bi {
            return ai < bi
        }
        return ar[i] < br[i]
    }
    return len(ar) < len(br)
}

// keyFloat returns a float value for v if it is a number/bool
// and whether it is a number/bool or not.
func keyFloat(v reflect.Value) (f float64, ok bool) {
    switch v.Kind() {
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
        return float64(v.Int()), true
    case reflect.Float32, reflect.Float64:
        return v.Float(), true
    case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
        return float64(v.Uint()), true
    case reflect.Bool:
        if v.Bool() {
            return 1, true
        }
        return 0, true
    }
    return 0, false
}

// numLess returns whether a < b.
// a and b must necessarily have the same kind.
func numLess(a, b reflect.Value) bool {
    switch a.Kind() {
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
        return a.Int() < b.Int()
    case reflect.Float32, reflect.Float64:
        return a.Float() < b.Float()
    case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
        return a.Uint() < b.Uint()
    case reflect.Bool:
        return !a.Bool() && b.Bool()
    }
    panic("not a number")
}
|
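The comparator above is what gives goyaml.v2 its characteristic map-key ordering: numbers sort before strings, and digit runs inside strings compare numerically rather than lexically. A minimal sketch of that behavior, assuming the upstream gopkg.in/yaml.v2 import path (the vendored copy under sigs.k8s.io/yaml/goyaml.v2 is internal and not meant to be imported directly):

package main

import (
    "fmt"

    yaml "gopkg.in/yaml.v2" // assumed import path for the upstream copy
)

func main() {
    m := map[string]int{"item10": 3, "item2": 2, "item1": 1}
    out, err := yaml.Marshal(m)
    if err != nil {
        panic(err)
    }
    // Digit runs compare numerically, so item2 sorts before item10:
    // item1: 1
    // item2: 2
    // item10: 3
    fmt.Print(string(out))
}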
26
api/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go
generated
vendored
Normal file
@@ -0,0 +1,26 @@
package yaml

// Set the writer error and return false.
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
    emitter.error = yaml_WRITER_ERROR
    emitter.problem = problem
    return false
}

// Flush the output buffer.
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
    if emitter.write_handler == nil {
        panic("write handler not set")
    }

    // Check if the buffer is empty.
    if emitter.buffer_pos == 0 {
        return true
    }

    if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
        return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
    }
    emitter.buffer_pos = 0
    return true
}
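yaml_emitter_flush hands the working buffer to a pluggable write handler and resets the cursor only on success. A self-contained sketch of that contract with hypothetical names (miniEmitter is not part of the library):

package main

import (
    "bytes"
    "fmt"
)

// miniEmitter is a hypothetical stand-in for yaml_emitter_t, reduced to the
// fields the flush path actually touches.
type miniEmitter struct {
    writeHandler func(buf []byte) error
    buffer       []byte
    bufferPos    int
    problem      string
}

// flush mirrors yaml_emitter_flush: an empty buffer is a no-op success, a
// handler error is recorded as the problem, and a successful write resets
// the buffer cursor.
func (e *miniEmitter) flush() bool {
    if e.writeHandler == nil {
        panic("write handler not set")
    }
    if e.bufferPos == 0 {
        return true
    }
    if err := e.writeHandler(e.buffer[:e.bufferPos]); err != nil {
        e.problem = "write error: " + err.Error()
        return false
    }
    e.bufferPos = 0
    return true
}

func main() {
    var out bytes.Buffer
    e := &miniEmitter{
        writeHandler: func(buf []byte) error { _, err := out.Write(buf); return err },
        buffer:       []byte("a: 1\n"),
        bufferPos:    5,
    }
    fmt.Println(e.flush(), out.String()) // true a: 1
}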
478
api/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go
generated
vendored
Normal file
@@ -0,0 +1,478 @@
// Package yaml implements YAML support for the Go language.
//
// Source code and other details for the project are available at GitHub:
//
//     https://github.com/go-yaml/yaml
//
package yaml

import (
    "errors"
    "fmt"
    "io"
    "reflect"
    "strings"
    "sync"
)

// MapSlice encodes and decodes as a YAML map.
// The order of keys is preserved when encoding and decoding.
type MapSlice []MapItem

// MapItem is an item in a MapSlice.
type MapItem struct {
    Key, Value interface{}
}

// The Unmarshaler interface may be implemented by types to customize their
// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
// method receives a function that may be called to unmarshal the original
// YAML value into a field or variable. It is safe to call the unmarshal
// function parameter more than once if necessary.
type Unmarshaler interface {
    UnmarshalYAML(unmarshal func(interface{}) error) error
}

// The Marshaler interface may be implemented by types to customize their
// behavior when being marshaled into a YAML document. The returned value
// is marshaled in place of the original value implementing Marshaler.
//
// If an error is returned by MarshalYAML, the marshaling procedure stops
// and returns with the provided error.
type Marshaler interface {
    MarshalYAML() (interface{}, error)
}

// Unmarshal decodes the first document found within the in byte slice
// and assigns decoded values into the out value.
//
// Maps and pointers (to a struct, string, int, etc) are accepted as out
// values. If an internal pointer within a struct is not initialized,
// the yaml package will initialize it if necessary for unmarshalling
// the provided data. The out parameter must not be nil.
//
// The type of the decoded values should be compatible with the respective
// values in out. If one or more values cannot be decoded due to type
// mismatches, decoding continues partially until the end of the YAML
// content, and a *yaml.TypeError is returned with details for all
// missed values.
//
// Struct fields are only unmarshalled if they are exported (have an
// upper case first letter), and are unmarshalled using the field name
// lowercased as the default key. Custom keys may be defined via the
// "yaml" name in the field tag: the content preceding the first comma
// is used as the key, and the following comma-separated options are
// used to tweak the marshalling process (see Marshal).
// Conflicting names result in a runtime error.
//
// For example:
//
//     type T struct {
//         F int `yaml:"a,omitempty"`
//         B int
//     }
//     var t T
//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
//
// See the documentation of Marshal for the format of tags and a list of
// supported tag options.
//
func Unmarshal(in []byte, out interface{}) (err error) {
    return unmarshal(in, out, false)
}
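A runnable sketch of the partial-decode behavior documented above, assuming the upstream gopkg.in/yaml.v2 import path: fields that decode cleanly are kept even when another field fails, and the failure is reported through *yaml.TypeError.

package main

import (
    "fmt"

    yaml "gopkg.in/yaml.v2" // assumed import path for the upstream copy
)

type Config struct {
    Name  string `yaml:"name"`
    Port  int    `yaml:"port"`
    Debug bool   `yaml:"debug"`
}

func main() {
    var c Config
    // "port" cannot decode into an int, so a *yaml.TypeError is returned,
    // but the other fields are still filled in.
    err := yaml.Unmarshal([]byte("name: demo\nport: not-a-number\ndebug: true"), &c)
    fmt.Printf("%+v\n", c) // {Name:demo Port:0 Debug:true}
    if te, ok := err.(*yaml.TypeError); ok {
        fmt.Println(te.Errors) // one entry describing the port line
    }
}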

// UnmarshalStrict is like Unmarshal except that any fields that are found
// in the data that do not have corresponding struct members, or mapping
// keys that are duplicates, will result in an error.
func UnmarshalStrict(in []byte, out interface{}) (err error) {
    return unmarshal(in, out, true)
}

// A Decoder reads and decodes YAML values from an input stream.
type Decoder struct {
    strict bool
    parser *parser
}

// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may read
// data from r beyond the YAML values requested.
func NewDecoder(r io.Reader) *Decoder {
    return &Decoder{
        parser: newParserFromReader(r),
    }
}

// SetStrict sets whether strict decoding behaviour is enabled when
// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
func (dec *Decoder) SetStrict(strict bool) {
    dec.strict = strict
}

// Decode reads the next YAML-encoded value from its input
// and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about the
// conversion of YAML into a Go value.
func (dec *Decoder) Decode(v interface{}) (err error) {
    d := newDecoder(dec.strict)
    defer handleErr(&err)
    node := dec.parser.parse()
    if node == nil {
        return io.EOF
    }
    out := reflect.ValueOf(v)
    if out.Kind() == reflect.Ptr && !out.IsNil() {
        out = out.Elem()
    }
    d.unmarshal(node, out)
    if len(d.terrors) > 0 {
        return &TypeError{d.terrors}
    }
    return nil
}
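Decode returns io.EOF once the stream is exhausted, which yields the usual multi-document read loop. A sketch under the same assumed import path:

package main

import (
    "fmt"
    "io"
    "strings"

    yaml "gopkg.in/yaml.v2" // assumed import path for the upstream copy
)

func main() {
    stream := "name: first\n---\nname: second\n"
    dec := yaml.NewDecoder(strings.NewReader(stream))
    for {
        var doc map[string]string
        if err := dec.Decode(&doc); err == io.EOF {
            break // end of stream
        } else if err != nil {
            panic(err)
        }
        fmt.Println(doc["name"])
    }
    // Prints "first", then "second".
}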

func unmarshal(in []byte, out interface{}, strict bool) (err error) {
    defer handleErr(&err)
    d := newDecoder(strict)
    p := newParser(in)
    defer p.destroy()
    node := p.parse()
    if node != nil {
        v := reflect.ValueOf(out)
        if v.Kind() == reflect.Ptr && !v.IsNil() {
            v = v.Elem()
        }
        d.unmarshal(node, v)
    }
    if len(d.terrors) > 0 {
        return &TypeError{d.terrors}
    }
    return nil
}

// Marshal serializes the value provided into a YAML document. The structure
// of the generated document will reflect the structure of the value itself.
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
//
// Struct fields are only marshalled if they are exported (have an upper case
// first letter), and are marshalled using the field name lowercased as the
// default key. Custom keys may be defined via the "yaml" name in the field
// tag: the content preceding the first comma is used as the key, and the
// following comma-separated options are used to tweak the marshalling process.
// Conflicting names result in a runtime error.
//
// The field tag format accepted is:
//
//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
//     omitempty    Only include the field if it's not set to the zero
//                  value for the type or to empty slices or maps.
//                  Zero valued structs will be omitted if all their public
//                  fields are zero, unless they implement an IsZero
//                  method (see the IsZeroer interface type), in which
//                  case the field will be excluded if IsZero returns true.
//
//     flow         Marshal using a flow style (useful for structs,
//                  sequences and maps).
//
//     inline       Inline the field, which must be a struct or a map,
//                  causing all of its fields or keys to be processed as if
//                  they were part of the outer struct. For maps, keys must
//                  not conflict with the yaml keys of other struct fields.
//
// In addition, if the key is "-", the field is ignored.
//
// For example:
//
//     type T struct {
//         F int `yaml:"a,omitempty"`
//         B int
//     }
//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
//
func Marshal(in interface{}) (out []byte, err error) {
    defer handleErr(&err)
    e := newEncoder()
    defer e.destroy()
    e.marshalDoc("", reflect.ValueOf(in))
    e.finish()
    out = e.out
    return
}
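A sketch exercising the omitempty, flow, inline, and "-" tag options described above, under the same assumed import path; the trailing comments show the expected rendering:

package main

import (
    "fmt"

    yaml "gopkg.in/yaml.v2" // assumed import path for the upstream copy
)

type Server struct {
    Host    string            `yaml:"host"`
    Port    int               `yaml:"port,omitempty"` // zero, so omitted
    Tags    []string          `yaml:"tags,flow,omitempty"`
    Extra   map[string]string `yaml:",inline"` // keys merged into the outer map
    Ignored string            `yaml:"-"`
}

func main() {
    out, _ := yaml.Marshal(Server{
        Host:  "db1",
        Tags:  []string{"prod", "east"},
        Extra: map[string]string{"rack": "r7"},
    })
    fmt.Print(string(out))
    // host: db1
    // tags: [prod, east]
    // rack: r7
}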

// An Encoder writes YAML values to an output stream.
type Encoder struct {
    encoder *encoder
}

// NewEncoder returns a new encoder that writes to w.
// The Encoder should be closed after use to flush all data
// to w.
func NewEncoder(w io.Writer) *Encoder {
    return &Encoder{
        encoder: newEncoderWithWriter(w),
    }
}

// Encode writes the YAML encoding of v to the stream.
// If multiple items are encoded to the stream, the
// second and subsequent documents will be preceded
// with a "---" document separator, but the first will not.
//
// See the documentation for Marshal for details about the conversion of Go
// values to YAML.
func (e *Encoder) Encode(v interface{}) (err error) {
    defer handleErr(&err)
    e.encoder.marshalDoc("", reflect.ValueOf(v))
    return nil
}

// Close closes the encoder by writing any remaining data.
// It does not write a stream terminating string "...".
func (e *Encoder) Close() (err error) {
    defer handleErr(&err)
    e.encoder.finish()
    return nil
}
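A short usage sketch for the Encoder, under the same assumed import path; note the deferred Close, which flushes the emitter:

package main

import (
    "os"

    yaml "gopkg.in/yaml.v2" // assumed import path for the upstream copy
)

func main() {
    enc := yaml.NewEncoder(os.Stdout)
    defer enc.Close() // flushes buffered output

    // Two documents on one stream; the second is preceded by "---".
    _ = enc.Encode(map[string]int{"a": 1})
    _ = enc.Encode(map[string]int{"b": 2})
    // a: 1
    // ---
    // b: 2
}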

func handleErr(err *error) {
    if v := recover(); v != nil {
        if e, ok := v.(yamlError); ok {
            *err = e.err
        } else {
            panic(v)
        }
    }
}

type yamlError struct {
    err error
}

func fail(err error) {
    panic(yamlError{err})
}

func failf(format string, args ...interface{}) {
    panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
}

// A TypeError is returned by Unmarshal when one or more fields in
// the YAML document cannot be properly decoded into the requested
// types. When this error is returned, the value is still
// unmarshaled partially.
type TypeError struct {
    Errors []string
}

func (e *TypeError) Error() string {
    return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
}

// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes

// The code in this section was copied from mgo/bson.

// structInfo holds details for the serialization of fields of
// a given struct.
type structInfo struct {
    FieldsMap  map[string]fieldInfo
    FieldsList []fieldInfo

    // InlineMap is the number of the field in the struct that
    // contains an ,inline map, or -1 if there's none.
    InlineMap int
}

type fieldInfo struct {
    Key       string
    Num       int
    OmitEmpty bool
    Flow      bool
    // Id holds the unique field identifier, so we can cheaply
    // check for field duplicates without maintaining an extra map.
    Id int

    // Inline holds the field index if the field is part of an inlined struct.
    Inline []int
}

var structMap = make(map[reflect.Type]*structInfo)
var fieldMapMutex sync.RWMutex

func getStructInfo(st reflect.Type) (*structInfo, error) {
    fieldMapMutex.RLock()
    sinfo, found := structMap[st]
    fieldMapMutex.RUnlock()
    if found {
        return sinfo, nil
    }

    n := st.NumField()
    fieldsMap := make(map[string]fieldInfo)
    fieldsList := make([]fieldInfo, 0, n)
    inlineMap := -1
    for i := 0; i != n; i++ {
        field := st.Field(i)
        if field.PkgPath != "" && !field.Anonymous {
            continue // Private field
        }

        info := fieldInfo{Num: i}

        tag := field.Tag.Get("yaml")
        if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
            tag = string(field.Tag)
        }
        if tag == "-" {
            continue
        }

        inline := false
        fields := strings.Split(tag, ",")
        if len(fields) > 1 {
            for _, flag := range fields[1:] {
                switch flag {
                case "omitempty":
                    info.OmitEmpty = true
                case "flow":
                    info.Flow = true
                case "inline":
                    inline = true
                default:
                    return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
                }
            }
            tag = fields[0]
        }

        if inline {
            switch field.Type.Kind() {
            case reflect.Map:
                if inlineMap >= 0 {
                    return nil, errors.New("Multiple ,inline maps in struct " + st.String())
                }
                if field.Type.Key() != reflect.TypeOf("") {
                    return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
                }
                inlineMap = info.Num
            case reflect.Struct:
                sinfo, err := getStructInfo(field.Type)
                if err != nil {
                    return nil, err
                }
                for _, finfo := range sinfo.FieldsList {
                    if _, found := fieldsMap[finfo.Key]; found {
                        msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
                        return nil, errors.New(msg)
                    }
                    if finfo.Inline == nil {
                        finfo.Inline = []int{i, finfo.Num}
                    } else {
                        finfo.Inline = append([]int{i}, finfo.Inline...)
                    }
                    finfo.Id = len(fieldsList)
                    fieldsMap[finfo.Key] = finfo
                    fieldsList = append(fieldsList, finfo)
                }
            default:
                //return nil, errors.New("Option ,inline needs a struct value or map field")
                return nil, errors.New("Option ,inline needs a struct value field")
            }
            continue
        }

        if tag != "" {
            info.Key = tag
        } else {
            info.Key = strings.ToLower(field.Name)
        }

        if _, found = fieldsMap[info.Key]; found {
            msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
            return nil, errors.New(msg)
        }

        info.Id = len(fieldsList)
        fieldsList = append(fieldsList, info)
        fieldsMap[info.Key] = info
    }

    sinfo = &structInfo{
        FieldsMap:  fieldsMap,
        FieldsList: fieldsList,
        InlineMap:  inlineMap,
    }

    fieldMapMutex.Lock()
    structMap[st] = sinfo
    fieldMapMutex.Unlock()
    return sinfo, nil
}

// IsZeroer is used to check whether an object is zero to
// determine whether it should be omitted when marshaling
// with the omitempty flag. One notable implementation
// is time.Time.
type IsZeroer interface {
    IsZero() bool
}

func isZero(v reflect.Value) bool {
    kind := v.Kind()
    if z, ok := v.Interface().(IsZeroer); ok {
        if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
            return true
        }
        return z.IsZero()
    }
    switch kind {
    case reflect.String:
        return len(v.String()) == 0
    case reflect.Interface, reflect.Ptr:
        return v.IsNil()
    case reflect.Slice:
        return v.Len() == 0
    case reflect.Map:
        return v.Len() == 0
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
        return v.Int() == 0
    case reflect.Float32, reflect.Float64:
        return v.Float() == 0
    case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
        return v.Uint() == 0
    case reflect.Bool:
        return !v.Bool()
    case reflect.Struct:
        vt := v.Type()
        for i := v.NumField() - 1; i >= 0; i-- {
            if vt.Field(i).PkgPath != "" {
                continue // Private field
            }
            if !isZero(v.Field(i)) {
                return false
            }
        }
        return true
    }
    return false
}
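Because isZero consults the IsZeroer interface before falling back on reflection, a type such as time.Time controls its own omitempty behavior. A sketch under the same assumed import path:

package main

import (
    "fmt"
    "time"

    yaml "gopkg.in/yaml.v2" // assumed import path for the upstream copy
)

type Event struct {
    Name string    `yaml:"name"`
    At   time.Time `yaml:"at,omitempty"` // time.Time implements IsZero
}

func main() {
    out, _ := yaml.Marshal(Event{Name: "boot"})
    // "at" is omitted because time.Time{}.IsZero() reports true.
    fmt.Print(string(out)) // name: boot
}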

// FutureLineWrap globally disables line wrapping when encoding long strings.
// This is a temporary and thus deprecated method introduced to facilitate
// migration towards v3, which offers more control of line lengths on
// individual encodings, and has a default matching the behavior introduced
// by this function.
//
// The default formatting of v2 was erroneously changed in v2.3.0 and reverted
// in v2.4.0, at which point this function was introduced to help migration.
func FutureLineWrap() {
    disableLineWrapping = true
}
739
api/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go
generated
vendored
Normal file
@@ -0,0 +1,739 @@
package yaml

import (
    "fmt"
    "io"
)

// The version directive data.
type yaml_version_directive_t struct {
    major int8 // The major version number.
    minor int8 // The minor version number.
}

// The tag directive data.
type yaml_tag_directive_t struct {
    handle []byte // The tag handle.
    prefix []byte // The tag prefix.
}

type yaml_encoding_t int

// The stream encoding.
const (
    // Let the parser choose the encoding.
    yaml_ANY_ENCODING yaml_encoding_t = iota

    yaml_UTF8_ENCODING    // The default UTF-8 encoding.
    yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
    yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
)

type yaml_break_t int

// Line break types.
const (
    // Let the parser choose the break type.
    yaml_ANY_BREAK yaml_break_t = iota

    yaml_CR_BREAK   // Use CR for line breaks (Mac style).
    yaml_LN_BREAK   // Use LN for line breaks (Unix style).
    yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
)

type yaml_error_type_t int

// Many bad things could happen with the parser and emitter.
const (
    // No error is produced.
    yaml_NO_ERROR yaml_error_type_t = iota

    yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
    yaml_READER_ERROR   // Cannot read or decode the input stream.
    yaml_SCANNER_ERROR  // Cannot scan the input stream.
    yaml_PARSER_ERROR   // Cannot parse the input stream.
    yaml_COMPOSER_ERROR // Cannot compose a YAML document.
    yaml_WRITER_ERROR   // Cannot write to the output stream.
    yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
)

// The pointer position.
type yaml_mark_t struct {
    index  int // The position index.
    line   int // The position line.
    column int // The position column.
}

// Node Styles

type yaml_style_t int8

type yaml_scalar_style_t yaml_style_t

// Scalar styles.
const (
    // Let the emitter choose the style.
    yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota

    yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
    yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
    yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
    yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
    yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
)

type yaml_sequence_style_t yaml_style_t

// Sequence styles.
const (
    // Let the emitter choose the style.
    yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota

    yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
    yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
)

type yaml_mapping_style_t yaml_style_t

// Mapping styles.
const (
    // Let the emitter choose the style.
    yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota

    yaml_BLOCK_MAPPING_STYLE // The block mapping style.
    yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
)

// Tokens

type yaml_token_type_t int

// Token types.
const (
    // An empty token.
    yaml_NO_TOKEN yaml_token_type_t = iota

    yaml_STREAM_START_TOKEN // A STREAM-START token.
    yaml_STREAM_END_TOKEN   // A STREAM-END token.

    yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
    yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
    yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
    yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.

    yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
    yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
    yaml_BLOCK_END_TOKEN            // A BLOCK-END token.

    yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
    yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
    yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
    yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.

    yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
    yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
    yaml_KEY_TOKEN         // A KEY token.
    yaml_VALUE_TOKEN       // A VALUE token.

    yaml_ALIAS_TOKEN  // An ALIAS token.
    yaml_ANCHOR_TOKEN // An ANCHOR token.
    yaml_TAG_TOKEN    // A TAG token.
    yaml_SCALAR_TOKEN // A SCALAR token.
)

func (tt yaml_token_type_t) String() string {
    switch tt {
    case yaml_NO_TOKEN:
        return "yaml_NO_TOKEN"
    case yaml_STREAM_START_TOKEN:
        return "yaml_STREAM_START_TOKEN"
    case yaml_STREAM_END_TOKEN:
        return "yaml_STREAM_END_TOKEN"
    case yaml_VERSION_DIRECTIVE_TOKEN:
        return "yaml_VERSION_DIRECTIVE_TOKEN"
    case yaml_TAG_DIRECTIVE_TOKEN:
        return "yaml_TAG_DIRECTIVE_TOKEN"
    case yaml_DOCUMENT_START_TOKEN:
        return "yaml_DOCUMENT_START_TOKEN"
    case yaml_DOCUMENT_END_TOKEN:
        return "yaml_DOCUMENT_END_TOKEN"
    case yaml_BLOCK_SEQUENCE_START_TOKEN:
        return "yaml_BLOCK_SEQUENCE_START_TOKEN"
    case yaml_BLOCK_MAPPING_START_TOKEN:
        return "yaml_BLOCK_MAPPING_START_TOKEN"
    case yaml_BLOCK_END_TOKEN:
        return "yaml_BLOCK_END_TOKEN"
    case yaml_FLOW_SEQUENCE_START_TOKEN:
        return "yaml_FLOW_SEQUENCE_START_TOKEN"
    case yaml_FLOW_SEQUENCE_END_TOKEN:
        return "yaml_FLOW_SEQUENCE_END_TOKEN"
    case yaml_FLOW_MAPPING_START_TOKEN:
        return "yaml_FLOW_MAPPING_START_TOKEN"
    case yaml_FLOW_MAPPING_END_TOKEN:
        return "yaml_FLOW_MAPPING_END_TOKEN"
    case yaml_BLOCK_ENTRY_TOKEN:
        return "yaml_BLOCK_ENTRY_TOKEN"
    case yaml_FLOW_ENTRY_TOKEN:
        return "yaml_FLOW_ENTRY_TOKEN"
    case yaml_KEY_TOKEN:
        return "yaml_KEY_TOKEN"
    case yaml_VALUE_TOKEN:
        return "yaml_VALUE_TOKEN"
    case yaml_ALIAS_TOKEN:
        return "yaml_ALIAS_TOKEN"
    case yaml_ANCHOR_TOKEN:
        return "yaml_ANCHOR_TOKEN"
    case yaml_TAG_TOKEN:
        return "yaml_TAG_TOKEN"
    case yaml_SCALAR_TOKEN:
        return "yaml_SCALAR_TOKEN"
    }
    return "<unknown token>"
}

// The token structure.
type yaml_token_t struct {
    // The token type.
    typ yaml_token_type_t

    // The start/end of the token.
    start_mark, end_mark yaml_mark_t

    // The stream encoding (for yaml_STREAM_START_TOKEN).
    encoding yaml_encoding_t

    // The alias/anchor/scalar value or tag/tag directive handle
    // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
    value []byte

    // The tag suffix (for yaml_TAG_TOKEN).
    suffix []byte

    // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
    prefix []byte

    // The scalar style (for yaml_SCALAR_TOKEN).
    style yaml_scalar_style_t

    // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
    major, minor int8
}

// Events

type yaml_event_type_t int8

// Event types.
const (
    // An empty event.
    yaml_NO_EVENT yaml_event_type_t = iota

    yaml_STREAM_START_EVENT   // A STREAM-START event.
    yaml_STREAM_END_EVENT     // A STREAM-END event.
    yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
    yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
    yaml_ALIAS_EVENT          // An ALIAS event.
    yaml_SCALAR_EVENT         // A SCALAR event.
    yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
    yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
    yaml_MAPPING_START_EVENT  // A MAPPING-START event.
    yaml_MAPPING_END_EVENT    // A MAPPING-END event.
)

var eventStrings = []string{
    yaml_NO_EVENT:             "none",
    yaml_STREAM_START_EVENT:   "stream start",
    yaml_STREAM_END_EVENT:     "stream end",
    yaml_DOCUMENT_START_EVENT: "document start",
    yaml_DOCUMENT_END_EVENT:   "document end",
    yaml_ALIAS_EVENT:          "alias",
    yaml_SCALAR_EVENT:         "scalar",
    yaml_SEQUENCE_START_EVENT: "sequence start",
    yaml_SEQUENCE_END_EVENT:   "sequence end",
    yaml_MAPPING_START_EVENT:  "mapping start",
    yaml_MAPPING_END_EVENT:    "mapping end",
}

func (e yaml_event_type_t) String() string {
    if e < 0 || int(e) >= len(eventStrings) {
        return fmt.Sprintf("unknown event %d", e)
    }
    return eventStrings[e]
}

// The event structure.
type yaml_event_t struct {

    // The event type.
    typ yaml_event_type_t

    // The start and end of the event.
    start_mark, end_mark yaml_mark_t

    // The document encoding (for yaml_STREAM_START_EVENT).
    encoding yaml_encoding_t

    // The version directive (for yaml_DOCUMENT_START_EVENT).
    version_directive *yaml_version_directive_t

    // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
    tag_directives []yaml_tag_directive_t

    // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
    anchor []byte

    // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
    tag []byte

    // The scalar value (for yaml_SCALAR_EVENT).
    value []byte

    // Is the document start/end indicator implicit, or the tag optional?
    // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
    implicit bool

    // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
    quoted_implicit bool

    // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
    style yaml_style_t
}

func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }

// Nodes

const (
    yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
    yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
    yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
    yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
    yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
    yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.

    yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
    yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.

    // Not in original libyaml.
    yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
    yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"

    yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
    yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
    yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
)

type yaml_node_type_t int

// Node types.
const (
    // An empty node.
    yaml_NO_NODE yaml_node_type_t = iota

    yaml_SCALAR_NODE   // A scalar node.
    yaml_SEQUENCE_NODE // A sequence node.
    yaml_MAPPING_NODE  // A mapping node.
)

// An element of a sequence node.
type yaml_node_item_t int

// An element of a mapping node.
type yaml_node_pair_t struct {
    key   int // The key of the element.
    value int // The value of the element.
}

// The node structure.
type yaml_node_t struct {
    typ yaml_node_type_t // The node type.
    tag []byte           // The node tag.

    // The node data.

    // The scalar parameters (for yaml_SCALAR_NODE).
    scalar struct {
        value  []byte              // The scalar value.
        length int                 // The length of the scalar value.
        style  yaml_scalar_style_t // The scalar style.
    }

    // The sequence parameters (for YAML_SEQUENCE_NODE).
    sequence struct {
        items_data []yaml_node_item_t    // The stack of sequence items.
        style      yaml_sequence_style_t // The sequence style.
    }

    // The mapping parameters (for yaml_MAPPING_NODE).
    mapping struct {
        pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
        pairs_start *yaml_node_pair_t    // The beginning of the stack.
        pairs_end   *yaml_node_pair_t    // The end of the stack.
        pairs_top   *yaml_node_pair_t    // The top of the stack.
        style       yaml_mapping_style_t // The mapping style.
    }

    start_mark yaml_mark_t // The beginning of the node.
    end_mark   yaml_mark_t // The end of the node.

}

// The document structure.
type yaml_document_t struct {

    // The document nodes.
    nodes []yaml_node_t

    // The version directive.
    version_directive *yaml_version_directive_t

    // The list of tag directives.
    tag_directives_data  []yaml_tag_directive_t
    tag_directives_start int // The beginning of the tag directives list.
    tag_directives_end   int // The end of the tag directives list.

    start_implicit int // Is the document start indicator implicit?
    end_implicit   int // Is the document end indicator implicit?

    // The start/end of the document.
    start_mark, end_mark yaml_mark_t
}

// The prototype of a read handler.
//
// The read handler is called when the parser needs to read more bytes from the
// source. The handler should write not more than size bytes to the buffer.
// The number of written bytes should be set to the size_read variable.
//
// [in,out]   data        A pointer to an application data specified by
//                        yaml_parser_set_input().
// [out]      buffer      The buffer to write the data from the source.
// [in]       size        The size of the buffer.
// [out]      size_read   The actual number of bytes read from the source.
//
// On success, the handler should return 1. If the handler failed,
// the returned value should be 0. On EOF, the handler should set the
// size_read to 0 and return 1.
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
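The comment above keeps libyaml's C-flavored contract (size_read, return 1/0), but the Go func type already encodes it: fill buffer, return the byte count, and report end-of-input with io.EOF. A hypothetical handler over an io.Reader, mirroring what the reader-based parser wires up (the name is illustrative, not the library's internals):

// Illustrative only: a read handler satisfying yaml_read_handler_t by
// delegating to an io.Reader. io.Reader's contract already matches: n bytes
// are written into buffer, and io.EOF signals that the source is exhausted.
func example_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
    return parser.input_reader.Read(buffer)
}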

// This structure holds information about a potential simple key.
type yaml_simple_key_t struct {
    possible     bool        // Is a simple key possible?
    required     bool        // Is a simple key required?
    token_number int         // The number of the token.
    mark         yaml_mark_t // The position mark.
}

// The states of the parser.
type yaml_parser_state_t int

const (
    yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota

    yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
    yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
    yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
    yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
    yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
    yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
    yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
    yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
    yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
    yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
    yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
    yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
    yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
    yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
    yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
    yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
    yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
    yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
    yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
    yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
    yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
    yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
    yaml_PARSE_END_STATE                               // Expect nothing.
)

func (ps yaml_parser_state_t) String() string {
    switch ps {
    case yaml_PARSE_STREAM_START_STATE:
        return "yaml_PARSE_STREAM_START_STATE"
    case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
        return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
    case yaml_PARSE_DOCUMENT_START_STATE:
        return "yaml_PARSE_DOCUMENT_START_STATE"
    case yaml_PARSE_DOCUMENT_CONTENT_STATE:
        return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
    case yaml_PARSE_DOCUMENT_END_STATE:
        return "yaml_PARSE_DOCUMENT_END_STATE"
    case yaml_PARSE_BLOCK_NODE_STATE:
        return "yaml_PARSE_BLOCK_NODE_STATE"
    case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
        return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
    case yaml_PARSE_FLOW_NODE_STATE:
        return "yaml_PARSE_FLOW_NODE_STATE"
    case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
        return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
    case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
        return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
    case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
        return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
    case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
        return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
    case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
        return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
    case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
        return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
    case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
        return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
    case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
        return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
    case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
        return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
    case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
        return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
    case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
        return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
    case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
        return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
    case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
        return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
    case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
        return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
    case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
        return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
    case yaml_PARSE_END_STATE:
        return "yaml_PARSE_END_STATE"
    }
    return "<unknown parser state>"
}

// This structure holds aliases data.
type yaml_alias_data_t struct {
    anchor []byte      // The anchor.
    index  int         // The node id.
    mark   yaml_mark_t // The anchor mark.
}

// The parser structure.
//
// All members are internal. Manage the structure using the
// yaml_parser_ family of functions.
type yaml_parser_t struct {

    // Error handling

    error yaml_error_type_t // Error type.

    problem string // Error description.

    // The byte about which the problem occurred.
    problem_offset int
    problem_value  int
    problem_mark   yaml_mark_t

    // The error context.
    context      string
    context_mark yaml_mark_t

    // Reader stuff

    read_handler yaml_read_handler_t // Read handler.

    input_reader io.Reader // File input data.
    input        []byte    // String input data.
    input_pos    int

    eof bool // EOF flag

    buffer     []byte // The working buffer.
    buffer_pos int    // The current position of the buffer.

    unread int // The number of unread characters in the buffer.

    raw_buffer     []byte // The raw buffer.
    raw_buffer_pos int    // The current position of the buffer.

    encoding yaml_encoding_t // The input encoding.

    offset int         // The offset of the current position (in bytes).
    mark   yaml_mark_t // The mark of the current position.

    // Scanner stuff

    stream_start_produced bool // Have we started to scan the input stream?
    stream_end_produced   bool // Have we reached the end of the input stream?

    flow_level int // The number of unclosed '[' and '{' indicators.

    tokens          []yaml_token_t // The tokens queue.
    tokens_head     int            // The head of the tokens queue.
    tokens_parsed   int            // The number of tokens fetched from the queue.
    token_available bool           // Does the tokens queue contain a token ready for dequeueing.

    indent  int   // The current indentation level.
    indents []int // The indentation levels stack.

    simple_key_allowed bool                // May a simple key occur at the current position?
    simple_keys        []yaml_simple_key_t // The stack of simple keys.
    simple_keys_by_tok map[int]int         // possible simple_key indexes indexed by token_number

    // Parser stuff

    state          yaml_parser_state_t    // The current parser state.
    states         []yaml_parser_state_t  // The parser states stack.
    marks          []yaml_mark_t          // The stack of marks.
    tag_directives []yaml_tag_directive_t // The list of TAG directives.

    // Dumper stuff

    aliases []yaml_alias_data_t // The alias data.

    document *yaml_document_t // The currently parsed document.
}

// Emitter Definitions

// The prototype of a write handler.
//
// The write handler is called when the emitter needs to flush the accumulated
// characters to the output. The handler should write @a size bytes of the
// @a buffer to the output.
//
// @param[in,out]   data     A pointer to an application data specified by
//                           yaml_emitter_set_output().
// @param[in]       buffer   The buffer with bytes to be written.
// @param[in]       size     The size of the buffer.
//
// @returns On success, the handler should return @c 1. If the handler failed,
// the returned value should be @c 0.
//
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error

type yaml_emitter_state_t int

// The emitter states.
const (
    // Expect STREAM-START.
    yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota

    yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
    yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
    yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
    yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
    yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
    yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
    yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
    yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
    yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
    yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
    yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
    yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
    yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
    yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
    yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
    yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
    yaml_EMIT_END_STATE                        // Expect nothing.
)

// The emitter structure.
//
// All members are internal. Manage the structure using the @c yaml_emitter_
// family of functions.
type yaml_emitter_t struct {

    // Error handling

    error   yaml_error_type_t // Error type.
    problem string            // Error description.

    // Writer stuff

    write_handler yaml_write_handler_t // Write handler.

    output_buffer *[]byte   // String output data.
    output_writer io.Writer // File output data.

    buffer     []byte // The working buffer.
    buffer_pos int    // The current position of the buffer.

    raw_buffer     []byte // The raw buffer.
    raw_buffer_pos int    // The current position of the buffer.

    encoding yaml_encoding_t // The stream encoding.

    // Emitter stuff

    canonical   bool         // If the output is in the canonical style?
    best_indent int          // The number of indentation spaces.
    best_width  int          // The preferred width of the output lines.
    unicode     bool         // Allow unescaped non-ASCII characters?
    line_break  yaml_break_t // The preferred line break.

    state  yaml_emitter_state_t   // The current emitter state.
    states []yaml_emitter_state_t // The stack of states.

    events      []yaml_event_t // The event queue.
    events_head int            // The head of the event queue.

    indents []int // The stack of indentation levels.

    tag_directives []yaml_tag_directive_t // The list of tag directives.

    indent int // The current indentation level.

    flow_level int // The current flow level.

    root_context       bool // Is it the document root context?
    sequence_context   bool // Is it a sequence context?
    mapping_context    bool // Is it a mapping context?
    simple_key_context bool // Is it a simple mapping key context?

    line       int  // The current line.
    column     int  // The current column.
    whitespace bool // If the last character was a whitespace?
    indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
    open_ended bool // If an explicit document end is required?

    // Anchor analysis.
    anchor_data struct {
        anchor []byte // The anchor value.
        alias  bool   // Is it an alias?
    }

    // Tag analysis.
    tag_data struct {
        handle []byte // The tag handle.
        suffix []byte // The tag suffix.
    }

    // Scalar analysis.
    scalar_data struct {
        value                 []byte              // The scalar value.
        multiline             bool                // Does the scalar contain line breaks?
        flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
        block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
        single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
        block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
        style                 yaml_scalar_style_t // The output style.
    }

    // Dumper stuff

    opened bool // If the stream was already opened?
    closed bool // If the stream was already closed?

    // The information associated with the document nodes.
    anchors *struct {
        references int  // The number of references.
        anchor     int  // The anchor id.
        serialized bool // If the node has been emitted?
    }

    last_anchor_id int // The last assigned anchor id.

    document *yaml_document_t // The currently emitted document.
}
173
api/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go
generated
vendored
Normal file
@@ -0,0 +1,173 @@
package yaml

const (
    // The size of the input raw buffer.
    input_raw_buffer_size = 512

    // The size of the input buffer.
    // It should be possible to decode the whole raw buffer.
    input_buffer_size = input_raw_buffer_size * 3

    // The size of the output buffer.
    output_buffer_size = 128

    // The size of the output raw buffer.
    // It should be possible to encode the whole output buffer.
    output_raw_buffer_size = (output_buffer_size*2 + 2)

    // The size of other stacks and queues.
    initial_stack_size  = 16
    initial_queue_size  = 16
    initial_string_size = 16
)

// Check if the character at the specified position is an alphabetical
// character, a digit, '_', or '-'.
func is_alpha(b []byte, i int) bool {
    return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
}

// Check if the character at the specified position is a digit.
func is_digit(b []byte, i int) bool {
    return b[i] >= '0' && b[i] <= '9'
}

// Get the value of a digit.
func as_digit(b []byte, i int) int {
    return int(b[i]) - '0'
}

// Check if the character at the specified position is a hex-digit.
func is_hex(b []byte, i int) bool {
    return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
}

// Get the value of a hex-digit.
func as_hex(b []byte, i int) int {
    bi := b[i]
    if bi >= 'A' && bi <= 'F' {
        return int(bi) - 'A' + 10
    }
    if bi >= 'a' && bi <= 'f' {
        return int(bi) - 'a' + 10
    }
    return int(bi) - '0'
}

// Check if the character is ASCII.
func is_ascii(b []byte, i int) bool {
    return b[i] <= 0x7F
}

// Check if the character at the start of the buffer can be printed unescaped.
func is_printable(b []byte, i int) bool {
    return ((b[i] == 0x0A) || // . == #x0A
        (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
        (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
        (b[i] > 0xC2 && b[i] < 0xED) ||
        (b[i] == 0xED && b[i+1] < 0xA0) ||
        (b[i] == 0xEE) ||
        (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
            !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
            !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
}

// Check if the character at the specified position is NUL.
func is_z(b []byte, i int) bool {
    return b[i] == 0x00
}

// Check if the beginning of the buffer is a BOM.
func is_bom(b []byte, i int) bool {
    return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
}

// Check if the character at the specified position is space.
func is_space(b []byte, i int) bool {
    return b[i] == ' '
}

// Check if the character at the specified position is tab.
func is_tab(b []byte, i int) bool {
    return b[i] == '\t'
}

// Check if the character at the specified position is blank (space or tab).
func is_blank(b []byte, i int) bool {
    //return is_space(b, i) || is_tab(b, i)
    return b[i] == ' ' || b[i] == '\t'
}

// Check if the character at the specified position is a line break.
func is_break(b []byte, i int) bool {
    return (b[i] == '\r' || // CR (#xD)
        b[i] == '\n' || // LF (#xA)
        b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
}

func is_crlf(b []byte, i int) bool {
    return b[i] == '\r' && b[i+1] == '\n'
}

// Check if the character is a line break or NUL.
func is_breakz(b []byte, i int) bool {
    //return is_break(b, i) || is_z(b, i)
    return ( // is_break:
    b[i] == '\r' || // CR (#xD)
        b[i] == '\n' || // LF (#xA)
        b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
        // is_z:
        b[i] == 0)
}

// Check if the character is a line break, space, or NUL.
func is_spacez(b []byte, i int) bool {
    //return is_space(b, i) || is_breakz(b, i)
    return ( // is_space:
    b[i] == ' ' ||
        // is_breakz:
        b[i] == '\r' || // CR (#xD)
        b[i] == '\n' || // LF (#xA)
        b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
        b[i] == 0)
}

// Check if the character is a line break, space, tab, or NUL.
func is_blankz(b []byte, i int) bool {
    //return is_blank(b, i) || is_breakz(b, i)
    return ( // is_blank:
    b[i] == ' ' || b[i] == '\t' ||
        // is_breakz:
        b[i] == '\r' || // CR (#xD)
        b[i] == '\n' || // LF (#xA)
        b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
        b[i] == 0)
}

// Determine the width of the character.
func width(b byte) int {
    // Don't replace these by a switch without first
    // confirming that it is being inlined.
    if b&0x80 == 0x00 {
        return 1
    }
    if b&0xE0 == 0xC0 {
        return 2
    }
    if b&0xF0 == 0xE0 {
        return 3
    }
    if b&0xF8 == 0xF0 {
        return 4
    }
    return 0

}
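width classifies a byte as the start of a one- to four-byte UTF-8 sequence, returning 0 for continuation or invalid leading bytes. A quick standalone check against the standard library; utf8Width is a hypothetical copy of the helper above:

package main

import (
    "fmt"
    "unicode/utf8"
)

// utf8Width mirrors the width helper above: the high bits of a leading
// byte encode the sequence length; continuation bytes yield 0.
func utf8Width(b byte) int {
    switch {
    case b&0x80 == 0x00:
        return 1
    case b&0xE0 == 0xC0:
        return 2
    case b&0xF0 == 0xE0:
        return 3
    case b&0xF8 == 0xF0:
        return 4
    }
    return 0
}

func main() {
    for _, s := range []string{"a", "é", "世", "🙂"} {
        b := []byte(s)
        r, n := utf8.DecodeRune(b)
        fmt.Printf("%q leading byte 0x%X: width=%d, utf8 says %d (rune %U)\n",
            s, b[0], utf8Width(b[0]), n, r)
    }
}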